| file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
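Each row stores one source file split into three pieces: concatenating `prefix`, `middle`, and `suffix` in that order reconstructs the original file, and `fim_type` records which of the four hole-selection strategies produced the split (`conditional_block`, `identifier_name`, `identifier_body`, or `random_line_split`). Note that the rows below follow the column order `file_name | prefix | suffix | middle | fim_type`, so the `middle` text appears after the suffix in each row even though it belongs between `prefix` and `suffix` in the reconstructed file. A minimal reassembly sketch; the `Row` struct below simply mirrors the columns above and is hypothetical, since the dataset itself defines only the string columns:

```rust
// Hypothetical in-memory form of one dataset row; field names mirror the columns.
struct Row {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl Row {
    /// Rebuild the original file text: prefix + middle + suffix.
    fn reassemble(&self) -> String {
        let mut out = String::with_capacity(
            self.prefix.len() + self.middle.len() + self.suffix.len(),
        );
        out.push_str(&self.prefix);
        out.push_str(&self.middle);
        out.push_str(&self.suffix);
        out
    }
}
```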
mod.rs | use super::{Event, Ins, Prop, Mod, FDVar, Propagator, LeXY, GeXY, LeXYC, GeXYC, LeXC, GeXC};
use std::rc::{Rc, Weak};
/// X = Y
pub struct EqXY;
impl EqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
// TODO merge or at least intersect domains
LeXY::new(model.clone(), x.clone(), y.clone());
GeXY::new(model, x, y);
}
}
/// X = Y + C
pub struct EqXYC;
impl EqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
// TODO merge or at least intersect domains
LeXYC::new(model.clone(), x.clone(), y.clone(), c);
GeXYC::new(model, x, y, c);
}
}
/// X = C
pub struct EqXC;
impl EqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
// TODO merge
LeXC::new(model.clone(), x.clone(), c);
GeXC::new(model, x, c);
}
}
/// X != Y
pub struct NeqXY;
impl NeqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
NeqXYCxy::new(model, x, y, 0);
}
}
/// X != Y + C
pub struct NeqXYC;
impl NeqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
NeqXYCxy::new(model, x, y, c);
}
}
/// X != C
pub struct NeqXC;
#[allow(unused_variable)]
impl NeqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
x.remove(c);
}
}
struct NeqXYCxy : Prop {
c: int
}
impl NeqXYCxy {
fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
let id = model.propagators.borrow().len();
let this = NeqXYCxy { model: model.downgrade(), id: id, vars: vec![x, y], c: c};
let p = Rc::new((box this) as Box<Propagator>);
model.add_prop(p);
}
fn x(&self) -> Rc<FDVar> {
self.vars.get(0).clone()
}
fn y(&self) -> Rc<FDVar> {
self.vars.get(1).clone()
}
}
impl Propagator for NeqXYCxy {
fn id(&self) -> uint {
self.id
}
fn model(&self) -> Weak<Mod> {
self.model.clone()
}
fn events(&self) -> Vec<(uint, Event)> {
vec![(self.y().id, Ins), (self.x().id, Ins)]
}
fn propagate(&self) -> Vec<uint> {
if self.x().is_instanciated() |
else if self.y().is_instanciated() {
self.unregister();
self.x().remove(self.y().min() + self.c)
} else {
vec![]
}
}
}
#[cfg(test)]
mod tests;
| {
self.unregister();
self.y().remove(self.x().min() - self.c)
} | conditional_block |
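In the file above, equality constraints are composed from pairs of inequality propagators (`EqXY` posts `LeXY` plus `GeXY`, `EqXYC` the `*XYC` pair, `EqXC` the `*XC` pair), while every disequality funnels into the single `NeqXYCxy` propagator with an offset `c`. A minimal posting sketch using those constructors exactly as declared; `Mod::new()` and `new_var(..)` are hypothetical stand-ins, since model and variable construction are not part of this file:

```rust
// Assumed setup: a model and two finite-domain variables. Mod::new() and
// new_var() are NOT defined in the file above; they stand in for whatever
// the surrounding crate provides.
let model: Rc<Mod> = Mod::new();
let x: Rc<FDVar> = model.new_var(0, 9);
let y: Rc<FDVar> = model.new_var(0, 9);

// Constructors from this module, with the signatures shown above.
EqXYC::new(model.clone(), x.clone(), y.clone(), 3); // X = Y + 3 (LeXYC + GeXYC)
NeqXC::new(model.clone(), x.clone(), 7);            // X != 7 (direct domain removal)
NeqXY::new(model, x, y);                            // X != Y (NeqXYCxy with c = 0)
```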
mod.rs | use super::{Event, Ins, Prop, Mod, FDVar, Propagator, LeXY, GeXY, LeXYC, GeXYC, LeXC, GeXC};
use std::rc::{Rc, Weak};
/// X = Y
pub struct EqXY;
impl EqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
// TODO merge or at least intersect domains
LeXY::new(model.clone(), x.clone(), y.clone());
GeXY::new(model, x, y);
}
}
/// X = Y + C
pub struct EqXYC;
impl EqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
// TODO merge or at least intersect domains
LeXYC::new(model.clone(), x.clone(), y.clone(), c);
GeXYC::new(model, x, y, c);
}
}
/// X = C
pub struct EqXC;
impl EqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
// TODO merge
LeXC::new(model.clone(), x.clone(), c);
GeXC::new(model, x, c);
}
}
/// X != Y
pub struct NeqXY;
impl NeqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
NeqXYCxy::new(model, x, y, 0);
}
}
/// X != Y + C
pub struct NeqXYC;
impl NeqXYC {
pub fn | (model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
NeqXYCxy::new(model, x, y, c);
}
}
/// X != C
pub struct NeqXC;
#[allow(unused_variable)]
impl NeqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
x.remove(c);
}
}
struct NeqXYCxy : Prop {
c: int
}
impl NeqXYCxy {
fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
let id = model.propagators.borrow().len();
let this = NeqXYCxy { model: model.downgrade(), id: id, vars: vec![x, y], c: c};
let p = Rc::new((box this) as Box<Propagator>);
model.add_prop(p);
}
fn x(&self) -> Rc<FDVar> {
self.vars.get(0).clone()
}
fn y(&self) -> Rc<FDVar> {
self.vars.get(1).clone()
}
}
impl Propagator for NeqXYCxy {
fn id(&self) -> uint {
self.id
}
fn model(&self) -> Weak<Mod> {
self.model.clone()
}
fn events(&self) -> Vec<(uint, Event)> {
vec![(self.y().id, Ins), (self.x().id, Ins)]
}
fn propagate(&self) -> Vec<uint> {
if self.x().is_instanciated() {
self.unregister();
self.y().remove(self.x().min() - self.c)
}
else if self.y().is_instanciated() {
self.unregister();
self.x().remove(self.y().min() + self.c)
} else {
vec![]
}
}
}
#[cfg(test)]
mod tests;
| new | identifier_name |
mod.rs | use super::{Event, Ins, Prop, Mod, FDVar, Propagator, LeXY, GeXY, LeXYC, GeXYC, LeXC, GeXC};
use std::rc::{Rc, Weak};
/// X = Y
pub struct EqXY;
impl EqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
// TODO merge or at least intersect domains
LeXY::new(model.clone(), x.clone(), y.clone());
GeXY::new(model, x, y);
}
}
/// X = Y + C
pub struct EqXYC;
impl EqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
// TODO merge or at least intersect domains
LeXYC::new(model.clone(), x.clone(), y.clone(), c);
GeXYC::new(model, x, y, c);
}
}
/// X = C
pub struct EqXC;
impl EqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
// TODO merge
LeXC::new(model.clone(), x.clone(), c);
GeXC::new(model, x, c);
}
}
/// X != Y
pub struct NeqXY;
impl NeqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
NeqXYCxy::new(model, x, y, 0);
}
}
/// X != Y + C
pub struct NeqXYC;
impl NeqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
NeqXYCxy::new(model, x, y, c);
}
}
/// X != C
pub struct NeqXC;
#[allow(unused_variable)]
impl NeqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
x.remove(c);
}
}
struct NeqXYCxy : Prop {
c: int
}
impl NeqXYCxy { | let p = Rc::new((box this) as Box<Propagator>);
model.add_prop(p);
}
fn x(&self) -> Rc<FDVar> {
self.vars.get(0).clone()
}
fn y(&self) -> Rc<FDVar> {
self.vars.get(1).clone()
}
}
impl Propagator for NeqXYCxy {
fn id(&self) -> uint {
self.id
}
fn model(&self) -> Weak<Mod> {
self.model.clone()
}
fn events(&self) -> Vec<(uint, Event)> {
vec![(self.y().id, Ins), (self.x().id, Ins)]
}
fn propagate(&self) -> Vec<uint> {
if self.x().is_instanciated() {
self.unregister();
self.y().remove(self.x().min() - self.c)
}
else if self.y().is_instanciated() {
self.unregister();
self.x().remove(self.y().min() + self.c)
} else {
vec![]
}
}
}
#[cfg(test)]
mod tests; | fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
let id = model.propagators.borrow().len();
let this = NeqXYCxy { model: model.downgrade(), id: id, vars: vec![x, y], c: c}; | random_line_split |
config.rs | extern crate serde_json;
use log::LogLevelFilter;
use logger::MetricsLoggerFactory;
use logger::MetricsLogger;
use self::serde_json::Value;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::collections::BTreeMap;
// This is the config file that reads all the json from metricsconfig.json. We can initially use
// an environment variable to locate this file or can be passed in.
// The worker thread and the app thread will both read from this file.
#[allow(non_upper_case_globals)]
const logger: fn() -> &'static MetricsLogger = MetricsLoggerFactory::get_logger;
pub struct Config {
parsed_json: Option<BTreeMap<String, Value>>,
}
impl Config {
pub fn new() -> Config {
Config { parsed_json: None }
}
pub fn create_and_write_json(&mut self, file_name: &str, json: &str) {
logger().log(LogLevelFilter::Debug,
format!("file: {}", file_name).as_str());
let f = File::create(file_name);
match f {
Ok(mut t) => {
let _ = t.write(json.as_bytes());
}
Err(e) => panic!("cannot open file: {}", e),
};
}
pub fn init(&mut self, file_name: &str) -> bool {
// TODO: Need to make this look at env variable or take a path to the file.
logger().log(LogLevelFilter::Debug,
format!("config file: {}", file_name).as_str());
let path = Path::new(file_name);
let display = path.display();
// Open the path in read-only mode.
let mut file = match File::open(&path) {
Err(why) => {
logger().log(LogLevelFilter::Error,
format!("couldn't open {}: {}", display, Error::description(&why))
.as_str());
return false;
}
Ok(file) => file,
};
// Read the file contents into a string, returns `io::Result<usize>`
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => {
logger().log(LogLevelFilter::Error, format!("Error: {}", why).as_str());
return false;
}
Ok(_) => {
logger().log(LogLevelFilter::Debug,
format!("file contains: {}", s).as_str())
}
}
self.parse_json(s);
true
}
fn parse_json(&mut self, json_string: String) {
// It's ok to unwrap here because if something is wrong here, we want to
// know and expose the bug.
let data: Value = serde_json::from_str(&json_string).unwrap();
self.parsed_json = Some(data.as_object().unwrap().clone());
}
pub fn get(&mut self, key: &str) -> Option<Value> {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
if val == None {
None
} else {
Some(val.unwrap().clone())
}
} else {
panic!("Data not parsed");
}
}
pub fn get_string(&mut self, key: &str) -> String {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::String(nv) => nv.clone(),
_ => panic!("Expected a String Value"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
pub fn get_u64(&mut self, key: &str) -> u64 {
println!("Getting u64 value for {}", key);
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::U64(nv) => nv.clone(),
_ => panic!("Expected a u64"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! config_file_found {
it "should open the config file when it exists" {
use std::fs;
let mut cfg = Config::new();
// Create sample config file
let file = "test.json";
cfg.create_and_write_json(file, "{\"cid\": \"123456\"}");
let found = cfg.init(file);
// No longer need the sample config file, delete it
match fs::remove_file(file) {
Ok(_) => println!("deleted file {}", file),
Err(e) => println!("Error deleting {}: {}", file, e)
}
assert_eq!(found, true);
}
it "should return false if config file not found" {
let mut cfg = Config::new();
let found = cfg.init("nosuchfile.json");
assert_eq!(found, false);
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! parsing_file {
before_each {
// If the import is removed, it will not compile, but it gives a warning
// unless you have the following line. Most likely a compiler bug.
#[allow(unused_imports)]
use config::serde_json::Value;
let s = r#"{ "sendInterval": 10,
"saveInterval": 2,
"startTime": 0,
"savePath": "testSavePath",
"logPath": "/Volumes/development/metrics_controller/log" }"#.to_string();
let mut cfg = Config::new();
cfg.parse_json(s);
}
it "get_u64 should return a u64 for an existing key" {
let start_time = cfg.get_u64("startTime");
assert_eq!(start_time, 0);
} |
failing "get_u64 should fail for a missing key" {
cfg.get_u64("start22Time");
}
it "get_string should return a string for an existing key" {
let save_path: String = cfg.get_string("savePath").to_string();
assert_eq!(save_path, "testSavePath");
}
failing "get_string should fail for a missing key" {
cfg.get_string("save22Path").to_string();
}
it "get should return a value for an existing key" {
match cfg.get("sendInterval") {
Some(v) => assert_eq!(v, Value::U64(10)),
None => {
assert!(false);
},
}
}
it "get should return None for a missing key" {
let val: Option<Value> = cfg.get("send22Interval");
match val {
Some(_) => assert!(false),
None => {
assert!(true);
},
}
}
} | random_line_split |
|
config.rs | extern crate serde_json;
use log::LogLevelFilter;
use logger::MetricsLoggerFactory;
use logger::MetricsLogger;
use self::serde_json::Value;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::collections::BTreeMap;
// This is the config file that reads all the json from metricsconfig.json. We can initially use
// an environment variable to locate this file or can be passed in.
// The worker thread and the app thread will both read from this file.
#[allow(non_upper_case_globals)]
const logger: fn() -> &'static MetricsLogger = MetricsLoggerFactory::get_logger;
pub struct Config {
parsed_json: Option<BTreeMap<String, Value>>,
}
impl Config {
pub fn new() -> Config {
Config { parsed_json: None }
}
pub fn create_and_write_json(&mut self, file_name: &str, json: &str) {
logger().log(LogLevelFilter::Debug,
format!("file: {}", file_name).as_str());
let f = File::create(file_name);
match f {
Ok(mut t) => {
let _ = t.write(json.as_bytes());
}
Err(e) => panic!("cannot open file: {}", e),
};
}
pub fn init(&mut self, file_name: &str) -> bool | Err(why) => {
logger().log(LogLevelFilter::Error, format!("Error: {}", why).as_str());
return false;
}
Ok(_) => {
logger().log(LogLevelFilter::Debug,
format!("file contains: {}", s).as_str())
}
}
self.parse_json(s);
true
}
fn parse_json(&mut self, json_string: String) {
// It's ok to unwrap here because if something is wrong here, we want to
// know and expose the bug.
let data: Value = serde_json::from_str(&json_string).unwrap();
self.parsed_json = Some(data.as_object().unwrap().clone());
}
pub fn get(&mut self, key: &str) -> Option<Value> {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
if val == None {
None
} else {
Some(val.unwrap().clone())
}
} else {
panic!("Data not parsed");
}
}
pub fn get_string(&mut self, key: &str) -> String {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::String(nv) => nv.clone(),
_ => panic!("Expected a String Value"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
pub fn get_u64(&mut self, key: &str) -> u64 {
println!("Getting u64 value for {}", key);
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::U64(nv) => nv.clone(),
_ => panic!("Expected a u64"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! config_file_found {
it "should open the config file when it exists" {
use std::fs;
let mut cfg = Config::new();
// Create sample config file
let file = "test.json";
cfg.create_and_write_json(file, "{\"cid\": \"123456\"}");
let found = cfg.init(file);
// No longer need the sample config file, delete it
match fs::remove_file(file) {
Ok(_) => println!("deleted file {}", file),
Err(e) => println!("Error deleting {}: {}", file, e)
}
assert_eq!(found, true);
}
it "should return false if config file not found" {
let mut cfg = Config::new();
let found = cfg.init("nosuchfile.json");
assert_eq!(found, false);
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! parsing_file {
before_each {
// If the import is removed, it will not compile, but it gives a warning
// unless you have the following line. Most likely a compiler bug.
#[allow(unused_imports)]
use config::serde_json::Value;
let s = r#"{ "sendInterval": 10,
"saveInterval": 2,
"startTime": 0,
"savePath": "testSavePath",
"logPath": "/Volumes/development/metrics_controller/log" }"#.to_string();
let mut cfg = Config::new();
cfg.parse_json(s);
}
it "get_u64 should return a u64 for an existing key" {
let start_time = cfg.get_u64("startTime");
assert_eq!(start_time, 0);
}
failing "get_u64 should fail for a missing key" {
cfg.get_u64("start22Time");
}
it "get_string should return a string for an existing key" {
let save_path: String = cfg.get_string("savePath").to_string();
assert_eq!(save_path, "testSavePath");
}
failing "get_string should fail for a missing key" {
cfg.get_string("save22Path").to_string();
}
it "get should return a value for an existing key" {
match cfg.get("sendInterval") {
Some(v) => assert_eq!(v, Value::U64(10)),
None => {
assert!(false);
},
}
}
it "get should return None for a missing key" {
let val: Option<Value> = cfg.get("send22Interval");
match val {
Some(_) => assert!(false),
None => {
assert!(true);
},
}
}
}
| {
// TODO: Need to make this look at env variable or take a path to the file.
logger().log(LogLevelFilter::Debug,
format!("config file: {}", file_name).as_str());
let path = Path::new(file_name);
let display = path.display();
// Open the path in read-only mode.
let mut file = match File::open(&path) {
Err(why) => {
logger().log(LogLevelFilter::Error,
format!("couldn't open {}: {}", display, Error::description(&why))
.as_str());
return false;
}
Ok(file) => file,
};
// Read the file contents into a string, returns `io::Result<usize>`
let mut s = String::new();
match file.read_to_string(&mut s) { | identifier_body |
config.rs | extern crate serde_json;
use log::LogLevelFilter;
use logger::MetricsLoggerFactory;
use logger::MetricsLogger;
use self::serde_json::Value;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::collections::BTreeMap;
// This is the config file that reads all the json from metricsconfig.json. We can initially use
// an environment variable to locate this file or can be passed in.
// The worker thread and the app thread will both read from this file.
#[allow(non_upper_case_globals)]
const logger: fn() -> &'static MetricsLogger = MetricsLoggerFactory::get_logger;
pub struct Config {
parsed_json: Option<BTreeMap<String, Value>>,
}
impl Config {
pub fn new() -> Config {
Config { parsed_json: None }
}
pub fn create_and_write_json(&mut self, file_name: &str, json: &str) {
logger().log(LogLevelFilter::Debug,
format!("file: {}", file_name).as_str());
let f = File::create(file_name);
match f {
Ok(mut t) => {
let _ = t.write(json.as_bytes());
}
Err(e) => panic!("cannot open file: {}", e),
};
}
pub fn init(&mut self, file_name: &str) -> bool {
// TODO: Need to make this look at env variable or take a path to the file.
logger().log(LogLevelFilter::Debug,
format!("config file: {}", file_name).as_str());
let path = Path::new(file_name);
let display = path.display();
// Open the path in read-only mode.
let mut file = match File::open(&path) {
Err(why) => {
logger().log(LogLevelFilter::Error,
format!("couldn't open {}: {}", display, Error::description(&why))
.as_str());
return false;
}
Ok(file) => file,
};
// Read the file contents into a string, returns `io::Result<usize>`
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => {
logger().log(LogLevelFilter::Error, format!("Error: {}", why).as_str());
return false;
}
Ok(_) => {
logger().log(LogLevelFilter::Debug,
format!("file contains: {}", s).as_str())
}
}
self.parse_json(s);
true
}
fn parse_json(&mut self, json_string: String) {
// It's ok to unwrap here because if something is wrong here, we want to
// know and expose the bug.
let data: Value = serde_json::from_str(&json_string).unwrap();
self.parsed_json = Some(data.as_object().unwrap().clone());
}
pub fn get(&mut self, key: &str) -> Option<Value> {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
if val == None {
None
} else {
Some(val.unwrap().clone())
}
} else {
panic!("Data not parsed");
}
}
pub fn | (&mut self, key: &str) -> String {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::String(nv) => nv.clone(),
_ => panic!("Expected a String Value"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
pub fn get_u64(&mut self, key: &str) -> u64 {
println!("Getting u64 value for {}", key);
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::U64(nv) => nv.clone(),
_ => panic!("Expected a u64"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! config_file_found {
it "should open the config file when it exists" {
use std::fs;
let mut cfg = Config::new();
// Create sample config file
let file = "test.json";
cfg.create_and_write_json(file, "{\"cid\": \"123456\"}");
let found = cfg.init(file);
// No longer need the sample config file, delete it
match fs::remove_file(file) {
Ok(_) => println!("deleted file {}", file),
Err(e) => println!("Error deleting {}: {}", file, e)
}
assert_eq!(found, true);
}
it "should return false if config file not found" {
let mut cfg = Config::new();
let found = cfg.init("nosuchfile.json");
assert_eq!(found, false);
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! parsing_file {
before_each {
// If the import is removed, it will not compile, but it gives a warning
// unless you have the following line. Most likely a compiler bug.
#[allow(unused_imports)]
use config::serde_json::Value;
let s = r#"{ "sendInterval": 10,
"saveInterval": 2,
"startTime": 0,
"savePath": "testSavePath",
"logPath": "/Volumes/development/metrics_controller/log" }"#.to_string();
let mut cfg = Config::new();
cfg.parse_json(s);
}
it "get_u64 should return a u64 for an existing key" {
let start_time = cfg.get_u64("startTime");
assert_eq!(start_time, 0);
}
failing "get_u64 should fail for a missing key" {
cfg.get_u64("start22Time");
}
it "get_string should return a string for an existing key" {
let save_path: String = cfg.get_string("savePath").to_string();
assert_eq!(save_path, "testSavePath");
}
failing "get_string should fail for a missing key" {
cfg.get_string("save22Path").to_string();
}
it "get should return a value for an existing key" {
match cfg.get("sendInterval") {
Some(v) => assert_eq!(v, Value::U64(10)),
None => {
assert!(false);
},
}
}
it "get should return None for a missing key" {
let val: Option<Value> = cfg.get("send22Interval");
match val {
Some(_) => assert!(false),
None => {
assert!(true);
},
}
}
}
| get_string | identifier_name |
config.rs | extern crate serde_json;
use log::LogLevelFilter;
use logger::MetricsLoggerFactory;
use logger::MetricsLogger;
use self::serde_json::Value;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::collections::BTreeMap;
// This is the config file that reads all the json from metricsconfig.json. We can initially use
// an environment variable to locate this file or can be passed in.
// The worker thread and the app thread will both read from this file.
#[allow(non_upper_case_globals)]
const logger: fn() -> &'static MetricsLogger = MetricsLoggerFactory::get_logger;
pub struct Config {
parsed_json: Option<BTreeMap<String, Value>>,
}
impl Config {
pub fn new() -> Config {
Config { parsed_json: None }
}
pub fn create_and_write_json(&mut self, file_name: &str, json: &str) {
logger().log(LogLevelFilter::Debug,
format!("file: {}", file_name).as_str());
let f = File::create(file_name);
match f {
Ok(mut t) => |
Err(e) => panic!("cannot open file: {}", e),
};
}
pub fn init(&mut self, file_name: &str) -> bool {
// TODO: Need to make this look at env variable or take a path to the file.
logger().log(LogLevelFilter::Debug,
format!("config file: {}", file_name).as_str());
let path = Path::new(file_name);
let display = path.display();
// Open the path in read-only mode.
let mut file = match File::open(&path) {
Err(why) => {
logger().log(LogLevelFilter::Error,
format!("couldn't open {}: {}", display, Error::description(&why))
.as_str());
return false;
}
Ok(file) => file,
};
// Read the file contents into a string, returns `io::Result<usize>`
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => {
logger().log(LogLevelFilter::Error, format!("Error: {}", why).as_str());
return false;
}
Ok(_) => {
logger().log(LogLevelFilter::Debug,
format!("file contains: {}", s).as_str())
}
}
self.parse_json(s);
true
}
fn parse_json(&mut self, json_string: String) {
// It's ok to unwrap here because if something is wrong here, we want to
// know and expose the bug.
let data: Value = serde_json::from_str(&json_string).unwrap();
self.parsed_json = Some(data.as_object().unwrap().clone());
}
pub fn get(&mut self, key: &str) -> Option<Value> {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
if val == None {
None
} else {
Some(val.unwrap().clone())
}
} else {
panic!("Data not parsed");
}
}
pub fn get_string(&mut self, key: &str) -> String {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::String(nv) => nv.clone(),
_ => panic!("Expected a String Value"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
pub fn get_u64(&mut self, key: &str) -> u64 {
println!("Getting u64 value for {}", key);
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::U64(nv) => nv.clone(),
_ => panic!("Expected a u64"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! config_file_found {
it "should open the config file when it exists" {
use std::fs;
let mut cfg = Config::new();
// Create sample config file
let file = "test.json";
cfg.create_and_write_json(file, "{\"cid\": \"123456\"}");
let found = cfg.init(file);
// No longer need the sample config file, delete it
match fs::remove_file(file) {
Ok(_) => println!("deleted file {}", file),
Err(e) => println!("Error deleting {}: {}", file, e)
}
assert_eq!(found, true);
}
it "should return false if config file not found" {
let mut cfg = Config::new();
let found = cfg.init("nosuchfile.json");
assert_eq!(found, false);
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! parsing_file {
before_each {
// If the import is removed, it will not compile, but it gives a warning
// unless you have the following line. Most likely a compiler bug.
#[allow(unused_imports)]
use config::serde_json::Value;
let s = r#"{ "sendInterval": 10,
"saveInterval": 2,
"startTime": 0,
"savePath": "testSavePath",
"logPath": "/Volumes/development/metrics_controller/log" }"#.to_string();
let mut cfg = Config::new();
cfg.parse_json(s);
}
it "get_u64 should return a u64 for an existing key" {
let start_time = cfg.get_u64("startTime");
assert_eq!(start_time, 0);
}
failing "get_u64 should fail for a missing key" {
cfg.get_u64("start22Time");
}
it "get_string should return a string for an existing key" {
let save_path: String = cfg.get_string("savePath").to_string();
assert_eq!(save_path, "testSavePath");
}
failing "get_string should fail for a missing key" {
cfg.get_string("save22Path").to_string();
}
it "get should return a value for an existing key" {
match cfg.get("sendInterval") {
Some(v) => assert_eq!(v, Value::U64(10)),
None => {
assert!(false);
},
}
}
it "get should return None for a missing key" {
let val: Option<Value> = cfg.get("send22Interval");
match val {
Some(_) => assert!(false),
None => {
assert!(true);
},
}
}
}
| {
let _ = t.write(json.as_bytes());
} | conditional_block |
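A short usage sketch for the `Config` type defined above, touching only the methods it declares (`new`, `create_and_write_json`, `init`, `get`, `get_string`, `get_u64`); the file name and JSON payload are illustrative, and lookups follow the type's own convention of panicking when a requested key is missing:

```rust
let mut cfg = Config::new();

// Write a sample config to disk, then load and parse it.
cfg.create_and_write_json("metricsconfig.json",
    r#"{ "sendInterval": 10, "savePath": "testSavePath" }"#);

if cfg.init("metricsconfig.json") {
    let interval = cfg.get_u64("sendInterval");   // 10; panics if the key is absent
    let path = cfg.get_string("savePath");        // "testSavePath"; panics if absent
    let missing = cfg.get("noSuchKey");           // None instead of a panic
    println!("send every {}s, save to {} ({:?})", interval, path, missing);
}
```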
unix.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Named pipes
This module contains the ability to communicate over named pipes with
synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
while on Unix it corresponds to UNIX domain sockets.
These pipes are similar to TCP in the sense that you can have both a stream to a
server and a server itself. The server provided accepts other `UnixStream`
instances as clients.
*/
#![allow(missing_doc)]
use prelude::*;
use c_str::ToCStr;
use clone::Clone;
use io::pipe::PipeStream;
use io::{Listener, Acceptor, Reader, Writer, IoResult};
use kinds::Send;
use rt::rtio::{IoFactory, LocalIo, RtioUnixListener};
use rt::rtio::{RtioUnixAcceptor, RtioPipe};
/// A stream which communicates over a named pipe.
pub struct UnixStream {
obj: PipeStream,
}
impl UnixStream {
fn new(obj: ~RtioPipe:Send) -> UnixStream {
UnixStream { obj: PipeStream::new(obj) }
}
/// Connect to a pipe named by `path`. This will attempt to open a
/// connection to the underlying socket.
///
/// The returned stream will be closed when the object falls out of scope.
///
/// # Example
///
/// ```rust
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixStream;
///
/// let server = Path::new("path/to/my/socket");
/// let mut stream = UnixStream::connect(&server);
/// stream.write([1, 2, 3]);
/// ```
pub fn connect<P: ToCStr>(path: &P) -> IoResult<UnixStream> {
LocalIo::maybe_raise(|io| {
io.unix_connect(&path.to_c_str()).map(UnixStream::new)
})
}
}
impl Clone for UnixStream {
fn clone(&self) -> UnixStream {
UnixStream { obj: self.obj.clone() }
}
}
impl Reader for UnixStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.obj.read(buf) }
}
impl Writer for UnixStream {
fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.obj.write(buf) }
}
/// A value that can listen for incoming named pipe connection requests.
pub struct UnixListener {
/// The internal, opaque runtime Unix listener.
obj: ~RtioUnixListener:Send,
}
impl UnixListener {
/// Creates a new listener, ready to receive incoming connections on the
/// specified socket. The server will be named by `path`.
///
/// This listener will be closed when it falls out of scope.
///
/// # Example
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixListener;
/// use std::io::{Listener, Acceptor};
///
/// let server = Path::new("/path/to/my/socket");
/// let stream = UnixListener::bind(&server);
/// for mut client in stream.listen().incoming() {
/// client.write([1, 2, 3, 4]);
/// }
/// # }
/// ```
pub fn bind<P: ToCStr>(path: &P) -> IoResult<UnixListener> {
LocalIo::maybe_raise(|io| {
io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
})
}
}
impl Listener<UnixStream, UnixAcceptor> for UnixListener {
fn listen(self) -> IoResult<UnixAcceptor> {
self.obj.listen().map(|obj| UnixAcceptor { obj: obj })
}
}
/// A value that can accept named pipe connections, returned from `listen()`.
pub struct UnixAcceptor {
/// The internal, opaque runtime Unix acceptor.
obj: ~RtioUnixAcceptor:Send,
}
impl Acceptor<UnixStream> for UnixAcceptor {
fn accept(&mut self) -> IoResult<UnixStream> {
self.obj.accept().map(UnixStream::new)
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::*;
use io::*;
use io::test::*;
pub fn smalltest(server: proc(UnixStream):Send, client: proc(UnixStream):Send) {
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = UnixListener::bind(&path1).listen();
spawn(proc() {
match UnixStream::connect(&path2) {
Ok(c) => client(c),
Err(e) => fail!("failed connect: {}", e),
}
});
match acceptor.accept() {
Ok(c) => server(c),
Err(e) => fail!("failed accept: {}", e),
}
}
iotest!(fn bind_error() {
let path = "path/to/nowhere";
match UnixListener::bind(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == PermissionDenied || e.kind == FileNotFound ||
e.kind == InvalidInput);
}
}
})
iotest!(fn connect_error() {
let path = if cfg!(windows) {
r"\\.\pipe\this_should_not_exist_ever"
} else {
"path/to/nowhere"
};
match UnixStream::connect(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == FileNotFound || e.kind == OtherIoError);
}
}
})
iotest!(fn smoke() {
smalltest(proc(mut server) {
let mut buf = [0];
server.read(buf).unwrap();
assert!(buf[0] == 99);
}, proc(mut client) {
client.write([99]).unwrap();
})
})
iotest!(fn read_eof() {
smalltest(proc(mut server) {
let mut buf = [0];
assert!(server.read(buf).is_err());
assert!(server.read(buf).is_err());
}, proc(_client) {
// drop the client
})
} #[ignore(cfg(windows))]) // FIXME(#12516)
iotest!(fn write_begone() {
smalltest(proc(mut server) {
let buf = [0];
loop {
match server.write(buf) {
Ok(..) => {}
Err(e) => {
assert!(e.kind == BrokenPipe ||
e.kind == NotConnected ||
e.kind == ConnectionReset,
"unknown error {:?}", e);
break;
}
}
}
}, proc(_client) {
// drop the client
})
})
iotest!(fn accept_lots() {
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = match UnixListener::bind(&path1).listen() {
Ok(a) => a,
Err(e) => fail!("failed listen: {}", e),
};
spawn(proc() {
for _ in range(0, times) {
let mut stream = UnixStream::connect(&path2);
match stream.write([100]) {
Ok(..) => {}
Err(e) => fail!("failed write: {}", e)
}
}
});
for _ in range(0, times) {
let mut client = acceptor.accept();
let mut buf = [0];
match client.read(buf) {
Ok(..) => {}
Err(e) => fail!("failed read/accept: {}", e),
}
assert_eq!(buf[0], 100);
}
})
#[cfg(unix)]
iotest!(fn path_exists() {
let path = next_test_unix();
let _acceptor = UnixListener::bind(&path).listen();
assert!(path.exists());
})
iotest!(fn unix_clone_smoke() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 0];
debug!("client reading");
assert_eq!(s.read(buf), Ok(1));
assert_eq!(buf[0], 1);
debug!("client writing");
s.write([2]).unwrap();
debug!("client dropping");
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
spawn(proc() {
let mut s2 = s2;
rx1.recv();
debug!("writer writing");
s2.write([1]).unwrap();
debug!("writer done");
tx2.send(());
});
tx1.send(());
let mut buf = [0, 0];
debug!("reader reading");
assert_eq!(s1.read(buf), Ok(1));
debug!("reader done");
rx2.recv();
})
iotest!(fn unix_clone_two_read() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
let (tx1, rx) = channel();
let tx2 = tx1.clone();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
s.write([1]).unwrap();
rx.recv();
s.write([2]).unwrap();
rx.recv();
});
| let s2 = s1.clone();
let (done, rx) = channel();
spawn(proc() {
let mut s2 = s2;
let mut buf = [0, 0];
s2.read(buf).unwrap();
tx2.send(());
done.send(());
});
let mut buf = [0, 0];
s1.read(buf).unwrap();
tx1.send(());
rx.recv();
})
iotest!(fn unix_clone_two_write() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 1];
s.read(buf).unwrap();
s.read(buf).unwrap();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx, rx) = channel();
spawn(proc() {
let mut s2 = s2;
s2.write([1]).unwrap();
tx.send(());
});
s1.write([2]).unwrap();
rx.recv();
})
} | let mut s1 = acceptor.accept().unwrap(); | random_line_split |
unix.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Named pipes
This module contains the ability to communicate over named pipes with
synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
while on Unix it corresponds to UNIX domain sockets.
These pipes are similar to TCP in the sense that you can have both a stream to a
server and a server itself. The server provided accepts other `UnixStream`
instances as clients.
*/
#![allow(missing_doc)]
use prelude::*;
use c_str::ToCStr;
use clone::Clone;
use io::pipe::PipeStream;
use io::{Listener, Acceptor, Reader, Writer, IoResult};
use kinds::Send;
use rt::rtio::{IoFactory, LocalIo, RtioUnixListener};
use rt::rtio::{RtioUnixAcceptor, RtioPipe};
/// A stream which communicates over a named pipe.
pub struct UnixStream {
obj: PipeStream,
}
impl UnixStream {
fn new(obj: ~RtioPipe:Send) -> UnixStream {
UnixStream { obj: PipeStream::new(obj) }
}
/// Connect to a pipe named by `path`. This will attempt to open a
/// connection to the underlying socket.
///
/// The returned stream will be closed when the object falls out of scope.
///
/// # Example
///
/// ```rust
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixStream;
///
/// let server = Path::new("path/to/my/socket");
/// let mut stream = UnixStream::connect(&server);
/// stream.write([1, 2, 3]);
/// ```
pub fn connect<P: ToCStr>(path: &P) -> IoResult<UnixStream> {
LocalIo::maybe_raise(|io| {
io.unix_connect(&path.to_c_str()).map(UnixStream::new)
})
}
}
impl Clone for UnixStream {
fn clone(&self) -> UnixStream |
}
impl Reader for UnixStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.obj.read(buf) }
}
impl Writer for UnixStream {
fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.obj.write(buf) }
}
/// A value that can listen for incoming named pipe connection requests.
pub struct UnixListener {
/// The internal, opaque runtime Unix listener.
obj: ~RtioUnixListener:Send,
}
impl UnixListener {
/// Creates a new listener, ready to receive incoming connections on the
/// specified socket. The server will be named by `path`.
///
/// This listener will be closed when it falls out of scope.
///
/// # Example
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixListener;
/// use std::io::{Listener, Acceptor};
///
/// let server = Path::new("/path/to/my/socket");
/// let stream = UnixListener::bind(&server);
/// for mut client in stream.listen().incoming() {
/// client.write([1, 2, 3, 4]);
/// }
/// # }
/// ```
pub fn bind<P: ToCStr>(path: &P) -> IoResult<UnixListener> {
LocalIo::maybe_raise(|io| {
io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
})
}
}
impl Listener<UnixStream, UnixAcceptor> for UnixListener {
fn listen(self) -> IoResult<UnixAcceptor> {
self.obj.listen().map(|obj| UnixAcceptor { obj: obj })
}
}
/// A value that can accept named pipe connections, returned from `listen()`.
pub struct UnixAcceptor {
/// The internal, opaque runtime Unix acceptor.
obj: ~RtioUnixAcceptor:Send,
}
impl Acceptor<UnixStream> for UnixAcceptor {
fn accept(&mut self) -> IoResult<UnixStream> {
self.obj.accept().map(UnixStream::new)
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::*;
use io::*;
use io::test::*;
pub fn smalltest(server: proc(UnixStream):Send, client: proc(UnixStream):Send) {
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = UnixListener::bind(&path1).listen();
spawn(proc() {
match UnixStream::connect(&path2) {
Ok(c) => client(c),
Err(e) => fail!("failed connect: {}", e),
}
});
match acceptor.accept() {
Ok(c) => server(c),
Err(e) => fail!("failed accept: {}", e),
}
}
iotest!(fn bind_error() {
let path = "path/to/nowhere";
match UnixListener::bind(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == PermissionDenied || e.kind == FileNotFound ||
e.kind == InvalidInput);
}
}
})
iotest!(fn connect_error() {
let path = if cfg!(windows) {
r"\\.\pipe\this_should_not_exist_ever"
} else {
"path/to/nowhere"
};
match UnixStream::connect(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == FileNotFound || e.kind == OtherIoError);
}
}
})
iotest!(fn smoke() {
smalltest(proc(mut server) {
let mut buf = [0];
server.read(buf).unwrap();
assert!(buf[0] == 99);
}, proc(mut client) {
client.write([99]).unwrap();
})
})
iotest!(fn read_eof() {
smalltest(proc(mut server) {
let mut buf = [0];
assert!(server.read(buf).is_err());
assert!(server.read(buf).is_err());
}, proc(_client) {
// drop the client
})
} #[ignore(cfg(windows))]) // FIXME(#12516)
iotest!(fn write_begone() {
smalltest(proc(mut server) {
let buf = [0];
loop {
match server.write(buf) {
Ok(..) => {}
Err(e) => {
assert!(e.kind == BrokenPipe ||
e.kind == NotConnected ||
e.kind == ConnectionReset,
"unknown error {:?}", e);
break;
}
}
}
}, proc(_client) {
// drop the client
})
})
iotest!(fn accept_lots() {
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = match UnixListener::bind(&path1).listen() {
Ok(a) => a,
Err(e) => fail!("failed listen: {}", e),
};
spawn(proc() {
for _ in range(0, times) {
let mut stream = UnixStream::connect(&path2);
match stream.write([100]) {
Ok(..) => {}
Err(e) => fail!("failed write: {}", e)
}
}
});
for _ in range(0, times) {
let mut client = acceptor.accept();
let mut buf = [0];
match client.read(buf) {
Ok(..) => {}
Err(e) => fail!("failed read/accept: {}", e),
}
assert_eq!(buf[0], 100);
}
})
#[cfg(unix)]
iotest!(fn path_exists() {
let path = next_test_unix();
let _acceptor = UnixListener::bind(&path).listen();
assert!(path.exists());
})
iotest!(fn unix_clone_smoke() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 0];
debug!("client reading");
assert_eq!(s.read(buf), Ok(1));
assert_eq!(buf[0], 1);
debug!("client writing");
s.write([2]).unwrap();
debug!("client dropping");
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
spawn(proc() {
let mut s2 = s2;
rx1.recv();
debug!("writer writing");
s2.write([1]).unwrap();
debug!("writer done");
tx2.send(());
});
tx1.send(());
let mut buf = [0, 0];
debug!("reader reading");
assert_eq!(s1.read(buf), Ok(1));
debug!("reader done");
rx2.recv();
})
iotest!(fn unix_clone_two_read() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
let (tx1, rx) = channel();
let tx2 = tx1.clone();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
s.write([1]).unwrap();
rx.recv();
s.write([2]).unwrap();
rx.recv();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (done, rx) = channel();
spawn(proc() {
let mut s2 = s2;
let mut buf = [0, 0];
s2.read(buf).unwrap();
tx2.send(());
done.send(());
});
let mut buf = [0, 0];
s1.read(buf).unwrap();
tx1.send(());
rx.recv();
})
iotest!(fn unix_clone_two_write() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 1];
s.read(buf).unwrap();
s.read(buf).unwrap();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx, rx) = channel();
spawn(proc() {
let mut s2 = s2;
s2.write([1]).unwrap();
tx.send(());
});
s1.write([2]).unwrap();
rx.recv();
})
}
| {
UnixStream { obj: self.obj.clone() }
} | identifier_body |
unix.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Named pipes
This module contains the ability to communicate over named pipes with
synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
while on Unix it corresponds to UNIX domain sockets.
These pipes are similar to TCP in the sense that you can have both a stream to a
server and a server itself. The server provided accepts other `UnixStream`
instances as clients.
*/
#![allow(missing_doc)]
use prelude::*;
use c_str::ToCStr;
use clone::Clone;
use io::pipe::PipeStream;
use io::{Listener, Acceptor, Reader, Writer, IoResult};
use kinds::Send;
use rt::rtio::{IoFactory, LocalIo, RtioUnixListener};
use rt::rtio::{RtioUnixAcceptor, RtioPipe};
/// A stream which communicates over a named pipe.
pub struct UnixStream {
obj: PipeStream,
}
impl UnixStream {
fn new(obj: ~RtioPipe:Send) -> UnixStream {
UnixStream { obj: PipeStream::new(obj) }
}
/// Connect to a pipe named by `path`. This will attempt to open a
/// connection to the underlying socket.
///
/// The returned stream will be closed when the object falls out of scope.
///
/// # Example
///
/// ```rust
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixStream;
///
/// let server = Path::new("path/to/my/socket");
/// let mut stream = UnixStream::connect(&server);
/// stream.write([1, 2, 3]);
/// ```
pub fn connect<P: ToCStr>(path: &P) -> IoResult<UnixStream> {
LocalIo::maybe_raise(|io| {
io.unix_connect(&path.to_c_str()).map(UnixStream::new)
})
}
}
impl Clone for UnixStream {
fn clone(&self) -> UnixStream {
UnixStream { obj: self.obj.clone() }
}
}
impl Reader for UnixStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.obj.read(buf) }
}
impl Writer for UnixStream {
fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.obj.write(buf) }
}
/// A value that can listen for incoming named pipe connection requests.
pub struct UnixListener {
/// The internal, opaque runtime Unix listener.
obj: ~RtioUnixListener:Send,
}
impl UnixListener {
/// Creates a new listener, ready to receive incoming connections on the
/// specified socket. The server will be named by `path`.
///
/// This listener will be closed when it falls out of scope.
///
/// # Example
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixListener;
/// use std::io::{Listener, Acceptor};
///
/// let server = Path::new("/path/to/my/socket");
/// let stream = UnixListener::bind(&server);
/// for mut client in stream.listen().incoming() {
/// client.write([1, 2, 3, 4]);
/// }
/// # }
/// ```
pub fn | <P: ToCStr>(path: &P) -> IoResult<UnixListener> {
LocalIo::maybe_raise(|io| {
io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
})
}
}
impl Listener<UnixStream, UnixAcceptor> for UnixListener {
fn listen(self) -> IoResult<UnixAcceptor> {
self.obj.listen().map(|obj| UnixAcceptor { obj: obj })
}
}
/// A value that can accept named pipe connections, returned from `listen()`.
pub struct UnixAcceptor {
/// The internal, opaque runtime Unix acceptor.
obj: ~RtioUnixAcceptor:Send,
}
impl Acceptor<UnixStream> for UnixAcceptor {
fn accept(&mut self) -> IoResult<UnixStream> {
self.obj.accept().map(UnixStream::new)
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::*;
use io::*;
use io::test::*;
pub fn smalltest(server: proc(UnixStream):Send, client: proc(UnixStream):Send) {
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = UnixListener::bind(&path1).listen();
spawn(proc() {
match UnixStream::connect(&path2) {
Ok(c) => client(c),
Err(e) => fail!("failed connect: {}", e),
}
});
match acceptor.accept() {
Ok(c) => server(c),
Err(e) => fail!("failed accept: {}", e),
}
}
iotest!(fn bind_error() {
let path = "path/to/nowhere";
match UnixListener::bind(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == PermissionDenied || e.kind == FileNotFound ||
e.kind == InvalidInput);
}
}
})
iotest!(fn connect_error() {
let path = if cfg!(windows) {
r"\\.\pipe\this_should_not_exist_ever"
} else {
"path/to/nowhere"
};
match UnixStream::connect(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == FileNotFound || e.kind == OtherIoError);
}
}
})
iotest!(fn smoke() {
smalltest(proc(mut server) {
let mut buf = [0];
server.read(buf).unwrap();
assert!(buf[0] == 99);
}, proc(mut client) {
client.write([99]).unwrap();
})
})
iotest!(fn read_eof() {
smalltest(proc(mut server) {
let mut buf = [0];
assert!(server.read(buf).is_err());
assert!(server.read(buf).is_err());
}, proc(_client) {
// drop the client
})
} #[ignore(cfg(windows))]) // FIXME(#12516)
iotest!(fn write_begone() {
smalltest(proc(mut server) {
let buf = [0];
loop {
match server.write(buf) {
Ok(..) => {}
Err(e) => {
assert!(e.kind == BrokenPipe ||
e.kind == NotConnected ||
e.kind == ConnectionReset,
"unknown error {:?}", e);
break;
}
}
}
}, proc(_client) {
// drop the client
})
})
iotest!(fn accept_lots() {
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = match UnixListener::bind(&path1).listen() {
Ok(a) => a,
Err(e) => fail!("failed listen: {}", e),
};
spawn(proc() {
for _ in range(0, times) {
let mut stream = UnixStream::connect(&path2);
match stream.write([100]) {
Ok(..) => {}
Err(e) => fail!("failed write: {}", e)
}
}
});
for _ in range(0, times) {
let mut client = acceptor.accept();
let mut buf = [0];
match client.read(buf) {
Ok(..) => {}
Err(e) => fail!("failed read/accept: {}", e),
}
assert_eq!(buf[0], 100);
}
})
#[cfg(unix)]
iotest!(fn path_exists() {
let path = next_test_unix();
let _acceptor = UnixListener::bind(&path).listen();
assert!(path.exists());
})
iotest!(fn unix_clone_smoke() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 0];
debug!("client reading");
assert_eq!(s.read(buf), Ok(1));
assert_eq!(buf[0], 1);
debug!("client writing");
s.write([2]).unwrap();
debug!("client dropping");
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
spawn(proc() {
let mut s2 = s2;
rx1.recv();
debug!("writer writing");
s2.write([1]).unwrap();
debug!("writer done");
tx2.send(());
});
tx1.send(());
let mut buf = [0, 0];
debug!("reader reading");
assert_eq!(s1.read(buf), Ok(1));
debug!("reader done");
rx2.recv();
})
iotest!(fn unix_clone_two_read() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
let (tx1, rx) = channel();
let tx2 = tx1.clone();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
s.write([1]).unwrap();
rx.recv();
s.write([2]).unwrap();
rx.recv();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (done, rx) = channel();
spawn(proc() {
let mut s2 = s2;
let mut buf = [0, 0];
s2.read(buf).unwrap();
tx2.send(());
done.send(());
});
let mut buf = [0, 0];
s1.read(buf).unwrap();
tx1.send(());
rx.recv();
})
iotest!(fn unix_clone_two_write() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 1];
s.read(buf).unwrap();
s.read(buf).unwrap();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx, rx) = channel();
spawn(proc() {
let mut s2 = s2;
s2.write([1]).unwrap();
tx.send(());
});
s1.write([2]).unwrap();
rx.recv();
})
}
| bind | identifier_name |
numeric.rs | use backend::Backend;
use expression::{Expression, SelectableExpression, NonAggregate};
use query_builder::*;
use types;
macro_rules! numeric_operation {
($name:ident, $op:expr) => {
pub struct $name<Lhs, Rhs> {
lhs: Lhs,
rhs: Rhs,
}
impl<Lhs, Rhs> $name<Lhs, Rhs> {
pub fn new(left: Lhs, right: Rhs) -> Self {
$name {
lhs: left,
rhs: right,
}
}
}
impl<Lhs, Rhs> Expression for $name<Lhs, Rhs> where
Lhs: Expression,
Lhs::SqlType: types::ops::$name,
Rhs: Expression,
{
type SqlType = <Lhs::SqlType as types::ops::$name>::Output;
}
impl<Lhs, Rhs, DB> QueryFragment<DB> for $name<Lhs, Rhs> where
DB: Backend,
Lhs: QueryFragment<DB>,
Rhs: QueryFragment<DB>,
{
fn to_sql(&self, out: &mut DB::QueryBuilder) -> BuildQueryResult {
try!(self.lhs.to_sql(out));
out.push_sql($op);
self.rhs.to_sql(out)
}
}
impl<Lhs, Rhs, QS> SelectableExpression<QS> for $name<Lhs, Rhs> where
Lhs: SelectableExpression<QS>,
Rhs: SelectableExpression<QS>,
$name<Lhs, Rhs>: Expression,
{
}
impl<Lhs, Rhs> NonAggregate for $name<Lhs, Rhs> where
Lhs: NonAggregate,
Rhs: NonAggregate,
$name<Lhs, Rhs>: Expression,
{
}
generic_numeric_expr!($name, A, B);
}
} |
numeric_operation!(Add, " + ");
numeric_operation!(Sub, " - ");
numeric_operation!(Mul, " * ");
numeric_operation!(Div, " / "); | random_line_split |
|
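The `numeric_operation!` macro above stamps out one binary expression node per operator: for example, `numeric_operation!(Add, " + ")` generates a `pub struct Add<Lhs, Rhs>` whose `to_sql` writes the left fragment, the literal `" + "`, then the right fragment, and whose SQL type comes from `<Lhs::SqlType as types::ops::Add>::Output`. A rough illustration of the generated behaviour; `column_a` and `column_b` are hypothetical column expressions, not defined in this file:

```rust
// Hypothetical: column_a and column_b are assumed to implement Expression
// and QueryFragment for the backend in use.
let sum = Add::new(column_a, column_b);

// When the query is serialized, Add::to_sql emits the left-hand SQL,
// then " + ", then the right-hand SQL, e.g. `column_a + column_b`.
```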
auth.rs | // Copyleft (ↄ) meh. <[email protected]> | http://meh.schizofreni.co
//
// This file is part of screenruster.
//
// screenruster is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// screenruster is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with screenruster. If not, see <http://www.gnu.org/licenses/>.
use std::thread;
use std::ops::Deref;
use channel::{self, Receiver, Sender, SendError};
use users;
use log::warn;
use crate::error;
use crate::config;
use super::Authenticate;
pub struct Auth {
receiver: Receiver<Response>,
sender: Sender<Request>,
}
#[derive(Clone, Debug)]
pub enum Request {
Authenticate(String),
}
#[derive(Clone, Debug)]
pub enum Response {
Success,
Failure,
}
impl Auth {
pub fn spawn(config: config::Auth) -> error::Result<Auth> {
let user = users::get_current_username().ok_or(error::Auth::UnknownUser)?;
let mut methods = Vec::<Box<dyn Authenticate>>::new();
#[cfg(feature = "auth-internal")]
methods.push(box super::internal::new(config.get("internal"))?);
#[cfg(feature = "auth-pam")]
methods.push(Box::new(super::pam::new(config.get("pam"))?));
let (sender, i_receiver) = channel::unbounded();
let (i_sender, receiver) = channel::unbounded();
thread::spawn(move || {
'main: while let Ok(request) = receiver.recv() {
match request {
Request::Authenticate(password) => {
| }
}
});
Ok(Auth {
receiver: i_receiver,
sender: i_sender,
})
}
pub fn authenticate<S: Into<String>>(&self, password: S) -> Result<(), SendError<Request>> {
self.sender.send(Request::Authenticate(password.into()))
}
}
impl Deref for Auth {
type Target = Receiver<Response>;
fn deref(&self) -> &Receiver<Response> {
&self.receiver
}
}
| if methods.is_empty() {
warn!("no authentication method");
sender.send(Response::Success).unwrap();
continue 'main;
}
for method in &mut methods {
if let Ok(true) = method.authenticate(user.to_str().unwrap(), &password) {
sender.send(Response::Success).unwrap();
continue 'main;
}
}
sender.send(Response::Failure).unwrap();
}
| conditional_block |
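The `Auth` handle above hides its worker thread behind a pair of channels: `authenticate` pushes a `Request` to the worker, and because `Auth` derefs to the response `Receiver`, the caller can block on the reply directly. A minimal caller sketch; `auth_config` is an assumed, pre-built `config::Auth` value, and the surrounding function is assumed to return `error::Result<()>` so that `?` can be used:

```rust
// `auth_config: config::Auth` is assumed to exist already.
let auth = Auth::spawn(auth_config)?;

// Ask the worker thread to verify a password...
let _ = auth.authenticate("hunter2");

// ...then block on the reply through the Deref<Target = Receiver<Response>> impl.
match auth.recv() {
    Ok(Response::Success) => println!("unlocked"),
    Ok(Response::Failure) => println!("wrong password"),
    Err(_)                => println!("auth worker terminated"),
}
```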
auth.rs | // Copyleft (ↄ) meh. <[email protected]> | http://meh.schizofreni.co
//
// This file is part of screenruster.
//
// screenruster is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. | // screenruster is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with screenruster. If not, see <http://www.gnu.org/licenses/>.
use std::thread;
use std::ops::Deref;
use channel::{self, Receiver, Sender, SendError};
use users;
use log::warn;
use crate::error;
use crate::config;
use super::Authenticate;
pub struct Auth {
receiver: Receiver<Response>,
sender: Sender<Request>,
}
#[derive(Clone, Debug)]
pub enum Request {
Authenticate(String),
}
#[derive(Clone, Debug)]
pub enum Response {
Success,
Failure,
}
impl Auth {
pub fn spawn(config: config::Auth) -> error::Result<Auth> {
let user = users::get_current_username().ok_or(error::Auth::UnknownUser)?;
let mut methods = Vec::<Box<dyn Authenticate>>::new();
#[cfg(feature = "auth-internal")]
methods.push(box super::internal::new(config.get("internal"))?);
#[cfg(feature = "auth-pam")]
methods.push(Box::new(super::pam::new(config.get("pam"))?));
let (sender, i_receiver) = channel::unbounded();
let (i_sender, receiver) = channel::unbounded();
thread::spawn(move || {
'main: while let Ok(request) = receiver.recv() {
match request {
Request::Authenticate(password) => {
if methods.is_empty() {
warn!("no authentication method");
sender.send(Response::Success).unwrap();
continue 'main;
}
for method in &mut methods {
if let Ok(true) = method.authenticate(user.to_str().unwrap(), &password) {
sender.send(Response::Success).unwrap();
continue 'main;
}
}
sender.send(Response::Failure).unwrap();
}
}
}
});
Ok(Auth {
receiver: i_receiver,
sender: i_sender,
})
}
pub fn authenticate<S: Into<String>>(&self, password: S) -> Result<(), SendError<Request>> {
self.sender.send(Request::Authenticate(password.into()))
}
}
impl Deref for Auth {
type Target = Receiver<Response>;
fn deref(&self) -> &Receiver<Response> {
&self.receiver
}
} | // | random_line_split |
auth.rs | // Copyleft (ↄ) meh. <[email protected]> | http://meh.schizofreni.co
//
// This file is part of screenruster.
//
// screenruster is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// screenruster is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with screenruster. If not, see <http://www.gnu.org/licenses/>.
use std::thread;
use std::ops::Deref;
use channel::{self, Receiver, Sender, SendError};
use users;
use log::warn;
use crate::error;
use crate::config;
use super::Authenticate;
pub struct Auth {
receiver: Receiver<Response>,
sender: Sender<Request>,
}
#[derive(Clone, Debug)]
pub enum Request {
Authenticate(String),
}
#[derive(Clone, Debug)]
pub enum Response {
Success,
Failure,
}
impl Auth {
pub fn spawn(config: config::Auth) -> error::Result<Auth> {
let user = users::get_current_username().ok_or(error::Auth::UnknownUser)?;
let mut methods = Vec::<Box<dyn Authenticate>>::new();
#[cfg(feature = "auth-internal")]
methods.push(box super::internal::new(config.get("internal"))?);
#[cfg(feature = "auth-pam")]
methods.push(Box::new(super::pam::new(config.get("pam"))?));
let (sender, i_receiver) = channel::unbounded();
let (i_sender, receiver) = channel::unbounded();
thread::spawn(move || {
'main: while let Ok(request) = receiver.recv() {
match request {
Request::Authenticate(password) => {
if methods.is_empty() {
warn!("no authentication method");
sender.send(Response::Success).unwrap();
continue 'main;
}
for method in &mut methods {
if let Ok(true) = method.authenticate(user.to_str().unwrap(), &password) {
sender.send(Response::Success).unwrap();
continue 'main;
}
}
sender.send(Response::Failure).unwrap();
}
}
}
});
Ok(Auth {
receiver: i_receiver,
sender: i_sender,
})
}
pub fn au | : Into<String>>(&self, password: S) -> Result<(), SendError<Request>> {
self.sender.send(Request::Authenticate(password.into()))
}
}
impl Deref for Auth {
type Target = Receiver<Response>;
fn deref(&self) -> &Receiver<Response> {
&self.receiver
}
}
| thenticate<S | identifier_name |
util.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use common::config;
#[cfg(target_os = "win32")]
use std::os::getenv;
/// Conversion table from triple OS name to Rust SYSNAME
static OS_TABLE: &'static [(&'static str, &'static str)] = &[
("mingw32", "win32"),
("win32", "win32"),
("darwin", "macos"),
("android", "android"),
("linux", "linux"),
("freebsd", "freebsd"),
];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE.iter() {
if triple.contains(triple_os) {
return os
}
}
fail!("Cannot determine OS from triple");
}
#[cfg(target_os = "win32")]
pub fn make_new_path(path: &str) -> ~str {
// Windows just uses PATH as the library search path, so we have to
// maintain the current value while adding our own
match getenv(lib_path_env_var()) {
Some(curr) => {
format!("{}{}{}", path, path_div(), curr)
}
None => path.to_str()
}
}
#[cfg(target_os = "win32")]
pub fn lib_path_env_var() -> ~str { ~"PATH" }
#[cfg(target_os = "win32")]
pub fn path_div() -> ~str |
}
| { ~";" }
pub fn logv(config: &config, s: ~str) {
debug!("{}", s);
if config.verbose { println!("{}", s); } | identifier_body |
util.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use common::config;
#[cfg(target_os = "win32")]
use std::os::getenv;
/// Conversion table from triple OS name to Rust SYSNAME
static OS_TABLE: &'static [(&'static str, &'static str)] = &[
("mingw32", "win32"),
("win32", "win32"),
("darwin", "macos"),
("android", "android"),
("linux", "linux"),
("freebsd", "freebsd"),
];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE.iter() {
if triple.contains(triple_os) {
return os
}
}
fail!("Cannot determine OS from triple");
}
#[cfg(target_os = "win32")]
pub fn make_new_path(path: &str) -> ~str {
// Windows just uses PATH as the library search path, so we have to
// maintain the current value while adding our own
match getenv(lib_path_env_var()) {
Some(curr) => {
format!("{}{}{}", path, path_div(), curr)
}
None => path.to_str()
}
}
#[cfg(target_os = "win32")]
pub fn | () -> ~str { ~"PATH" }
#[cfg(target_os = "win32")]
pub fn path_div() -> ~str { ~";" }
pub fn logv(config: &config, s: ~str) {
debug!("{}", s);
if config.verbose { println!("{}", s); }
}
| lib_path_env_var | identifier_name |
util.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use common::config;
#[cfg(target_os = "win32")]
use std::os::getenv; |
/// Conversion table from triple OS name to Rust SYSNAME
static OS_TABLE: &'static [(&'static str, &'static str)] = &[
("mingw32", "win32"),
("win32", "win32"),
("darwin", "macos"),
("android", "android"),
("linux", "linux"),
("freebsd", "freebsd"),
];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE.iter() {
if triple.contains(triple_os) {
return os
}
}
fail!("Cannot determine OS from triple");
}
#[cfg(target_os = "win32")]
pub fn make_new_path(path: &str) -> ~str {
// Windows just uses PATH as the library search path, so we have to
// maintain the current value while adding our own
match getenv(lib_path_env_var()) {
Some(curr) => {
format!("{}{}{}", path, path_div(), curr)
}
None => path.to_str()
}
}
#[cfg(target_os = "win32")]
pub fn lib_path_env_var() -> ~str { ~"PATH" }
#[cfg(target_os = "win32")]
pub fn path_div() -> ~str { ~";" }
pub fn logv(config: &config, s: ~str) {
debug!("{}", s);
if config.verbose { println!("{}", s); }
} | random_line_split |
|
no-landing-pads.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z no-landing-pads
use std::thread;
static mut HIT: bool = false;
struct A;
impl Drop for A {
fn drop(&mut self) {
unsafe { HIT = true; }
}
}
fn main() | {
thread::spawn(move|| -> () {
let _a = A;
panic!();
}).join().err().unwrap();
assert!(unsafe { !HIT });
} | identifier_body |
|
no-landing-pads.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z no-landing-pads
use std::thread;
static mut HIT: bool = false;
struct A;
impl Drop for A {
fn drop(&mut self) {
unsafe { HIT = true; }
}
}
fn | () {
thread::spawn(move|| -> () {
let _a = A;
panic!();
}).join().err().unwrap();
assert!(unsafe { !HIT });
}
| main | identifier_name |
no-landing-pads.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | // option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z no-landing-pads
use std::thread;
static mut HIT: bool = false;
struct A;
impl Drop for A {
fn drop(&mut self) {
unsafe { HIT = true; }
}
}
fn main() {
thread::spawn(move|| -> () {
let _a = A;
panic!();
}).join().err().unwrap();
assert!(unsafe { !HIT });
} | random_line_split |
|
cookiesetter.rs | /*
* This Source Code Form is subject to the
* terms of the Mozilla Public License, v. 2.0
*
* © Gregor Reitzenstein
*/
use iron::prelude::*;
use iron::AfterMiddleware;
use iron::headers::SetCookie;
use iron::typemap::Key;
use cookie::Cookie;
use api::API;
/// This Struct sets Cookies on outgoing Responses as necessary.
/// (i.e. For auth-tokens)
pub struct CookieSetter;
impl CookieSetter
{
pub fn new(_: &API) -> CookieSetter
{ |
impl AfterMiddleware for CookieSetter
{
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response>
{
// If the Request contains a CookieReq struct, set the specified Cookie
if req.extensions.contains::<CookieReq>()
{
let cookievalvec: Vec<[String; 2]> = req.extensions.remove::<CookieReq>().unwrap();
// A Cookie is a slice of two Strings: The key and the associated value
let cookies: Vec<Cookie> = cookievalvec.into_iter().map(|x| Cookie::new(x[0].clone(), x[1].clone())).collect();
res.headers.set(SetCookie(cookies));
}
Ok(res)
}
}
// This Struct notifies CookieSetter to set a cookie.
pub struct CookieReq;
// Key needs to be implemented so this struct can be inserted into req.extensions
impl Key for CookieReq { type Value = Vec<[String; 2]>; } | CookieSetter
}
} | random_line_split |
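// A rough sketch (not from the original sources) of how a handler asks for a
// cookie: it inserts a CookieReq value into the request extensions, and the
// CookieSetter AfterMiddleware above turns it into a SetCookie header. The
// handler itself and the "token" key/value are made up for illustration.
fn example_login_handler(req: &mut Request) -> IronResult<Response> {
    // Each entry is a [key, value] pair, matching CookieReq's Value type.
    req.extensions.insert::<CookieReq>(vec![
        ["token".to_string(), "abc123".to_string()],
    ]);
    Ok(Response::with((iron::status::Ok, "logged in")))
}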
cookiesetter.rs | /*
* This Source Code Form is subject to the
* terms of the Mozilla Public License, v. 2.0
*
* © Gregor Reitzenstein
*/
use iron::prelude::*;
use iron::AfterMiddleware;
use iron::headers::SetCookie;
use iron::typemap::Key;
use cookie::Cookie;
use api::API;
/// This Struct sets Cookies on outgoing Responses as necessary.
/// (i.e. For auth-tokens)
pub struct CookieSetter;
impl CookieSetter
{
pub fn new(_: &API) -> CookieSetter
{
CookieSetter
}
}
impl AfterMiddleware for CookieSetter
{
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response>
{
// If the Request contains a CookieReq struct, set the specified Cookie
if req.extensions.contains::<CookieReq>()
{ | Ok(res)
}
}
// This Struct notifies CookieSetter to set a cookie.
pub struct CookieReq;
// Key needs to be implemented so this struct can be inserted into req.extensions
impl Key for CookieReq { type Value = Vec<[String; 2]>; }
|
let cookievalvec: Vec<[String; 2]> = req.extensions.remove::<CookieReq>().unwrap();
// A Cookie is a slice of two Strings: The key and the associated value
let cookies: Vec<Cookie> = cookievalvec.into_iter().map(|x| Cookie::new(x[0].clone(), x[1].clone())).collect();
res.headers.set(SetCookie(cookies));
}
| conditional_block |
cookiesetter.rs | /*
* This Source Code Form is subject to the
* terms of the Mozilla Public License, v. 2.0
*
* © Gregor Reitzenstein
*/
use iron::prelude::*;
use iron::AfterMiddleware;
use iron::headers::SetCookie;
use iron::typemap::Key;
use cookie::Cookie;
use api::API;
/// This Struct sets Cookies on outgoing Responses as necessary.
/// (i.e. For auth-tokens)
pub struct CookieSetter;
impl CookieSetter
{
pub fn n | _: &API) -> CookieSetter
{
CookieSetter
}
}
impl AfterMiddleware for CookieSetter
{
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response>
{
// If the Request contains a CookieReq struct, set the specified Cookie
if req.extensions.contains::<CookieReq>()
{
let cookievalvec: Vec<[String; 2]> = req.extensions.remove::<CookieReq>().unwrap();
// A Cookie is a slice of two Strings: The key and the associated value
let cookies: Vec<Cookie> = cookievalvec.into_iter().map(|x| Cookie::new(x[0].clone(), x[1].clone())).collect();
res.headers.set(SetCookie(cookies));
}
Ok(res)
}
}
// This Struct notifies CookieSetter to set a cookie.
pub struct CookieReq;
// Key needs to be implemented so this struct can be inserted into req.extensions
impl Key for CookieReq { type Value = Vec<[String; 2]>; }
| ew( | identifier_name |
bitstream.rs | //! This module provides bit readers and writers
use std::io::{self, Write};
/// Contains either the consumed bytes and reconstructed bits or
/// only the consumed bytes if the supplied buffer was not big enough
pub enum Bits {
/// Consumed bytes, reconstructed bits
Some(usize, u16),
/// Consumed bytes
None(usize),
}
/// A bit reader.
pub trait BitReader {
/// Returns the next `n` bits.
fn read_bits(&mut self, buf: &[u8], n: u8) -> Bits;
}
/// A bit writer.
pub trait BitWriter: Write {
/// Writes the next `n` bits.
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()>;
}
macro_rules! define_bit_readers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[derive(Debug)]
pub struct $name {
bits: u8,
acc: u32,
}
impl $name {
/// Creates a new bit reader
pub fn new() -> $name {
$name {
bits: 0,
acc: 0,
}
}
}
)* // END Structure definitions
}
}
define_bit_readers!{
LsbReader, #[doc = "Reads bits from a byte stream, LSB first."];
MsbReader, #[doc = "Reads bits from a byte stream, MSB first."];
}
impl BitReader for LsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 |
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << self.bits;
self.bits += 8;
consumed += 1;
}
let res = self.acc & ((1 << n) - 1);
self.acc >>= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
impl BitReader for MsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error; the program should have prevented it.
// Ideally we would use a bounded integer type instead of u8.
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << (24 - self.bits);
self.bits += 8;
consumed += 1;
}
let res = self.acc >> (32 - n);
self.acc <<= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
macro_rules! define_bit_writers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[allow(dead_code)]
pub struct $name<W: Write> {
w: W,
bits: u8,
acc: u32,
}
impl<W: Write> $name<W> {
/// Creates a new bit reader
#[allow(dead_code)]
pub fn new(writer: W) -> $name<W> {
$name {
w: writer,
bits: 0,
acc: 0,
}
}
}
impl<W: Write> Write for $name<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.acc == 0 {
self.w.write(buf)
} else {
for &byte in buf.iter() {
try!(self.write_bits(byte as u16, 8))
}
Ok(buf.len())
}
}
fn flush(&mut self) -> io::Result<()> {
let missing = 8 - self.bits;
if missing > 0 {
try!(self.write_bits(0, missing));
}
self.w.flush()
}
}
)* // END Structure definitions
}
}
define_bit_writers!{
LsbWriter, #[doc = "Writes bits to a byte stream, LSB first."];
MsbWriter, #[doc = "Writes bits to a byte stream, MSB first."];
}
impl<W: Write> BitWriter for LsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << self.bits;
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[self.acc as u8]));
self.acc >>= 8;
self.bits -= 8
}
Ok(())
}
}
impl<W: Write> BitWriter for MsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << (32 - n - self.bits);
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[(self.acc >> 24) as u8]));
self.acc <<= 8;
self.bits -= 8
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::{BitReader, BitWriter, Bits};
#[test]
fn reader_writer() {
let data = [255, 20, 40, 120, 128];
let mut offset = 0;
let mut expanded_data = Vec::new();
let mut reader = super::LsbReader::new();
while let Bits::Some(consumed, b) = reader.read_bits(&data[offset..], 10) {
offset += consumed;
expanded_data.push(b)
}
let mut compressed_data = Vec::new();
{
let mut writer = super::LsbWriter::new(&mut compressed_data);
for &datum in expanded_data.iter() {
let _ = writer.write_bits(datum, 10);
}
}
assert_eq!(&data[..], &compressed_data[..])
}
}
| {
// This is a logic error; the program should have prevented it.
// Ideally we would use a bounded integer type instead of u8.
panic!("Cannot read more than 16 bits")
} | conditional_block |
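// A small, self-contained illustration (not part of the original module) of
// the LSB-first packing implemented above: reading 10 bits from [0xFF, 0x14]
// reconstructs the low 10 bits of 0x14FF (= 0x0FF) and leaves the remaining
// 6 bits buffered; writing 0x0FF back and flushing pads with zero bits.
fn example_lsb_round_trip() {
    use std::io::Write;
    let mut reader = LsbReader::new();
    if let Bits::Some(consumed, value) = reader.read_bits(&[0xFF, 0x14], 10) {
        assert_eq!((consumed, value), (2, 0x0FF));
    }
    let mut out = Vec::new();
    {
        let mut writer = LsbWriter::new(&mut out);
        writer.write_bits(0x0FF, 10).unwrap();
        writer.flush().unwrap(); // fills the trailing 6 bits with zeros
    }
    assert_eq!(out, vec![0xFF, 0x00]);
}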
bitstream.rs | //! This module provides bit readers and writers
use std::io::{self, Write};
/// Contains either the consumed bytes and reconstructed bits or
/// only the consumed bytes if the supplied buffer was not big enough
pub enum Bits {
/// Consumed bytes, reconstructed bits
Some(usize, u16),
/// Consumed bytes
None(usize),
}
/// A bit reader.
pub trait BitReader {
/// Returns the next `n` bits.
fn read_bits(&mut self, buf: &[u8], n: u8) -> Bits;
}
/// A bit writer.
pub trait BitWriter: Write {
/// Writes the next `n` bits.
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()>;
}
macro_rules! define_bit_readers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[derive(Debug)]
pub struct $name {
bits: u8,
acc: u32,
}
impl $name {
/// Creates a new bit reader
pub fn new() -> $name {
$name {
bits: 0,
acc: 0,
}
}
}
)* // END Structure definitions
}
}
define_bit_readers!{
LsbReader, #[doc = "Reads bits from a byte stream, LSB first."];
MsbReader, #[doc = "Reads bits from a byte stream, MSB first."];
}
impl BitReader for LsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error; the program should have prevented it.
// Ideally we would use a bounded integer type instead of u8.
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << self.bits;
self.bits += 8;
consumed += 1;
}
let res = self.acc & ((1 << n) - 1);
self.acc >>= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
impl BitReader for MsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error; the program should have prevented it.
// Ideally we would use a bounded integer type instead of u8.
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << (24 - self.bits);
self.bits += 8;
consumed += 1;
}
let res = self.acc >> (32 - n);
self.acc <<= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
macro_rules! define_bit_writers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[allow(dead_code)]
pub struct $name<W: Write> {
w: W,
bits: u8,
acc: u32,
}
impl<W: Write> $name<W> {
/// Creates a new bit reader
#[allow(dead_code)]
pub fn new(writer: W) -> $name<W> {
$name {
w: writer,
bits: 0,
acc: 0,
}
}
}
impl<W: Write> Write for $name<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.acc == 0 {
self.w.write(buf)
} else {
for &byte in buf.iter() {
try!(self.write_bits(byte as u16, 8))
}
Ok(buf.len())
}
}
fn flush(&mut self) -> io::Result<()> {
let missing = 8 - self.bits;
if missing > 0 {
try!(self.write_bits(0, missing));
}
self.w.flush()
}
}
)* // END Structure definitions
}
}
define_bit_writers!{
LsbWriter, #[doc = "Writes bits to a byte stream, LSB first."];
MsbWriter, #[doc = "Writes bits to a byte stream, MSB first."];
}
impl<W: Write> BitWriter for LsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << self.bits;
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[self.acc as u8]));
self.acc >>= 8;
self.bits -= 8
}
Ok(())
}
}
impl<W: Write> BitWriter for MsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << (32 - n - self.bits);
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[(self.acc >> 24) as u8]));
self.acc <<= 8;
self.bits -= 8
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::{BitReader, BitWriter, Bits};
#[test]
fn | () {
let data = [255, 20, 40, 120, 128];
let mut offset = 0;
let mut expanded_data = Vec::new();
let mut reader = super::LsbReader::new();
while let Bits::Some(consumed, b) = reader.read_bits(&data[offset..], 10) {
offset += consumed;
expanded_data.push(b)
}
let mut compressed_data = Vec::new();
{
let mut writer = super::LsbWriter::new(&mut compressed_data);
for &datum in expanded_data.iter() {
let _ = writer.write_bits(datum, 10);
}
}
assert_eq!(&data[..], &compressed_data[..])
}
}
| reader_writer | identifier_name |
bitstream.rs | //! This module provides bit readers and writers
use std::io::{self, Write};
/// Contains either the consumed bytes and reconstructed bits or
/// only the consumed bytes if the supplied buffer was not big enough
pub enum Bits {
/// Consumed bytes, reconstructed bits
Some(usize, u16),
/// Consumed bytes
None(usize),
}
/// A bit reader.
pub trait BitReader {
/// Returns the next `n` bits.
fn read_bits(&mut self, buf: &[u8], n: u8) -> Bits;
}
/// A bit writer.
pub trait BitWriter: Write {
/// Writes the next `n` bits.
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()>;
}
macro_rules! define_bit_readers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[derive(Debug)]
pub struct $name {
bits: u8,
acc: u32,
}
impl $name {
/// Creates a new bit reader
pub fn new() -> $name {
$name {
bits: 0,
acc: 0,
}
}
}
)* // END Structure definitions
}
}
define_bit_readers!{
LsbReader, #[doc = "Reads bits from a byte stream, LSB first."];
MsbReader, #[doc = "Reads bits from a byte stream, MSB first."];
}
impl BitReader for LsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error; the program should have prevented it.
// Ideally we would use a bounded integer type instead of u8.
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << self.bits;
self.bits += 8;
consumed += 1;
}
let res = self.acc & ((1 << n) - 1);
self.acc >>= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
impl BitReader for MsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error; the program should have prevented it.
// Ideally we would use a bounded integer type instead of u8.
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << (24 - self.bits);
self.bits += 8;
consumed += 1;
}
let res = self.acc >> (32 - n);
self.acc <<= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
macro_rules! define_bit_writers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[allow(dead_code)]
pub struct $name<W: Write> {
w: W,
bits: u8,
acc: u32,
}
impl<W: Write> $name<W> {
/// Creates a new bit reader
#[allow(dead_code)]
pub fn new(writer: W) -> $name<W> {
$name {
w: writer,
bits: 0,
acc: 0,
}
}
}
impl<W: Write> Write for $name<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.acc == 0 {
self.w.write(buf)
} else {
for &byte in buf.iter() {
try!(self.write_bits(byte as u16, 8))
}
Ok(buf.len())
}
}
fn flush(&mut self) -> io::Result<()> {
let missing = 8 - self.bits;
if missing > 0 {
try!(self.write_bits(0, missing));
}
self.w.flush()
}
}
)* // END Structure definitions
}
}
define_bit_writers!{
LsbWriter, #[doc = "Writes bits to a byte stream, LSB first."];
MsbWriter, #[doc = "Writes bits to a byte stream, MSB first."];
}
impl<W: Write> BitWriter for LsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << self.bits;
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[self.acc as u8]));
self.acc >>= 8;
self.bits -= 8
}
Ok(())
}
}
impl<W: Write> BitWriter for MsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << (32 - n - self.bits);
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[(self.acc >> 24) as u8]));
self.acc <<= 8;
self.bits -= 8
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::{BitReader, BitWriter, Bits};
#[test]
fn reader_writer() {
let data = [255, 20, 40, 120, 128];
let mut offset = 0;
let mut expanded_data = Vec::new();
let mut reader = super::LsbReader::new();
while let Bits::Some(consumed, b) = reader.read_bits(&data[offset..], 10) {
offset += consumed;
expanded_data.push(b)
}
let mut compressed_data = Vec::new();
{
let mut writer = super::LsbWriter::new(&mut compressed_data); | }
assert_eq!(&data[..], &compressed_data[..])
}
} | for &datum in expanded_data.iter() {
let _ = writer.write_bits(datum, 10);
} | random_line_split |
bitstream.rs | //! This module provides bit readers and writers
use std::io::{self, Write};
/// Contains either the consumed bytes and reconstructed bits or
/// only the consumed bytes if the supplied buffer was not big enough
pub enum Bits {
/// Consumed bytes, reconstructed bits
Some(usize, u16),
/// Consumed bytes
None(usize),
}
/// A bit reader.
pub trait BitReader {
/// Returns the next `n` bits.
fn read_bits(&mut self, buf: &[u8], n: u8) -> Bits;
}
/// A bit writer.
pub trait BitWriter: Write {
/// Writes the next `n` bits.
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()>;
}
macro_rules! define_bit_readers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[derive(Debug)]
pub struct $name {
bits: u8,
acc: u32,
}
impl $name {
/// Creates a new bit reader
pub fn new() -> $name {
$name {
bits: 0,
acc: 0,
}
}
}
)* // END Structure definitions
}
}
define_bit_readers!{
LsbReader, #[doc = "Reads bits from a byte stream, LSB first."];
MsbReader, #[doc = "Reads bits from a byte stream, MSB first."];
}
impl BitReader for LsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error; the program should have prevented it.
// Ideally we would use a bounded integer type instead of u8.
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << self.bits;
self.bits += 8;
consumed += 1;
}
let res = self.acc & ((1 << n) - 1);
self.acc >>= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
impl BitReader for MsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error; the program should have prevented it.
// Ideally we would use a bounded integer type instead of u8.
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << (24 - self.bits);
self.bits += 8;
consumed += 1;
}
let res = self.acc >> (32 - n);
self.acc <<= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
macro_rules! define_bit_writers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[allow(dead_code)]
pub struct $name<W: Write> {
w: W,
bits: u8,
acc: u32,
}
impl<W: Write> $name<W> {
/// Creates a new bit reader
#[allow(dead_code)]
pub fn new(writer: W) -> $name<W> {
$name {
w: writer,
bits: 0,
acc: 0,
}
}
}
impl<W: Write> Write for $name<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.acc == 0 {
self.w.write(buf)
} else {
for &byte in buf.iter() {
try!(self.write_bits(byte as u16, 8))
}
Ok(buf.len())
}
}
fn flush(&mut self) -> io::Result<()> {
let missing = 8 - self.bits;
if missing > 0 {
try!(self.write_bits(0, missing));
}
self.w.flush()
}
}
)* // END Structure definitions
}
}
define_bit_writers!{
LsbWriter, #[doc = "Writes bits to a byte stream, LSB first."];
MsbWriter, #[doc = "Writes bits to a byte stream, MSB first."];
}
impl<W: Write> BitWriter for LsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> |
}
impl<W: Write> BitWriter for MsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << (32 - n - self.bits);
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[(self.acc >> 24) as u8]));
self.acc <<= 8;
self.bits -= 8
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::{BitReader, BitWriter, Bits};
#[test]
fn reader_writer() {
let data = [255, 20, 40, 120, 128];
let mut offset = 0;
let mut expanded_data = Vec::new();
let mut reader = super::LsbReader::new();
while let Bits::Some(consumed, b) = reader.read_bits(&data[offset..], 10) {
offset += consumed;
expanded_data.push(b)
}
let mut compressed_data = Vec::new();
{
let mut writer = super::LsbWriter::new(&mut compressed_data);
for &datum in expanded_data.iter() {
let _ = writer.write_bits(datum, 10);
}
}
assert_eq!(&data[..], &compressed_data[..])
}
}
| {
self.acc |= (v as u32) << self.bits;
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[self.acc as u8]));
self.acc >>= 8;
self.bits -= 8
}
Ok(())
} | identifier_body |
api.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate hyper;
extern crate time;
extern crate url;
use foxbox_taxonomy::api::{ Error, InternalError };
use foxbox_taxonomy::services::*;
use self::hyper::header::{ Authorization, Basic, Connection };
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::io::{ BufWriter, ErrorKind };
use std::io::prelude::*;
use std::path::Path;
// TODO: The camera username and password need to be persisted per-camera
static CAMERA_USERNAME: &'static str = "admin";
static CAMERA_PASSWORD: &'static str = "password";
pub fn create_service_id(service_id: &str) -> Id<ServiceId> {
Id::new(&format!("service:{}@link.mozilla.org", service_id))
}
pub fn create_setter_id(operation: &str, service_id: &str) -> Id<Setter> {
create_io_mechanism_id("setter", operation, service_id)
}
pub fn create_getter_id(operation: &str, service_id: &str) -> Id<Getter> {
create_io_mechanism_id("getter", operation, service_id)
}
pub fn create_io_mechanism_id<IO>(prefix: &str, operation: &str, service_id: &str) -> Id<IO>
where IO: IOMechanism
{
Id::new(&format!("{}:{}.{}@link.mozilla.org", prefix, operation, service_id))
}
fn get_bytes(url: String) -> Result<Vec<u8>, Error> {
let client = hyper::Client::new();
let get_result = client.get(&url)
.header(
Authorization(
Basic {
username: CAMERA_USERNAME.to_owned(),
password: Some(CAMERA_PASSWORD.to_owned())
}
)
)
.header(Connection::close())
.send();
let mut res = match get_result {
Ok(res) => res,
Err(err) => {
warn!("GET on {} failed: {}", url, err);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
if res.status != self::hyper::status::StatusCode::Ok {
warn!("GET on {} failed: {}", url, res.status);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
let mut image = Vec::new();
match res.read_to_end(&mut image) {
Ok(_) => Ok(image),
Err(err) => {
warn!("read of image data from {} failed: {}", url, err);
Err(Error::InternalError(InternalError::InvalidInitialService))
}
}
}
#[derive(Clone)]
pub struct IpCamera {
pub udn: String,
url: String,
snapshot_dir: String,
pub image_list_id: Id<Getter>,
pub image_newest_id: Id<Getter>,
pub snapshot_id: Id<Setter>,
}
impl IpCamera {
pub fn new(udn: &str, url: &str, root_snapshot_dir: &str) -> Result<Self, Error> {
let camera = IpCamera {
udn: udn.to_owned(),
url: url.to_owned(),
image_list_id: create_getter_id("image_list", &udn),
image_newest_id: create_getter_id("image_newest", &udn),
snapshot_id: create_setter_id("snapshot", &udn),
snapshot_dir: format!("{}/{}", root_snapshot_dir, udn)
};
// Create a directory to store snapshots for this camera.
if let Err(err) = fs::create_dir_all(&camera.snapshot_dir) {
if err.kind() != ErrorKind::AlreadyExists {
error!("Unable to create directory {}: {}", camera.snapshot_dir, err);
return Err(Error::InternalError(InternalError::GenericError(format!("cannot create {}", camera.snapshot_dir))));
}
}
Ok(camera)
}
pub fn get_image_list(&self) -> Vec<String> {
let mut array: Vec<String> = vec!();
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
array.push(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
array
}
pub fn get_image(&self, filename: &str) -> Result<Vec<u8>, Error> {
let full_filename = format!("{}/{}", self.snapshot_dir, filename);
debug!("get_image: filename = {}", full_filename.clone());
let mut options = fs::OpenOptions::new();
options.read(true);
if let Ok(mut image_file) = options.open(full_filename.clone()) {
let mut image = Vec::new();
if let Ok(_) = image_file.read_to_end(&mut image) {
return Ok(image);
}
warn!("Error reading {}", full_filename);
} else {
warn!("Image {} not found", full_filename);
}
Err(Error::InternalError(InternalError::InvalidInitialService))
}
pub fn get_newest_image(&self) -> Result<Vec<u8>, Error> {
let mut newest_image_time = 0;
let mut newest_image = None;
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
let time = metadata.ctime();
if newest_image_time <= time {
newest_image_time = time;
newest_image = Some(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
}
if newest_image.is_none() {
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
self.get_image(&newest_image.unwrap())
}
pub fn take_snapshot(&self) -> Result<String, Error> {
let image_url = "image/jpeg.cgi";
let url = format!("{}/{}", self.url, image_url);
let image = match get_bytes(url) {
Ok(image) => image,
Err(err) => {
warn!("Error '{:?}' retrieving image from camera {}", err, self.url);
return Err(Error::InternalError(InternalError::InvalidInitialService));
} | };
let mut options = fs::OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
let filename_base = time::strftime("%Y-%m-%d-%H%M%S", &time::now()).unwrap();
let mut full_filename;
let image_file;
let mut loop_count = 0;
let mut filename;
loop {
if loop_count == 0 {
filename = filename_base.clone();
} else {
filename = format!("{}-{}", filename_base, loop_count);
}
full_filename = format!("{}/{}.jpg", self.snapshot_dir, filename);
if fs::metadata(full_filename.clone()).is_ok() {
// File exists
loop_count += 1;
continue;
}
image_file = match options.open(full_filename.clone()) {
Ok(file) => file,
Err(err) => {
warn!("Unable to open {}: {:?}", full_filename, err.kind());
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
break;
}
let mut writer = BufWriter::new(&image_file);
match writer.write_all(&image) {
Ok(_) => {}
Err(err) => {
warn!("Error '{:?}' writing snapshot.jpg for camera {}", err, self.udn);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
}
debug!("Took a snapshot from {}: {}", self.udn, full_filename);
Ok(format!("{}.jpg", filename))
}
} | random_line_split |
|
api.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate hyper;
extern crate time;
extern crate url;
use foxbox_taxonomy::api::{ Error, InternalError };
use foxbox_taxonomy::services::*;
use self::hyper::header::{ Authorization, Basic, Connection };
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::io::{ BufWriter, ErrorKind };
use std::io::prelude::*;
use std::path::Path;
// TODO: The camera username and password need to be persisted per-camera
static CAMERA_USERNAME: &'static str = "admin";
static CAMERA_PASSWORD: &'static str = "password";
pub fn create_service_id(service_id: &str) -> Id<ServiceId> {
Id::new(&format!("service:{}@link.mozilla.org", service_id))
}
pub fn create_setter_id(operation: &str, service_id: &str) -> Id<Setter> {
create_io_mechanism_id("setter", operation, service_id)
}
pub fn create_getter_id(operation: &str, service_id: &str) -> Id<Getter> {
create_io_mechanism_id("getter", operation, service_id)
}
pub fn create_io_mechanism_id<IO>(prefix: &str, operation: &str, service_id: &str) -> Id<IO>
where IO: IOMechanism
{
Id::new(&format!("{}:{}.{}@link.mozilla.org", prefix, operation, service_id))
}
fn get_bytes(url: String) -> Result<Vec<u8>, Error> {
let client = hyper::Client::new();
let get_result = client.get(&url)
.header(
Authorization(
Basic {
username: CAMERA_USERNAME.to_owned(),
password: Some(CAMERA_PASSWORD.to_owned())
}
)
)
.header(Connection::close())
.send();
let mut res = match get_result {
Ok(res) => res,
Err(err) => {
warn!("GET on {} failed: {}", url, err);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
if res.status != self::hyper::status::StatusCode::Ok {
warn!("GET on {} failed: {}", url, res.status);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
let mut image = Vec::new();
match res.read_to_end(&mut image) {
Ok(_) => Ok(image),
Err(err) => |
}
}
#[derive(Clone)]
pub struct IpCamera {
pub udn: String,
url: String,
snapshot_dir: String,
pub image_list_id: Id<Getter>,
pub image_newest_id: Id<Getter>,
pub snapshot_id: Id<Setter>,
}
impl IpCamera {
pub fn new(udn: &str, url: &str, root_snapshot_dir: &str) -> Result<Self, Error> {
let camera = IpCamera {
udn: udn.to_owned(),
url: url.to_owned(),
image_list_id: create_getter_id("image_list", &udn),
image_newest_id: create_getter_id("image_newest", &udn),
snapshot_id: create_setter_id("snapshot", &udn),
snapshot_dir: format!("{}/{}", root_snapshot_dir, udn)
};
// Create a directory to store snapshots for this camera.
if let Err(err) = fs::create_dir_all(&camera.snapshot_dir) {
if err.kind() != ErrorKind::AlreadyExists {
error!("Unable to create directory {}: {}", camera.snapshot_dir, err);
return Err(Error::InternalError(InternalError::GenericError(format!("cannot create {}", camera.snapshot_dir))));
}
}
Ok(camera)
}
pub fn get_image_list(&self) -> Vec<String> {
let mut array: Vec<String> = vec!();
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
array.push(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
array
}
pub fn get_image(&self, filename: &str) -> Result<Vec<u8>, Error> {
let full_filename = format!("{}/{}", self.snapshot_dir, filename);
debug!("get_image: filename = {}", full_filename.clone());
let mut options = fs::OpenOptions::new();
options.read(true);
if let Ok(mut image_file) = options.open(full_filename.clone()) {
let mut image = Vec::new();
if let Ok(_) = image_file.read_to_end(&mut image) {
return Ok(image);
}
warn!("Error reading {}", full_filename);
} else {
warn!("Image {} not found", full_filename);
}
Err(Error::InternalError(InternalError::InvalidInitialService))
}
pub fn get_newest_image(&self) -> Result<Vec<u8>, Error> {
let mut newest_image_time = 0;
let mut newest_image = None;
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
let time = metadata.ctime();
if newest_image_time <= time {
newest_image_time = time;
newest_image = Some(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
}
if newest_image.is_none() {
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
self.get_image(&newest_image.unwrap())
}
pub fn take_snapshot(&self) -> Result<String, Error> {
let image_url = "image/jpeg.cgi";
let url = format!("{}/{}", self.url, image_url);
let image = match get_bytes(url) {
Ok(image) => image,
Err(err) => {
warn!("Error '{:?}' retrieving image from camera {}", err, self.url);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
let mut options = fs::OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
let filename_base = time::strftime("%Y-%m-%d-%H%M%S", &time::now()).unwrap();
let mut full_filename;
let image_file;
let mut loop_count = 0;
let mut filename;
loop {
if loop_count == 0 {
filename = filename_base.clone();
} else {
filename = format!("{}-{}", filename_base, loop_count);
}
full_filename = format!("{}/{}.jpg", self.snapshot_dir, filename);
if fs::metadata(full_filename.clone()).is_ok() {
// File exists
loop_count += 1;
continue;
}
image_file = match options.open(full_filename.clone()) {
Ok(file) => file,
Err(err) => {
warn!("Unable to open {}: {:?}", full_filename, err.kind());
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
break;
}
let mut writer = BufWriter::new(&image_file);
match writer.write_all(&image) {
Ok(_) => {}
Err(err) => {
warn!("Error '{:?}' writing snapshot.jpg for camera {}", err, self.udn);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
}
debug!("Took a snapshot from {}: {}", self.udn, full_filename);
Ok(format!("{}.jpg", filename))
}
}
| {
warn!("read of image data from {} failed: {}", url, err);
Err(Error::InternalError(InternalError::InvalidInitialService))
} | conditional_block |
api.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate hyper;
extern crate time;
extern crate url;
use foxbox_taxonomy::api::{ Error, InternalError };
use foxbox_taxonomy::services::*;
use self::hyper::header::{ Authorization, Basic, Connection };
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::io::{ BufWriter, ErrorKind };
use std::io::prelude::*;
use std::path::Path;
// TODO: The camera username and password need to be persisted per-camera
static CAMERA_USERNAME: &'static str = "admin";
static CAMERA_PASSWORD: &'static str = "password";
pub fn create_service_id(service_id: &str) -> Id<ServiceId> {
Id::new(&format!("service:{}@link.mozilla.org", service_id))
}
pub fn create_setter_id(operation: &str, service_id: &str) -> Id<Setter> {
create_io_mechanism_id("setter", operation, service_id)
}
pub fn create_getter_id(operation: &str, service_id: &str) -> Id<Getter> {
create_io_mechanism_id("getter", operation, service_id)
}
pub fn | <IO>(prefix: &str, operation: &str, service_id: &str) -> Id<IO>
where IO: IOMechanism
{
Id::new(&format!("{}:{}.{}@link.mozilla.org", prefix, operation, service_id))
}
fn get_bytes(url: String) -> Result<Vec<u8>, Error> {
let client = hyper::Client::new();
let get_result = client.get(&url)
.header(
Authorization(
Basic {
username: CAMERA_USERNAME.to_owned(),
password: Some(CAMERA_PASSWORD.to_owned())
}
)
)
.header(Connection::close())
.send();
let mut res = match get_result {
Ok(res) => res,
Err(err) => {
warn!("GET on {} failed: {}", url, err);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
if res.status != self::hyper::status::StatusCode::Ok {
warn!("GET on {} failed: {}", url, res.status);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
let mut image = Vec::new();
match res.read_to_end(&mut image) {
Ok(_) => Ok(image),
Err(err) => {
warn!("read of image data from {} failed: {}", url, err);
Err(Error::InternalError(InternalError::InvalidInitialService))
}
}
}
#[derive(Clone)]
pub struct IpCamera {
pub udn: String,
url: String,
snapshot_dir: String,
pub image_list_id: Id<Getter>,
pub image_newest_id: Id<Getter>,
pub snapshot_id: Id<Setter>,
}
impl IpCamera {
pub fn new(udn: &str, url: &str, root_snapshot_dir: &str) -> Result<Self, Error> {
let camera = IpCamera {
udn: udn.to_owned(),
url: url.to_owned(),
image_list_id: create_getter_id("image_list", &udn),
image_newest_id: create_getter_id("image_newest", &udn),
snapshot_id: create_setter_id("snapshot", &udn),
snapshot_dir: format!("{}/{}", root_snapshot_dir, udn)
};
// Create a directory to store snapshots for this camera.
if let Err(err) = fs::create_dir_all(&camera.snapshot_dir) {
if err.kind() != ErrorKind::AlreadyExists {
error!("Unable to create directory {}: {}", camera.snapshot_dir, err);
return Err(Error::InternalError(InternalError::GenericError(format!("cannot create {}", camera.snapshot_dir))));
}
}
Ok(camera)
}
pub fn get_image_list(&self) -> Vec<String> {
let mut array: Vec<String> = vec!();
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
array.push(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
array
}
pub fn get_image(&self, filename: &str) -> Result<Vec<u8>, Error> {
let full_filename = format!("{}/{}", self.snapshot_dir, filename);
debug!("get_image: filename = {}", full_filename.clone());
let mut options = fs::OpenOptions::new();
options.read(true);
if let Ok(mut image_file) = options.open(full_filename.clone()) {
let mut image = Vec::new();
if let Ok(_) = image_file.read_to_end(&mut image) {
return Ok(image);
}
warn!("Error reading {}", full_filename);
} else {
warn!("Image {} not found", full_filename);
}
Err(Error::InternalError(InternalError::InvalidInitialService))
}
pub fn get_newest_image(&self) -> Result<Vec<u8>, Error> {
let mut newest_image_time = 0;
let mut newest_image = None;
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
let time = metadata.ctime();
if newest_image_time <= time {
newest_image_time = time;
newest_image = Some(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
}
if newest_image.is_none() {
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
self.get_image(&newest_image.unwrap())
}
pub fn take_snapshot(&self) -> Result<String, Error> {
let image_url = "image/jpeg.cgi";
let url = format!("{}/{}", self.url, image_url);
let image = match get_bytes(url) {
Ok(image) => image,
Err(err) => {
warn!("Error '{:?}' retrieving image from camera {}", err, self.url);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
let mut options = fs::OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
let filename_base = time::strftime("%Y-%m-%d-%H%M%S", &time::now()).unwrap();
let mut full_filename;
let image_file;
let mut loop_count = 0;
let mut filename;
loop {
if loop_count == 0 {
filename = filename_base.clone();
} else {
filename = format!("{}-{}", filename_base, loop_count);
}
full_filename = format!("{}/{}.jpg", self.snapshot_dir, filename);
if fs::metadata(full_filename.clone()).is_ok() {
// File exists
loop_count += 1;
continue;
}
image_file = match options.open(full_filename.clone()) {
Ok(file) => file,
Err(err) => {
warn!("Unable to open {}: {:?}", full_filename, err.kind());
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
break;
}
let mut writer = BufWriter::new(&image_file);
match writer.write_all(&image) {
Ok(_) => {}
Err(err) => {
warn!("Error '{:?}' writing snapshot.jpg for camera {}", err, self.udn);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
}
debug!("Took a snapshot from {}: {}", self.udn, full_filename);
Ok(format!("{}.jpg", filename))
}
}
| create_io_mechanism_id | identifier_name |
api.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate hyper;
extern crate time;
extern crate url;
use foxbox_taxonomy::api::{ Error, InternalError };
use foxbox_taxonomy::services::*;
use self::hyper::header::{ Authorization, Basic, Connection };
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::io::{ BufWriter, ErrorKind };
use std::io::prelude::*;
use std::path::Path;
// TODO: The camera username and password need to be persisted per-camera
static CAMERA_USERNAME: &'static str = "admin";
static CAMERA_PASSWORD: &'static str = "password";
pub fn create_service_id(service_id: &str) -> Id<ServiceId> |
pub fn create_setter_id(operation: &str, service_id: &str) -> Id<Setter> {
create_io_mechanism_id("setter", operation, service_id)
}
pub fn create_getter_id(operation: &str, service_id: &str) -> Id<Getter> {
create_io_mechanism_id("getter", operation, service_id)
}
pub fn create_io_mechanism_id<IO>(prefix: &str, operation: &str, service_id: &str) -> Id<IO>
where IO: IOMechanism
{
Id::new(&format!("{}:{}.{}@link.mozilla.org", prefix, operation, service_id))
}
fn get_bytes(url: String) -> Result<Vec<u8>, Error> {
let client = hyper::Client::new();
let get_result = client.get(&url)
.header(
Authorization(
Basic {
username: CAMERA_USERNAME.to_owned(),
password: Some(CAMERA_PASSWORD.to_owned())
}
)
)
.header(Connection::close())
.send();
let mut res = match get_result {
Ok(res) => res,
Err(err) => {
warn!("GET on {} failed: {}", url, err);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
if res.status != self::hyper::status::StatusCode::Ok {
warn!("GET on {} failed: {}", url, res.status);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
let mut image = Vec::new();
match res.read_to_end(&mut image) {
Ok(_) => Ok(image),
Err(err) => {
warn!("read of image data from {} failed: {}", url, err);
Err(Error::InternalError(InternalError::InvalidInitialService))
}
}
}
#[derive(Clone)]
pub struct IpCamera {
pub udn: String,
url: String,
snapshot_dir: String,
pub image_list_id: Id<Getter>,
pub image_newest_id: Id<Getter>,
pub snapshot_id: Id<Setter>,
}
impl IpCamera {
pub fn new(udn: &str, url: &str, root_snapshot_dir: &str) -> Result<Self, Error> {
let camera = IpCamera {
udn: udn.to_owned(),
url: url.to_owned(),
image_list_id: create_getter_id("image_list", &udn),
image_newest_id: create_getter_id("image_newest", &udn),
snapshot_id: create_setter_id("snapshot", &udn),
snapshot_dir: format!("{}/{}", root_snapshot_dir, udn)
};
// Create a directory to store snapshots for this camera.
if let Err(err) = fs::create_dir_all(&camera.snapshot_dir) {
if err.kind() != ErrorKind::AlreadyExists {
error!("Unable to create directory {}: {}", camera.snapshot_dir, err);
return Err(Error::InternalError(InternalError::GenericError(format!("cannot create {}", camera.snapshot_dir))));
}
}
Ok(camera)
}
pub fn get_image_list(&self) -> Vec<String> {
let mut array: Vec<String> = vec!();
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
array.push(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
array
}
pub fn get_image(&self, filename: &str) -> Result<Vec<u8>, Error> {
let full_filename = format!("{}/{}", self.snapshot_dir, filename);
debug!("get_image: filename = {}", full_filename.clone());
let mut options = fs::OpenOptions::new();
options.read(true);
if let Ok(mut image_file) = options.open(full_filename.clone()) {
let mut image = Vec::new();
if let Ok(_) = image_file.read_to_end(&mut image) {
return Ok(image);
}
warn!("Error reading {}", full_filename);
} else {
warn!("Image {} not found", full_filename);
}
Err(Error::InternalError(InternalError::InvalidInitialService))
}
pub fn get_newest_image(&self) -> Result<Vec<u8>, Error> {
let mut newest_image_time = 0;
let mut newest_image = None;
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
let time = metadata.ctime();
if newest_image_time <= time {
newest_image_time = time;
newest_image = Some(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
}
if newest_image.is_none() {
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
self.get_image(&newest_image.unwrap())
}
pub fn take_snapshot(&self) -> Result<String, Error> {
let image_url = "image/jpeg.cgi";
let url = format!("{}/{}", self.url, image_url);
let image = match get_bytes(url) {
Ok(image) => image,
Err(err) => {
warn!("Error '{:?}' retrieving image from camera {}", err, self.url);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
let mut options = fs::OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
let filename_base = time::strftime("%Y-%m-%d-%H%M%S", &time::now()).unwrap();
let mut full_filename;
let image_file;
let mut loop_count = 0;
let mut filename;
loop {
if loop_count == 0 {
filename = filename_base.clone();
} else {
filename = format!("{}-{}", filename_base, loop_count);
}
full_filename = format!("{}/{}.jpg", self.snapshot_dir, filename);
if fs::metadata(full_filename.clone()).is_ok() {
// File exists
loop_count += 1;
continue;
}
image_file = match options.open(full_filename.clone()) {
Ok(file) => file,
Err(err) => {
warn!("Unable to open {}: {:?}", full_filename, err.kind());
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
break;
}
let mut writer = BufWriter::new(&image_file);
match writer.write_all(&image) {
Ok(_) => {}
Err(err) => {
warn!("Error '{:?}' writing snapshot.jpg for camera {}", err, self.udn);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
}
debug!("Took a snapshot from {}: {}", self.udn, full_filename);
Ok(format!("{}.jpg", filename))
}
}
| {
Id::new(&format!("service:{}@link.mozilla.org", service_id))
} | identifier_body |
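// A brief usage sketch of the IpCamera adapter above; the UDN, base URL and
// snapshot root below are placeholder values, not taken from a real setup.
fn example_camera_flow() -> Result<(), Error> {
    let camera = IpCamera::new(
        "uuid:0123-4567-89ab",        // placeholder UDN
        "http://192.168.1.50",        // placeholder camera base URL
        "/var/lib/foxbox/snapshots",  // placeholder snapshot root
    )?;
    // take_snapshot() stores a JPEG under the camera's directory and returns
    // its file name, which feeds straight back into get_image().
    let filename = camera.take_snapshot()?;
    let _frame = camera.get_image(&filename)?;
    let _latest = camera.get_newest_image()?;
    println!("stored snapshots: {:?}", camera.get_image_list());
    Ok(())
}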
lib.rs | // option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A lightweight logging facade.
//!
//! A logging facade provides a single logging API that abstracts over the
//! actual logging implementation. Libraries can use the logging API provided
//! by this crate, and the consumer of those libraries can choose the logging
//! framework that is most suitable for its use case.
//!
//! If no logging implementation is selected, the facade falls back to a "noop"
//! implementation that ignores all log messages. The overhead in this case
//! is very small - just an integer load, comparison and jump.
//!
//! A log request consists of a target, a level, and a body. A target is a
//! string which defaults to the module path of the location of the log
//! request, though that default may be overridden. Logger implementations
//! typically use the target to filter requests based on some user
//! configuration.
//!
//! # Use
//!
//! ## In libraries
//!
//! Libraries should link only to the `log` crate, and use the provided
//! macros to log whatever information will be useful to downstream consumers.
//!
//! ### Examples
//!
//! ```rust
//! # #![allow(unstable)]
//! #[macro_use]
//! extern crate log;
//!
//! # #[derive(Debug)] pub struct Yak(String);
//! # impl Yak { fn shave(&self, _: u32) {} }
//! # fn find_a_razor() -> Result<u32, u32> { Ok(1) }
//! pub fn shave_the_yak(yak: &Yak) {
//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak);
//!
//! loop {
//! match find_a_razor() {
//! Ok(razor) => {
//! info!("Razor located: {}", razor);
//! yak.shave(razor);
//! break;
//! }
//! Err(err) => {
//! warn!("Unable to locate a razor: {}, retrying", err);
//! }
//! }
//! }
//! }
//! # fn main() {}
//! ```
//!
//! ## In executables
//!
//! Executables should choose a logging framework and initialize it early in the
//! runtime of the program. Logging frameworks will typically include a
//! function to do this. Any log messages generated before the framework is
//! initialized will be ignored.
//!
//! The executable itself may use the `log` crate to log as well.
//!
//! ### Warning
//!
//! The logging system may only be initialized once.
//!
//! ### Examples
//!
//! ```rust,ignore
//! #[macro_use]
//! extern crate log;
//! extern crate my_logger;
//!
//! fn main() {
//! my_logger::init();
//!
//! info!("starting up");
//!
//! //...
//! }
//! ```
//!
//! # Logger implementations
//!
//! Loggers implement the `Log` trait. Here's a very basic example that simply
//! logs all messages at the `Error`, `Warn` or `Info` levels to stdout:
//!
//! ```rust
//! extern crate log;
//!
//! use log::{LogRecord, LogLevel, LogMetadata};
//!
//! struct SimpleLogger;
//!
//! impl log::Log for SimpleLogger {
//! fn enabled(&self, metadata: &LogMetadata) -> bool {
//! metadata.level() <= LogLevel::Info
//! }
//!
//! fn log(&self, record: &LogRecord) {
//! if self.enabled(record.metadata()) {
//! println!("{} - {}", record.level(), record.args());
//! }
//! }
//! }
//!
//! # fn main() {}
//! ```
//!
//! Loggers are installed by calling the `set_logger` function. It takes a
//! closure which is provided a `MaxLogLevel` token and returns a `Log` trait
//! object. The `MaxLogLevel` token controls the global maximum log level. The
//! logging facade uses this as an optimization to improve performance of log
//! messages at levels that are disabled. In the case of our example logger,
//! we'll want to set the maximum log level to `Info`, since we ignore any
//! `Debug` or `Trace` level log messages. A logging framework should provide a
//! function that wraps a call to `set_logger`, handling initialization of the
//! logger:
//!
//! ```rust
//! # extern crate log;
//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, LogMetadata};
//! # struct SimpleLogger;
//! # impl log::Log for SimpleLogger {
//! # fn enabled(&self, _: &LogMetadata) -> bool { false }
//! # fn log(&self, _: &log::LogRecord) {}
//! # }
//! # fn main() {}
//! pub fn init() -> Result<(), SetLoggerError> {
//! log::set_logger(|max_log_level| {
//! max_log_level.set(LogLevelFilter::Info);
//! Box::new(SimpleLogger)
//! })
//! }
//! ```
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/log/")]
#![warn(missing_docs)]
extern crate libc;
use std::ascii::AsciiExt;
use std::cmp;
use std::error;
use std::fmt;
use std::mem;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
mod macros;
// The setup here is a bit weird to make at_exit work.
//
// There are four different states that we care about: the logger's
// uninitialized, the logger's initializing (set_logger's been called but
// LOGGER hasn't actually been set yet), the logger's active, or the logger's
// shutting down inside of at_exit.
//
// The LOGGER static is normally a Box<Box<Log>> with some special possible
// values as well. The uninitialized and initializing states are represented by
// the values 0 and 1 respectively. The shutting down state is also represented
// by 1. Any other value is a valid pointer to the logger.
//
// The at_exit routine needs to make sure that no threads are actively logging
// when it deallocates the logger. The number of actively logging threads is
// tracked in the REFCOUNT static. The routine first sets LOGGER back to 1.
// All logging calls past that point will immediately return without accessing
// the logger. At that point, the at_exit routine just waits for the refcount
// to reach 0 before deallocating the logger. Note that the refcount does not
// necessarily monotonically decrease at this point, as new log calls still
// increment and decrement it, but the interval in between is small enough that
// the wait is really just for the active log calls to finish.
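//
// A minimal illustrative sketch of the lifecycle those two statics support
// (`MyLogger` is an assumed placeholder type, not part of this crate):
//
//     // LOGGER: UNINITIALIZED -> INITIALIZING -> pointer to a Box<Box<Log>>
//     let _ = log::set_logger(|max| {
//         max.set(LogLevelFilter::Info);
//         Box::new(MyLogger)
//     });
//
//     info!("hello");   // each call bumps REFCOUNT, logs, then drops the guard
//
//     // at process exit, shutdown() swaps LOGGER back to INITIALIZING and
//     // waits for REFCOUNT to reach 0 before freeing the logger box.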
static LOGGER: AtomicUsize = ATOMIC_USIZE_INIT;
static REFCOUNT: AtomicUsize = ATOMIC_USIZE_INIT;
const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO",
"DEBUG", "TRACE"];
/// An enum representing the available verbosity levels of the logging framework
///
/// A `LogLevel` may be compared directly to a `LogLevelFilter`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevel {
/// The "error" level.
///
/// Designates very serious errors.
Error = 1, // This way these line up with the discriminants for LogLevelFilter below
/// The "warn" level.
///
/// Designates hazardous situations.
Warn,
/// The "info" level.
///
/// Designates useful information.
Info,
/// The "debug" level.
///
/// Designates lower priority information.
Debug,
/// The "trace" level.
///
/// Designates very low priority, often extremely verbose, information.
Trace,
}
impl Clone for LogLevel {
#[inline]
fn clone(&self) -> LogLevel {
*self
}
}
impl PartialEq for LogLevel {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevelFilter> for LogLevel {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialOrd for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevelFilter> for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some((*self as usize).cmp(&(*other as usize)))
}
}
impl Ord for LogLevel {
#[inline]
fn cmp(&self, other: &LogLevel) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
fn ok_or<T, E>(t: Option<T>, e: E) -> Result<T, E> {
match t {
Some(t) => Ok(t),
None => Err(e),
}
}
impl FromStr for LogLevel {
type Err = ();
fn from_str(level: &str) -> Result<LogLevel, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.into_iter()
                    .filter(|&idx| idx != 0)
.map(|idx| LogLevel::from_usize(idx).unwrap())
.next(), ())
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.pad(LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevel {
fn from_usize(u: usize) -> Option<LogLevel> {
match u {
1 => Some(LogLevel::Error),
2 => Some(LogLevel::Warn),
3 => Some(LogLevel::Info),
4 => Some(LogLevel::Debug),
5 => Some(LogLevel::Trace),
_ => None
}
}
/// Returns the most verbose logging level.
#[inline]
pub fn max() -> LogLevel {
LogLevel::Trace
}
/// Converts the `LogLevel` to the equivalent `LogLevelFilter`.
#[inline]
pub fn to_log_level_filter(&self) -> LogLevelFilter {
LogLevelFilter::from_usize(*self as usize).unwrap()
}
}
/// An enum representing the available verbosity level filters of the logging
/// framework.
///
/// A `LogLevelFilter` may be compared directly to a `LogLevel`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevelFilter {
/// A level lower than all log levels.
Off,
/// Corresponds to the `Error` log level.
Error,
/// Corresponds to the `Warn` log level.
Warn,
/// Corresponds to the `Info` log level.
Info,
/// Corresponds to the `Debug` log level.
Debug,
/// Corresponds to the `Trace` log level.
Trace,
}
// Deriving generates terrible impls of these traits
impl Clone for LogLevelFilter {
#[inline]
fn clone(&self) -> LogLevelFilter {
*self
}
}
impl PartialEq for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevel> for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
other.eq(self)
}
}
impl PartialOrd for LogLevelFilter { | }
}
impl PartialOrd<LogLevel> for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
other.partial_cmp(self).map(|x| x.reverse())
}
}
impl Ord for LogLevelFilter {
#[inline]
fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
impl FromStr for LogLevelFilter {
type Err = ();
fn from_str(level: &str) -> Result<LogLevelFilter, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.map(|p| LogLevelFilter::from_usize(p).unwrap()), ())
}
}
impl fmt::Display for LogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevelFilter {
fn from_usize(u: usize) -> Option<LogLevelFilter> {
match u {
0 => Some(LogLevelFilter::Off),
1 => Some(LogLevelFilter::Error),
2 => Some(LogLevelFilter::Warn),
3 => Some(LogLevelFilter::Info),
4 => Some(LogLevelFilter::Debug),
5 => Some(LogLevelFilter::Trace),
_ => None
}
}
/// Returns the most verbose logging level filter.
#[inline]
pub fn max() -> LogLevelFilter {
LogLevelFilter::Trace
}
/// Converts `self` to the equivalent `LogLevel`.
///
/// Returns `None` if `self` is `LogLevelFilter::Off`.
#[inline]
pub fn to_log_level(&self) -> Option<LogLevel> {
LogLevel::from_usize(*self as usize)
}
}
/// The "payload" of a log message.
pub struct LogRecord<'a> {
metadata: LogMetadata<'a>,
location: &'a LogLocation,
args: fmt::Arguments<'a>,
}
impl<'a> LogRecord<'a> {
/// The message body.
pub fn args(&self) -> &fmt::Arguments<'a> {
&self.args
}
/// Metadata about the log directive.
pub fn metadata(&self) -> &LogMetadata {
&self.metadata
}
/// The location of the log directive.
pub fn location(&self) -> &LogLocation {
self.location
}
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.metadata.level()
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.metadata.target()
}
}
/// Metadata about a log message.
pub struct LogMetadata<'a> {
level: LogLevel,
target: &'a str,
}
impl<'a> LogMetadata<'a> {
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.level
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.target
}
}
/// A trait encapsulating the operations required of a logger
pub trait Log: Sync+Send {
/// Determines if a log message with the specified metadata would be
/// logged.
///
/// This is used by the `log_enabled!` macro to allow callers to avoid
/// expensive computation of log message arguments if the message would be
/// discarded anyway.
fn enabled(&self, metadata: &LogMetadata) -> bool;
/// Logs the `LogRecord`.
///
/// Note that `enabled` is *not* necessarily called before this method.
/// Implementations of `log` should perform all necessary filtering
/// internally.
fn log(&self, record: &LogRecord);
}
/// The location of a log message.
///
/// # Warning
///
/// The fields of this struct are public so that they may be initialized by the
/// `log!` macro. They are subject to change at any time and should never be
/// accessed directly.
#[derive(Copy, Clone, Debug)]
pub struct LogLocation {
#[doc(hidden)]
pub __module_path: &'static str,
#[doc(hidden)]
pub __file: &'static str,
#[doc(hidden)]
pub __line: u32,
}
impl LogLocation {
/// The module path of the message.
pub fn module_path(&self) -> &str {
self.__module_path
}
/// The source file containing the message.
pub fn file(&self) -> &str {
self.__file
}
/// The line containing the message.
pub fn line(&self) -> u32 {
self.__line
}
}
/// A token providing read and write access to the global maximum log level
/// filter.
///
/// The maximum log level is used as an optimization to avoid evaluating log
/// messages that will be ignored by the logger. Any message with a level
/// higher than the maximum log level filter will be ignored. A logger should
/// make sure to keep the maximum log level filter in sync with its current
/// configuration.
#[allow(missing_copy_implementations)]
pub struct MaxLogLevelFilter(());
impl fmt::Debug for MaxLogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "MaxLogLevelFilter")
}
}
impl MaxLogLevelFilter {
/// Gets the current maximum log level filter.
pub fn get(&self) -> LogLevelFilter {
max_log_level()
}
/// Sets the maximum log level.
pub fn set(&self, level: LogLevelFilter) {
MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst)
}
}
/// Returns the current maximum log level.
///
/// The `log!`, `error!`, `warn!`, `info!`, `debug!`, and `trace!` macros check
/// this value and discard any message logged at a higher level. The maximum
/// log level is set by the `MaxLogLevel` token passed to loggers.
#[inline(always)]
pub fn max_log_level() -> LogLevelFilter {
unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) }
}
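// A rough sketch of the cheap check this value enables; the real expansion
// lives in the `log!` macro (macros.rs) and may differ in detail:
//
//     if LogLevel::Debug <= log::max_log_level() {
//         // only now are the format arguments evaluated and the logger consulted
//     }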
/// Sets the global logger.
///
/// The `make_logger` closure is passed a `MaxLogLevel` object, which the
/// logger should use to keep the global maximum log level in sync with the
/// highest log level that the logger will not ignore.
///
/// This function may only be called once in the lifetime of a program. Any log
/// events that occur before the call to `set_logger` completes will be
/// ignored.
///
/// This function does not typically need to be called manually. Logger
/// implementations should provide an initialization method that calls
/// `set_logger` internally.
pub fn set_logger<M>(make_logger: M) -> Result<(), SetLoggerError>
where M: FnOnce(MaxLogLevelFilter) -> Box<Log> {
if LOGGER.compare_and_swap(UNINITIALIZED, INITIALIZING,
                               Ordering::SeqCst) != UNINITIALIZED {
return Err(SetLoggerError(()));
}
let logger = Box::new(make_logger(MaxLogLevelFilter(())));
let logger = unsafe { mem::transmute::<Box<Box<Log>>, usize>(logger) };
LOGGER.store(logger, Ordering::SeqCst);
unsafe {
assert_eq!(libc::atexit(shutdown), 0);
}
return Ok(());
extern fn shutdown() {
// Set to INITIALIZING to prevent re-initialization after
let logger = LOGGER.swap(INITIALIZING, Ordering::SeqCst);
        while REFCOUNT.load(Ordering::SeqCst) != 0 {
// FIXME add a sleep here when it doesn't involve timers
}
unsafe { mem::transmute::<usize, Box<Box<Log>>>(logger); }
}
}
/// The type returned by `set_logger` if `set_logger` has already been called.
#[allow(missing_copy_implementations)]
#[derive(Debug)]
pub struct SetLoggerError(());
impl fmt::Display for SetLoggerError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "attempted to set a logger after the logging system \
was already initialized")
}
}
impl error::Error for SetLoggerError {
fn description(&self) -> &str { "set_logger() called multiple times" }
}
struct LoggerGuard(usize);
impl Drop for LoggerGuard {
fn drop(&mut self) {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
}
}
impl Deref for LoggerGuard {
type Target = Box<Log>;
fn deref(&self) -> &Box<Log+'static> {
unsafe { mem::transmute(self.0) }
}
}
fn logger() -> Option<LoggerGuard> {
REFCOUNT.fetch_add(1, Ordering::SeqCst);
let logger = LOGGER.load(Ordering::SeqCst);
if logger == UNINITIALIZED || logger == INITIALIZING {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
None
} else {
Some(LoggerGuard(logger))
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __enabled(level: LogLevel, target: &str) -> bool {
if let Some(logger) = logger() {
logger.enabled(&LogMetadata { level: level, target: target })
} else {
false
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __log(level: LogLevel, target: &str, loc: &LogLocation,
args: fmt::Arguments) {
if let Some(logger) = logger() {
let record = LogRecord {
metadata: LogMetadata {
level: level,
target: target,
},
location: loc,
args: args
};
logger.log(&record)
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use super::{LogLevel, LogLevelFilter, SetLoggerError};
#[test]
fn test_loglevelfilter_from_str() {
let tests = [
("off", Ok(LogLevelFilter::Off)),
("error", Ok(LogLevelFilter::Error)),
("warn", Ok(LogLevelFilter::Warn)),
("info", Ok(LogLevelFilter::Info)),
("debug", Ok(LogLevelFilter::Debug)),
("trace", Ok(LogLevelFilter::Trace)),
("OFF", Ok(LogLevelFilter::Off)),
("ERROR", Ok(LogLevelFilter::Error)),
("WARN", Ok(LogLevelFilter::Warn)),
("INFO", Ok(LogLevelFilter::Info)),
("DEBUG", Ok(LogLevelFilter::Debug)),
("TRACE", Ok(LogLevelFilter::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_from_str() {
let tests = [
("OFF", Err(())),
("error", Ok(LogLevel::Error)),
("warn", Ok(LogLevel::Warn)),
("info", Ok(LogLevel::Info)),
("debug", Ok(LogLevel::Debug)),
("trace", Ok(LogLevel::Trace)),
("ERROR", Ok(LogLevel::Error)),
("WARN", Ok(LogLevel::Warn)),
("INFO", Ok(LogLevel::Info)),
("DEBUG", Ok(LogLevel::Debug)),
("TRACE", Ok(LogLevel::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_show() {
assert_eq!("INFO", LogLevel::Info.to_string());
assert_eq!("ERROR", LogLevel::Error.to_string());
}
#[test]
fn test_loglevelfilter_show() {
assert_eq!("OFF", LogLevelFilter::Off.to_string());
assert_eq!("ERROR", LogLevelFilter::Error.to_string());
}
#[test]
fn test_cross_cmp() {
assert!(LogLevel::Debug > LogLevelFilter::Error);
assert!(LogLevelFilter::Warn < LogLevel::Trace);
assert!(LogLevelFilter::Off < LogLevel::Error);
}
#[test]
fn test_cross_eq() {
assert!(LogLevel::Error == LogLevelFilter::Error);
        assert!(LogLevelFilter::Off != LogLevel::Error);
assert!(LogLevel::Trace == LogLevelFilter::Trace);
}
#[test]
fn test_to_log_level() {
assert_eq!(Some(LogLevel::Error), LogLevelFilter::Error.to_log_level());
assert | #[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some(self.cmp(other)) | random_line_split |
lib.rs | option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A lightweight logging facade.
//!
//! A logging facade provides a single logging API that abstracts over the
//! actual logging implementation. Libraries can use the logging API provided
//! by this crate, and the consumer of those libraries can choose the logging
//! framework that is most suitable for its use case.
//!
//! If no logging implementation is selected, the facade falls back to a "noop"
//! implementation that ignores all log messages. The overhead in this case
//! is very small - just an integer load, comparison and jump.
//!
//! A log request consists of a target, a level, and a body. A target is a
//! string which defaults to the module path of the location of the log
//! request, though that default may be overridden. Logger implementations
//! typically use the target to filter requests based on some user
//! configuration.
//!
//! # Use
//!
//! ## In libraries
//!
//! Libraries should link only to the `log` crate, and use the provided
//! macros to log whatever information will be useful to downstream consumers.
//!
//! ### Examples
//!
//! ```rust
//! # #![allow(unstable)]
//! #[macro_use]
//! extern crate log;
//!
//! # #[derive(Debug)] pub struct Yak(String);
//! # impl Yak { fn shave(&self, _: u32) {} }
//! # fn find_a_razor() -> Result<u32, u32> { Ok(1) }
//! pub fn shave_the_yak(yak: &Yak) {
//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak);
//!
//! loop {
//! match find_a_razor() {
//! Ok(razor) => {
//! info!("Razor located: {}", razor);
//! yak.shave(razor);
//! break;
//! }
//! Err(err) => {
//! warn!("Unable to locate a razor: {}, retrying", err);
//! }
//! }
//! }
//! }
//! # fn main() {}
//! ```
//!
//! ## In executables
//!
//! Executables should choose a logging framework and initialize it early in the
//! runtime of the program. Logging frameworks will typically include a
//! function to do this. Any log messages generated before the framework is
//! initialized will be ignored.
//!
//! The executable itself may use the `log` crate to log as well.
//!
//! ### Warning
//!
//! The logging system may only be initialized once.
//!
//! ### Examples
//!
//! ```rust,ignore
//! #[macro_use]
//! extern crate log;
//! extern crate my_logger;
//!
//! fn main() {
//! my_logger::init();
//!
//! info!("starting up");
//!
//! //...
//! }
//! ```
//!
//! # Logger implementations
//!
//! Loggers implement the `Log` trait. Here's a very basic example that simply
//! logs all messages at the `Error`, `Warn` or `Info` levels to stdout:
//!
//! ```rust
//! extern crate log;
//!
//! use log::{LogRecord, LogLevel, LogMetadata};
//!
//! struct SimpleLogger;
//!
//! impl log::Log for SimpleLogger {
//! fn enabled(&self, metadata: &LogMetadata) -> bool {
//! metadata.level() <= LogLevel::Info
//! }
//!
//! fn log(&self, record: &LogRecord) {
//! if self.enabled(record.metadata()) {
//! println!("{} - {}", record.level(), record.args());
//! }
//! }
//! }
//!
//! # fn main() {}
//! ```
//!
//! Loggers are installed by calling the `set_logger` function. It takes a
//! closure which is provided a `MaxLogLevel` token and returns a `Log` trait
//! object. The `MaxLogLevel` token controls the global maximum log level. The
//! logging facade uses this as an optimization to improve performance of log
//! messages at levels that are disabled. In the case of our example logger,
//! we'll want to set the maximum log level to `Info`, since we ignore any
//! `Debug` or `Trace` level log messages. A logging framework should provide a
//! function that wraps a call to `set_logger`, handling initialization of the
//! logger:
//!
//! ```rust
//! # extern crate log;
//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, LogMetadata};
//! # struct SimpleLogger;
//! # impl log::Log for SimpleLogger {
//! # fn enabled(&self, _: &LogMetadata) -> bool { false }
//! # fn log(&self, _: &log::LogRecord) {}
//! # }
//! # fn main() {}
//! pub fn init() -> Result<(), SetLoggerError> {
//! log::set_logger(|max_log_level| {
//! max_log_level.set(LogLevelFilter::Info);
//! Box::new(SimpleLogger)
//! })
//! }
//! ```
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/log/")]
#![warn(missing_docs)]
extern crate libc;
use std::ascii::AsciiExt;
use std::cmp;
use std::error;
use std::fmt;
use std::mem;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
mod macros;
// The setup here is a bit weird to make at_exit work.
//
// There are four different states that we care about: the logger's
// uninitialized, the logger's initializing (set_logger's been called but
// LOGGER hasn't actually been set yet), the logger's active, or the logger's
// shutting down inside of at_exit.
//
// The LOGGER static is normally a Box<Box<Log>> with some special possible
// values as well. The uninitialized and initializing states are represented by
// the values 0 and 1 respectively. The shutting down state is also represented
// by 1. Any other value is a valid pointer to the logger.
//
// The at_exit routine needs to make sure that no threads are actively logging
// when it deallocates the logger. The number of actively logging threads is
// tracked in the REFCOUNT static. The routine first sets LOGGER back to 1.
// All logging calls past that point will immediately return without accessing
// the logger. At that point, the at_exit routine just waits for the refcount
// to reach 0 before deallocating the logger. Note that the refcount does not
// necessarily monotonically decrease at this point, as new log calls still
// increment and decrement it, but the interval in between is small enough that
// the wait is really just for the active log calls to finish.
static LOGGER: AtomicUsize = ATOMIC_USIZE_INIT;
static REFCOUNT: AtomicUsize = ATOMIC_USIZE_INIT;
const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO",
"DEBUG", "TRACE"];
/// An enum representing the available verbosity levels of the logging framework
///
/// A `LogLevel` may be compared directly to a `LogLevelFilter`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevel {
/// The "error" level.
///
/// Designates very serious errors.
Error = 1, // This way these line up with the discriminants for LogLevelFilter below
/// The "warn" level.
///
/// Designates hazardous situations.
Warn,
/// The "info" level.
///
/// Designates useful information.
Info,
/// The "debug" level.
///
/// Designates lower priority information.
Debug,
/// The "trace" level.
///
/// Designates very low priority, often extremely verbose, information.
Trace,
}
impl Clone for LogLevel {
#[inline]
fn clone(&self) -> LogLevel {
*self
}
}
impl PartialEq for LogLevel {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevelFilter> for LogLevel {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialOrd for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevelFilter> for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some((*self as usize).cmp(&(*other as usize)))
}
}
impl Ord for LogLevel {
#[inline]
fn cmp(&self, other: &LogLevel) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
fn ok_or<T, E>(t: Option<T>, e: E) -> Result<T, E> {
match t {
Some(t) => Ok(t),
None => Err(e),
}
}
impl FromStr for LogLevel {
type Err = ();
fn from_str(level: &str) -> Result<LogLevel, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.into_iter()
                    .filter(|&idx| idx != 0)
.map(|idx| LogLevel::from_usize(idx).unwrap())
.next(), ())
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.pad(LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevel {
fn from_usize(u: usize) -> Option<LogLevel> {
match u {
1 => Some(LogLevel::Error),
2 => Some(LogLevel::Warn),
3 => Some(LogLevel::Info),
4 => Some(LogLevel::Debug),
5 => Some(LogLevel::Trace),
_ => None
}
}
/// Returns the most verbose logging level.
#[inline]
pub fn max() -> LogLevel {
LogLevel::Trace
}
/// Converts the `LogLevel` to the equivalent `LogLevelFilter`.
#[inline]
pub fn to_log_level_filter(&self) -> LogLevelFilter {
LogLevelFilter::from_usize(*self as usize).unwrap()
}
}
/// An enum representing the available verbosity level filters of the logging
/// framework.
///
/// A `LogLevelFilter` may be compared directly to a `LogLevel`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevelFilter {
/// A level lower than all log levels.
Off,
/// Corresponds to the `Error` log level.
Error,
/// Corresponds to the `Warn` log level.
Warn,
/// Corresponds to the `Info` log level.
Info,
/// Corresponds to the `Debug` log level.
Debug,
/// Corresponds to the `Trace` log level.
Trace,
}
// Deriving generates terrible impls of these traits
impl Clone for LogLevelFilter {
#[inline]
fn clone(&self) -> LogLevelFilter {
*self
}
}
impl PartialEq for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevel> for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
other.eq(self)
}
}
impl PartialOrd for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevel> for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
other.partial_cmp(self).map(|x| x.reverse())
}
}
impl Ord for LogLevelFilter {
#[inline]
fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
impl FromStr for LogLevelFilter {
type Err = ();
fn from_str(level: &str) -> Result<LogLevelFilter, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.map(|p| LogLevelFilter::from_usize(p).unwrap()), ())
}
}
impl fmt::Display for LogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevelFilter {
fn from_usize(u: usize) -> Option<LogLevelFilter> {
match u {
0 => Some(LogLevelFilter::Off),
1 => Some(LogLevelFilter::Error),
2 => Some(LogLevelFilter::Warn),
3 => Some(LogLevelFilter::Info),
4 => Some(LogLevelFilter::Debug),
5 => Some(LogLevelFilter::Trace),
_ => None
}
}
/// Returns the most verbose logging level filter.
#[inline]
pub fn max() -> LogLevelFilter {
LogLevelFilter::Trace
}
/// Converts `self` to the equivalent `LogLevel`.
///
/// Returns `None` if `self` is `LogLevelFilter::Off`.
#[inline]
pub fn to_log_level(&self) -> Option<LogLevel> {
LogLevel::from_usize(*self as usize)
}
}
/// The "payload" of a log message.
pub struct LogRecord<'a> {
metadata: LogMetadata<'a>,
location: &'a LogLocation,
args: fmt::Arguments<'a>,
}
impl<'a> LogRecord<'a> {
/// The message body.
pub fn args(&self) -> &fmt::Arguments<'a> {
&self.args
}
/// Metadata about the log directive.
pub fn metadata(&self) -> &LogMetadata {
&self.metadata
}
/// The location of the log directive.
pub fn location(&self) -> &LogLocation {
self.location
}
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel |
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.metadata.target()
}
}
/// Metadata about a log message.
pub struct LogMetadata<'a> {
level: LogLevel,
target: &'a str,
}
impl<'a> LogMetadata<'a> {
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.level
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.target
}
}
/// A trait encapsulating the operations required of a logger
pub trait Log: Sync+Send {
/// Determines if a log message with the specified metadata would be
/// logged.
///
/// This is used by the `log_enabled!` macro to allow callers to avoid
/// expensive computation of log message arguments if the message would be
/// discarded anyway.
fn enabled(&self, metadata: &LogMetadata) -> bool;
/// Logs the `LogRecord`.
///
/// Note that `enabled` is *not* necessarily called before this method.
/// Implementations of `log` should perform all necessary filtering
/// internally.
fn log(&self, record: &LogRecord);
}
/// The location of a log message.
///
/// # Warning
///
/// The fields of this struct are public so that they may be initialized by the
/// `log!` macro. They are subject to change at any time and should never be
/// accessed directly.
#[derive(Copy, Clone, Debug)]
pub struct LogLocation {
#[doc(hidden)]
pub __module_path: &'static str,
#[doc(hidden)]
pub __file: &'static str,
#[doc(hidden)]
pub __line: u32,
}
impl LogLocation {
/// The module path of the message.
pub fn module_path(&self) -> &str {
self.__module_path
}
/// The source file containing the message.
pub fn file(&self) -> &str {
self.__file
}
/// The line containing the message.
pub fn line(&self) -> u32 {
self.__line
}
}
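// Illustrative only: the `log!` macro is expected to populate these fields with
// the standard location macros; the exact expansion below is an assumption:
//
//     static _LOC: LogLocation = LogLocation {
//         __module_path: module_path!(),
//         __file: file!(),
//         __line: line!(),
//     };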
/// A token providing read and write access to the global maximum log level
/// filter.
///
/// The maximum log level is used as an optimization to avoid evaluating log
/// messages that will be ignored by the logger. Any message with a level
/// higher than the maximum log level filter will be ignored. A logger should
/// make sure to keep the maximum log level filter in sync with its current
/// configuration.
#[allow(missing_copy_implementations)]
pub struct MaxLogLevelFilter(());
impl fmt::Debug for MaxLogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "MaxLogLevelFilter")
}
}
impl MaxLogLevelFilter {
/// Gets the current maximum log level filter.
pub fn get(&self) -> LogLevelFilter {
max_log_level()
}
/// Sets the maximum log level.
pub fn set(&self, level: LogLevelFilter) {
MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst)
}
}
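// A minimal sketch of a logger that kept its MaxLogLevelFilter token and
// adjusts filtering later, e.g. on a config reload (`set_verbosity` is a
// hypothetical helper, not part of this crate):
//
//     fn set_verbosity(filter: &MaxLogLevelFilter, verbose: bool) {
//         filter.set(if verbose { LogLevelFilter::Trace } else { LogLevelFilter::Warn });
//     }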
/// Returns the current maximum log level.
///
/// The `log!`, `error!`, `warn!`, `info!`, `debug!`, and `trace!` macros check
/// this value and discard any message logged at a higher level. The maximum
/// log level is set by the `MaxLogLevel` token passed to loggers.
#[inline(always)]
pub fn max_log_level() -> LogLevelFilter {
unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) }
}
/// Sets the global logger.
///
/// The `make_logger` closure is passed a `MaxLogLevel` object, which the
/// logger should use to keep the global maximum log level in sync with the
/// highest log level that the logger will not ignore.
///
/// This function may only be called once in the lifetime of a program. Any log
/// events that occur before the call to `set_logger` completes will be
/// ignored.
///
/// This function does not typically need to be called manually. Logger
/// implementations should provide an initialization method that calls
/// `set_logger` internally.
pub fn set_logger<M>(make_logger: M) -> Result<(), SetLoggerError>
where M: FnOnce(MaxLogLevelFilter) -> Box<Log> {
if LOGGER.compare_and_swap(UNINITIALIZED, INITIALIZING,
                               Ordering::SeqCst) != UNINITIALIZED {
return Err(SetLoggerError(()));
}
let logger = Box::new(make_logger(MaxLogLevelFilter(())));
let logger = unsafe { mem::transmute::<Box<Box<Log>>, usize>(logger) };
LOGGER.store(logger, Ordering::SeqCst);
unsafe {
assert_eq!(libc::atexit(shutdown), 0);
}
return Ok(());
extern fn shutdown() {
// Set to INITIALIZING to prevent re-initialization after
let logger = LOGGER.swap(INITIALIZING, Ordering::SeqCst);
        while REFCOUNT.load(Ordering::SeqCst) != 0 {
// FIXME add a sleep here when it doesn't involve timers
}
unsafe { mem::transmute::<usize, Box<Box<Log>>>(logger); }
}
}
/// The type returned by `set_logger` if `set_logger` has already been called.
#[allow(missing_copy_implementations)]
#[derive(Debug)]
pub struct SetLoggerError(());
impl fmt::Display for SetLoggerError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "attempted to set a logger after the logging system \
was already initialized")
}
}
impl error::Error for SetLoggerError {
fn description(&self) -> &str { "set_logger() called multiple times" }
}
struct LoggerGuard(usize);
impl Drop for LoggerGuard {
fn drop(&mut self) {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
}
}
impl Deref for LoggerGuard {
type Target = Box<Log>;
fn deref(&self) -> &Box<Log+'static> {
unsafe { mem::transmute(self.0) }
}
}
fn logger() -> Option<LoggerGuard> {
REFCOUNT.fetch_add(1, Ordering::SeqCst);
let logger = LOGGER.load(Ordering::SeqCst);
if logger == UNINITIALIZED || logger == INITIALIZING {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
None
} else {
Some(LoggerGuard(logger))
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __enabled(level: LogLevel, target: &str) -> bool {
if let Some(logger) = logger() {
logger.enabled(&LogMetadata { level: level, target: target })
} else {
false
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __log(level: LogLevel, target: &str, loc: &LogLocation,
args: fmt::Arguments) {
if let Some(logger) = logger() {
let record = LogRecord {
metadata: LogMetadata {
level: level,
target: target,
},
location: loc,
args: args
};
logger.log(&record)
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use super::{LogLevel, LogLevelFilter, SetLoggerError};
#[test]
fn test_loglevelfilter_from_str() {
let tests = [
("off", Ok(LogLevelFilter::Off)),
("error", Ok(LogLevelFilter::Error)),
("warn", Ok(LogLevelFilter::Warn)),
("info", Ok(LogLevelFilter::Info)),
("debug", Ok(LogLevelFilter::Debug)),
("trace", Ok(LogLevelFilter::Trace)),
("OFF", Ok(LogLevelFilter::Off)),
("ERROR", Ok(LogLevelFilter::Error)),
("WARN", Ok(LogLevelFilter::Warn)),
("INFO", Ok(LogLevelFilter::Info)),
("DEBUG", Ok(LogLevelFilter::Debug)),
("TRACE", Ok(LogLevelFilter::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_from_str() {
let tests = [
("OFF", Err(())),
("error", Ok(LogLevel::Error)),
("warn", Ok(LogLevel::Warn)),
("info", Ok(LogLevel::Info)),
("debug", Ok(LogLevel::Debug)),
("trace", Ok(LogLevel::Trace)),
("ERROR", Ok(LogLevel::Error)),
("WARN", Ok(LogLevel::Warn)),
("INFO", Ok(LogLevel::Info)),
("DEBUG", Ok(LogLevel::Debug)),
("TRACE", Ok(LogLevel::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_show() {
assert_eq!("INFO", LogLevel::Info.to_string());
assert_eq!("ERROR", LogLevel::Error.to_string());
}
#[test]
fn test_loglevelfilter_show() {
assert_eq!("OFF", LogLevelFilter::Off.to_string());
assert_eq!("ERROR", LogLevelFilter::Error.to_string());
}
#[test]
fn test_cross_cmp() {
assert!(LogLevel::Debug > LogLevelFilter::Error);
assert!(LogLevelFilter::Warn < LogLevel::Trace);
assert!(LogLevelFilter::Off < LogLevel::Error);
}
#[test]
fn test_cross_eq() {
assert!(LogLevel::Error == LogLevelFilter::Error);
        assert!(LogLevelFilter::Off != LogLevel::Error);
assert!(LogLevel::Trace == LogLevelFilter::Trace);
}
#[test]
fn test_to_log_level() {
assert_eq!(Some(LogLevel::Error), LogLevelFilter::Error.to_log_level());
| {
self.metadata.level()
} | identifier_body |
lib.rs | option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A lightweight logging facade.
//!
//! A logging facade provides a single logging API that abstracts over the
//! actual logging implementation. Libraries can use the logging API provided
//! by this crate, and the consumer of those libraries can choose the logging
//! framework that is most suitable for its use case.
//!
//! If no logging implementation is selected, the facade falls back to a "noop"
//! implementation that ignores all log messages. The overhead in this case
//! is very small - just an integer load, comparison and jump.
//!
//! A log request consists of a target, a level, and a body. A target is a
//! string which defaults to the module path of the location of the log
//! request, though that default may be overridden. Logger implementations
//! typically use the target to filter requests based on some user
//! configuration.
//!
//! # Use
//!
//! ## In libraries
//!
//! Libraries should link only to the `log` crate, and use the provided
//! macros to log whatever information will be useful to downstream consumers.
//!
//! ### Examples
//!
//! ```rust
//! # #![allow(unstable)]
//! #[macro_use]
//! extern crate log;
//!
//! # #[derive(Debug)] pub struct Yak(String);
//! # impl Yak { fn shave(&self, _: u32) {} }
//! # fn find_a_razor() -> Result<u32, u32> { Ok(1) }
//! pub fn shave_the_yak(yak: &Yak) {
//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak);
//!
//! loop {
//! match find_a_razor() {
//! Ok(razor) => {
//! info!("Razor located: {}", razor);
//! yak.shave(razor);
//! break;
//! }
//! Err(err) => {
//! warn!("Unable to locate a razor: {}, retrying", err);
//! }
//! }
//! }
//! }
//! # fn main() {}
//! ```
//!
//! ## In executables
//!
//! Executables should choose a logging framework and initialize it early in the
//! runtime of the program. Logging frameworks will typically include a
//! function to do this. Any log messages generated before the framework is
//! initialized will be ignored.
//!
//! The executable itself may use the `log` crate to log as well.
//!
//! ### Warning
//!
//! The logging system may only be initialized once.
//!
//! ### Examples
//!
//! ```rust,ignore
//! #[macro_use]
//! extern crate log;
//! extern crate my_logger;
//!
//! fn main() {
//! my_logger::init();
//!
//! info!("starting up");
//!
//! //...
//! }
//! ```
//!
//! # Logger implementations
//!
//! Loggers implement the `Log` trait. Here's a very basic example that simply
//! logs all messages at the `Error`, `Warn` or `Info` levels to stdout:
//!
//! ```rust
//! extern crate log;
//!
//! use log::{LogRecord, LogLevel, LogMetadata};
//!
//! struct SimpleLogger;
//!
//! impl log::Log for SimpleLogger {
//! fn enabled(&self, metadata: &LogMetadata) -> bool {
//! metadata.level() <= LogLevel::Info
//! }
//!
//! fn log(&self, record: &LogRecord) {
//! if self.enabled(record.metadata()) {
//! println!("{} - {}", record.level(), record.args());
//! }
//! }
//! }
//!
//! # fn main() {}
//! ```
//!
//! Loggers are installed by calling the `set_logger` function. It takes a
//! closure which is provided a `MaxLogLevel` token and returns a `Log` trait
//! object. The `MaxLogLevel` token controls the global maximum log level. The
//! logging facade uses this as an optimization to improve performance of log
//! messages at levels that are disabled. In the case of our example logger,
//! we'll want to set the maximum log level to `Info`, since we ignore any
//! `Debug` or `Trace` level log messages. A logging framework should provide a
//! function that wraps a call to `set_logger`, handling initialization of the
//! logger:
//!
//! ```rust
//! # extern crate log;
//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, LogMetadata};
//! # struct SimpleLogger;
//! # impl log::Log for SimpleLogger {
//! # fn enabled(&self, _: &LogMetadata) -> bool { false }
//! # fn log(&self, _: &log::LogRecord) {}
//! # }
//! # fn main() {}
//! pub fn init() -> Result<(), SetLoggerError> {
//! log::set_logger(|max_log_level| {
//! max_log_level.set(LogLevelFilter::Info);
//! Box::new(SimpleLogger)
//! })
//! }
//! ```
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/log/")]
#![warn(missing_docs)]
extern crate libc;
use std::ascii::AsciiExt;
use std::cmp;
use std::error;
use std::fmt;
use std::mem;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
mod macros;
// The setup here is a bit weird to make at_exit work.
//
// There are four different states that we care about: the logger's
// uninitialized, the logger's initializing (set_logger's been called but
// LOGGER hasn't actually been set yet), the logger's active, or the logger's
// shutting down inside of at_exit.
//
// The LOGGER static is normally a Box<Box<Log>> with some special possible
// values as well. The uninitialized and initializing states are represented by
// the values 0 and 1 respectively. The shutting down state is also represented
// by 1. Any other value is a valid pointer to the logger.
//
// The at_exit routine needs to make sure that no threads are actively logging
// when it deallocates the logger. The number of actively logging threads is
// tracked in the REFCOUNT static. The routine first sets LOGGER back to 1.
// All logging calls past that point will immediately return without accessing
// the logger. At that point, the at_exit routine just waits for the refcount
// to reach 0 before deallocating the logger. Note that the refcount does not
// necessarily monotonically decrease at this point, as new log calls still
// increment and decrement it, but the interval in between is small enough that
// the wait is really just for the active log calls to finish.
static LOGGER: AtomicUsize = ATOMIC_USIZE_INIT;
static REFCOUNT: AtomicUsize = ATOMIC_USIZE_INIT;
const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO",
"DEBUG", "TRACE"];
/// An enum representing the available verbosity levels of the logging framework
///
/// A `LogLevel` may be compared directly to a `LogLevelFilter`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevel {
/// The "error" level.
///
/// Designates very serious errors.
Error = 1, // This way these line up with the discriminants for LogLevelFilter below
/// The "warn" level.
///
/// Designates hazardous situations.
Warn,
/// The "info" level.
///
/// Designates useful information.
Info,
/// The "debug" level.
///
/// Designates lower priority information.
Debug,
/// The "trace" level.
///
/// Designates very low priority, often extremely verbose, information.
Trace,
}
impl Clone for LogLevel {
#[inline]
fn clone(&self) -> LogLevel {
*self
}
}
impl PartialEq for LogLevel {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevelFilter> for LogLevel {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialOrd for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevelFilter> for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some((*self as usize).cmp(&(*other as usize)))
}
}
impl Ord for LogLevel {
#[inline]
fn cmp(&self, other: &LogLevel) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
fn ok_or<T, E>(t: Option<T>, e: E) -> Result<T, E> {
match t {
Some(t) => Ok(t),
None => Err(e),
}
}
impl FromStr for LogLevel {
type Err = ();
fn from_str(level: &str) -> Result<LogLevel, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.into_iter()
                    .filter(|&idx| idx != 0)
.map(|idx| LogLevel::from_usize(idx).unwrap())
.next(), ())
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.pad(LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevel {
fn from_usize(u: usize) -> Option<LogLevel> {
match u {
1 => Some(LogLevel::Error),
2 => Some(LogLevel::Warn),
3 => Some(LogLevel::Info),
4 => Some(LogLevel::Debug),
5 => Some(LogLevel::Trace),
_ => None
}
}
/// Returns the most verbose logging level.
#[inline]
pub fn max() -> LogLevel {
LogLevel::Trace
}
/// Converts the `LogLevel` to the equivalent `LogLevelFilter`.
#[inline]
pub fn to_log_level_filter(&self) -> LogLevelFilter {
LogLevelFilter::from_usize(*self as usize).unwrap()
}
}
/// An enum representing the available verbosity level filters of the logging
/// framework.
///
/// A `LogLevelFilter` may be compared directly to a `LogLevel`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevelFilter {
/// A level lower than all log levels.
Off,
/// Corresponds to the `Error` log level.
Error,
/// Corresponds to the `Warn` log level.
Warn,
/// Corresponds to the `Info` log level.
Info,
/// Corresponds to the `Debug` log level.
Debug,
/// Corresponds to the `Trace` log level.
Trace,
}
// Deriving generates terrible impls of these traits
impl Clone for LogLevelFilter {
#[inline]
fn clone(&self) -> LogLevelFilter {
*self
}
}
impl PartialEq for LogLevelFilter {
#[inline]
fn | (&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevel> for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
other.eq(self)
}
}
impl PartialOrd for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevel> for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
other.partial_cmp(self).map(|x| x.reverse())
}
}
impl Ord for LogLevelFilter {
#[inline]
fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
impl FromStr for LogLevelFilter {
type Err = ();
fn from_str(level: &str) -> Result<LogLevelFilter, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.map(|p| LogLevelFilter::from_usize(p).unwrap()), ())
}
}
impl fmt::Display for LogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevelFilter {
fn from_usize(u: usize) -> Option<LogLevelFilter> {
match u {
0 => Some(LogLevelFilter::Off),
1 => Some(LogLevelFilter::Error),
2 => Some(LogLevelFilter::Warn),
3 => Some(LogLevelFilter::Info),
4 => Some(LogLevelFilter::Debug),
5 => Some(LogLevelFilter::Trace),
_ => None
}
}
/// Returns the most verbose logging level filter.
#[inline]
pub fn max() -> LogLevelFilter {
LogLevelFilter::Trace
}
/// Converts `self` to the equivalent `LogLevel`.
///
/// Returns `None` if `self` is `LogLevelFilter::Off`.
#[inline]
pub fn to_log_level(&self) -> Option<LogLevel> {
LogLevel::from_usize(*self as usize)
}
}
/// The "payload" of a log message.
pub struct LogRecord<'a> {
metadata: LogMetadata<'a>,
location: &'a LogLocation,
args: fmt::Arguments<'a>,
}
impl<'a> LogRecord<'a> {
/// The message body.
pub fn args(&self) -> &fmt::Arguments<'a> {
&self.args
}
/// Metadata about the log directive.
pub fn metadata(&self) -> &LogMetadata {
&self.metadata
}
/// The location of the log directive.
pub fn location(&self) -> &LogLocation {
self.location
}
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.metadata.level()
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.metadata.target()
}
}
/// Metadata about a log message.
pub struct LogMetadata<'a> {
level: LogLevel,
target: &'a str,
}
impl<'a> LogMetadata<'a> {
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.level
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.target
}
}
/// A trait encapsulating the operations required of a logger
pub trait Log: Sync+Send {
/// Determines if a log message with the specified metadata would be
/// logged.
///
/// This is used by the `log_enabled!` macro to allow callers to avoid
/// expensive computation of log message arguments if the message would be
/// discarded anyway.
fn enabled(&self, metadata: &LogMetadata) -> bool;
/// Logs the `LogRecord`.
///
/// Note that `enabled` is *not* necessarily called before this method.
/// Implementations of `log` should perform all necessary filtering
/// internally.
fn log(&self, record: &LogRecord);
}
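// Hypothetical sketch of an `enabled` implementation that filters on both
// pieces of metadata; the "my_app" target prefix is an assumption made purely
// for illustration:
//
//     fn enabled(&self, metadata: &LogMetadata) -> bool {
//         metadata.level() <= LogLevel::Info && metadata.target().starts_with("my_app")
//     }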
/// The location of a log message.
///
/// # Warning
///
/// The fields of this struct are public so that they may be initialized by the
/// `log!` macro. They are subject to change at any time and should never be
/// accessed directly.
#[derive(Copy, Clone, Debug)]
pub struct LogLocation {
#[doc(hidden)]
pub __module_path: &'static str,
#[doc(hidden)]
pub __file: &'static str,
#[doc(hidden)]
pub __line: u32,
}
impl LogLocation {
/// The module path of the message.
pub fn module_path(&self) -> &str {
self.__module_path
}
/// The source file containing the message.
pub fn file(&self) -> &str {
self.__file
}
/// The line containing the message.
pub fn line(&self) -> u32 {
self.__line
}
}
/// A token providing read and write access to the global maximum log level
/// filter.
///
/// The maximum log level is used as an optimization to avoid evaluating log
/// messages that will be ignored by the logger. Any message with a level
/// higher than the maximum log level filter will be ignored. A logger should
/// make sure to keep the maximum log level filter in sync with its current
/// configuration.
#[allow(missing_copy_implementations)]
pub struct MaxLogLevelFilter(());
impl fmt::Debug for MaxLogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "MaxLogLevelFilter")
}
}
impl MaxLogLevelFilter {
/// Gets the current maximum log level filter.
pub fn get(&self) -> LogLevelFilter {
max_log_level()
}
/// Sets the maximum log level.
pub fn set(&self, level: LogLevelFilter) {
MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst)
}
}
/// Returns the current maximum log level.
///
/// The `log!`, `error!`, `warn!`, `info!`, `debug!`, and `trace!` macros check
/// this value and discard any message logged at a higher level. The maximum
/// log level is set by the `MaxLogLevel` token passed to loggers.
#[inline(always)]
pub fn max_log_level() -> LogLevelFilter {
unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) }
}
/// Sets the global logger.
///
/// The `make_logger` closure is passed a `MaxLogLevel` object, which the
/// logger should use to keep the global maximum log level in sync with the
/// highest log level that the logger will not ignore.
///
/// This function may only be called once in the lifetime of a program. Any log
/// events that occur before the call to `set_logger` completes will be
/// ignored.
///
/// This function does not typically need to be called manually. Logger
/// implementations should provide an initialization method that calls
/// `set_logger` internally.
pub fn set_logger<M>(make_logger: M) -> Result<(), SetLoggerError>
where M: FnOnce(MaxLogLevelFilter) -> Box<Log> {
if LOGGER.compare_and_swap(UNINITIALIZED, INITIALIZING,
                               Ordering::SeqCst) != UNINITIALIZED {
return Err(SetLoggerError(()));
}
let logger = Box::new(make_logger(MaxLogLevelFilter(())));
let logger = unsafe { mem::transmute::<Box<Box<Log>>, usize>(logger) };
LOGGER.store(logger, Ordering::SeqCst);
unsafe {
assert_eq!(libc::atexit(shutdown), 0);
}
return Ok(());
extern fn shutdown() {
// Set to INITIALIZING to prevent re-initialization after
let logger = LOGGER.swap(INITIALIZING, Ordering::SeqCst);
        while REFCOUNT.load(Ordering::SeqCst) != 0 {
// FIXME add a sleep here when it doesn't involve timers
}
unsafe { mem::transmute::<usize, Box<Box<Log>>>(logger); }
}
}
/// The type returned by `set_logger` if `set_logger` has already been called.
#[allow(missing_copy_implementations)]
#[derive(Debug)]
pub struct SetLoggerError(());
impl fmt::Display for SetLoggerError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "attempted to set a logger after the logging system \
was already initialized")
}
}
impl error::Error for SetLoggerError {
fn description(&self) -> &str { "set_logger() called multiple times" }
}
struct LoggerGuard(usize);
impl Drop for LoggerGuard {
fn drop(&mut self) {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
}
}
impl Deref for LoggerGuard {
type Target = Box<Log>;
fn deref(&self) -> &Box<Log+'static> {
unsafe { mem::transmute(self.0) }
}
}
fn logger() -> Option<LoggerGuard> {
REFCOUNT.fetch_add(1, Ordering::SeqCst);
let logger = LOGGER.load(Ordering::SeqCst);
if logger == UNINITIALIZED || logger == INITIALIZING {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
None
} else {
Some(LoggerGuard(logger))
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __enabled(level: LogLevel, target: &str) -> bool {
if let Some(logger) = logger() {
logger.enabled(&LogMetadata { level: level, target: target })
} else {
false
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __log(level: LogLevel, target: &str, loc: &LogLocation,
args: fmt::Arguments) {
if let Some(logger) = logger() {
let record = LogRecord {
metadata: LogMetadata {
level: level,
target: target,
},
location: loc,
args: args
};
logger.log(&record)
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use super::{LogLevel, LogLevelFilter, SetLoggerError};
#[test]
fn test_loglevelfilter_from_str() {
let tests = [
("off", Ok(LogLevelFilter::Off)),
("error", Ok(LogLevelFilter::Error)),
("warn", Ok(LogLevelFilter::Warn)),
("info", Ok(LogLevelFilter::Info)),
("debug", Ok(LogLevelFilter::Debug)),
("trace", Ok(LogLevelFilter::Trace)),
("OFF", Ok(LogLevelFilter::Off)),
("ERROR", Ok(LogLevelFilter::Error)),
("WARN", Ok(LogLevelFilter::Warn)),
("INFO", Ok(LogLevelFilter::Info)),
("DEBUG", Ok(LogLevelFilter::Debug)),
("TRACE", Ok(LogLevelFilter::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_from_str() {
let tests = [
("OFF", Err(())),
("error", Ok(LogLevel::Error)),
("warn", Ok(LogLevel::Warn)),
("info", Ok(LogLevel::Info)),
("debug", Ok(LogLevel::Debug)),
("trace", Ok(LogLevel::Trace)),
("ERROR", Ok(LogLevel::Error)),
("WARN", Ok(LogLevel::Warn)),
("INFO", Ok(LogLevel::Info)),
("DEBUG", Ok(LogLevel::Debug)),
("TRACE", Ok(LogLevel::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_show() {
assert_eq!("INFO", LogLevel::Info.to_string());
assert_eq!("ERROR", LogLevel::Error.to_string());
}
#[test]
fn test_loglevelfilter_show() {
assert_eq!("OFF", LogLevelFilter::Off.to_string());
assert_eq!("ERROR", LogLevelFilter::Error.to_string());
}
#[test]
fn test_cross_cmp() {
assert!(LogLevel::Debug > LogLevelFilter::Error);
assert!(LogLevelFilter::Warn < LogLevel::Trace);
assert!(LogLevelFilter::Off < LogLevel::Error);
}
#[test]
fn test_cross_eq() {
assert!(LogLevel::Error == LogLevelFilter::Error);
assert!(LogLevelFilter::Off != LogLevel::Error);
assert!(LogLevel::Trace == LogLevelFilter::Trace);
}
#[test]
fn test_to_log_level() {
assert_eq!(Some(LogLevel::Error), LogLevelFilter::Error.to_log_level());
| eq | identifier_name |
lib.rs | option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A lightweight logging facade.
//!
//! A logging facade provides a single logging API that abstracts over the
//! actual logging implementation. Libraries can use the logging API provided
//! by this crate, and the consumer of those libraries can choose the logging
//! framework that is most suitable for its use case.
//!
//! If no logging implementation is selected, the facade falls back to a "noop"
//! implementation that ignores all log messages. The overhead in this case
//! is very small - just an integer load, comparison and jump.
//!
//! A log request consists of a target, a level, and a body. A target is a
//! string which defaults to the module path of the location of the log
//! request, though that default may be overridden. Logger implementations
//! typically use the target to filter requests based on some user
//! configuration.
//!
//! # Use
//!
//! ## In libraries
//!
//! Libraries should link only to the `log` crate, and use the provided
//! macros to log whatever information will be useful to downstream consumers.
//!
//! ### Examples
//!
//! ```rust
//! # #![allow(unstable)]
//! #[macro_use]
//! extern crate log;
//!
//! # #[derive(Debug)] pub struct Yak(String);
//! # impl Yak { fn shave(&self, _: u32) {} }
//! # fn find_a_razor() -> Result<u32, u32> { Ok(1) }
//! pub fn shave_the_yak(yak: &Yak) {
//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak);
//!
//! loop {
//! match find_a_razor() {
//! Ok(razor) => {
//! info!("Razor located: {}", razor);
//! yak.shave(razor);
//! break;
//! }
//! Err(err) => {
//! warn!("Unable to locate a razor: {}, retrying", err);
//! }
//! }
//! }
//! }
//! # fn main() {}
//! ```
//!
//! ## In executables
//!
//! Executables should choose a logging framework and initialize it early in the
//! runtime of the program. Logging frameworks will typically include a
//! function to do this. Any log messages generated before the framework is
//! initialized will be ignored.
//!
//! The executable itself may use the `log` crate to log as well.
//!
//! ### Warning
//!
//! The logging system may only be initialized once.
//!
//! ### Examples
//!
//! ```rust,ignore
//! #[macro_use]
//! extern crate log;
//! extern crate my_logger;
//!
//! fn main() {
//! my_logger::init();
//!
//! info!("starting up");
//!
//! //...
//! }
//! ```
//!
//! # Logger implementations
//!
//! Loggers implement the `Log` trait. Here's a very basic example that simply
//! logs all messages at the `Error`, `Warn` or `Info` levels to stdout:
//!
//! ```rust
//! extern crate log;
//!
//! use log::{LogRecord, LogLevel, LogMetadata};
//!
//! struct SimpleLogger;
//!
//! impl log::Log for SimpleLogger {
//! fn enabled(&self, metadata: &LogMetadata) -> bool {
//! metadata.level() <= LogLevel::Info
//! }
//!
//! fn log(&self, record: &LogRecord) {
//! if self.enabled(record.metadata()) {
//! println!("{} - {}", record.level(), record.args());
//! }
//! }
//! }
//!
//! # fn main() {}
//! ```
//!
//! Loggers are installed by calling the `set_logger` function. It takes a
//! closure which is provided a `MaxLogLevelFilter` token and returns a `Log` trait
//! object. The `MaxLogLevelFilter` token controls the global maximum log level. The
//! logging facade uses this as an optimization to improve performance of log
//! messages at levels that are disabled. In the case of our example logger,
//! we'll want to set the maximum log level to `Info`, since we ignore any
//! `Debug` or `Trace` level log messages. A logging framework should provide a
//! function that wraps a call to `set_logger`, handling initialization of the
//! logger:
//!
//! ```rust
//! # extern crate log;
//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, LogMetadata};
//! # struct SimpleLogger;
//! # impl log::Log for SimpleLogger {
//! # fn enabled(&self, _: &LogMetadata) -> bool { false }
//! # fn log(&self, _: &log::LogRecord) {}
//! # }
//! # fn main() {}
//! pub fn init() -> Result<(), SetLoggerError> {
//! log::set_logger(|max_log_level| {
//! max_log_level.set(LogLevelFilter::Info);
//! Box::new(SimpleLogger)
//! })
//! }
//! ```
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/log/")]
#![warn(missing_docs)]
extern crate libc;
use std::ascii::AsciiExt;
use std::cmp;
use std::error;
use std::fmt;
use std::mem;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
mod macros;
// The setup here is a bit weird to make at_exit work.
//
// There are four different states that we care about: the logger's
// uninitialized, the logger's initializing (set_logger's been called but
// LOGGER hasn't actually been set yet), the logger's active, or the logger's
// shutting down inside of at_exit.
//
// The LOGGER static is normally a Box<Box<Log>> with some special possible
// values as well. The uninitialized and initializing states are represented by
// the values 0 and 1 respectively. The shutting down state is also represented
// by 1. Any other value is a valid pointer to the logger.
//
// The at_exit routine needs to make sure that no threads are actively logging
// when it deallocates the logger. The number of actively logging threads is
// tracked in the REFCOUNT static. The routine first sets LOGGER back to 1.
// All logging calls past that point will immediately return without accessing
// the logger. At that point, the at_exit routine just waits for the refcount
// to reach 0 before deallocating the logger. Note that the refcount does not
// necessarily monotonically decrease at this point, as new log calls still
// increment and decrement it, but the interval in between is small enough that
// the wait is really just for the active log calls to finish.
static LOGGER: AtomicUsize = ATOMIC_USIZE_INIT;
static REFCOUNT: AtomicUsize = ATOMIC_USIZE_INIT;
const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
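// Illustrative helper, not part of the original crate: the state check
// described in the comment above, written out as code. Treating both
// UNINITIALIZED (0) and INITIALIZING (1, also the shutdown value) as
// "do not touch the pointer in LOGGER" is exactly what `logger()` further
// down relies on.
#[allow(dead_code)]
fn logger_state_is_usable() -> bool {
    let state = LOGGER.load(Ordering::SeqCst);
    state != UNINITIALIZED && state != INITIALIZING
}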
static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO",
"DEBUG", "TRACE"];
/// An enum representing the available verbosity levels of the logging framework
///
/// A `LogLevel` may be compared directly to a `LogLevelFilter`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevel {
/// The "error" level.
///
/// Designates very serious errors.
Error = 1, // This way these line up with the discriminants for LogLevelFilter below
/// The "warn" level.
///
/// Designates hazardous situations.
Warn,
/// The "info" level.
///
/// Designates useful information.
Info,
/// The "debug" level.
///
/// Designates lower priority information.
Debug,
/// The "trace" level.
///
/// Designates very low priority, often extremely verbose, information.
Trace,
}
impl Clone for LogLevel {
#[inline]
fn clone(&self) -> LogLevel {
*self
}
}
impl PartialEq for LogLevel {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevelFilter> for LogLevel {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialOrd for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevelFilter> for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some((*self as usize).cmp(&(*other as usize)))
}
}
impl Ord for LogLevel {
#[inline]
fn cmp(&self, other: &LogLevel) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
fn ok_or<T, E>(t: Option<T>, e: E) -> Result<T, E> {
match t {
Some(t) => Ok(t),
None => Err(e),
}
}
impl FromStr for LogLevel {
type Err = ();
fn from_str(level: &str) -> Result<LogLevel, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.into_iter()
.filter(|&idx| idx != 0)
.map(|idx| LogLevel::from_usize(idx).unwrap())
.next(), ())
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.pad(LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevel {
fn from_usize(u: usize) -> Option<LogLevel> {
match u {
1 => Some(LogLevel::Error),
2 => Some(LogLevel::Warn),
3 => Some(LogLevel::Info),
4 => Some(LogLevel::Debug),
5 => Some(LogLevel::Trace),
_ => None
}
}
/// Returns the most verbose logging level.
#[inline]
pub fn max() -> LogLevel {
LogLevel::Trace
}
/// Converts the `LogLevel` to the equivalent `LogLevelFilter`.
#[inline]
pub fn to_log_level_filter(&self) -> LogLevelFilter {
LogLevelFilter::from_usize(*self as usize).unwrap()
}
}
/// An enum representing the available verbosity level filters of the logging
/// framework.
///
/// A `LogLevelFilter` may be compared directly to a `LogLevel`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevelFilter {
/// A level lower than all log levels.
Off,
/// Corresponds to the `Error` log level.
Error,
/// Corresponds to the `Warn` log level.
Warn,
/// Corresponds to the `Info` log level.
Info,
/// Corresponds to the `Debug` log level.
Debug,
/// Corresponds to the `Trace` log level.
Trace,
}
// Deriving generates terrible impls of these traits
impl Clone for LogLevelFilter {
#[inline]
fn clone(&self) -> LogLevelFilter {
*self
}
}
impl PartialEq for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevel> for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
other.eq(self)
}
}
impl PartialOrd for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevel> for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
other.partial_cmp(self).map(|x| x.reverse())
}
}
impl Ord for LogLevelFilter {
#[inline]
fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
impl FromStr for LogLevelFilter {
type Err = ();
fn from_str(level: &str) -> Result<LogLevelFilter, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.map(|p| LogLevelFilter::from_usize(p).unwrap()), ())
}
}
impl fmt::Display for LogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevelFilter {
fn from_usize(u: usize) -> Option<LogLevelFilter> {
match u {
0 => Some(LogLevelFilter::Off),
1 => Some(LogLevelFilter::Error),
2 => Some(LogLevelFilter::Warn),
3 => Some(LogLevelFilter::Info),
4 => Some(LogLevelFilter::Debug),
5 => Some(LogLevelFilter::Trace),
_ => None
}
}
/// Returns the most verbose logging level filter.
#[inline]
pub fn max() -> LogLevelFilter {
LogLevelFilter::Trace
}
/// Converts `self` to the equivalent `LogLevel`.
///
/// Returns `None` if `self` is `LogLevelFilter::Off`.
#[inline]
pub fn to_log_level(&self) -> Option<LogLevel> {
LogLevel::from_usize(*self as usize)
}
}
/// The "payload" of a log message.
pub struct LogRecord<'a> {
metadata: LogMetadata<'a>,
location: &'a LogLocation,
args: fmt::Arguments<'a>,
}
impl<'a> LogRecord<'a> {
/// The message body.
pub fn args(&self) -> &fmt::Arguments<'a> {
&self.args
}
/// Metadata about the log directive.
pub fn metadata(&self) -> &LogMetadata {
&self.metadata
}
/// The location of the log directive.
pub fn location(&self) -> &LogLocation {
self.location
}
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.metadata.level()
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.metadata.target()
}
}
/// Metadata about a log message.
pub struct LogMetadata<'a> {
level: LogLevel,
target: &'a str,
}
impl<'a> LogMetadata<'a> {
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.level
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.target
}
}
/// A trait encapsulating the operations required of a logger
pub trait Log: Sync+Send {
/// Determines if a log message with the specified metadata would be
/// logged.
///
/// This is used by the `log_enabled!` macro to allow callers to avoid
/// expensive computation of log message arguments if the message would be
/// discarded anyway.
fn enabled(&self, metadata: &LogMetadata) -> bool;
/// Logs the `LogRecord`.
///
/// Note that `enabled` is *not* necessarily called before this method.
/// Implementations of `log` should perform all necessary filtering
/// internally.
fn log(&self, record: &LogRecord);
}
/// The location of a log message.
///
/// # Warning
///
/// The fields of this struct are public so that they may be initialized by the
/// `log!` macro. They are subject to change at any time and should never be
/// accessed directly.
#[derive(Copy, Clone, Debug)]
pub struct LogLocation {
#[doc(hidden)]
pub __module_path: &'static str,
#[doc(hidden)]
pub __file: &'static str,
#[doc(hidden)]
pub __line: u32,
}
impl LogLocation {
/// The module path of the message.
pub fn module_path(&self) -> &str {
self.__module_path
}
/// The source file containing the message.
pub fn file(&self) -> &str {
self.__file
}
/// The line containing the message.
pub fn line(&self) -> u32 {
self.__line
}
}
/// A token providing read and write access to the global maximum log level
/// filter.
///
/// The maximum log level is used as an optimization to avoid evaluating log
/// messages that will be ignored by the logger. Any message with a level
/// higher than the maximum log level filter will be ignored. A logger should
/// make sure to keep the maximum log level filter in sync with its current
/// configuration.
#[allow(missing_copy_implementations)]
pub struct MaxLogLevelFilter(());
impl fmt::Debug for MaxLogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "MaxLogLevelFilter")
}
}
impl MaxLogLevelFilter {
/// Gets the current maximum log level filter.
pub fn get(&self) -> LogLevelFilter {
max_log_level()
}
/// Sets the maximum log level.
pub fn set(&self, level: LogLevelFilter) {
MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst)
}
}
/// Returns the current maximum log level.
///
/// The `log!`, `error!`, `warn!`, `info!`, `debug!`, and `trace!` macros check
/// this value and discard any message logged at a higher level. The maximum
//! log level is set by the `MaxLogLevelFilter` token passed to loggers.
#[inline(always)]
pub fn max_log_level() -> LogLevelFilter {
unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) }
}
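// A minimal sketch, not present in the original source: the kind of cheap
// comparison the logging macros are described as performing before building a
// record. The helper name is an assumption; the real macros inline this check.
#[allow(dead_code)]
fn would_log(level: LogLevel) -> bool {
    level <= max_log_level()
}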
/// Sets the global logger.
///
/// The `make_logger` closure is passed a `MaxLogLevelFilter` object, which the
/// logger should use to keep the global maximum log level in sync with the
/// highest log level that the logger will not ignore.
///
/// This function may only be called once in the lifetime of a program. Any log
/// events that occur before the call to `set_logger` completes will be
/// ignored.
///
/// This function does not typically need to be called manually. Logger
/// implementations should provide an initialization method that calls
/// `set_logger` internally.
pub fn set_logger<M>(make_logger: M) -> Result<(), SetLoggerError>
where M: FnOnce(MaxLogLevelFilter) -> Box<Log> {
if LOGGER.compare_and_swap(UNINITIALIZED, INITIALIZING,
Ordering::SeqCst) != UNINITIALIZED {
return Err(SetLoggerError(()));
}
let logger = Box::new(make_logger(MaxLogLevelFilter(())));
let logger = unsafe { mem::transmute::<Box<Box<Log>>, usize>(logger) };
LOGGER.store(logger, Ordering::SeqCst);
unsafe {
assert_eq!(libc::atexit(shutdown), 0);
}
return Ok(());
extern fn shutdown() {
// Set to INITIALIZING to prevent re-initialization after
let logger = LOGGER.swap(INITIALIZING, Ordering::SeqCst);
while REFCOUNT.load(Ordering::SeqCst) != 0 {
// FIXME add a sleep here when it doesn't involve timers
}
unsafe { mem::transmute::<usize, Box<Box<Log>>>(logger); }
}
}
/// The type returned by `set_logger` if `set_logger` has already been called.
#[allow(missing_copy_implementations)]
#[derive(Debug)]
pub struct SetLoggerError(());
impl fmt::Display for SetLoggerError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "attempted to set a logger after the logging system \
was already initialized")
}
}
impl error::Error for SetLoggerError {
fn description(&self) -> &str { "set_logger() called multiple times" }
}
struct LoggerGuard(usize);
impl Drop for LoggerGuard {
fn drop(&mut self) {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
}
}
impl Deref for LoggerGuard {
type Target = Box<Log>;
fn deref(&self) -> &Box<Log + 'static> {
unsafe { mem::transmute(self.0) }
}
}
fn logger() -> Option<LoggerGuard> {
REFCOUNT.fetch_add(1, Ordering::SeqCst);
let logger = LOGGER.load(Ordering::SeqCst);
if logger == UNINITIALIZED || logger == INITIALIZING {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
None
} else {
Some(LoggerGuard(logger))
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __enabled(level: LogLevel, target: &str) -> bool {
if let Some(logger) = logger() {
logger.enabled(&LogMetadata { level: level, target: target })
} else |
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __log(level: LogLevel, target: &str, loc: &LogLocation,
args: fmt::Arguments) {
if let Some(logger) = logger() {
let record = LogRecord {
metadata: LogMetadata {
level: level,
target: target,
},
location: loc,
args: args
};
logger.log(&record)
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use super::{LogLevel, LogLevelFilter, SetLoggerError};
#[test]
fn test_loglevelfilter_from_str() {
let tests = [
("off", Ok(LogLevelFilter::Off)),
("error", Ok(LogLevelFilter::Error)),
("warn", Ok(LogLevelFilter::Warn)),
("info", Ok(LogLevelFilter::Info)),
("debug", Ok(LogLevelFilter::Debug)),
("trace", Ok(LogLevelFilter::Trace)),
("OFF", Ok(LogLevelFilter::Off)),
("ERROR", Ok(LogLevelFilter::Error)),
("WARN", Ok(LogLevelFilter::Warn)),
("INFO", Ok(LogLevelFilter::Info)),
("DEBUG", Ok(LogLevelFilter::Debug)),
("TRACE", Ok(LogLevelFilter::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_from_str() {
let tests = [
("OFF", Err(())),
("error", Ok(LogLevel::Error)),
("warn", Ok(LogLevel::Warn)),
("info", Ok(LogLevel::Info)),
("debug", Ok(LogLevel::Debug)),
("trace", Ok(LogLevel::Trace)),
("ERROR", Ok(LogLevel::Error)),
("WARN", Ok(LogLevel::Warn)),
("INFO", Ok(LogLevel::Info)),
("DEBUG", Ok(LogLevel::Debug)),
("TRACE", Ok(LogLevel::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_show() {
assert_eq!("INFO", LogLevel::Info.to_string());
assert_eq!("ERROR", LogLevel::Error.to_string());
}
#[test]
fn test_loglevelfilter_show() {
assert_eq!("OFF", LogLevelFilter::Off.to_string());
assert_eq!("ERROR", LogLevelFilter::Error.to_string());
}
#[test]
fn test_cross_cmp() {
assert!(LogLevel::Debug > LogLevelFilter::Error);
assert!(LogLevelFilter::Warn < LogLevel::Trace);
assert!(LogLevelFilter::Off < LogLevel::Error);
}
#[test]
fn test_cross_eq() {
assert!(LogLevel::Error == LogLevelFilter::Error);
assert!(LogLevelFilter::Off != LogLevel::Error);
assert!(LogLevel::Trace == LogLevelFilter::Trace);
}
#[test]
fn test_to_log_level() {
assert_eq!(Some(LogLevel::Error), LogLevelFilter::Error.to_log_level());
| {
false
} | conditional_block |
main.rs | extern crate crypto;
extern crate itertools;
use std::env;
use crypto::md5::Md5;
use crypto::digest::Digest;
use std::collections::HashMap;
fn main() {
let raw_input = env::args().nth(1).unwrap_or("".to_string());
let input = raw_input.as_bytes();
let mut index : i64 = -1;
let mut hash = Md5::new();
let mut output = String::new();
let mut output2 = HashMap::new();
while {output.len() < 8 || output2.len() < 8} {
let mut temp = [0; 16];
while {
index += 1;
hash.reset();
hash.input(input);
hash.input(index.to_string().as_bytes());
hash.result(&mut temp);
(temp[0] as i32 + temp[1] as i32 + (temp[2] >> 4) as i32) != 0
} {}
let out = format!("{:x}", temp[2] & 0xf);
let out2 = format!("{:x}", temp[3] >> 4);
let pos = (temp[2] & 0x0f) as i32;
if output.len() < 8 |
if pos < 8 && !output2.contains_key(&pos) {
output2.insert(pos, out2);
}
}
println!("P1 {}", output);
let empty = " ".to_string();
let temp = (0..8)
.map(|i| output2.get(&(i as i32)).unwrap_or(&empty))
.cloned()
.collect::<Vec<_>>()
.concat();
println!("P2 {}", temp);
}
| {
output.push_str(&out);
} | conditional_block |
main.rs | extern crate crypto;
extern crate itertools;
use std::env;
use crypto::md5::Md5;
use crypto::digest::Digest;
use std::collections::HashMap;
fn | () {
let raw_input = env::args().nth(1).unwrap_or("".to_string());
let input = raw_input.as_bytes();
let mut index : i64 = -1;
let mut hash = Md5::new();
let mut output = String::new();
let mut output2 = HashMap::new();
while {output.len() < 8 || output2.len() < 8} {
let mut temp = [0; 16];
while {
index += 1;
hash.reset();
hash.input(input);
hash.input(index.to_string().as_bytes());
hash.result(&mut temp);
(temp[0] as i32 + temp[1] as i32 + (temp[2] >> 4) as i32) != 0
} {}
let out = format!("{:x}", temp[2] & 0xf);
let out2 = format!("{:x}", temp[3] >> 4);
let pos = (temp[2] & 0x0f) as i32;
if output.len() < 8 {
output.push_str(&out);
}
if pos < 8 && !output2.contains_key(&pos) {
output2.insert(pos, out2);
}
}
println!("P1 {}", output);
let empty = " ".to_string();
let temp = (0..8)
.map(|i| output2.get(&(i as i32)).unwrap_or(&empty))
.cloned()
.collect::<Vec<_>>()
.concat();
println!("P2 {}", temp);
}
| main | identifier_name |
main.rs | extern crate crypto;
extern crate itertools;
use std::env;
use crypto::md5::Md5;
use crypto::digest::Digest;
use std::collections::HashMap;
fn main() |
(temp[0] as i32 + temp[1] as i32 + (temp[2] >> 4) as i32) != 0
} {}
let out = format!("{:x}", temp[2] & 0xf);
let out2 = format!("{:x}", temp[3] >> 4);
let pos = (temp[2] & 0x0f) as i32;
if output.len() < 8 {
output.push_str(&out);
}
if pos < 8 && !output2.contains_key(&pos) {
output2.insert(pos, out2);
}
}
println!("P1 {}", output);
let empty = " ".to_string();
let temp = (0..8)
.map(|i| output2.get(&(i as i32)).unwrap_or(&empty))
.cloned()
.collect::<Vec<_>>()
.concat();
println!("P2 {}", temp);
}
| {
let raw_input = env::args().nth(1).unwrap_or("".to_string());
let input = raw_input.as_bytes();
let mut index : i64 = -1;
let mut hash = Md5::new();
let mut output = String::new();
let mut output2 = HashMap::new();
while {output.len() < 8 || output2.len() < 8} {
let mut temp = [0; 16];
while {
index += 1;
hash.reset();
hash.input(input);
hash.input(index.to_string().as_bytes());
hash.result(&mut temp); | identifier_body |
main.rs | extern crate crypto;
extern crate itertools;
use std::env;
use crypto::md5::Md5;
use crypto::digest::Digest;
use std::collections::HashMap;
fn main() {
let raw_input = env::args().nth(1).unwrap_or("".to_string());
let input = raw_input.as_bytes();
let mut index : i64 = -1;
let mut hash = Md5::new();
let mut output = String::new();
let mut output2 = HashMap::new();
while {output.len() < 8 || output2.len() < 8} {
let mut temp = [0; 16];
while {
index += 1;
hash.reset(); |
hash.input(input);
hash.input(index.to_string().as_bytes());
hash.result(&mut temp);
(temp[0] as i32 + temp[1] as i32 + (temp[2] >> 4) as i32) != 0
} {}
let out = format!("{:x}", temp[2] & 0xf);
let out2 = format!("{:x}", temp[3] >> 4);
let pos = (temp[2] & 0x0f) as i32;
if output.len() < 8 {
output.push_str(&out);
}
if pos < 8 && !output2.contains_key(&pos) {
output2.insert(pos, out2);
}
}
println!("P1 {}", output);
let empty = " ".to_string();
let temp = (0..8)
.map(|i| output2.get(&(i as i32)).unwrap_or(&empty))
.cloned()
.collect::<Vec<_>>()
.concat();
println!("P2 {}", temp);
} | random_line_split |
|
dependency.rs | use std::fmt;
use std::rc::Rc;
use std::str::FromStr;
use semver::VersionReq;
use semver::ReqParseError;
use serde::ser;
use core::{PackageId, SourceId, Summary};
use core::interning::InternedString;
use util::{Cfg, CfgExpr, Config};
use util::errors::{CargoError, CargoResult, CargoResultExt};
/// Information about a dependency requested by a Cargo manifest.
/// Cheap to copy.
#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
pub struct Dependency {
inner: Rc<Inner>,
}
/// The data underlying a Dependency.
#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
struct Inner {
name: InternedString,
source_id: SourceId,
registry_id: Option<SourceId>,
req: VersionReq,
specified_req: bool,
kind: Kind,
only_match_name: bool,
rename: Option<String>,
optional: bool,
default_features: bool,
features: Vec<InternedString>,
// This dependency should be used only for this platform.
// `None` means *all platforms*.
platform: Option<Platform>,
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)]
pub enum Platform {
Name(String),
Cfg(CfgExpr),
}
#[derive(Serialize)]
struct SerializedDependency<'a> {
name: &'a str,
source: &'a SourceId,
req: String,
kind: Kind,
rename: Option<&'a str>,
optional: bool,
uses_default_features: bool,
features: &'a [String],
target: Option<&'a Platform>,
}
impl ser::Serialize for Dependency {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
let string_features: Vec<_> = self.features().iter().map(|s| s.to_string()).collect();
SerializedDependency {
name: &*self.name(),
source: self.source_id(),
req: self.version_req().to_string(),
kind: self.kind(),
optional: self.is_optional(),
uses_default_features: self.uses_default_features(),
features: &string_features,
target: self.platform(),
rename: self.rename(),
}.serialize(s)
}
}
#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug, Copy)]
pub enum Kind {
Normal,
Development,
Build,
}
fn parse_req_with_deprecated(
req: &str,
extra: Option<(&PackageId, &Config)>,
) -> CargoResult<VersionReq> {
match VersionReq::parse(req) {
Err(e) => {
let (inside, config) = match extra {
Some(pair) => pair,
None => return Err(e.into()),
};
match e {
ReqParseError::DeprecatedVersionRequirement(requirement) => {
let msg = format!(
"\
parsed version requirement `{}` is no longer valid
Previous versions of Cargo accepted this malformed requirement,
but it is being deprecated. This was found when parsing the manifest
of {} {}, and the correct version requirement is `{}`.
This will soon become a hard error, so it's either recommended to
update to a fixed version or contact the upstream maintainer about
this warning.
",
req,
inside.name(),
inside.version(),
requirement
);
config.shell().warn(&msg)?;
Ok(requirement)
}
e => Err(e.into()),
}
}
Ok(v) => Ok(v),
}
}
impl ser::Serialize for Kind {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
match *self {
Kind::Normal => None,
Kind::Development => Some("dev"),
Kind::Build => Some("build"),
}.serialize(s)
}
}
impl Dependency {
/// Attempt to create a `Dependency` from an entry in the manifest.
pub fn parse(
name: &str,
version: Option<&str>,
source_id: &SourceId,
inside: &PackageId,
config: &Config,
) -> CargoResult<Dependency> {
let arg = Some((inside, config));
let (specified_req, version_req) = match version {
Some(v) => (true, parse_req_with_deprecated(v, arg)?),
None => (false, VersionReq::any()),
};
let mut ret = Dependency::new_override(name, source_id);
{
let ptr = Rc::make_mut(&mut ret.inner);
ptr.only_match_name = false;
ptr.req = version_req;
ptr.specified_req = specified_req;
}
Ok(ret)
}
/// Attempt to create a `Dependency` from an entry in the manifest.
pub fn parse_no_deprecated(
name: &str,
version: Option<&str>,
source_id: &SourceId,
) -> CargoResult<Dependency> {
let (specified_req, version_req) = match version {
Some(v) => (true, parse_req_with_deprecated(v, None)?),
None => (false, VersionReq::any()),
};
let mut ret = Dependency::new_override(name, source_id);
{
let ptr = Rc::make_mut(&mut ret.inner);
ptr.only_match_name = false;
ptr.req = version_req;
ptr.specified_req = specified_req;
}
Ok(ret)
}
pub fn new_override(name: &str, source_id: &SourceId) -> Dependency {
assert!(!name.is_empty());
Dependency {
inner: Rc::new(Inner {
name: InternedString::new(name),
source_id: source_id.clone(),
registry_id: None,
req: VersionReq::any(),
kind: Kind::Normal,
only_match_name: true,
optional: false,
features: Vec::new(),
default_features: true,
specified_req: false,
platform: None,
rename: None,
}),
}
}
pub fn version_req(&self) -> &VersionReq {
&self.inner.req
}
pub fn name(&self) -> InternedString {
self.inner.name
}
pub fn source_id(&self) -> &SourceId {
&self.inner.source_id
}
pub fn registry_id(&self) -> Option<&SourceId> {
self.inner.registry_id.as_ref()
}
pub fn set_registry_id(&mut self, registry_id: &SourceId) -> &mut Dependency {
Rc::make_mut(&mut self.inner).registry_id = Some(registry_id.clone());
self
}
pub fn kind(&self) -> Kind {
self.inner.kind
}
pub fn specified_req(&self) -> bool {
self.inner.specified_req
}
/// If none, this dependency must be built for all platforms.
/// If some, it must only be built for the specified platform.
pub fn platform(&self) -> Option<&Platform> {
self.inner.platform.as_ref()
}
pub fn | (&self) -> Option<&str> {
self.inner.rename.as_ref().map(|s| &**s)
}
pub fn set_kind(&mut self, kind: Kind) -> &mut Dependency {
Rc::make_mut(&mut self.inner).kind = kind;
self
}
/// Sets the list of features requested for the package.
pub fn set_features(&mut self, features: Vec<String>) -> &mut Dependency {
Rc::make_mut(&mut self.inner).features =
features.iter().map(|s| InternedString::new(s)).collect();
self
}
/// Sets whether the dependency requests default features of the package.
pub fn set_default_features(&mut self, default_features: bool) -> &mut Dependency {
Rc::make_mut(&mut self.inner).default_features = default_features;
self
}
/// Sets whether the dependency is optional.
pub fn set_optional(&mut self, optional: bool) -> &mut Dependency {
Rc::make_mut(&mut self.inner).optional = optional;
self
}
/// Set the source id for this dependency
pub fn set_source_id(&mut self, id: SourceId) -> &mut Dependency {
Rc::make_mut(&mut self.inner).source_id = id;
self
}
/// Set the version requirement for this dependency
pub fn set_version_req(&mut self, req: VersionReq) -> &mut Dependency {
Rc::make_mut(&mut self.inner).req = req;
self
}
pub fn set_platform(&mut self, platform: Option<Platform>) -> &mut Dependency {
Rc::make_mut(&mut self.inner).platform = platform;
self
}
pub fn set_rename(&mut self, rename: &str) -> &mut Dependency {
Rc::make_mut(&mut self.inner).rename = Some(rename.to_string());
self
}
/// Lock this dependency to depending on the specified package id
pub fn lock_to(&mut self, id: &PackageId) -> &mut Dependency {
assert_eq!(self.inner.source_id, *id.source_id());
assert!(self.inner.req.matches(id.version()));
trace!(
"locking dep from `{}` with `{}` at {} to {}",
self.name(),
self.version_req(),
self.source_id(),
id
);
self.set_version_req(VersionReq::exact(id.version()))
.set_source_id(id.source_id().clone())
}
/// Returns whether this is a "locked" dependency, basically whether it has
/// an exact version req.
pub fn is_locked(&self) -> bool {
// Kind of a hack to figure this out, but it works!
self.inner.req.to_string().starts_with('=')
}
/// Returns false if the dependency is only used to build the local package.
pub fn is_transitive(&self) -> bool {
match self.inner.kind {
Kind::Normal | Kind::Build => true,
Kind::Development => false,
}
}
pub fn is_build(&self) -> bool {
match self.inner.kind {
Kind::Build => true,
_ => false,
}
}
pub fn is_optional(&self) -> bool {
self.inner.optional
}
/// Returns true if the default features of the dependency are requested.
pub fn uses_default_features(&self) -> bool {
self.inner.default_features
}
/// Returns the list of features that are requested by the dependency.
pub fn features(&self) -> &[InternedString] {
&self.inner.features
}
/// Returns true if the package (`sum`) can fulfill this dependency request.
pub fn matches(&self, sum: &Summary) -> bool {
self.matches_id(sum.package_id())
}
/// Returns true if the package (`sum`) can fulfill this dependency request.
pub fn matches_ignoring_source(&self, id: &PackageId) -> bool {
self.name() == id.name() && self.version_req().matches(id.version())
}
/// Returns true if the package (`id`) can fulfill this dependency request.
pub fn matches_id(&self, id: &PackageId) -> bool {
self.inner.name == id.name()
&& (self.inner.only_match_name
|| (self.inner.req.matches(id.version())
&& &self.inner.source_id == id.source_id()))
}
pub fn map_source(mut self, to_replace: &SourceId, replace_with: &SourceId) -> Dependency {
if self.source_id() != to_replace {
self
} else {
self.set_source_id(replace_with.clone());
self
}
}
}
impl Platform {
pub fn matches(&self, name: &str, cfg: Option<&[Cfg]>) -> bool {
match *self {
Platform::Name(ref p) => p == name,
Platform::Cfg(ref p) => match cfg {
Some(cfg) => p.matches(cfg),
None => false,
},
}
}
}
impl ser::Serialize for Platform {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
self.to_string().serialize(s)
}
}
impl FromStr for Platform {
type Err = CargoError;
fn from_str(s: &str) -> CargoResult<Platform> {
if s.starts_with("cfg(") && s.ends_with(')') {
let s = &s[4..s.len() - 1];
let p = s.parse()
.map(Platform::Cfg)
.chain_err(|| format_err!("failed to parse `{}` as a cfg expression", s))?;
Ok(p)
} else {
Ok(Platform::Name(s.to_string()))
}
}
}
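// Hedged usage sketch, not part of cargo's source: how a target string from a
// manifest turns into a `Platform`. The target triple and cfg expression below
// are arbitrary examples chosen for illustration.
#[cfg(test)]
mod platform_from_str_sketch {
    use super::Platform;

    #[test]
    fn parses_names_and_cfg_expressions() {
        // A plain string becomes a named target platform.
        let name: Platform = "x86_64-unknown-linux-gnu".parse().unwrap();
        assert_eq!(name.to_string(), "x86_64-unknown-linux-gnu");

        // A `cfg(...)` string is parsed into a cfg expression.
        let cfg: Platform = "cfg(unix)".parse().unwrap();
        assert_eq!(cfg.to_string(), "cfg(unix)");
    }
}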
impl fmt::Display for Platform {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Platform::Name(ref n) => n.fmt(f),
Platform::Cfg(ref e) => write!(f, "cfg({})", e),
}
}
}
| rename | identifier_name |
dependency.rs | use std::fmt;
use std::rc::Rc;
use std::str::FromStr;
use semver::VersionReq;
use semver::ReqParseError;
use serde::ser;
use core::{PackageId, SourceId, Summary};
use core::interning::InternedString;
use util::{Cfg, CfgExpr, Config};
use util::errors::{CargoError, CargoResult, CargoResultExt};
/// Information about a dependency requested by a Cargo manifest.
/// Cheap to copy.
#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
pub struct Dependency {
inner: Rc<Inner>,
}
/// The data underlying a Dependency.
#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
struct Inner {
name: InternedString,
source_id: SourceId,
registry_id: Option<SourceId>,
req: VersionReq,
specified_req: bool,
kind: Kind,
only_match_name: bool,
rename: Option<String>,
optional: bool,
default_features: bool,
features: Vec<InternedString>,
// This dependency should be used only for this platform.
// `None` means *all platforms*.
platform: Option<Platform>,
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)]
pub enum Platform {
Name(String),
Cfg(CfgExpr),
}
#[derive(Serialize)]
struct SerializedDependency<'a> {
name: &'a str,
source: &'a SourceId,
req: String,
kind: Kind,
rename: Option<&'a str>,
optional: bool,
uses_default_features: bool,
features: &'a [String],
target: Option<&'a Platform>,
}
impl ser::Serialize for Dependency {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
let string_features: Vec<_> = self.features().iter().map(|s| s.to_string()).collect();
SerializedDependency {
name: &*self.name(),
source: self.source_id(),
req: self.version_req().to_string(),
kind: self.kind(),
optional: self.is_optional(),
uses_default_features: self.uses_default_features(),
features: &string_features,
target: self.platform(),
rename: self.rename(),
}.serialize(s)
}
}
#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug, Copy)]
pub enum Kind {
Normal,
Development,
Build,
}
fn parse_req_with_deprecated(
req: &str,
extra: Option<(&PackageId, &Config)>,
) -> CargoResult<VersionReq> {
match VersionReq::parse(req) {
Err(e) => {
let (inside, config) = match extra {
Some(pair) => pair,
None => return Err(e.into()),
};
match e {
ReqParseError::DeprecatedVersionRequirement(requirement) => {
let msg = format!(
"\
parsed version requirement `{}` is no longer valid
Previous versions of Cargo accepted this malformed requirement,
but it is being deprecated. This was found when parsing the manifest
of {} {}, and the correct version requirement is `{}`.
This will soon become a hard error, so it's either recommended to
update to a fixed version or contact the upstream maintainer about
this warning.
",
req,
inside.name(),
inside.version(),
requirement
);
config.shell().warn(&msg)?;
Ok(requirement)
}
e => Err(e.into()),
}
}
Ok(v) => Ok(v),
}
}
impl ser::Serialize for Kind {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
match *self {
Kind::Normal => None,
Kind::Development => Some("dev"),
Kind::Build => Some("build"),
}.serialize(s)
}
}
impl Dependency {
/// Attempt to create a `Dependency` from an entry in the manifest.
pub fn parse(
name: &str,
version: Option<&str>,
source_id: &SourceId,
inside: &PackageId,
config: &Config,
) -> CargoResult<Dependency> {
let arg = Some((inside, config));
let (specified_req, version_req) = match version {
Some(v) => (true, parse_req_with_deprecated(v, arg)?),
None => (false, VersionReq::any()),
};
let mut ret = Dependency::new_override(name, source_id);
{
let ptr = Rc::make_mut(&mut ret.inner);
ptr.only_match_name = false;
ptr.req = version_req;
ptr.specified_req = specified_req;
}
Ok(ret)
}
/// Attempt to create a `Dependency` from an entry in the manifest.
pub fn parse_no_deprecated(
name: &str,
version: Option<&str>,
source_id: &SourceId,
) -> CargoResult<Dependency> {
let (specified_req, version_req) = match version {
Some(v) => (true, parse_req_with_deprecated(v, None)?),
None => (false, VersionReq::any()),
};
let mut ret = Dependency::new_override(name, source_id);
{
let ptr = Rc::make_mut(&mut ret.inner);
ptr.only_match_name = false;
ptr.req = version_req;
ptr.specified_req = specified_req;
}
Ok(ret)
}
pub fn new_override(name: &str, source_id: &SourceId) -> Dependency {
assert!(!name.is_empty());
Dependency {
inner: Rc::new(Inner {
name: InternedString::new(name),
source_id: source_id.clone(),
registry_id: None,
req: VersionReq::any(),
kind: Kind::Normal,
only_match_name: true,
optional: false,
features: Vec::new(),
default_features: true,
specified_req: false,
platform: None,
rename: None,
}),
}
}
pub fn version_req(&self) -> &VersionReq {
&self.inner.req
}
pub fn name(&self) -> InternedString {
self.inner.name
}
pub fn source_id(&self) -> &SourceId {
&self.inner.source_id
}
pub fn registry_id(&self) -> Option<&SourceId> {
self.inner.registry_id.as_ref()
}
pub fn set_registry_id(&mut self, registry_id: &SourceId) -> &mut Dependency {
Rc::make_mut(&mut self.inner).registry_id = Some(registry_id.clone());
self
}
pub fn kind(&self) -> Kind {
self.inner.kind
}
pub fn specified_req(&self) -> bool {
self.inner.specified_req
}
/// If none, this dependency must be built for all platforms.
/// If some, it must only be built for the specified platform.
pub fn platform(&self) -> Option<&Platform> {
self.inner.platform.as_ref()
}
pub fn rename(&self) -> Option<&str> {
self.inner.rename.as_ref().map(|s| &**s)
}
pub fn set_kind(&mut self, kind: Kind) -> &mut Dependency {
Rc::make_mut(&mut self.inner).kind = kind;
self
}
/// Sets the list of features requested for the package.
pub fn set_features(&mut self, features: Vec<String>) -> &mut Dependency {
Rc::make_mut(&mut self.inner).features =
features.iter().map(|s| InternedString::new(s)).collect();
self
}
/// Sets whether the dependency requests default features of the package.
pub fn set_default_features(&mut self, default_features: bool) -> &mut Dependency {
Rc::make_mut(&mut self.inner).default_features = default_features;
self
}
/// Sets whether the dependency is optional.
pub fn set_optional(&mut self, optional: bool) -> &mut Dependency {
Rc::make_mut(&mut self.inner).optional = optional;
self
}
/// Set the source id for this dependency
pub fn set_source_id(&mut self, id: SourceId) -> &mut Dependency {
Rc::make_mut(&mut self.inner).source_id = id;
self
}
/// Set the version requirement for this dependency
pub fn set_version_req(&mut self, req: VersionReq) -> &mut Dependency {
Rc::make_mut(&mut self.inner).req = req;
self
}
pub fn set_platform(&mut self, platform: Option<Platform>) -> &mut Dependency {
Rc::make_mut(&mut self.inner).platform = platform;
self
}
pub fn set_rename(&mut self, rename: &str) -> &mut Dependency {
Rc::make_mut(&mut self.inner).rename = Some(rename.to_string());
self
}
/// Lock this dependency to depending on the specified package id
pub fn lock_to(&mut self, id: &PackageId) -> &mut Dependency {
assert_eq!(self.inner.source_id, *id.source_id());
assert!(self.inner.req.matches(id.version()));
trace!(
"locking dep from `{}` with `{}` at {} to {}",
self.name(),
self.version_req(),
self.source_id(),
id
);
self.set_version_req(VersionReq::exact(id.version()))
.set_source_id(id.source_id().clone())
}
/// Returns whether this is a "locked" dependency, basically whether it has
/// an exact version req.
pub fn is_locked(&self) -> bool {
// Kind of a hack to figure this out, but it works!
self.inner.req.to_string().starts_with('=')
}
/// Returns false if the dependency is only used to build the local package.
pub fn is_transitive(&self) -> bool {
match self.inner.kind {
Kind::Normal | Kind::Build => true,
Kind::Development => false,
}
}
pub fn is_build(&self) -> bool {
match self.inner.kind {
Kind::Build => true,
_ => false,
}
}
pub fn is_optional(&self) -> bool {
self.inner.optional
}
/// Returns true if the default features of the dependency are requested.
pub fn uses_default_features(&self) -> bool {
self.inner.default_features
}
/// Returns the list of features that are requested by the dependency.
pub fn features(&self) -> &[InternedString] {
&self.inner.features
}
/// Returns true if the package (`sum`) can fulfill this dependency request.
pub fn matches(&self, sum: &Summary) -> bool {
self.matches_id(sum.package_id())
}
/// Returns true if the package (`sum`) can fulfill this dependency request.
pub fn matches_ignoring_source(&self, id: &PackageId) -> bool {
self.name() == id.name() && self.version_req().matches(id.version())
}
/// Returns true if the package (`id`) can fulfill this dependency request.
pub fn matches_id(&self, id: &PackageId) -> bool {
self.inner.name == id.name()
&& (self.inner.only_match_name
|| (self.inner.req.matches(id.version())
&& &self.inner.source_id == id.source_id()))
}
pub fn map_source(mut self, to_replace: &SourceId, replace_with: &SourceId) -> Dependency {
if self.source_id() != to_replace {
self
} else {
self.set_source_id(replace_with.clone());
self
}
}
}
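// Illustrative check, not in the original file: the `is_locked` method above
// relies on `VersionReq::exact` rendering with a leading `=`. This sketch
// demonstrates that with an arbitrary version, assuming the semver crate that
// cargo depends on here.
#[cfg(test)]
mod exact_req_sketch {
    use semver::{Version, VersionReq};

    #[test]
    fn exact_requirements_start_with_an_equals_sign() {
        let version = Version::parse("1.2.3").unwrap();
        let req = VersionReq::exact(&version);
        // This is the property the "starts_with('=')" hack depends on.
        assert!(req.to_string().starts_with('='));
        assert!(req.matches(&version));
    }
}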
impl Platform {
pub fn matches(&self, name: &str, cfg: Option<&[Cfg]>) -> bool {
match *self {
Platform::Name(ref p) => p == name,
Platform::Cfg(ref p) => match cfg {
Some(cfg) => p.matches(cfg),
None => false,
},
}
}
}
impl ser::Serialize for Platform {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
self.to_string().serialize(s)
}
}
impl FromStr for Platform {
type Err = CargoError;
fn from_str(s: &str) -> CargoResult<Platform> {
if s.starts_with("cfg(") && s.ends_with(')') {
let s = &s[4..s.len() - 1];
let p = s.parse()
.map(Platform::Cfg)
.chain_err(|| format_err!("failed to parse `{}` as a cfg expression", s))?;
Ok(p)
} else {
Ok(Platform::Name(s.to_string()))
}
}
}
impl fmt::Display for Platform {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Platform::Name(ref n) => n.fmt(f),
Platform::Cfg(ref e) => write!(f, "cfg({})", e),
}
} | } | random_line_split |
|
dependency.rs | use std::fmt;
use std::rc::Rc;
use std::str::FromStr;
use semver::VersionReq;
use semver::ReqParseError;
use serde::ser;
use core::{PackageId, SourceId, Summary};
use core::interning::InternedString;
use util::{Cfg, CfgExpr, Config};
use util::errors::{CargoError, CargoResult, CargoResultExt};
/// Information about a dependency requested by a Cargo manifest.
/// Cheap to copy.
#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
pub struct Dependency {
inner: Rc<Inner>,
}
/// The data underlying a Dependency.
#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
struct Inner {
name: InternedString,
source_id: SourceId,
registry_id: Option<SourceId>,
req: VersionReq,
specified_req: bool,
kind: Kind,
only_match_name: bool,
rename: Option<String>,
optional: bool,
default_features: bool,
features: Vec<InternedString>,
// This dependency should be used only for this platform.
// `None` means *all platforms*.
platform: Option<Platform>,
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)]
pub enum Platform {
Name(String),
Cfg(CfgExpr),
}
#[derive(Serialize)]
struct SerializedDependency<'a> {
name: &'a str,
source: &'a SourceId,
req: String,
kind: Kind,
rename: Option<&'a str>,
optional: bool,
uses_default_features: bool,
features: &'a [String],
target: Option<&'a Platform>,
}
impl ser::Serialize for Dependency {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
let string_features: Vec<_> = self.features().iter().map(|s| s.to_string()).collect();
SerializedDependency {
name: &*self.name(),
source: self.source_id(),
req: self.version_req().to_string(),
kind: self.kind(),
optional: self.is_optional(),
uses_default_features: self.uses_default_features(),
features: &string_features,
target: self.platform(),
rename: self.rename(),
}.serialize(s)
}
}
#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug, Copy)]
pub enum Kind {
Normal,
Development,
Build,
}
fn parse_req_with_deprecated(
req: &str,
extra: Option<(&PackageId, &Config)>,
) -> CargoResult<VersionReq> {
match VersionReq::parse(req) {
Err(e) => {
let (inside, config) = match extra {
Some(pair) => pair,
None => return Err(e.into()),
};
match e {
ReqParseError::DeprecatedVersionRequirement(requirement) => {
let msg = format!(
"\
parsed version requirement `{}` is no longer valid
Previous versions of Cargo accepted this malformed requirement,
but it is being deprecated. This was found when parsing the manifest
of {} {}, and the correct version requirement is `{}`.
This will soon become a hard error, so it's either recommended to
update to a fixed version or contact the upstream maintainer about
this warning.
",
req,
inside.name(),
inside.version(),
requirement
);
config.shell().warn(&msg)?;
Ok(requirement)
}
e => Err(e.into()),
}
}
Ok(v) => Ok(v),
}
}
impl ser::Serialize for Kind {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
match *self {
Kind::Normal => None,
Kind::Development => Some("dev"),
Kind::Build => Some("build"),
}.serialize(s)
}
}
impl Dependency {
/// Attempt to create a `Dependency` from an entry in the manifest.
pub fn parse(
name: &str,
version: Option<&str>,
source_id: &SourceId,
inside: &PackageId,
config: &Config,
) -> CargoResult<Dependency> {
let arg = Some((inside, config));
let (specified_req, version_req) = match version {
Some(v) => (true, parse_req_with_deprecated(v, arg)?),
None => (false, VersionReq::any()),
};
let mut ret = Dependency::new_override(name, source_id);
{
let ptr = Rc::make_mut(&mut ret.inner);
ptr.only_match_name = false;
ptr.req = version_req;
ptr.specified_req = specified_req;
}
Ok(ret)
}
/// Attempt to create a `Dependency` from an entry in the manifest.
pub fn parse_no_deprecated(
name: &str,
version: Option<&str>,
source_id: &SourceId,
) -> CargoResult<Dependency> {
let (specified_req, version_req) = match version {
Some(v) => (true, parse_req_with_deprecated(v, None)?),
None => (false, VersionReq::any()),
};
let mut ret = Dependency::new_override(name, source_id);
{
let ptr = Rc::make_mut(&mut ret.inner);
ptr.only_match_name = false;
ptr.req = version_req;
ptr.specified_req = specified_req;
}
Ok(ret)
}
pub fn new_override(name: &str, source_id: &SourceId) -> Dependency {
assert!(!name.is_empty());
Dependency {
inner: Rc::new(Inner {
name: InternedString::new(name),
source_id: source_id.clone(),
registry_id: None,
req: VersionReq::any(),
kind: Kind::Normal,
only_match_name: true,
optional: false,
features: Vec::new(),
default_features: true,
specified_req: false,
platform: None,
rename: None,
}),
}
}
pub fn version_req(&self) -> &VersionReq {
&self.inner.req
}
pub fn name(&self) -> InternedString {
self.inner.name
}
pub fn source_id(&self) -> &SourceId {
&self.inner.source_id
}
pub fn registry_id(&self) -> Option<&SourceId> {
self.inner.registry_id.as_ref()
}
pub fn set_registry_id(&mut self, registry_id: &SourceId) -> &mut Dependency {
Rc::make_mut(&mut self.inner).registry_id = Some(registry_id.clone());
self
}
pub fn kind(&self) -> Kind {
self.inner.kind
}
pub fn specified_req(&self) -> bool {
self.inner.specified_req
}
/// If none, this dependency must be built for all platforms.
/// If some, it must only be built for the specified platform.
pub fn platform(&self) -> Option<&Platform> {
self.inner.platform.as_ref()
}
pub fn rename(&self) -> Option<&str> {
self.inner.rename.as_ref().map(|s| &**s)
}
pub fn set_kind(&mut self, kind: Kind) -> &mut Dependency {
Rc::make_mut(&mut self.inner).kind = kind;
self
}
/// Sets the list of features requested for the package.
pub fn set_features(&mut self, features: Vec<String>) -> &mut Dependency {
Rc::make_mut(&mut self.inner).features =
features.iter().map(|s| InternedString::new(s)).collect();
self
}
/// Sets whether the dependency requests default features of the package.
pub fn set_default_features(&mut self, default_features: bool) -> &mut Dependency {
Rc::make_mut(&mut self.inner).default_features = default_features;
self
}
/// Sets whether the dependency is optional.
pub fn set_optional(&mut self, optional: bool) -> &mut Dependency {
Rc::make_mut(&mut self.inner).optional = optional;
self
}
/// Set the source id for this dependency
pub fn set_source_id(&mut self, id: SourceId) -> &mut Dependency {
Rc::make_mut(&mut self.inner).source_id = id;
self
}
/// Set the version requirement for this dependency
pub fn set_version_req(&mut self, req: VersionReq) -> &mut Dependency {
Rc::make_mut(&mut self.inner).req = req;
self
}
pub fn set_platform(&mut self, platform: Option<Platform>) -> &mut Dependency {
Rc::make_mut(&mut self.inner).platform = platform;
self
}
pub fn set_rename(&mut self, rename: &str) -> &mut Dependency {
Rc::make_mut(&mut self.inner).rename = Some(rename.to_string());
self
}
/// Lock this dependency to depending on the specified package id
pub fn lock_to(&mut self, id: &PackageId) -> &mut Dependency {
assert_eq!(self.inner.source_id, *id.source_id());
assert!(self.inner.req.matches(id.version()));
trace!(
"locking dep from `{}` with `{}` at {} to {}",
self.name(),
self.version_req(),
self.source_id(),
id
);
self.set_version_req(VersionReq::exact(id.version()))
.set_source_id(id.source_id().clone())
}
/// Returns whether this is a "locked" dependency, basically whether it has
/// an exact version req.
pub fn is_locked(&self) -> bool {
// Kind of a hack to figure this out, but it works!
self.inner.req.to_string().starts_with('=')
}
/// Returns false if the dependency is only used to build the local package.
pub fn is_transitive(&self) -> bool {
match self.inner.kind {
Kind::Normal | Kind::Build => true,
Kind::Development => false,
}
}
pub fn is_build(&self) -> bool {
match self.inner.kind {
Kind::Build => true,
_ => false,
}
}
pub fn is_optional(&self) -> bool |
/// Returns true if the default features of the dependency are requested.
pub fn uses_default_features(&self) -> bool {
self.inner.default_features
}
/// Returns the list of features that are requested by the dependency.
pub fn features(&self) -> &[InternedString] {
&self.inner.features
}
/// Returns true if the package (`sum`) can fulfill this dependency request.
pub fn matches(&self, sum: &Summary) -> bool {
self.matches_id(sum.package_id())
}
/// Returns true if the package (`sum`) can fulfill this dependency request.
pub fn matches_ignoring_source(&self, id: &PackageId) -> bool {
self.name() == id.name() && self.version_req().matches(id.version())
}
/// Returns true if the package (`id`) can fulfill this dependency request.
pub fn matches_id(&self, id: &PackageId) -> bool {
self.inner.name == id.name()
&& (self.inner.only_match_name
|| (self.inner.req.matches(id.version())
&& &self.inner.source_id == id.source_id()))
}
pub fn map_source(mut self, to_replace: &SourceId, replace_with: &SourceId) -> Dependency {
if self.source_id() != to_replace {
self
} else {
self.set_source_id(replace_with.clone());
self
}
}
}
impl Platform {
pub fn matches(&self, name: &str, cfg: Option<&[Cfg]>) -> bool {
match *self {
Platform::Name(ref p) => p == name,
Platform::Cfg(ref p) => match cfg {
Some(cfg) => p.matches(cfg),
None => false,
},
}
}
}
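// Quick illustrative check, not in the original file: a `Platform::Name` only
// matches by string equality with the target triple, while `Platform::Cfg` is
// evaluated against the provided `Cfg` list. The triples below are arbitrary.
#[cfg(test)]
mod platform_matches_sketch {
    use super::Platform;

    #[test]
    fn name_platforms_match_by_exact_triple() {
        let p = Platform::Name("x86_64-unknown-linux-gnu".to_string());
        assert!(p.matches("x86_64-unknown-linux-gnu", None));
        assert!(!p.matches("i686-pc-windows-msvc", None));
    }
}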
impl ser::Serialize for Platform {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
self.to_string().serialize(s)
}
}
impl FromStr for Platform {
type Err = CargoError;
fn from_str(s: &str) -> CargoResult<Platform> {
if s.starts_with("cfg(") && s.ends_with(')') {
let s = &s[4..s.len() - 1];
let p = s.parse()
.map(Platform::Cfg)
.chain_err(|| format_err!("failed to parse `{}` as a cfg expression", s))?;
Ok(p)
} else {
Ok(Platform::Name(s.to_string()))
}
}
}
impl fmt::Display for Platform {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Platform::Name(ref n) => n.fmt(f),
Platform::Cfg(ref e) => write!(f, "cfg({})", e),
}
}
}
| {
self.inner.optional
} | identifier_body |
tcp_client.rs | use std::{fmt, io};
use std::sync::Arc;
use std::net::SocketAddr;
use std::marker::PhantomData;
use BindClient;
use tokio_core::reactor::Handle;
use tokio_core::net::{TcpStream, TcpStreamNew};
use futures::{Future, Poll, Async};
// TODO: add configuration, e.g.:
// - connection timeout
// - multiple addresses
// - request timeout
// TODO: consider global event loop handle, so that providing one in the builder
// is optional
/// Builds client connections to external services.
///
/// To connect to a service, you need a *client protocol* implementation; see
/// the crate documentation for guidance. | #[derive(Debug)]
pub struct TcpClient<Kind, P> {
_kind: PhantomData<Kind>,
proto: Arc<P>,
}
/// A future for establishing a client connection.
///
/// Yields a service for interacting with the server.
pub struct Connect<Kind, P> {
_kind: PhantomData<Kind>,
proto: Arc<P>,
socket: TcpStreamNew,
handle: Handle,
}
impl<Kind, P> Future for Connect<Kind, P> where P: BindClient<Kind, TcpStream> {
type Item = P::BindClient;
type Error = io::Error;
fn poll(&mut self) -> Poll<P::BindClient, io::Error> {
let socket = try_ready!(self.socket.poll());
Ok(Async::Ready(self.proto.bind_client(&self.handle, socket)))
}
}
impl<Kind, P> TcpClient<Kind, P> where P: BindClient<Kind, TcpStream> {
/// Create a builder for the given client protocol.
///
/// To connect to a service, you need a *client protocol* implementation;
/// see the crate documentation for guidance.
pub fn new(protocol: P) -> TcpClient<Kind, P> {
TcpClient {
_kind: PhantomData,
proto: Arc::new(protocol)
}
}
/// Establish a connection to the given address.
///
/// # Return value
///
/// Returns a future for the establishment of the connection. When the
/// future completes, it yields an instance of `Service` for interacting
/// with the server.
pub fn connect(&self, addr: &SocketAddr, handle: &Handle) -> Connect<Kind, P> {
Connect {
_kind: PhantomData,
proto: self.proto.clone(),
socket: TcpStream::connect(addr, handle),
handle: handle.clone(),
}
}
}
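// Hedged usage sketch, not part of tokio-proto: one way a caller might drive
// `connect` to completion on a dedicated `Core`. The helper name is an
// assumption; `P` stays generic over any client protocol implementation.
#[allow(dead_code)]
fn connect_blocking<Kind, P>(proto: P, addr: &SocketAddr) -> io::Result<P::BindClient>
    where P: BindClient<Kind, TcpStream>
{
    use tokio_core::reactor::Core;

    // Spin up an event loop, connect, and block until the service is ready.
    let mut core = Core::new()?;
    let handle = core.handle();
    core.run(TcpClient::new(proto).connect(addr, &handle))
}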
impl<Kind, P> fmt::Debug for Connect<Kind, P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Connect {{... }}")
}
} | ///
/// At the moment, this builder offers minimal configuration, but more will be
/// added over time. | random_line_split |
tcp_client.rs | use std::{fmt, io};
use std::sync::Arc;
use std::net::SocketAddr;
use std::marker::PhantomData;
use BindClient;
use tokio_core::reactor::Handle;
use tokio_core::net::{TcpStream, TcpStreamNew};
use futures::{Future, Poll, Async};
// TODO: add configuration, e.g.:
// - connection timeout
// - multiple addresses
// - request timeout
// TODO: consider global event loop handle, so that providing one in the builder
// is optional
/// Builds client connections to external services.
///
/// To connect to a service, you need a *client protocol* implementation; see
/// the crate documentation for guidance.
///
/// At the moment, this builder offers minimal configuration, but more will be
/// added over time.
#[derive(Debug)]
pub struct TcpClient<Kind, P> {
_kind: PhantomData<Kind>,
proto: Arc<P>,
}
/// A future for establishing a client connection.
///
/// Yields a service for interacting with the server.
pub struct Connect<Kind, P> {
_kind: PhantomData<Kind>,
proto: Arc<P>,
socket: TcpStreamNew,
handle: Handle,
}
impl<Kind, P> Future for Connect<Kind, P> where P: BindClient<Kind, TcpStream> {
type Item = P::BindClient;
type Error = io::Error;
fn poll(&mut self) -> Poll<P::BindClient, io::Error> {
let socket = try_ready!(self.socket.poll());
Ok(Async::Ready(self.proto.bind_client(&self.handle, socket)))
}
}
impl<Kind, P> TcpClient<Kind, P> where P: BindClient<Kind, TcpStream> {
/// Create a builder for the given client protocol.
///
/// To connect to a service, you need a *client protocol* implementation;
/// see the crate documentation for guidance.
pub fn new(protocol: P) -> TcpClient<Kind, P> {
TcpClient {
_kind: PhantomData,
proto: Arc::new(protocol)
}
}
/// Establish a connection to the given address.
///
/// # Return value
///
/// Returns a future for the establishment of the connection. When the
/// future completes, it yields an instance of `Service` for interacting
/// with the server.
pub fn | (&self, addr: &SocketAddr, handle: &Handle) -> Connect<Kind, P> {
Connect {
_kind: PhantomData,
proto: self.proto.clone(),
socket: TcpStream::connect(addr, handle),
handle: handle.clone(),
}
}
}
impl<Kind, P> fmt::Debug for Connect<Kind, P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Connect {{... }}")
}
}
| connect | identifier_name |
tcp_client.rs | use std::{fmt, io};
use std::sync::Arc;
use std::net::SocketAddr;
use std::marker::PhantomData;
use BindClient;
use tokio_core::reactor::Handle;
use tokio_core::net::{TcpStream, TcpStreamNew};
use futures::{Future, Poll, Async};
// TODO: add configuration, e.g.:
// - connection timeout
// - multiple addresses
// - request timeout
// TODO: consider global event loop handle, so that providing one in the builder
// is optional
/// Builds client connections to external services.
///
/// To connect to a service, you need a *client protocol* implementation; see
/// the crate documentation for guidance.
///
/// At the moment, this builder offers minimal configuration, but more will be
/// added over time.
#[derive(Debug)]
pub struct TcpClient<Kind, P> {
_kind: PhantomData<Kind>,
proto: Arc<P>,
}
/// A future for establishing a client connection.
///
/// Yields a service for interacting with the server.
pub struct Connect<Kind, P> {
_kind: PhantomData<Kind>,
proto: Arc<P>,
socket: TcpStreamNew,
handle: Handle,
}
impl<Kind, P> Future for Connect<Kind, P> where P: BindClient<Kind, TcpStream> {
type Item = P::BindClient;
type Error = io::Error;
fn poll(&mut self) -> Poll<P::BindClient, io::Error> {
let socket = try_ready!(self.socket.poll());
Ok(Async::Ready(self.proto.bind_client(&self.handle, socket)))
}
}
impl<Kind, P> TcpClient<Kind, P> where P: BindClient<Kind, TcpStream> {
/// Create a builder for the given client protocol.
///
/// To connect to a service, you need a *client protocol* implementation;
/// see the crate documentation for guidance.
pub fn new(protocol: P) -> TcpClient<Kind, P> {
TcpClient {
_kind: PhantomData,
proto: Arc::new(protocol)
}
}
/// Establish a connection to the given address.
///
/// # Return value
///
/// Returns a future for the establishment of the connection. When the
/// future completes, it yields an instance of `Service` for interacting
/// with the server.
pub fn connect(&self, addr: &SocketAddr, handle: &Handle) -> Connect<Kind, P> {
Connect {
_kind: PhantomData,
proto: self.proto.clone(),
socket: TcpStream::connect(addr, handle),
handle: handle.clone(),
}
}
}
impl<Kind, P> fmt::Debug for Connect<Kind, P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
| {
write!(f, "Connect {{ ... }}")
} | identifier_body |
structure.rs | // Copyright 2016 Dario Domizioli
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustc_serialize::json;
use std::io::prelude::*;
use std::fs::File;
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
pub struct Chapter {
title: String,
files: Vec<String>
}
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
pub struct Part {
title: String,
chapters: Vec<Chapter>
}
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
pub struct Structure {
title: String,
author: String,
license: String,
parts: Vec<Part>
}
impl Structure {
pub fn from_json(js: &str) -> Result<Structure, json::DecoderError> {
json::decode::<Structure>(js)
} | pub fn get_title(&self) -> &str { &self.title }
}
#[derive(Clone, PartialEq)]
pub struct Content {
pub chunks: Vec<String>
}
impl Content {
fn build_title_page(st: &Structure) -> Result<String, String> {
let book_header =
r#"<div class="book_cover">"#.to_string() +
r#"<div class="book_author">"# +
&st.author +
r#"</div>"# +
r#"<div class="book_title">"# +
r#"<a id="kos_book_title">"# +
&st.title +
"</a></div>" +
r#"<div class="book_license">(C) "# +
&st.author +
" - " +
&st.license +
"</div></div>\n\n";
Ok(book_header)
}
fn build_toc(st: &Structure) -> Result<String, String> {
let mut toc = String::new();
toc = toc + r#"<div class="toc">"# + "\n\n";
let mut part_index = 1;
for part in st.parts.iter() {
let part_link = format!(
"- **[{0} {1}](#kos_ref_part_{0})**\n\n", part_index, part.title);
toc = toc + &part_link;
let mut chap_index = 1;
for chap in part.chapters.iter() {
let chap_link = format!(
" - *[{0}.{1} {2}](#kos_ref_chap_{0}_{1})*\n\n",
part_index, chap_index, chap.title);
toc = toc + &chap_link;
chap_index += 1;
}
part_index += 1;
}
toc = toc + "</div>\n\n";
Ok(toc)
}
fn build_chunks(st: &Structure) -> Result<Vec<String>, String> {
let mut chunks = Vec::new();
// Book cover first...
match Content::build_title_page(st) {
Ok(tp) => { chunks.push(tp); },
Err(e) => { return Err(e); }
}
// Then TOC...
match Content::build_toc(st) {
Ok(toc) => { chunks.push(toc); },
Err(e) => { return Err(e); }
}
// Then parts and chapters.
let mut part_index = 1;
for part in st.parts.iter() {
let part_header =
r#"<div class="part_"#.to_string() + // Open part div
&format!("{}", part_index) +
r#"">"# + "\n\n" +
r#"<div class="part_title">"# + // Part title div
r#"<a id="kos_ref_part_"# +
&format!("{}", part_index) +
r#"">"# +
&part.title +
"</a></div>\n\n"; // Close part title div
chunks.push(part_header);
let mut chap_index = 1;
for chap in part.chapters.iter() {
let chap_header =
r#"# <a id="kos_ref_chap_"#.to_string() +
&format!("{}", part_index) +
"_" +
&format!("{}", chap_index) +
r#""> "# +
&chap.title +
"</a>\n\n";
chunks.push(chap_header);
for f in chap.files.iter() {
let file_content = match File::open(f) {
Ok(mut fread) => {
let mut res = String::new();
match fread.read_to_string(&mut res) {
Ok(_) => (),
Err(_) => {
return Err(
"Error reading file ".to_string() +
f + "!\n");
}
}
res
},
Err(_) => {
return Err(
"Error reading file ".to_string() + f + "!\n");
}
};
chunks.push(file_content);
}
chap_index += 1;
}
chunks.push("\n\n</div>\n\n".to_string()); // Close part div
part_index += 1;
}
Ok(chunks)
}
pub fn from_structure(st: &Structure) -> Result<Content, String> {
let chunks = Content::build_chunks(st);
Ok(Content {
chunks: match chunks {
Ok(c) => c,
Err(e) => { return Err(e); }
}
})
}
pub fn to_single_string(&self) -> String {
self.chunks.iter().fold(String::new(), |acc, x| {
acc + "\n\n" + &x
})
}
} | random_line_split |
|
structure.rs | // Copyright 2016 Dario Domizioli
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustc_serialize::json;
use std::io::prelude::*;
use std::fs::File;
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
pub struct Chapter {
title: String,
files: Vec<String>
}
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
pub struct Part {
title: String,
chapters: Vec<Chapter>
}
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
pub struct Structure {
title: String,
author: String,
license: String,
parts: Vec<Part>
}
impl Structure {
pub fn from_json(js: &str) -> Result<Structure, json::DecoderError> {
json::decode::<Structure>(js)
}
pub fn get_title(&self) -> &str { &self.title }
}
#[derive(Clone, PartialEq)]
pub struct Content {
pub chunks: Vec<String>
}
impl Content {
fn build_title_page(st: &Structure) -> Result<String, String> |
fn build_toc(st: &Structure) -> Result<String, String> {
let mut toc = String::new();
toc = toc + r#"<div class="toc">"# + "\n\n";
let mut part_index = 1;
for part in st.parts.iter() {
let part_link = format!(
"- **[{0} {1}](#kos_ref_part_{0})**\n\n", part_index, part.title);
toc = toc + &part_link;
let mut chap_index = 1;
for chap in part.chapters.iter() {
let chap_link = format!(
" - *[{0}.{1} {2}](#kos_ref_chap_{0}_{1})*\n\n",
part_index, chap_index, chap.title);
toc = toc + &chap_link;
chap_index += 1;
}
part_index += 1;
}
toc = toc + "</div>\n\n";
Ok(toc)
}
fn build_chunks(st: &Structure) -> Result<Vec<String>, String> {
let mut chunks = Vec::new();
// Book cover first...
match Content::build_title_page(st) {
Ok(tp) => { chunks.push(tp); },
Err(e) => { return Err(e); }
}
// Then TOC...
match Content::build_toc(st) {
Ok(toc) => { chunks.push(toc); },
Err(e) => { return Err(e); }
}
// Then parts and chapters.
let mut part_index = 1;
for part in st.parts.iter() {
let part_header =
r#"<div class="part_"#.to_string() + // Open part div
&format!("{}", part_index) +
r#"">"# + "\n\n" +
r#"<div class="part_title">"# + // Part title div
r#"<a id="kos_ref_part_"# +
&format!("{}", part_index) +
r#"">"# +
&part.title +
"</a></div>\n\n"; // Close part title div
chunks.push(part_header);
let mut chap_index = 1;
for chap in part.chapters.iter() {
let chap_header =
r#"# <a id="kos_ref_chap_"#.to_string() +
&format!("{}", part_index) +
"_" +
&format!("{}", chap_index) +
r#""> "# +
&chap.title +
"</a>\n\n";
chunks.push(chap_header);
for f in chap.files.iter() {
let file_content = match File::open(f) {
Ok(mut fread) => {
let mut res = String::new();
match fread.read_to_string(&mut res) {
Ok(_) => (),
Err(_) => {
return Err(
"Error reading file ".to_string() +
f + "!\n");
}
}
res
},
Err(_) => {
return Err(
"Error reading file ".to_string() + f + "!\n");
}
};
chunks.push(file_content);
}
chap_index += 1;
}
chunks.push("\n\n</div>\n\n".to_string()); // Close part div
part_index += 1;
}
Ok(chunks)
}
pub fn from_structure(st: &Structure) -> Result<Content, String> {
let chunks = Content::build_chunks(st);
Ok(Content {
chunks: match chunks {
Ok(c) => c,
Err(e) => { return Err(e); }
}
})
}
pub fn to_single_string(&self) -> String {
self.chunks.iter().fold(String::new(), |acc, x| {
acc + "\n\n" + &x
})
}
}
| {
let book_header =
r#"<div class="book_cover">"#.to_string() +
r#"<div class="book_author">"# +
&st.author +
r#"</div>"# +
r#"<div class="book_title">"# +
r#"<a id="kos_book_title">"# +
&st.title +
"</a></div>" +
r#"<div class="book_license">(C) "# +
&st.author +
" - " +
&st.license +
"</div></div>\n\n";
Ok(book_header)
} | identifier_body |
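// Editor's illustration (not part of the dataset rows): driving the
// `Structure`/`Content` types above end to end. The JSON literal is a made-up
// minimal book description; error formatting uses `{:?}` to avoid assuming a
// `Display` impl on the decoder error.
fn build_book_demo() -> Result<String, String> {
    let js = r#"{"title":"Example Book","author":"A. Author","license":"CC-BY-4.0",
                 "parts":[{"title":"Part One","chapters":[]}]}"#;
    let st = match Structure::from_json(js) {
        Ok(st) => st,
        Err(e) => return Err(format!("could not decode book structure: {:?}", e)),
    };
    let content = try!(Content::from_structure(&st));
    Ok(content.to_single_string())
}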
structure.rs | // Copyright 2016 Dario Domizioli
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustc_serialize::json;
use std::io::prelude::*;
use std::fs::File;
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
pub struct Chapter {
title: String,
files: Vec<String>
}
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
pub struct Part {
title: String,
chapters: Vec<Chapter>
}
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
pub struct | {
title: String,
author: String,
license: String,
parts: Vec<Part>
}
impl Structure {
pub fn from_json(js: &str) -> Result<Structure, json::DecoderError> {
json::decode::<Structure>(js)
}
pub fn get_title(&self) -> &str { &self.title }
}
#[derive(Clone, PartialEq)]
pub struct Content {
pub chunks: Vec<String>
}
impl Content {
fn build_title_page(st: &Structure) -> Result<String, String> {
let book_header =
r#"<div class="book_cover">"#.to_string() +
r#"<div class="book_author">"# +
&st.author +
r#"</div>"# +
r#"<div class="book_title">"# +
r#"<a id="kos_book_title">"# +
&st.title +
"</a></div>" +
r#"<div class="book_license">(C) "# +
&st.author +
" - " +
&st.license +
"</div></div>\n\n";
Ok(book_header)
}
fn build_toc(st: &Structure) -> Result<String, String> {
let mut toc = String::new();
toc = toc + r#"<div class="toc">"# + "\n\n";
let mut part_index = 1;
for part in st.parts.iter() {
let part_link = format!(
"- **[{0} {1}](#kos_ref_part_{0})**\n\n", part_index, part.title);
toc = toc + &part_link;
let mut chap_index = 1;
for chap in part.chapters.iter() {
let chap_link = format!(
" - *[{0}.{1} {2}](#kos_ref_chap_{0}_{1})*\n\n",
part_index, chap_index, chap.title);
toc = toc + &chap_link;
chap_index += 1;
}
part_index += 1;
}
toc = toc + "</div>\n\n";
Ok(toc)
}
fn build_chunks(st: &Structure) -> Result<Vec<String>, String> {
let mut chunks = Vec::new();
// Book cover first...
match Content::build_title_page(st) {
Ok(tp) => { chunks.push(tp); },
Err(e) => { return Err(e); }
}
// Then TOC...
match Content::build_toc(st) {
Ok(toc) => { chunks.push(toc); },
Err(e) => { return Err(e); }
}
// Then parts and chapters.
let mut part_index = 1;
for part in st.parts.iter() {
let part_header =
r#"<div class="part_"#.to_string() + // Open part div
&format!("{}", part_index) +
r#"">"# + "\n\n" +
r#"<div class="part_title">"# + // Part title div
r#"<a id="kos_ref_part_"# +
&format!("{}", part_index) +
r#"">"# +
&part.title +
"</a></div>\n\n"; // Close part title div
chunks.push(part_header);
let mut chap_index = 1;
for chap in part.chapters.iter() {
let chap_header =
r#"# <a id="kos_ref_chap_"#.to_string() +
&format!("{}", part_index) +
"_" +
&format!("{}", chap_index) +
r#""> "# +
&chap.title +
"</a>\n\n";
chunks.push(chap_header);
for f in chap.files.iter() {
let file_content = match File::open(f) {
Ok(mut fread) => {
let mut res = String::new();
match fread.read_to_string(&mut res) {
Ok(_) => (),
Err(_) => {
return Err(
"Error reading file ".to_string() +
f + "!\n");
}
}
res
},
Err(_) => {
return Err(
"Error reading file ".to_string() + f + "!\n");
}
};
chunks.push(file_content);
}
chap_index += 1;
}
chunks.push("\n\n</div>\n\n".to_string()); // Close part div
part_index += 1;
}
Ok(chunks)
}
pub fn from_structure(st: &Structure) -> Result<Content, String> {
let chunks = Content::build_chunks(st);
Ok(Content {
chunks: match chunks {
Ok(c) => c,
Err(e) => { return Err(e); }
}
})
}
pub fn to_single_string(&self) -> String {
self.chunks.iter().fold(String::new(), |acc, x| {
acc + "\n\n" + &x
})
}
}
| Structure | identifier_name |
htmltablecolelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLTableColElement {
htmlelement: HTMLElement,
}
impl HTMLTableColElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLTableColElement {
HTMLTableColElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document, | )),
document,
);
n.upcast::<Node>().set_weird_parser_insertion_mode();
n
}
} | ) -> DomRoot<HTMLTableColElement> {
let n = Node::reflect_node(
Box::new(HTMLTableColElement::new_inherited(
local_name, prefix, document, | random_line_split |
htmltablecolelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLTableColElement {
htmlelement: HTMLElement,
}
impl HTMLTableColElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLTableColElement {
HTMLTableColElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLTableColElement> |
}
| {
let n = Node::reflect_node(
Box::new(HTMLTableColElement::new_inherited(
local_name, prefix, document,
)),
document,
);
n.upcast::<Node>().set_weird_parser_insertion_mode();
n
} | identifier_body |
htmltablecolelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLTableColElement {
htmlelement: HTMLElement,
}
impl HTMLTableColElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLTableColElement {
HTMLTableColElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn | (
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLTableColElement> {
let n = Node::reflect_node(
Box::new(HTMLTableColElement::new_inherited(
local_name, prefix, document,
)),
document,
);
n.upcast::<Node>().set_weird_parser_insertion_mode();
n
}
}
| new | identifier_name |
error.rs | use rustc_serialize::json;
use rustc_serialize::Decoder;
use rustc_serialize::Decodable;
use std::str;
use std::fmt;
/// Documentation References:
/// https://developer.github.com/v3/#client-errors
/// `ErrorCode` represents the type of error that was reported
/// as a response to a request to the GitHub API.
#[derive(Debug)]
pub enum ErrorCode {
/// This means a resource does not exist.
Missing,
/// This means a required field on a resource has not been set.
MissingField,
/// This means the formatting of a field is invalid.
/// The documentation for that resource should be able
/// to give you more specific information.
Invalid,
/// This means another resource has the same value as this field.
/// This can happen in resources that must
/// have some unique key (such as Label names).
AlreadyExists,
/// `Unknown(String)` is used as a last resort when an error code is unknown.
/// This should never happen, please report/resolve the issue when it does happen.
Unknown(String),
}
/// Allowing `ErrorCode` to be printed via `{}`.
impl fmt::Display for ErrorCode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let msg: &str = match *self {
ErrorCode::Missing => "resource does not exist",
ErrorCode::MissingField => "required field on the resource has not been set",
ErrorCode::Invalid => "the formatting of the field is invalid",
ErrorCode::AlreadyExists => "another resource has the same value as this field",
ErrorCode::Unknown(ref s) => &s,
};
write!(f, "{}", msg)
}
}
/// Allowing `ErrorCode` to be decoded from json values.
/// Linked to the `error` key as defined by the `ErrorContext` struct's member.
impl Decodable for ErrorCode {
fn decode<D: Decoder>(d: &mut D) -> Result<ErrorCode, D::Error> {
match d.read_str() {
Ok(code) => Ok(match &*code {
"missing" => ErrorCode::Missing,
"missing_field" => ErrorCode::MissingField,
"invalid" => ErrorCode::Invalid,
"already_exists" => ErrorCode::AlreadyExists,
unknown => ErrorCode::Unknown(unknown.to_string()),
}),
Err(err) => Err(err),
}
}
}
/// When a request was successful.
const STATUS_OK: u32 = 200;
/// There was a problem with the data sent with the request.
const STATUS_BAD_REQUEST: u32 = 400;
/// Given as a response to requests the user has insufficient permissions for.
const STATUS_FORBIDDEN: u32 = 403;
/// Given when the info requested is not found because it
/// either doesn't exist or because you are not authorized.
const STATUS_NOT_FOUND: u32 = 404;
/// Given when a field or resource couldn't be processed properly.
const STATUS_UNPROCCESSABLE_ENTITY: u32 = 422;
/// When a negative status was given as a response to a request,
/// there might be one or several error descriptions embedded in the
/// body to tell more about the details of what was wrong.
/// `ErrorContext` is the representation for each of the errors that are given.
#[derive(RustcDecodable, Debug)]
pub struct ErrorContext {
pub resource: String,
pub field: String,
pub code: ErrorCode,
}
/// Allowing `ErrorContext` to be printed via `{}` in a controlled manner.
impl fmt::Display for ErrorContext {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Error found in {}.{}: {}", self.resource, self.field, self.code)
}
}
/// `ErrorStatus` represents the status code given in the header of a negative response.
/// Look at const definitions such as `STATUS_OK` for more information for each value.
#[derive(Debug)]
pub enum ErrorStatus {
BadRequest,
UnprocessableEntity,
Forbidden,
NotFound,
Unknown(u32),
}
/// Allowing `ErrorStatus` to be printed via `{}` in a controlled manner.
impl fmt::Display for ErrorStatus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let (code, msg) = match *self {
ErrorStatus::BadRequest => (STATUS_BAD_REQUEST, "Bad Request"),
ErrorStatus::UnprocessableEntity => (STATUS_UNPROCCESSABLE_ENTITY, "Unprocessable Entity"),
ErrorStatus::Forbidden => (STATUS_FORBIDDEN, "Forbidden Request"),
ErrorStatus::NotFound => (STATUS_NOT_FOUND, "Not Found"),
ErrorStatus::Unknown(e) => (e, "Unknown"),
};
write!(f, "status {}: {}", code, msg)
}
}
impl ErrorStatus {
/// Simple way to construct an `ErrorStatus`
/// based on its constant value as defined by the official docs.
pub fn new(code: u32) -> ErrorStatus {
match code {
STATUS_BAD_REQUEST => ErrorStatus::BadRequest,
STATUS_FORBIDDEN => ErrorStatus::Forbidden,
STATUS_UNPROCCESSABLE_ENTITY => ErrorStatus::UnprocessableEntity,
STATUS_NOT_FOUND => ErrorStatus::NotFound,
unknown => ErrorStatus::Unknown(unknown),
}
}
}
/// `RequestError` will be returned as a `Result<T, ClientError>` in case
/// a request responds negatively, populated by information from
/// both the header and body.
#[derive(Debug)]
pub struct RequestError {
/// `code` represents the given status code
/// stored in the form of `ErrorStatus`.
pub code: ErrorStatus,
/// In case detailed errors are available
/// they will be accessible via `errors`, stored as an `ErrorContext`.
pub errors: Vec<ErrorContext>,
}
impl RequestError {
/// Simple way to construct a `Result<T, ClientError>` based on
/// the status code given in the header and the body in a raw utf8 buffer.
pub fn new<T>(code: u32, buffer: &[u8]) -> Result<T, ClientError> {
Err(ClientError::Http(RequestError {
code: ErrorStatus::new(code),
errors: match str::from_utf8(buffer) {
Err(..) => Vec::new(),
Ok(body) => json::decode(body).unwrap_or(Vec::new()),
},
}))
}
}
/// Allowing `RequestError` to be printed via `{}` in a controlled manner.
impl fmt::Display for RequestError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HTTP Error: {}. Found {} error description(s)!", self.code, self.errors.len())
}
}
/// `InternalError` will be given in the form of Result<T, ClientError> in
/// case something went wrong within this Client Library.
/// It replaces panics so that you can freely choose the behaviour.
/// Please file an issue and/or resolve the bug yourself when you get this error.
#[derive(Debug)]
pub struct InternalError {
/// `msg` is the actual description of the problem.
/// future versions of this library might store extra info
/// where it would help the debugging of an error.
pub msg: String,
}
impl InternalError {
/// Simple way to construct a `Result<T, ClientError>` based on
/// information known for an internal error.
pub fn new<T>(msg: &str) -> Result<T, ClientError> {
Err(ClientError::Internal(InternalError { msg: msg.to_string() }))
}
}
/// Allowing `InternalError` to be printed via `{}` in a controlled manner.
impl fmt::Display for InternalError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Internal Error: {}", self.msg)
}
}
/// `ClientError` enumerates all the possible errors that a public
/// client (request) function of this library might be given.
#[derive(Debug)]
pub enum ClientError {
/// Read the documentation for `RequestError`
/// for more information on this error.
Http(RequestError),
/// Read the documentation for `InternalError`
/// for more information on this error.
Internal(InternalError)
}
/// Allowing `ClientError` to be printed via `{}` in a controlled manner.
impl fmt::Display for ClientError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&ClientError::Http(ref e) => write!(f, "{}", e),
&ClientError::Internal(ref e) => write!(f, "{}", e),
}
}
}
/// Simplistic function internally used to check
/// if a returned status code is positive,
/// which means that the request was successful.
pub fn | (code: u32) -> bool {
code == STATUS_OK
}
| check_status_code | identifier_name |
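// Editor's illustration (not part of the dataset rows): exercising the error
// types defined above. The status codes and the empty JSON body are arbitrary.
fn error_handling_demo() {
    // Positive status codes pass the check.
    assert!(check_status_code(200));
    // Negative statuses map onto `ErrorStatus` variants with a readable Display.
    println!("{}", ErrorStatus::new(404)); // status 404: Not Found
    // `RequestError::new` always yields an `Err`, decoding whatever error
    // descriptions it can find in the body (an empty JSON array here).
    let res: Result<(), ClientError> = RequestError::new(422, b"[]");
    match res {
        Err(e) => println!("{}", e),
        Ok(()) => unreachable!(),
    }
}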
error.rs | use rustc_serialize::json;
use rustc_serialize::Decoder;
use rustc_serialize::Decodable;
use std::str;
use std::fmt;
/// Documentation References:
/// https://developer.github.com/v3/#client-errors
/// `ErrorCode` represents the type of error that was reported
/// as a response to a request to the GitHub API.
#[derive(Debug)]
pub enum ErrorCode {
/// This means a resource does not exist.
Missing,
/// This means a required field on a resource has not been set.
MissingField,
/// This means the formatting of a field is invalid.
/// The documentation for that resource should be able
/// to give you more specific information.
Invalid,
/// This means another resource has the same value as this field.
/// This can happen in resources that must
/// have some unique key (such as Label names).
AlreadyExists,
/// `Unknown(String)` is used as a last resort when an error code is unknown.
/// This should never happen, please report/resolve the issue when it does happen. | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let msg: &str = match *self {
ErrorCode::Missing => "resource does not exist",
ErrorCode::MissingField => "required field on the resource has not been set",
ErrorCode::Invalid => "the formatting of the field is invalid",
ErrorCode::AlreadyExists => "another resource has the same value as this field",
ErrorCode::Unknown(ref s) => &s,
};
write!(f, "{}", msg)
}
}
/// Allowing `ErrorCode` to be decoded from json values.
/// Linked to the `error` key as defined by the `ErrorContext` struct's member.
impl Decodable for ErrorCode {
fn decode<D: Decoder>(d: &mut D) -> Result<ErrorCode, D::Error> {
match d.read_str() {
Ok(code) => Ok(match &*code {
"missing" => ErrorCode::Missing,
"missing_field" => ErrorCode::MissingField,
"invalid" => ErrorCode::Invalid,
"already_exists" => ErrorCode::AlreadyExists,
unknown => ErrorCode::Unknown(unknown.to_string()),
}),
Err(err) => Err(err),
}
}
}
/// When a request was successful.
const STATUS_OK: u32 = 200;
/// There was a problem with the data sent with the request.
const STATUS_BAD_REQUEST: u32 = 400;
/// Given as a response to requests the user has insufficient permissions for.
const STATUS_FORBIDDEN: u32 = 403;
/// Given when the info requested is not found because it
/// either doesn't exist or because you are not authorized.
const STATUS_NOT_FOUND: u32 = 404;
/// Given when a field or resource couldn't be processed properly.
const STATUS_UNPROCCESSABLE_ENTITY: u32 = 422;
/// When a negative status was given as a response to a request,
/// there might be one or several error descriptions embedded in the
/// body to tell more about the details of what was wrong.
/// `ErrorContext` is the representation for each of the errors that are given.
#[derive(RustcDecodable, Debug)]
pub struct ErrorContext {
pub resource: String,
pub field: String,
pub code: ErrorCode,
}
/// Allowing `ErrorContext` to be printed via `{}` in a controlled manner.
impl fmt::Display for ErrorContext {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Error found in {}.{}: {}", self.resource, self.field, self.code)
}
}
/// `ErrorStatus` represents the status code given in the header of a negative response.
/// Look at const definitions such as `STATUS_OK` for more information for each value.
#[derive(Debug)]
pub enum ErrorStatus {
BadRequest,
UnprocessableEntity,
Forbidden,
NotFound,
Unknown(u32),
}
/// Allowing `ErrorStatus` to be printed via `{}` in a controlled manner.
impl fmt::Display for ErrorStatus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let (code, msg) = match *self {
ErrorStatus::BadRequest => (STATUS_BAD_REQUEST, "Bad Request"),
ErrorStatus::UnprocessableEntity => (STATUS_UNPROCCESSABLE_ENTITY, "Unprocessable Entity"),
ErrorStatus::Forbidden => (STATUS_FORBIDDEN, "Forbidden Request"),
ErrorStatus::NotFound => (STATUS_NOT_FOUND, "Not Found"),
ErrorStatus::Unknown(e) => (e, "Unknown"),
};
write!(f, "status {}: {}", code, msg)
}
}
impl ErrorStatus {
/// Simple way to construct an `ErrorStatus`
/// based on its constant value as defined by the official docs.
pub fn new(code: u32) -> ErrorStatus {
match code {
STATUS_BAD_REQUEST => ErrorStatus::BadRequest,
STATUS_FORBIDDEN => ErrorStatus::Forbidden,
STATUS_UNPROCCESSABLE_ENTITY => ErrorStatus::UnprocessableEntity,
STATUS_NOT_FOUND => ErrorStatus::NotFound,
unknown => ErrorStatus::Unknown(unknown),
}
}
}
/// `RequestError` will be returned as a `Result<T, ClientError>` in case
/// a request responds negatively, populated by information from
/// both the header and body.
#[derive(Debug)]
pub struct RequestError {
/// `code` represents the given status code
/// stored in the form of `ErrorStatus`.
pub code: ErrorStatus,
/// In case detailed errors are available
/// they will be accessible via `errors`, stored as an `ErrorContext`.
pub errors: Vec<ErrorContext>,
}
impl RequestError {
/// Simple way to construct a `Result<T, ClientError>` based on
/// the status code given in the header and the body in a raw utf8 buffer.
pub fn new<T>(code: u32, buffer: &[u8]) -> Result<T, ClientError> {
Err(ClientError::Http(RequestError {
code: ErrorStatus::new(code),
errors: match str::from_utf8(buffer) {
Err(..) => Vec::new(),
Ok(body) => json::decode(body).unwrap_or(Vec::new()),
},
}))
}
}
/// Allowing `RequestError` to be printed via `{}` in a controlled manner.
impl fmt::Display for RequestError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HTTP Error: {}. Found {} error description(s)!", self.code, self.errors.len())
}
}
/// `InternalError` will be given in the form of Result<T, ClientError> in
/// case something went wrong within this Client Library.
/// It replaces panics so that you can freely choose the behaviour.
/// Please file an issue and/or resolve the bug yourself when you get this error.
#[derive(Debug)]
pub struct InternalError {
/// `msg` is the actual description of the problem.
/// future versions of this library might store extra info
/// where it would help the debugging of an error.
pub msg: String,
}
impl InternalError {
/// Simple way to construct a `Result<T, ClientError>` based on
/// information known for an internal error.
pub fn new<T>(msg: &str) -> Result<T, ClientError> {
Err(ClientError::Internal(InternalError { msg: msg.to_string() }))
}
}
/// Allowing `InternalError` to be printed via `{}` in a controlled manner.
impl fmt::Display for InternalError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Internal Error: {}", self.msg)
}
}
/// `ClientError` enumerates all the possible errors that a public
/// client (request) function of this library might be given.
#[derive(Debug)]
pub enum ClientError {
/// Read the documentation for `RequestError`
/// for more information on this error.
Http(RequestError),
/// Read the documentation for `InternalError`
/// for more information on this error.
Internal(InternalError)
}
/// Allowing `ClientError` to be printed via `{}` in a controlled manner.
impl fmt::Display for ClientError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&ClientError::Http(ref e) => write!(f, "{}", e),
&ClientError::Internal(ref e) => write!(f, "{}", e),
}
}
}
/// Simplistic function internally used to check
/// if a returned status code is positive,
/// which means that the request was successful.
pub fn check_status_code(code: u32) -> bool {
code == STATUS_OK
} | Unknown(String),
}
/// Allowing `ErrorCode` to be printed via `{}`.
impl fmt::Display for ErrorCode { | random_line_split |
check_internal.rs | /*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/gfx/check_internal.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
Provides a handy macro to check the outcome
of an OpenGL call for errors -- use it everywhere.
*/
#[cfg(check_gl)]
#[macro_escape] | {
use gl2 = opengles::gl2;
use log::Log;
let err = gl2::get_error();
if err != gl2::NO_ERROR
{
log_error!(func);
log_fail!(util::get_err_str(err));
}
}
#[cfg(not(check_gl))]
pub fn check_gl(_func: &str)
{ }
macro_rules! check
(
($func:expr) =>
({
let ret = $func;
check::check_gl(stringify!($func));
ret
});
)
macro_rules! check_unsafe
(
($func:expr) =>
({
unsafe { check!($func) }
});
) | #[path = "../log/macros.rs"]
mod macros;
#[cfg(check_gl)]
pub fn check_gl(func: &str) | random_line_split |
check_internal.rs | /*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/gfx/check_internal.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
Provides a handy macro to check the outcome
of an OpenGL call for errors -- use it everywhere.
*/
#[cfg(check_gl)]
#[macro_escape]
#[path = "../log/macros.rs"]
mod macros;
#[cfg(check_gl)]
pub fn check_gl(func: &str)
{
use gl2 = opengles::gl2;
use log::Log;
let err = gl2::get_error();
if err != gl2::NO_ERROR
{
log_error!(func);
log_fail!(util::get_err_str(err));
}
}
#[cfg(not(check_gl))]
pub fn | (_func: &str)
{ }
macro_rules! check
(
($func:expr) =>
({
let ret = $func;
check::check_gl(stringify!($func));
ret
});
)
macro_rules! check_unsafe
(
($func:expr) =>
({
unsafe { check!($func) }
});
)
| check_gl | identifier_name |
check_internal.rs | /*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/gfx/check_internal.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
Provides a handy macro to check the outcome
of an OpenGL call for errors -- use it everywhere.
*/
#[cfg(check_gl)]
#[macro_escape]
#[path = "../log/macros.rs"]
mod macros;
#[cfg(check_gl)]
pub fn check_gl(func: &str)
{
use gl2 = opengles::gl2;
use log::Log;
let err = gl2::get_error();
if err != gl2::NO_ERROR
{
log_error!(func);
log_fail!(util::get_err_str(err));
}
}
#[cfg(not(check_gl))]
pub fn check_gl(_func: &str)
|
macro_rules! check
(
($func:expr) =>
({
let ret = $func;
check::check_gl(stringify!($func));
ret
});
)
macro_rules! check_unsafe
(
($func:expr) =>
({
unsafe { check!($func) }
});
)
| { } | identifier_body |
check_internal.rs | /*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/gfx/check_internal.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
Provides a handy macro to check the outcome
of an OpenGL call for errors -- use it everywhere.
*/
#[cfg(check_gl)]
#[macro_escape]
#[path = "../log/macros.rs"]
mod macros;
#[cfg(check_gl)]
pub fn check_gl(func: &str)
{
use gl2 = opengles::gl2;
use log::Log;
let err = gl2::get_error();
if err != gl2::NO_ERROR
|
}
#[cfg(not(check_gl))]
pub fn check_gl(_func: &str)
{ }
macro_rules! check
(
($func:expr) =>
({
let ret = $func;
check::check_gl(stringify!($func));
ret
});
)
macro_rules! check_unsafe
(
($func:expr) =>
({
unsafe { check!($func) }
});
)
| {
log_error!(func);
log_fail!(util::get_err_str(err));
} | conditional_block |
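// Editor's illustration (not part of the dataset rows): the intended call
// pattern for the check!/check_unsafe! macros above. It is left as a comment
// because the macros expand to `check::check_gl(..)` and so must be invoked
// from a sibling module; the exact `gl2` signatures are an assumption based on
// the opengles bindings this file imports.
//
//   check!(gl2::clear_color(0.0, 0.0, 0.0, 1.0));
//   check!(gl2::clear(gl2::COLOR_BUFFER_BIT));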
env.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Runtime environment settings
use libc::{size_t, c_char, c_int};
pub struct Environment {
/// The number of threads to use by default
num_sched_threads: size_t,
/// The minimum size of a stack segment
min_stack_size: size_t,
/// The maximum amount of total stack per task before aborting
max_stack_size: size_t,
/// The default logging configuration | /// Poison allocations on free
poison_on_free: bool,
/// The argc value passed to main
argc: c_int,
/// The argv value passed to main
argv: **c_char,
/// Print GC debugging info
debug_mem: bool
}
/// Get the global environment settings
/// # Safety Note
/// This will abort the process if run outside of task context
pub fn get() -> &Environment {
unsafe { rust_get_rt_env() }
}
extern {
fn rust_get_rt_env() -> &Environment;
} | logspec: *c_char,
/// Record and report detailed information about memory leaks
detailed_leaks: bool,
/// Seed the random number generator
rust_seed: *c_char, | random_line_split |
env.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Runtime environment settings
use libc::{size_t, c_char, c_int};
pub struct | {
/// The number of threads to use by default
num_sched_threads: size_t,
/// The minimum size of a stack segment
min_stack_size: size_t,
/// The maximum amount of total stack per task before aborting
max_stack_size: size_t,
/// The default logging configuration
logspec: *c_char,
/// Record and report detailed information about memory leaks
detailed_leaks: bool,
/// Seed the random number generator
rust_seed: *c_char,
/// Poison allocations on free
poison_on_free: bool,
/// The argc value passed to main
argc: c_int,
/// The argv value passed to main
argv: **c_char,
/// Print GC debugging info
debug_mem: bool
}
/// Get the global environment settings
/// # Safety Note
/// This will abort the process if run outside of task context
pub fn get() -> &Environment {
unsafe { rust_get_rt_env() }
}
extern {
fn rust_get_rt_env() -> &Environment;
}
| Environment | identifier_name |
env.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Runtime environment settings
use libc::{size_t, c_char, c_int};
pub struct Environment {
/// The number of threads to use by default
num_sched_threads: size_t,
/// The minimum size of a stack segment
min_stack_size: size_t,
/// The maximum amount of total stack per task before aborting
max_stack_size: size_t,
/// The default logging configuration
logspec: *c_char,
/// Record and report detailed information about memory leaks
detailed_leaks: bool,
/// Seed the random number generator
rust_seed: *c_char,
/// Poison allocations on free
poison_on_free: bool,
/// The argc value passed to main
argc: c_int,
/// The argv value passed to main
argv: **c_char,
/// Print GC debugging info
debug_mem: bool
}
/// Get the global environment settings
/// # Safety Note
/// This will abort the process if run outside of task context
pub fn get() -> &Environment |
extern {
fn rust_get_rt_env() -> &Environment;
}
| {
unsafe { rust_get_rt_env() }
} | identifier_body |
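// Editor's illustration (not part of the dataset rows): reading a couple of the
// fields documented above. Kept as a comment because, per the safety note,
// `get()` may only be called from task context.
//
//   let env = env::get();
//   println!("sched threads: {}, min stack: {}",
//            env.num_sched_threads, env.min_stack_size);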
task.rs | Runtime;
use local::Local;
use local_heap::LocalHeap;
use rtio::LocalIo;
use unwind;
use unwind::Unwinder;
use collections::str::SendStr;
/// State associated with Rust tasks.
///
/// Rust tasks are primarily built with two separate components. One is this
/// structure which handles standard services such as TLD, unwinding support,
/// naming of a task, etc. The second component is the runtime of this task, a
/// `Runtime` trait object.
///
/// The `Runtime` object instructs this task how it can perform critical
/// operations such as blocking, rescheduling, I/O constructors, etc. The two
/// halves are separately owned, but one is often found contained in the other.
/// A task's runtime can be reflected upon with the `maybe_take_runtime` method,
/// and otherwise its ownership is managed with `take_runtime` and
/// `put_runtime`.
///
/// In general, this structure should not be used. This is meant to be an
/// unstable internal detail of the runtime itself. From time-to-time, however,
/// it is useful to manage tasks directly. An example of this would be
/// interoperating with the Rust runtime from FFI callbacks or such. For this
/// reason, there are two methods of note with the `Task` structure.
///
/// * `run` - This function will execute a closure inside the context of a task.
/// Failure is caught and handled via the task's on_exit callback. If
/// this fails, the task is still returned, but it can no longer be
/// used, it is poisoned.
///
/// * `destroy` - This is a required function to call to destroy a task. If a
/// task falls out of scope without calling `destroy`, its
/// destructor bomb will go off, aborting the process.
///
/// With these two methods, tasks can be re-used to execute code inside of its
/// context while having a point in the future where destruction is allowed.
/// More information can be found on these specific methods.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a task using a native runtime
/// let task = native::task::new((0, uint::MAX));
///
/// // Run some code, catching any possible failures
/// let task = task.run(|| {
/// // Run some code inside this task
/// println!("Hello with a native runtime!");
/// });
///
/// // Run some code again, catching the failure
/// let task = task.run(|| {
/// fail!("oh no, what to do!");
/// });
///
/// // Now that the task is failed, it can never be used again
/// assert!(task.is_destroyed());
///
/// // Deallocate the resources associated with this task
/// task.destroy();
/// # }
/// ```
pub struct Task {
pub heap: LocalHeap,
pub gc: GarbageCollector,
pub storage: LocalStorage,
pub unwinder: Unwinder,
pub death: Death,
pub name: Option<SendStr>,
state: TaskState,
imp: Option<Box<Runtime + Send + 'static>>,
}
// Once a task has entered the `Armed` state it must be destroyed via `drop`,
// and no other method. This state is used to track this transition.
#[deriving(PartialEq)]
enum TaskState {
New,
Armed,
Destroyed,
}
pub struct TaskOpts {
/// Invoke this procedure with the result of the task when it finishes.
pub on_exit: Option<proc(Result): Send>,
/// A name for the task-to-be, for identification in failure messages
pub name: Option<SendStr>,
/// The size of the stack for the spawned task
pub stack_size: Option<uint>,
}
/// Indicates the manner in which a task exited.
///
/// A task that completes without failing is considered to exit successfully.
///
/// If you wish for this result's delivery to block until all
/// children tasks complete, it is recommended to use a result future.
pub type Result = ::core::result::Result<(), Box<Any + Send>>;
pub struct GarbageCollector;
pub struct LocalStorage(pub Option<local_data::Map>);
/// A handle to a blocked task. Usually this means having the Box<Task>
/// pointer by ownership, but if the task is killable, a killer can steal it
/// at any time.
pub enum BlockedTask {
Owned(Box<Task>),
Shared(Arc<AtomicUint>),
}
/// Per-task state related to task death, killing, failure, etc.
pub struct Death {
pub on_exit: Option<proc(Result):Send>,
marker: marker::NoCopy,
}
pub struct BlockedTasks {
inner: Arc<AtomicUint>,
}
impl Task {
/// Creates a new uninitialized task.
///
/// This method cannot be used to immediately invoke `run` because the task
/// itself will likely require a runtime to be inserted via `put_runtime`.
///
/// Note that you likely don't want to call this function, but rather the
/// task creation functions through libnative or libgreen.
pub fn new() -> Task {
Task {
heap: LocalHeap::new(),
gc: GarbageCollector,
storage: LocalStorage(None),
unwinder: Unwinder::new(),
death: Death::new(),
state: New,
name: None,
imp: None,
}
}
/// Consumes ownership of a task, runs some code, and returns the task back.
///
/// This function can be used as an emulated "try/catch" to interoperate
/// with the rust runtime at the outermost boundary. It is not possible to
/// use this function in a nested fashion (a try/catch inside of another
/// try/catch). Invoking this function is quite cheap.
///
/// If the closure `f` succeeds, then the returned task can be used again
/// for another invocation of `run`. If the closure `f` fails then `self`
/// will be internally destroyed along with all of the other associated
/// resources of this task. The `on_exit` callback is invoked with the
/// cause of failure (not returned here). This can be discovered by querying
/// `is_destroyed()`.
///
/// Note that it is possible to view partial execution of the closure `f`
/// because it is not guaranteed to run to completion, but this function is
/// guaranteed to return if it fails. Care should be taken to ensure that
/// stack references made by `f` are handled appropriately.
///
/// It is invalid to call this function with a task that has been previously
/// destroyed via a failed call to `run`.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a new native task
/// let task = native::task::new((0, uint::MAX));
///
/// // Run some code once and then destroy this task | /// println!("Hello with a native runtime!");
/// }).destroy();
/// # }
/// ```
pub fn run(mut self: Box<Task>, f: ||) -> Box<Task> {
assert!(!self.is_destroyed(), "cannot re-use a destroyed task");
// First, make sure that no one else is in TLS. This does not allow
// recursive invocations of run(). If there's no one else, then
// relinquish ownership of ourselves back into TLS.
if Local::exists(None::<Task>) {
fail!("cannot run a task recursively inside another");
}
self.state = Armed;
Local::put(self);
// There are two primary reasons that general try/catch is unsafe. The
// first is that we do not support nested try/catch. The above check for
// an existing task in TLS is sufficient for this invariant to be
// upheld. The second is that unwinding while unwinding is not defined.
// We take care of that by having an 'unwinding' flag in the task
// itself. For these reasons, this unsafety should be ok.
let result = unsafe { unwind::try(f) };
// After running the closure given return the task back out if it ran
// successfully, or clean up the task if it failed.
let task: Box<Task> = Local::take();
match result {
Ok(()) => task,
Err(cause) => { task.cleanup(Err(cause)) }
}
}
/// Destroy all associated resources of this task.
///
/// This function will perform any necessary clean up to prepare the task
/// for destruction. It is required that this is called before a `Task`
/// falls out of scope.
///
/// The returned task cannot be used for running any more code, but it may
/// be used to extract the runtime as necessary.
pub fn destroy(self: Box<Task>) -> Box<Task> {
if self.is_destroyed() {
self
} else {
self.cleanup(Ok(()))
}
}
/// Cleans up a task, processing the result of the task as appropriate.
///
/// This function consumes ownership of the task, deallocating it once it's
/// done being processed. It is assumed that TLD and the local heap have
/// already been destroyed and/or annihilated.
fn cleanup(self: Box<Task>, result: Result) -> Box<Task> {
// The first thing to do when cleaning up is to deallocate our local
// resources, such as TLD and GC data.
//
// FIXME: there are a number of problems with this code
//
// 1. If any TLD object fails destruction, then all of TLD will leak.
// This appears to be a consequence of #14875.
//
// 2. Failing during GC annihilation aborts the runtime #14876.
//
// 3. Setting a TLD key while destroying TLD or while destroying GC will
// abort the runtime #14807.
//
// 4. Invoking GC in GC destructors will abort the runtime #6996.
//
// 5. The order of destruction of TLD and GC matters, but either way is
// susceptible to leaks (see 3/4) #8302.
//
// That being said, there are a few upshots to this code
//
// 1. If TLD destruction fails, heap destruction will be attempted.
// There is a test for this at fail-during-tld-destroy.rs. Sadly the
// other way can't be tested due to point 2 above. Note that we must
// immortalize the heap first because if any deallocations are
// attempted while TLD is being dropped it will attempt to free the
// allocation from the wrong heap (because the current one has been
// replaced).
//
// 2. One failure in destruction is tolerable, so long as the task
// didn't originally fail while it was running.
//
// And with all that in mind, we attempt to clean things up!
let mut task = self.run(|| {
let mut task = Local::borrow(None::<Task>);
let tld = {
let &LocalStorage(ref mut optmap) = &mut task.storage;
optmap.take()
};
let mut heap = mem::replace(&mut task.heap, LocalHeap::new());
unsafe { heap.immortalize() }
drop(task);
// First, destroy task-local storage. This may run user dtors.
drop(tld);
// Destroy remaining boxes. Also may run user dtors.
drop(heap);
});
// If the above `run` block failed, then it must be the case that the
// task had previously succeeded. This also means that the code below
// was recursively run via the `run` method invoking this method. In
// this case, we just make sure the world is as we thought, and return.
if task.is_destroyed() {
rtassert!(result.is_ok())
return task
}
// After taking care of the data above, we need to transmit the result
// of this task.
let what_to_do = task.death.on_exit.take();
Local::put(task);
// FIXME: this is running in a seriously constrained context. If this
// allocates GC or allocates TLD then it will likely abort the
// runtime. Similarly, if this fails, this will also likely abort
// the runtime.
//
// This closure is currently limited to a channel send via the
// standard library's task interface, but this needs
// reconsideration to whether it's a reasonable thing to let a
// task to do or not.
match what_to_do {
Some(f) => { f(result) }
None => { drop(result) }
}
// Now that we're done, we remove the task from TLS and flag it for
// destruction.
let mut task: Box<Task> = Local::take();
task.state = Destroyed;
return task;
}
/// Queries whether this can be destroyed or not.
pub fn is_destroyed(&self) -> bool { self.state == Destroyed }
/// Inserts a runtime object into this task, transferring ownership to the
/// task. It is illegal to replace a previous runtime object in this task
/// with this argument.
pub fn put_runtime(&mut self, ops: Box<Runtime + Send + 'static>) {
assert!(self.imp.is_none());
self.imp = Some(ops);
}
/// Removes the runtime from this task, transferring ownership to the
/// caller.
pub fn take_runtime(&mut self) -> Box<Runtime + Send + 'static> {
assert!(self.imp.is_some());
self.imp.take().unwrap()
}
/// Attempts to extract the runtime as a specific type. If the runtime does
/// not have the provided type, then the runtime is not removed. If the
/// runtime does have the specified type, then it is removed and returned
/// (transfer of ownership).
///
/// It is recommended to only use this method when *absolutely necessary*.
/// This function may not be available in the future.
pub fn maybe_take_runtime<T: 'static>(&mut self) -> Option<Box<T>> {
// This is a terrible, terrible function. The general idea here is to
// take the runtime, cast it to Box<Any>, check if it has the right
// type, and then re-cast it back if necessary. The method of doing
// this is pretty sketchy and involves shuffling vtables of trait
// objects around, but it gets the job done.
//
// FIXME: This function is a serious code smell and should be avoided at
// all costs. I have yet to think of a method to avoid this
// function, and I would be saddened if more usage of the function
// crops up.
unsafe {
let imp = self.imp.take().unwrap();
let vtable = mem::transmute::<_, &raw::TraitObject>(&imp).vtable;
match imp.wrap().downcast::<T>() {
Ok(t) => Some(t),
Err(t) => {
let data = mem::transmute::<_, raw::TraitObject>(t).data;
let obj: Box<Runtime + Send + 'static> =
mem::transmute(raw::TraitObject {
vtable: vtable,
data: data,
});
self.put_runtime(obj);
None
}
}
}
}
/// Spawns a sibling to this task. The newly spawned task is configured with
/// the `opts` structure and will run `f` as the body of its code.
pub fn spawn_sibling(mut self: Box<Task>,
opts: TaskOpts,
f: proc(): Send) {
let ops = self.imp.take().unwrap();
ops.spawn_sibling(self, opts, f)
}
/// Deschedules the current task, invoking `f` `amt` times. It is not
/// recommended to use this function directly, but rather communication
/// primitives in `std::comm` should be used.
pub fn deschedule(mut self: Box<Task>,
amt: uint,
f: |BlockedTask| -> ::core::result::Result<(), BlockedTask>) {
let ops = self.imp.take().unwrap();
ops.deschedule(amt, self, f)
}
/// Wakes up a previously blocked task, optionally specifying whether the
/// current task can accept a change in scheduling. This function can only
/// be called on tasks that were previously blocked in `deschedule`.
pub fn reawaken(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.reawaken(self);
}
/// Yields control of this task to another task. This function will
/// eventually return, but possibly not immediately. This is used as an
/// opportunity to allow other tasks a chance to run.
pub fn yield_now(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.yield_now(self);
}
/// Similar to `yield_now`, except that this function may immediately return
/// without yielding (depending on what the runtime decides to do).
pub fn maybe_yield(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.maybe_yield(self);
}
/// Acquires a handle to the I/O factory that this task contains, normally
/// stored in the task's runtime. This factory may not always be available,
/// which is why the return type is `Option`
pub fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>> {
self.imp.as_mut().unwrap().local_io()
}
/// Returns the stack bounds for this task in (lo, hi) format. The stack
/// bounds may not be known for all tasks.
pub fn stack_bounds(&self) -> (uint, uint) {
self.imp.as_ref().unwrap().stack_bounds()
}
/// Returns whether it is legal for this task to block the OS thread that it
/// is running on.
pub fn can_block(&self) -> bool {
self.imp.as_ref().unwrap().can_block()
}
/// Consume this task, flagging it as a candidate for destruction.
///
/// This function is required to be invoked to destroy a task. A task
/// destroyed through a normal drop will abort.
pub fn drop(mut self) {
self.state = Destroyed;
}
}
impl Drop for Task {
fn drop(&mut self) {
rtdebug!("called drop for a task: {}", self as *mut Task as uint);
rtassert!(self.state != Armed);
}
}
impl TaskOpts {
pub fn new() -> TaskOpts {
TaskOpts { on_exit: None, name: None, stack_size: None }
}
}
impl Iterator<BlockedTask> for BlockedTasks {
fn next(&mut self) -> Option<BlockedTask> {
Some(Shared(self.inner.clone()))
}
}
impl BlockedTask {
/// Returns Some if the task was successfully woken; None if already killed.
pub fn wake(self) -> Option<Box<Task>> {
match self {
Owned(task) => Some(task),
Shared(arc) => {
match arc.swap(0, SeqCst) {
0 => None,
n => Some(unsafe { mem::transmute(n) }),
}
}
}
}
/// Reawakens this task if ownership is acquired. If finer-grained control
/// is desired, use `wake` instead.
pub fn reawaken(self) {
self.wake().map(|t| t.reawaken());
}
// This assertion has two flavours because the wake involves an atomic op.
// In the faster version, destructors will fail dramatically instead.
#[cfg(not(test))] pub fn trash(self) { }
#[cfg(test)] pub fn trash(self) { assert!(self.wake().is_none()); }
/// Create a blocked task, unless the task was already killed.
pub fn block(task: Box<Task>) -> BlockedTask {
Owned(task)
}
/// Converts one blocked task handle to a list of many handles to the same.
pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTasks> {
let arc = match self {
Owned(task) => {
let flag = unsafe { AtomicUint::new(mem::transmute(task)) };
Arc::new(flag)
}
Shared(arc) => arc.clone(),
};
BlockedTasks{ inner: arc }.take(num_handles)
}
/// Convert to an unsafe uint value. Useful for storing in a pipe's state
/// flag.
#[inline]
pub unsafe fn cast_to_uint(self) -> uint {
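// Boxes are word-aligned, so the low bit of the pointer is always free; it
// carries the Owned/Shared tag, and the rtassert!s below check that it
// starts out clear.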
match self {
Owned(task) => {
let blocked_task_ptr: uint = mem::transmute(task);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr
}
Shared(arc) => {
let blocked_task_ptr: uint = mem::transmute(box arc);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr | 0x1
}
}
}
/// Convert from an unsafe uint value. Useful for retrieving a pipe's state
/// flag.
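///
/// A round-trip sketch using only the casts defined here (illustrative only;
/// `task` is assumed to be some `Box<Task>` and the uint must come from a
/// matching `cast_to_uint` call):
///
/// ```ignore
/// let raw = unsafe { BlockedTask::block(task).cast_to_uint() };
/// let blocked = unsafe { BlockedTask::cast_from_uint(raw) };
/// ```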
#[inline]
pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
if blocked_task_ptr & 0x1 == 0 {
Owned(mem::transmute(blocked_task_ptr))
} else {
let ptr: Box<Arc<AtomicUint>> =
mem::transmute(blocked_task_ptr & !1);
Shared(*ptr)
}
}
}
impl Death {
pub fn new() -> Death {
Death { on_exit: None, marker: marker::NoCopy }
}
}
#[cfg(test)]
mod test {
use super::*;
use std::prelude::*;
use std::task;
use std::gc::{Gc, GC};
#[test]
fn local_heap() {
let a = box(GC) 5i;
let b = a;
assert!(*a == 5);
assert!(*b == 5);
}
#[test]
fn tls() {
local_data_key!(key: Gc<String>)
key.replace(Some(box(GC) "data".to_string()));
assert_eq!(key.get().unwrap().as_slice(), "data");
local_data_key!(key2: Gc<String>)
key2.replace(Some(box(GC) "data".to_string()));
assert_eq!(key2.get().unwrap().as_slice(), "data");
}
#[test]
fn unwind() {
let result = task::try(proc()());
rtdebug!("trying first assert");
assert!(result.is_ok());
let result = task::try::<()>(proc() fail!());
rtdebug!("trying second assert");
assert!( | /// task.run(|| { | random_line_split |
task.rs | ;
use local::Local;
use local_heap::LocalHeap;
use rtio::LocalIo;
use unwind;
use unwind::Unwinder;
use collections::str::SendStr;
/// State associated with Rust tasks.
///
/// Rust tasks are primarily built with two separate components. One is this
/// structure which handles standard services such as TLD, unwinding support,
/// naming of a task, etc. The second component is the runtime of this task, a
/// `Runtime` trait object.
///
/// The `Runtime` object instructs this task how it can perform critical
/// operations such as blocking, rescheduling, I/O constructors, etc. The two
/// halves are separately owned, but one is often found contained in the other.
/// A task's runtime can be reflected upon with the `maybe_take_runtime` method,
/// and otherwise its ownership is managed with `take_runtime` and
/// `put_runtime`.
///
/// In general, this structure should not be used. This is meant to be an
/// unstable internal detail of the runtime itself. From time-to-time, however,
/// it is useful to manage tasks directly. An example of this would be
/// interoperating with the Rust runtime from FFI callbacks or such. For this
/// reason, there are two methods of note with the `Task` structure.
///
/// * `run` - This function will execute a closure inside the context of a task.
/// Failure is caught and handled via the task's on_exit callback. If
/// this fails, the task is still returned, but it can no longer be
/// used, it is poisoned.
///
/// * `destroy` - This is a required function to call to destroy a task. If a
/// task falls out of scope without calling `destroy`, its
/// destructor bomb will go off, aborting the process.
///
/// With these two methods, tasks can be re-used to execute code inside of its
/// context while having a point in the future where destruction is allowed.
/// More information can be found on these specific methods.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a task using a native runtime
/// let task = native::task::new((0, uint::MAX));
///
/// // Run some code, catching any possible failures
/// let task = task.run(|| {
/// // Run some code inside this task
/// println!("Hello with a native runtime!");
/// });
///
/// // Run some code again, catching the failure
/// let task = task.run(|| {
/// fail!("oh no, what to do!");
/// });
///
/// // Now that the task is failed, it can never be used again
/// assert!(task.is_destroyed());
///
/// // Deallocate the resources associated with this task
/// task.destroy();
/// # }
/// ```
pub struct Task {
pub heap: LocalHeap,
pub gc: GarbageCollector,
pub storage: LocalStorage,
pub unwinder: Unwinder,
pub death: Death,
pub name: Option<SendStr>,
state: TaskState,
imp: Option<Box<Runtime + Send + 'static>>,
}
// Once a task has entered the `Armed` state it must be destroyed via `drop`,
// and no other method. This state is used to track this transition.
#[deriving(PartialEq)]
enum TaskState {
New,
Armed,
Destroyed,
}
pub struct TaskOpts {
/// Invoke this procedure with the result of the task when it finishes.
pub on_exit: Option<proc(Result): Send>,
/// A name for the task-to-be, for identification in failure messages
pub name: Option<SendStr>,
/// The size of the stack for the spawned task
pub stack_size: Option<uint>,
}
/// Indicates the manner in which a task exited.
///
/// A task that completes without failing is considered to exit successfully.
///
/// If you wish for this result's delivery to block until all
/// children tasks complete, it is recommended to use a result future.
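///
/// A sketch of how the result is typically observed through `TaskOpts`
/// (illustrative only; the field is the `on_exit` callback defined below):
///
/// ```ignore
/// let mut opts = TaskOpts::new();
/// opts.on_exit = Some(proc(result: Result) {
///     println!("task succeeded: {}", result.is_ok());
/// });
/// ```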
pub type Result = ::core::result::Result<(), Box<Any + Send>>;
pub struct GarbageCollector;
pub struct LocalStorage(pub Option<local_data::Map>);
/// A handle to a blocked task. Usually this means having the Box<Task>
/// pointer by ownership, but if the task is killable, a killer can steal it
/// at any time.
pub enum BlockedTask {
Owned(Box<Task>),
Shared(Arc<AtomicUint>),
}
/// Per-task state related to task death, killing, failure, etc.
pub struct Death {
pub on_exit: Option<proc(Result): Send>,
marker: marker::NoCopy,
}
pub struct BlockedTasks {
inner: Arc<AtomicUint>,
}
impl Task {
/// Creates a new uninitialized task.
///
/// This method cannot be used to immediately invoke `run` because the task
/// itself will likely require a runtime to be inserted via `put_runtime`.
///
/// Note that you likely don't want to call this function, but rather the
/// task creation functions through libnative or libgreen.
pub fn new() -> Task {
Task {
heap: LocalHeap::new(),
gc: GarbageCollector,
storage: LocalStorage(None),
unwinder: Unwinder::new(),
death: Death::new(),
state: New,
name: None,
imp: None,
}
}
/// Consumes ownership of a task, runs some code, and returns the task back.
///
/// This function can be used as an emulated "try/catch" to interoperate
/// with the rust runtime at the outermost boundary. It is not possible to
/// use this function in a nested fashion (a try/catch inside of another
/// try/catch). Invoking this function is quite cheap.
///
/// If the closure `f` succeeds, then the returned task can be used again
/// for another invocation of `run`. If the closure `f` fails then `self`
/// will be internally destroyed along with all of the other associated
/// resources of this task. The `on_exit` callback is invoked with the
/// cause of failure (not returned here). This can be discovered by querying
/// `is_destroyed()`.
///
/// Note that it is possible to view partial execution of the closure `f`
/// because it is not guaranteed to run to completion, but this function is
/// guaranteed to return if it fails. Care should be taken to ensure that
/// stack references made by `f` are handled appropriately.
///
/// It is invalid to call this function with a task that has been previously
/// destroyed via a failed call to `run`.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a new native task
/// let task = native::task::new((0, uint::MAX));
///
/// // Run some code once and then destroy this task
/// task.run(|| {
/// println!("Hello with a native runtime!");
/// }).destroy();
/// # }
/// ```
pub fn run(mut self: Box<Task>, f: ||) -> Box<Task> {
assert!(!self.is_destroyed(), "cannot re-use a destroyed task");
// First, make sure that no one else is in TLS. This does not allow
// recursive invocations of run(). If there's no one else, then
// relinquish ownership of ourselves back into TLS.
if Local::exists(None::<Task>) {
fail!("cannot run a task recursively inside another");
}
self.state = Armed;
Local::put(self);
// There are two primary reasons that general try/catch is unsafe. The
// first is that we do not support nested try/catch. The above check for
// an existing task in TLS is sufficient for this invariant to be
// upheld. The second is that unwinding while unwinding is not defined.
// We take care of that by having an 'unwinding' flag in the task
// itself. For these reasons, this unsafety should be ok.
let result = unsafe { unwind::try(f) };
// After running the closure given return the task back out if it ran
// successfully, or clean up the task if it failed.
let task: Box<Task> = Local::take();
match result {
Ok(()) => task,
Err(cause) => { task.cleanup(Err(cause)) }
}
}
/// Destroy all associated resources of this task.
///
/// This function will perform any necessary clean up to prepare the task
/// for destruction. It is required that this is called before a `Task`
/// falls out of scope.
///
/// The returned task cannot be used for running any more code, but it may
/// be used to extract the runtime as necessary.
pub fn destroy(self: Box<Task>) -> Box<Task> {
if self.is_destroyed() {
self
} else {
self.cleanup(Ok(()))
}
}
/// Cleans up a task, processing the result of the task as appropriate.
///
/// This function consumes ownership of the task, deallocating it once it's
/// done being processed. It is assumed that TLD and the local heap have
/// already been destroyed and/or annihilated.
fn cleanup(self: Box<Task>, result: Result) -> Box<Task> {
// The first thing to do when cleaning up is to deallocate our local
// resources, such as TLD and GC data.
//
// FIXME: there are a number of problems with this code
//
// 1. If any TLD object fails destruction, then all of TLD will leak.
// This appears to be a consequence of #14875.
//
// 2. Failing during GC annihilation aborts the runtime #14876.
//
// 3. Setting a TLD key while destroying TLD or while destroying GC will
// abort the runtime #14807.
//
// 4. Invoking GC in GC destructors will abort the runtime #6996.
//
// 5. The order of destruction of TLD and GC matters, but either way is
// susceptible to leaks (see 3/4) #8302.
//
// That being said, there are a few upshots to this code
//
// 1. If TLD destruction fails, heap destruction will be attempted.
// There is a test for this at fail-during-tld-destroy.rs. Sadly the
// other way can't be tested due to point 2 above. Note that we must
// immortalize the heap first because if any deallocations are
// attempted while TLD is being dropped it will attempt to free the
// allocation from the wrong heap (because the current one has been
// replaced).
//
// 2. One failure in destruction is tolerable, so long as the task
// didn't originally fail while it was running.
//
// And with all that in mind, we attempt to clean things up!
let mut task = self.run(|| {
let mut task = Local::borrow(None::<Task>);
let tld = {
let &LocalStorage(ref mut optmap) = &mut task.storage;
optmap.take()
};
let mut heap = mem::replace(&mut task.heap, LocalHeap::new());
unsafe { heap.immortalize() }
drop(task);
// First, destroy task-local storage. This may run user dtors.
drop(tld);
// Destroy remaining boxes. Also may run user dtors.
drop(heap);
});
// If the above `run` block failed, then it must be the case that the
// task had previously succeeded. This also means that the code below
// was recursively run via the `run` method invoking this method. In
// this case, we just make sure the world is as we thought, and return.
if task.is_destroyed() {
rtassert!(result.is_ok());
return task
}
// After taking care of the data above, we need to transmit the result
// of this task.
let what_to_do = task.death.on_exit.take();
Local::put(task);
// FIXME: this is running in a seriously constrained context. If this
// allocates GC or allocates TLD then it will likely abort the
// runtime. Similarly, if this fails, this will also likely abort
// the runtime.
//
// This closure is currently limited to a channel send via the
// standard library's task interface, but this needs
// reconsideration to whether it's a reasonable thing to let a
// task to do or not.
match what_to_do {
Some(f) => { f(result) }
None => { drop(result) }
}
// Now that we're done, we remove the task from TLS and flag it for
// destruction.
let mut task: Box<Task> = Local::take();
task.state = Destroyed;
return task;
}
/// Queries whether this can be destroyed or not.
pub fn is_destroyed(&self) -> bool { self.state == Destroyed }
/// Inserts a runtime object into this task, transferring ownership to the
/// task. It is illegal to replace a previous runtime object in this task
/// with this argument.
pub fn put_runtime(&mut self, ops: Box<Runtime + Send + 'static>) {
assert!(self.imp.is_none());
self.imp = Some(ops);
}
/// Removes the runtime from this task, transferring ownership to the
/// caller.
pub fn take_runtime(&mut self) -> Box<Runtime + Send + 'static> {
assert!(self.imp.is_some());
self.imp.take().unwrap()
}
/// Attempts to extract the runtime as a specific type. If the runtime does
/// not have the provided type, then the runtime is not removed. If the
/// runtime does have the specified type, then it is removed and returned
/// (transfer of ownership).
///
/// It is recommended to only use this method when *absolutely necessary*.
/// This function may not be available in the future.
pub fn maybe_take_runtime<T:'static>(&mut self) -> Option<Box<T>> {
// This is a terrible, terrible function. The general idea here is to
// take the runtime, cast it to Box<Any>, check if it has the right
// type, and then re-cast it back if necessary. The method of doing
// this is pretty sketchy and involves shuffling vtables of trait
// objects around, but it gets the job done.
//
// FIXME: This function is a serious code smell and should be avoided at
// all costs. I have yet to think of a method to avoid this
// function, and I would be saddened if more usage of the function
// crops up.
unsafe {
let imp = self.imp.take().unwrap();
let vtable = mem::transmute::<_, &raw::TraitObject>(&imp).vtable;
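// Keep a copy of the original Runtime vtable so the trait object can be
// stitched back together below if the downcast fails.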
match imp.wrap().downcast::<T>() {
Ok(t) => Some(t),
Err(t) => {
let data = mem::transmute::<_, raw::TraitObject>(t).data;
let obj: Box<Runtime + Send + 'static> =
mem::transmute(raw::TraitObject {
vtable: vtable,
data: data,
});
self.put_runtime(obj);
None
}
}
}
}
/// Spawns a sibling to this task. The newly spawned task is configured with
/// the `opts` structure and will run `f` as the body of its code.
pub fn spawn_sibling(mut self: Box<Task>,
opts: TaskOpts,
f: proc(): Send) {
let ops = self.imp.take().unwrap();
ops.spawn_sibling(self, opts, f)
}
/// Deschedules the current task, invoking `f` `amt` times. It is not
/// recommended to use this function directly, but rather communication
/// primitives in `std::comm` should be used.
pub fn deschedule(mut self: Box<Task>,
amt: uint,
f: |BlockedTask| -> ::core::result::Result<(), BlockedTask>) {
let ops = self.imp.take().unwrap();
ops.deschedule(amt, self, f)
}
/// Wakes up a previously blocked task. This function can only
/// be called on tasks that were previously blocked in `deschedule`.
pub fn reawaken(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.reawaken(self);
}
/// Yields control of this task to another task. This function will
/// eventually return, but possibly not immediately. This is used as an
/// opportunity to allow other tasks a chance to run.
pub fn | (mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.yield_now(self);
}
/// Similar to `yield_now`, except that this function may immediately return
/// without yielding (depending on what the runtime decides to do).
pub fn maybe_yield(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.maybe_yield(self);
}
/// Acquires a handle to the I/O factory that this task contains, normally
/// stored in the task's runtime. This factory may not always be available,
/// which is why the return type is `Option`
pub fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>> {
self.imp.as_mut().unwrap().local_io()
}
/// Returns the stack bounds for this task in (lo, hi) format. The stack
/// bounds may not be known for all tasks.
pub fn stack_bounds(&self) -> (uint, uint) {
self.imp.as_ref().unwrap().stack_bounds()
}
/// Returns whether it is legal for this task to block the OS thread that it
/// is running on.
pub fn can_block(&self) -> bool {
self.imp.as_ref().unwrap().can_block()
}
/// Consume this task, flagging it as a candidate for destruction.
///
/// This function is required to be invoked to destroy a task. A task
/// destroyed through a normal drop will abort.
pub fn drop(mut self) {
self.state = Destroyed;
}
}
impl Drop for Task {
fn drop(&mut self) {
rtdebug!("called drop for a task: {}", self as *mut Task as uint);
rtassert!(self.state != Armed);
}
}
impl TaskOpts {
pub fn new() -> TaskOpts {
TaskOpts { on_exit: None, name: None, stack_size: None }
}
}
impl Iterator<BlockedTask> for BlockedTasks {
fn next(&mut self) -> Option<BlockedTask> {
Some(Shared(self.inner.clone()))
}
}
impl BlockedTask {
/// Returns Some if the task was successfully woken; None if already killed.
pub fn wake(self) -> Option<Box<Task>> {
match self {
Owned(task) => Some(task),
Shared(arc) => {
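// Atomically claim the task: swapping 0 into the flag ensures only one
// caller can observe the stored pointer, and a result of 0 means the task
// was already woken or killed.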
match arc.swap(0, SeqCst) {
0 => None,
n => Some(unsafe { mem::transmute(n) }),
}
}
}
}
/// Reawakens this task if ownership is acquired. If finer-grained control
/// is desired, use `wake` instead.
pub fn reawaken(self) {
self.wake().map(|t| t.reawaken());
}
// This assertion has two flavours because the wake involves an atomic op.
// In the faster version, destructors will fail dramatically instead.
#[cfg(not(test))] pub fn trash(self) { }
#[cfg(test)] pub fn trash(self) { assert!(self.wake().is_none()); }
/// Create a blocked task, unless the task was already killed.
pub fn block(task: Box<Task>) -> BlockedTask {
Owned(task)
}
/// Converts one blocked task handle to a list of many handles to the same.
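/// A sketch of the intended fan-out (illustrative only; `task` is assumed to
/// be some `Box<Task>`, every handle refers to the same task, and at most one
/// of them will win the eventual wake):
///
/// ```ignore
/// let handles: Vec<BlockedTask> = BlockedTask::block(task)
///                                     .make_selectable(3)
///                                     .collect();
/// ```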
pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTasks> {
let arc = match self {
Owned(task) => {
let flag = unsafe { AtomicUint::new(mem::transmute(task)) };
Arc::new(flag)
}
Shared(arc) => arc.clone(),
};
BlockedTasks{ inner: arc }.take(num_handles)
}
/// Convert to an unsafe uint value. Useful for storing in a pipe's state
/// flag.
#[inline]
pub unsafe fn cast_to_uint(self) -> uint {
match self {
Owned(task) => {
let blocked_task_ptr: uint = mem::transmute(task);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr
}
Shared(arc) => {
let blocked_task_ptr: uint = mem::transmute(box arc);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr | 0x1
}
}
}
/// Convert from an unsafe uint value. Useful for retrieving a pipe's state
/// flag.
#[inline]
pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
if blocked_task_ptr & 0x1 == 0 {
Owned(mem::transmute(blocked_task_ptr))
} else {
let ptr: Box<Arc<AtomicUint>> =
mem::transmute(blocked_task_ptr & !1);
Shared(*ptr)
}
}
}
impl Death {
pub fn new() -> Death {
Death { on_exit: None, marker: marker::NoCopy }
}
}
#[cfg(test)]
mod test {
use super::*;
use std::prelude::*;
use std::task;
use std::gc::{Gc, GC};
#[test]
fn local_heap() {
let a = box(GC) 5i;
let b = a;
assert!(*a == 5);
assert!(*b == 5);
}
#[test]
fn tls() {
local_data_key!(key: Gc<String>)
key.replace(Some(box(GC) "data".to_string()));
assert_eq!(key.get().unwrap().as_slice(), "data");
local_data_key!(key2: Gc<String>)
key2.replace(Some(box(GC) "data".to_string()));
assert_eq!(key2.get().unwrap().as_slice(), "data");
}
#[test]
fn unwind() {
let result = task::try(proc()());
rtdebug!("trying first assert");
assert!(result.is_ok());
let result = task::try::<()>(proc() fail!());
rtdebug!("trying second assert");
| yield_now | identifier_name |
task.rs | ;
use local::Local;
use local_heap::LocalHeap;
use rtio::LocalIo;
use unwind;
use unwind::Unwinder;
use collections::str::SendStr;
/// State associated with Rust tasks.
///
/// Rust tasks are primarily built with two separate components. One is this
/// structure which handles standard services such as TLD, unwinding support,
/// naming of a task, etc. The second component is the runtime of this task, a
/// `Runtime` trait object.
///
/// The `Runtime` object instructs this task how it can perform critical
/// operations such as blocking, rescheduling, I/O constructors, etc. The two
/// halves are separately owned, but one is often found contained in the other.
/// A task's runtime can be reflected upon with the `maybe_take_runtime` method,
/// and otherwise its ownership is managed with `take_runtime` and
/// `put_runtime`.
///
/// In general, this structure should not be used. This is meant to be an
/// unstable internal detail of the runtime itself. From time-to-time, however,
/// it is useful to manage tasks directly. An example of this would be
/// interoperating with the Rust runtime from FFI callbacks or such. For this
/// reason, there are two methods of note with the `Task` structure.
///
/// * `run` - This function will execute a closure inside the context of a task.
/// Failure is caught and handled via the task's on_exit callback. If
/// this fails, the task is still returned, but it can no longer be
/// used, it is poisoned.
///
/// * `destroy` - This is a required function to call to destroy a task. If a
/// task falls out of scope without calling `destroy`, its
/// destructor bomb will go off, aborting the process.
///
/// With these two methods, tasks can be re-used to execute code inside of its
/// context while having a point in the future where destruction is allowed.
/// More information can be found on these specific methods.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a task using a native runtime
/// let task = native::task::new((0, uint::MAX));
///
/// // Run some code, catching any possible failures
/// let task = task.run(|| {
/// // Run some code inside this task
/// println!("Hello with a native runtime!");
/// });
///
/// // Run some code again, catching the failure
/// let task = task.run(|| {
/// fail!("oh no, what to do!");
/// });
///
/// // Now that the task is failed, it can never be used again
/// assert!(task.is_destroyed());
///
/// // Deallocate the resources associated with this task
/// task.destroy();
/// # }
/// ```
pub struct Task {
pub heap: LocalHeap,
pub gc: GarbageCollector,
pub storage: LocalStorage,
pub unwinder: Unwinder,
pub death: Death,
pub name: Option<SendStr>,
state: TaskState,
imp: Option<Box<Runtime + Send + 'static>>,
}
// Once a task has entered the `Armed` state it must be destroyed via `drop`,
// and no other method. This state is used to track this transition.
#[deriving(PartialEq)]
enum TaskState {
New,
Armed,
Destroyed,
}
pub struct TaskOpts {
/// Invoke this procedure with the result of the task when it finishes.
pub on_exit: Option<proc(Result): Send>,
/// A name for the task-to-be, for identification in failure messages
pub name: Option<SendStr>,
/// The size of the stack for the spawned task
pub stack_size: Option<uint>,
}
/// Indicates the manner in which a task exited.
///
/// A task that completes without failing is considered to exit successfully.
///
/// If you wish for this result's delivery to block until all
/// children tasks complete, it is recommended to use a result future.
pub type Result = ::core::result::Result<(), Box<Any + Send>>;
pub struct GarbageCollector;
pub struct LocalStorage(pub Option<local_data::Map>);
/// A handle to a blocked task. Usually this means having the Box<Task>
/// pointer by ownership, but if the task is killable, a killer can steal it
/// at any time.
pub enum BlockedTask {
Owned(Box<Task>),
Shared(Arc<AtomicUint>),
}
/// Per-task state related to task death, killing, failure, etc.
pub struct Death {
pub on_exit: Option<proc(Result): Send>,
marker: marker::NoCopy,
}
pub struct BlockedTasks {
inner: Arc<AtomicUint>,
}
impl Task {
/// Creates a new uninitialized task.
///
/// This method cannot be used to immediately invoke `run` because the task
/// itself will likely require a runtime to be inserted via `put_runtime`.
///
/// Note that you likely don't want to call this function, but rather the
/// task creation functions through libnative or libgreen.
pub fn new() -> Task {
Task {
heap: LocalHeap::new(),
gc: GarbageCollector,
storage: LocalStorage(None),
unwinder: Unwinder::new(),
death: Death::new(),
state: New,
name: None,
imp: None,
}
}
/// Consumes ownership of a task, runs some code, and returns the task back.
///
/// This function can be used as an emulated "try/catch" to interoperate
/// with the rust runtime at the outermost boundary. It is not possible to
/// use this function in a nested fashion (a try/catch inside of another
/// try/catch). Invoking this function is quite cheap.
///
/// If the closure `f` succeeds, then the returned task can be used again
/// for another invocation of `run`. If the closure `f` fails then `self`
/// will be internally destroyed along with all of the other associated
/// resources of this task. The `on_exit` callback is invoked with the
/// cause of failure (not returned here). This can be discovered by querying
/// `is_destroyed()`.
///
/// Note that it is possible to view partial execution of the closure `f`
/// because it is not guaranteed to run to completion, but this function is
/// guaranteed to return if it fails. Care should be taken to ensure that
/// stack references made by `f` are handled appropriately.
///
/// It is invalid to call this function with a task that has been previously
/// destroyed via a failed call to `run`.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a new native task
/// let task = native::task::new((0, uint::MAX));
///
/// // Run some code once and then destroy this task
/// task.run(|| {
/// println!("Hello with a native runtime!");
/// }).destroy();
/// # }
/// ```
pub fn run(mut self: Box<Task>, f: ||) -> Box<Task> {
assert!(!self.is_destroyed(), "cannot re-use a destroyed task");
// First, make sure that no one else is in TLS. This does not allow
// recursive invocations of run(). If there's no one else, then
// relinquish ownership of ourselves back into TLS.
if Local::exists(None::<Task>) {
fail!("cannot run a task recursively inside another");
}
self.state = Armed;
Local::put(self);
// There are two primary reasons that general try/catch is unsafe. The
// first is that we do not support nested try/catch. The above check for
// an existing task in TLS is sufficient for this invariant to be
// upheld. The second is that unwinding while unwinding is not defined.
// We take care of that by having an 'unwinding' flag in the task
// itself. For these reasons, this unsafety should be ok.
let result = unsafe { unwind::try(f) };
// After running the closure given return the task back out if it ran
// successfully, or clean up the task if it failed.
let task: Box<Task> = Local::take();
match result {
Ok(()) => task,
Err(cause) => { task.cleanup(Err(cause)) }
}
}
/// Destroy all associated resources of this task.
///
/// This function will perform any necessary clean up to prepare the task
/// for destruction. It is required that this is called before a `Task`
/// falls out of scope.
///
/// The returned task cannot be used for running any more code, but it may
/// be used to extract the runtime as necessary.
pub fn destroy(self: Box<Task>) -> Box<Task> {
if self.is_destroyed() {
self
} else {
self.cleanup(Ok(()))
}
}
/// Cleans up a task, processing the result of the task as appropriate.
///
/// This function consumes ownership of the task, deallocating it once it's
/// done being processed. It is assumed that TLD and the local heap have
/// already been destroyed and/or annihilated.
fn cleanup(self: Box<Task>, result: Result) -> Box<Task> {
// The first thing to do when cleaning up is to deallocate our local
// resources, such as TLD and GC data.
//
// FIXME: there are a number of problems with this code
//
// 1. If any TLD object fails destruction, then all of TLD will leak.
// This appears to be a consequence of #14875.
//
// 2. Failing during GC annihilation aborts the runtime #14876.
//
// 3. Setting a TLD key while destroying TLD or while destroying GC will
// abort the runtime #14807.
//
// 4. Invoking GC in GC destructors will abort the runtime #6996.
//
// 5. The order of destruction of TLD and GC matters, but either way is
// susceptible to leaks (see 3/4) #8302.
//
// That being said, there are a few upshots to this code
//
// 1. If TLD destruction fails, heap destruction will be attempted.
// There is a test for this at fail-during-tld-destroy.rs. Sadly the
// other way can't be tested due to point 2 above. Note that we must
// immortalize the heap first because if any deallocations are
// attempted while TLD is being dropped it will attempt to free the
// allocation from the wrong heap (because the current one has been
// replaced).
//
// 2. One failure in destruction is tolerable, so long as the task
// didn't originally fail while it was running.
//
// And with all that in mind, we attempt to clean things up!
let mut task = self.run(|| {
let mut task = Local::borrow(None::<Task>);
let tld = {
let &LocalStorage(ref mut optmap) = &mut task.storage;
optmap.take()
};
let mut heap = mem::replace(&mut task.heap, LocalHeap::new());
unsafe { heap.immortalize() }
drop(task);
// First, destroy task-local storage. This may run user dtors.
drop(tld);
// Destroy remaining boxes. Also may run user dtors.
drop(heap);
});
// If the above `run` block failed, then it must be the case that the
// task had previously succeeded. This also means that the code below
// was recursively run via the `run` method invoking this method. In
// this case, we just make sure the world is as we thought, and return.
if task.is_destroyed() {
rtassert!(result.is_ok());
return task
}
// After taking care of the data above, we need to transmit the result
// of this task.
let what_to_do = task.death.on_exit.take();
Local::put(task);
// FIXME: this is running in a seriously constrained context. If this
// allocates GC or allocates TLD then it will likely abort the
// runtime. Similarly, if this fails, this will also likely abort
// the runtime.
//
// This closure is currently limited to a channel send via the
// standard library's task interface, but this needs
// reconsideration to whether it's a reasonable thing to let a
// task to do or not.
match what_to_do {
Some(f) => { f(result) }
None => { drop(result) }
}
// Now that we're done, we remove the task from TLS and flag it for
// destruction.
let mut task: Box<Task> = Local::take();
task.state = Destroyed;
return task;
}
/// Queries whether this can be destroyed or not.
pub fn is_destroyed(&self) -> bool { self.state == Destroyed }
/// Inserts a runtime object into this task, transferring ownership to the
/// task. It is illegal to replace a previous runtime object in this task
/// with this argument.
pub fn put_runtime(&mut self, ops: Box<Runtime + Send + 'static>) {
assert!(self.imp.is_none());
self.imp = Some(ops);
}
/// Removes the runtime from this task, transferring ownership to the
/// caller.
pub fn take_runtime(&mut self) -> Box<Runtime + Send + 'static> {
assert!(self.imp.is_some());
self.imp.take().unwrap()
}
/// Attempts to extract the runtime as a specific type. If the runtime does
/// not have the provided type, then the runtime is not removed. If the
/// runtime does have the specified type, then it is removed and returned
/// (transfer of ownership).
///
/// It is recommended to only use this method when *absolutely necessary*.
/// This function may not be available in the future.
pub fn maybe_take_runtime<T:'static>(&mut self) -> Option<Box<T>> {
// This is a terrible, terrible function. The general idea here is to
// take the runtime, cast it to Box<Any>, check if it has the right
// type, and then re-cast it back if necessary. The method of doing
// this is pretty sketchy and involves shuffling vtables of trait
// objects around, but it gets the job done.
//
// FIXME: This function is a serious code smell and should be avoided at
// all costs. I have yet to think of a method to avoid this
// function, and I would be saddened if more usage of the function
// crops up.
unsafe {
let imp = self.imp.take().unwrap();
let vtable = mem::transmute::<_, &raw::TraitObject>(&imp).vtable;
match imp.wrap().downcast::<T>() {
Ok(t) => Some(t),
Err(t) => {
let data = mem::transmute::<_, raw::TraitObject>(t).data;
let obj: Box<Runtime + Send + 'static> =
mem::transmute(raw::TraitObject {
vtable: vtable,
data: data,
});
self.put_runtime(obj);
None
}
}
}
}
/// Spawns a sibling to this task. The newly spawned task is configured with
/// the `opts` structure and will run `f` as the body of its code.
pub fn spawn_sibling(mut self: Box<Task>,
opts: TaskOpts,
f: proc(): Send) {
let ops = self.imp.take().unwrap();
ops.spawn_sibling(self, opts, f)
}
/// Deschedules the current task, invoking `f` `amt` times. It is not
/// recommended to use this function directly, but rather communication
/// primitives in `std::comm` should be used.
pub fn deschedule(mut self: Box<Task>,
amt: uint,
f: |BlockedTask| -> ::core::result::Result<(), BlockedTask>) {
let ops = self.imp.take().unwrap();
ops.deschedule(amt, self, f)
}
/// Wakes up a previously blocked task. This function can only
/// be called on tasks that were previously blocked in `deschedule`.
pub fn reawaken(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.reawaken(self);
}
/// Yields control of this task to another task. This function will
/// eventually return, but possibly not immediately. This is used as an
/// opportunity to allow other tasks a chance to run.
pub fn yield_now(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.yield_now(self);
}
/// Similar to `yield_now`, except that this function may immediately return
/// without yielding (depending on what the runtime decides to do).
pub fn maybe_yield(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.maybe_yield(self);
}
/// Acquires a handle to the I/O factory that this task contains, normally
/// stored in the task's runtime. This factory may not always be available,
/// which is why the return type is `Option`
pub fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>> {
self.imp.as_mut().unwrap().local_io()
}
/// Returns the stack bounds for this task in (lo, hi) format. The stack
/// bounds may not be known for all tasks.
pub fn stack_bounds(&self) -> (uint, uint) {
self.imp.as_ref().unwrap().stack_bounds()
}
/// Returns whether it is legal for this task to block the OS thread that it
/// is running on.
pub fn can_block(&self) -> bool {
self.imp.as_ref().unwrap().can_block()
}
/// Consume this task, flagging it as a candidate for destruction.
///
/// This function is required to be invoked to destroy a task. A task
/// destroyed through a normal drop will abort.
pub fn drop(mut self) {
self.state = Destroyed;
}
}
impl Drop for Task {
fn drop(&mut self) {
rtdebug!("called drop for a task: {}", self as *mut Task as uint);
rtassert!(self.state != Armed);
}
}
impl TaskOpts {
pub fn new() -> TaskOpts {
TaskOpts { on_exit: None, name: None, stack_size: None }
}
}
impl Iterator<BlockedTask> for BlockedTasks {
fn next(&mut self) -> Option<BlockedTask> {
Some(Shared(self.inner.clone()))
}
}
impl BlockedTask {
/// Returns Some if the task was successfully woken; None if already killed.
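/// An illustrative use of the returned ownership, mirroring what `reawaken`
/// below does internally (`blocked` is assumed to be some `BlockedTask`):
///
/// ```ignore
/// match blocked.wake() {
///     Some(task) => task.reawaken(), // ownership regained; reschedule it
///     None => {}                     // already woken elsewhere, or killed
/// }
/// ```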
pub fn wake(self) -> Option<Box<Task>> {
match self {
Owned(task) => Some(task),
Shared(arc) => {
match arc.swap(0, SeqCst) {
0 => None,
n => Some(unsafe { mem::transmute(n) }),
}
}
}
}
/// Reawakens this task if ownership is acquired. If finer-grained control
/// is desired, use `wake` instead.
pub fn reawaken(self) {
self.wake().map(|t| t.reawaken());
}
// This assertion has two flavours because the wake involves an atomic op.
// In the faster version, destructors will fail dramatically instead.
#[cfg(not(test))] pub fn trash(self) { }
#[cfg(test)] pub fn trash(self) { assert!(self.wake().is_none()); }
/// Create a blocked task, unless the task was already killed.
pub fn block(task: Box<Task>) -> BlockedTask {
Owned(task)
}
/// Converts one blocked task handle to a list of many handles to the same.
pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTasks> {
let arc = match self {
Owned(task) => {
let flag = unsafe { AtomicUint::new(mem::transmute(task)) };
Arc::new(flag)
}
Shared(arc) => arc.clone(),
};
BlockedTasks{ inner: arc }.take(num_handles)
}
/// Convert to an unsafe uint value. Useful for storing in a pipe's state
/// flag.
#[inline]
pub unsafe fn cast_to_uint(self) -> uint {
match self {
Owned(task) => |
Shared(arc) => {
let blocked_task_ptr: uint = mem::transmute(box arc);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr | 0x1
}
}
}
/// Convert from an unsafe uint value. Useful for retrieving a pipe's state
/// flag.
#[inline]
pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
if blocked_task_ptr & 0x1 == 0 {
Owned(mem::transmute(blocked_task_ptr))
} else {
let ptr: Box<Arc<AtomicUint>> =
mem::transmute(blocked_task_ptr & !1);
Shared(*ptr)
}
}
}
impl Death {
pub fn new() -> Death {
Death { on_exit: None, marker: marker::NoCopy }
}
}
#[cfg(test)]
mod test {
use super::*;
use std::prelude::*;
use std::task;
use std::gc::{Gc, GC};
#[test]
fn local_heap() {
let a = box(GC) 5i;
let b = a;
assert!(*a == 5);
assert!(*b == 5);
}
#[test]
fn tls() {
local_data_key!(key: Gc<String>)
key.replace(Some(box(GC) "data".to_string()));
assert_eq!(key.get().unwrap().as_slice(), "data");
local_data_key!(key2: Gc<String>)
key2.replace(Some(box(GC) "data".to_string()));
assert_eq!(key2.get().unwrap().as_slice(), "data");
}
#[test]
fn unwind() {
let result = task::try(proc()());
rtdebug!("trying first assert");
assert!(result.is_ok());
let result = task::try::<()>(proc() fail!());
rtdebug!("trying second assert");
| {
let blocked_task_ptr: uint = mem::transmute(task);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr
} | conditional_block |
task.rs | ;
use local::Local;
use local_heap::LocalHeap;
use rtio::LocalIo;
use unwind;
use unwind::Unwinder;
use collections::str::SendStr;
/// State associated with Rust tasks.
///
/// Rust tasks are primarily built with two separate components. One is this
/// structure which handles standard services such as TLD, unwinding support,
/// naming of a task, etc. The second component is the runtime of this task, a
/// `Runtime` trait object.
///
/// The `Runtime` object instructs this task how it can perform critical
/// operations such as blocking, rescheduling, I/O constructors, etc. The two
/// halves are separately owned, but one is often found contained in the other.
/// A task's runtime can be reflected upon with the `maybe_take_runtime` method,
/// and otherwise its ownership is managed with `take_runtime` and
/// `put_runtime`.
///
/// In general, this structure should not be used. This is meant to be an
/// unstable internal detail of the runtime itself. From time-to-time, however,
/// it is useful to manage tasks directly. An example of this would be
/// interoperating with the Rust runtime from FFI callbacks or such. For this
/// reason, there are two methods of note with the `Task` structure.
///
/// * `run` - This function will execute a closure inside the context of a task.
/// Failure is caught and handled via the task's on_exit callback. If
/// this fails, the task is still returned, but it can no longer be
/// used, it is poisoned.
///
/// * `destroy` - This is a required function to call to destroy a task. If a
/// task falls out of scope without calling `destroy`, its
/// destructor bomb will go off, aborting the process.
///
/// With these two methods, tasks can be re-used to execute code inside of its
/// context while having a point in the future where destruction is allowed.
/// More information can be found on these specific methods.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a task using a native runtime
/// let task = native::task::new((0, uint::MAX));
///
/// // Run some code, catching any possible failures
/// let task = task.run(|| {
/// // Run some code inside this task
/// println!("Hello with a native runtime!");
/// });
///
/// // Run some code again, catching the failure
/// let task = task.run(|| {
/// fail!("oh no, what to do!");
/// });
///
/// // Now that the task is failed, it can never be used again
/// assert!(task.is_destroyed());
///
/// // Deallocate the resources associated with this task
/// task.destroy();
/// # }
/// ```
pub struct Task {
pub heap: LocalHeap,
pub gc: GarbageCollector,
pub storage: LocalStorage,
pub unwinder: Unwinder,
pub death: Death,
pub name: Option<SendStr>,
state: TaskState,
imp: Option<Box<Runtime + Send + 'static>>,
}
// Once a task has entered the `Armed` state it must be destroyed via `drop`,
// and no other method. This state is used to track this transition.
#[deriving(PartialEq)]
enum TaskState {
New,
Armed,
Destroyed,
}
pub struct TaskOpts {
/// Invoke this procedure with the result of the task when it finishes.
pub on_exit: Option<proc(Result): Send>,
/// A name for the task-to-be, for identification in failure messages
pub name: Option<SendStr>,
/// The size of the stack for the spawned task
pub stack_size: Option<uint>,
}
/// Indicates the manner in which a task exited.
///
/// A task that completes without failing is considered to exit successfully.
///
/// If you wish for this result's delivery to block until all
/// children tasks complete, it is recommended to use a result future.
pub type Result = ::core::result::Result<(), Box<Any + Send>>;
pub struct GarbageCollector;
pub struct LocalStorage(pub Option<local_data::Map>);
/// A handle to a blocked task. Usually this means having the Box<Task>
/// pointer by ownership, but if the task is killable, a killer can steal it
/// at any time.
pub enum BlockedTask {
Owned(Box<Task>),
Shared(Arc<AtomicUint>),
}
/// Per-task state related to task death, killing, failure, etc.
pub struct Death {
pub on_exit: Option<proc(Result): Send>,
marker: marker::NoCopy,
}
pub struct BlockedTasks {
inner: Arc<AtomicUint>,
}
impl Task {
/// Creates a new uninitialized task.
///
/// This method cannot be used to immediately invoke `run` because the task
/// itself will likely require a runtime to be inserted via `put_runtime`.
///
/// Note that you likely don't want to call this function, but rather the
/// task creation functions through libnative or libgreen.
pub fn new() -> Task {
Task {
heap: LocalHeap::new(),
gc: GarbageCollector,
storage: LocalStorage(None),
unwinder: Unwinder::new(),
death: Death::new(),
state: New,
name: None,
imp: None,
}
}
/// Consumes ownership of a task, runs some code, and returns the task back.
///
/// This function can be used as an emulated "try/catch" to interoperate
/// with the rust runtime at the outermost boundary. It is not possible to
/// use this function in a nested fashion (a try/catch inside of another
/// try/catch). Invoking this function is quite cheap.
///
/// If the closure `f` succeeds, then the returned task can be used again
/// for another invocation of `run`. If the closure `f` fails then `self`
/// will be internally destroyed along with all of the other associated
/// resources of this task. The `on_exit` callback is invoked with the
/// cause of failure (not returned here). This can be discovered by querying
/// `is_destroyed()`.
///
/// Note that it is possible to view partial execution of the closure `f`
/// because it is not guaranteed to run to completion, but this function is
/// guaranteed to return if it fails. Care should be taken to ensure that
/// stack references made by `f` are handled appropriately.
///
/// It is invalid to call this function with a task that has been previously
/// destroyed via a failed call to `run`.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a new native task
/// let task = native::task::new((0, uint::MAX));
///
/// // Run some code once and then destroy this task
/// task.run(|| {
/// println!("Hello with a native runtime!");
/// }).destroy();
/// # }
/// ```
pub fn run(mut self: Box<Task>, f: ||) -> Box<Task> {
assert!(!self.is_destroyed(), "cannot re-use a destroyed task");
// First, make sure that no one else is in TLS. This does not allow
// recursive invocations of run(). If there's no one else, then
// relinquish ownership of ourselves back into TLS.
if Local::exists(None::<Task>) {
fail!("cannot run a task recursively inside another");
}
self.state = Armed;
Local::put(self);
// There are two primary reasons that general try/catch is unsafe. The
// first is that we do not support nested try/catch. The above check for
// an existing task in TLS is sufficient for this invariant to be
// upheld. The second is that unwinding while unwinding is not defined.
// We take care of that by having an 'unwinding' flag in the task
// itself. For these reasons, this unsafety should be ok.
let result = unsafe { unwind::try(f) };
// After running the closure given return the task back out if it ran
// successfully, or clean up the task if it failed.
let task: Box<Task> = Local::take();
match result {
Ok(()) => task,
Err(cause) => { task.cleanup(Err(cause)) }
}
}
/// Destroy all associated resources of this task.
///
/// This function will perform any necessary clean up to prepare the task
/// for destruction. It is required that this is called before a `Task`
/// falls out of scope.
///
/// The returned task cannot be used for running any more code, but it may
/// be used to extract the runtime as necessary.
pub fn destroy(self: Box<Task>) -> Box<Task> {
if self.is_destroyed() {
self
} else {
self.cleanup(Ok(()))
}
}
/// Cleans up a task, processing the result of the task as appropriate.
///
/// This function consumes ownership of the task, deallocating it once it's
/// done being processed. It is assumed that TLD and the local heap have
/// already been destroyed and/or annihilated.
fn cleanup(self: Box<Task>, result: Result) -> Box<Task> {
// The first thing to do when cleaning up is to deallocate our local
// resources, such as TLD and GC data.
//
// FIXME: there are a number of problems with this code
//
// 1. If any TLD object fails destruction, then all of TLD will leak.
// This appears to be a consequence of #14875.
//
// 2. Failing during GC annihilation aborts the runtime #14876.
//
// 3. Setting a TLD key while destroying TLD or while destroying GC will
// abort the runtime #14807.
//
// 4. Invoking GC in GC destructors will abort the runtime #6996.
//
// 5. The order of destruction of TLD and GC matters, but either way is
// susceptible to leaks (see 3/4) #8302.
//
// That being said, there are a few upshots to this code
//
// 1. If TLD destruction fails, heap destruction will be attempted.
// There is a test for this at fail-during-tld-destroy.rs. Sadly the
// other way can't be tested due to point 2 above. Note that we must
// immortalize the heap first because if any deallocations are
// attempted while TLD is being dropped it will attempt to free the
// allocation from the wrong heap (because the current one has been
// replaced).
//
// 2. One failure in destruction is tolerable, so long as the task
// didn't originally fail while it was running.
//
// And with all that in mind, we attempt to clean things up!
let mut task = self.run(|| {
let mut task = Local::borrow(None::<Task>);
let tld = {
let &LocalStorage(ref mut optmap) = &mut task.storage;
optmap.take()
};
let mut heap = mem::replace(&mut task.heap, LocalHeap::new());
unsafe { heap.immortalize() }
drop(task);
// First, destroy task-local storage. This may run user dtors.
drop(tld);
// Destroy remaining boxes. Also may run user dtors.
drop(heap);
});
// If the above `run` block failed, then it must be the case that the
// task had previously succeeded. This also means that the code below
// was recursively run via the `run` method invoking this method. In
// this case, we just make sure the world is as we thought, and return.
if task.is_destroyed() {
rtassert!(result.is_ok());
return task
}
// After taking care of the data above, we need to transmit the result
// of this task.
let what_to_do = task.death.on_exit.take();
Local::put(task);
// FIXME: this is running in a seriously constrained context. If this
// allocates GC or allocates TLD then it will likely abort the
// runtime. Similarly, if this fails, this will also likely abort
// the runtime.
//
// This closure is currently limited to a channel send via the
// standard library's task interface, but this needs
// reconsideration to whether it's a reasonable thing to let a
// task to do or not.
match what_to_do {
Some(f) => { f(result) }
None => { drop(result) }
}
// Now that we're done, we remove the task from TLS and flag it for
// destruction.
let mut task: Box<Task> = Local::take();
task.state = Destroyed;
return task;
}
/// Queries whether this can be destroyed or not.
pub fn is_destroyed(&self) -> bool { self.state == Destroyed }
/// Inserts a runtime object into this task, transferring ownership to the
/// task. It is illegal to replace a previous runtime object in this task
/// with this argument.
pub fn put_runtime(&mut self, ops: Box<Runtime + Send + 'static>) {
assert!(self.imp.is_none());
self.imp = Some(ops);
}
/// Removes the runtime from this task, transferring ownership to the
/// caller.
pub fn take_runtime(&mut self) -> Box<Runtime + Send + 'static> {
assert!(self.imp.is_some());
self.imp.take().unwrap()
}
/// Attempts to extract the runtime as a specific type. If the runtime does
/// not have the provided type, then the runtime is not removed. If the
/// runtime does have the specified type, then it is removed and returned
/// (transfer of ownership).
///
/// It is recommended to only use this method when *absolutely necessary*.
/// This function may not be available in the future.
pub fn maybe_take_runtime<T:'static>(&mut self) -> Option<Box<T>> {
// This is a terrible, terrible function. The general idea here is to
// take the runtime, cast it to Box<Any>, check if it has the right
// type, and then re-cast it back if necessary. The method of doing
// this is pretty sketchy and involves shuffling vtables of trait
// objects around, but it gets the job done.
//
// FIXME: This function is a serious code smell and should be avoided at
// all costs. I have yet to think of a method to avoid this
// function, and I would be saddened if more usage of the function
// crops up.
unsafe {
let imp = self.imp.take().unwrap();
let vtable = mem::transmute::<_, &raw::TraitObject>(&imp).vtable;
match imp.wrap().downcast::<T>() {
Ok(t) => Some(t),
Err(t) => {
let data = mem::transmute::<_, raw::TraitObject>(t).data;
let obj: Box<Runtime + Send + 'static> =
mem::transmute(raw::TraitObject {
vtable: vtable,
data: data,
});
self.put_runtime(obj);
None
}
}
}
}
/// Spawns a sibling to this task. The newly spawned task is configured with
/// the `opts` structure and will run `f` as the body of its code.
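///
/// A minimal sketch (illustrative only; assumes `task` is a `Box<Task>` that
/// already carries a runtime from libnative or libgreen):
///
/// ```ignore
/// task.spawn_sibling(TaskOpts::new(), proc() {
///     println!("hello from the sibling task");
/// });
/// ```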
pub fn spawn_sibling(mut self: Box<Task>,
opts: TaskOpts,
f: proc(): Send) {
let ops = self.imp.take().unwrap();
ops.spawn_sibling(self, opts, f)
}
/// Deschedules the current task, invoking `f` `amt` times. It is not
/// recommended to use this function directly, but rather communication
/// primitives in `std::comm` should be used.
pub fn deschedule(mut self: Box<Task>,
amt: uint,
f: |BlockedTask| -> ::core::result::Result<(), BlockedTask>) {
let ops = self.imp.take().unwrap();
ops.deschedule(amt, self, f)
}
/// Wakes up a previously blocked task. This function can only
/// be called on tasks that were previously blocked in `deschedule`.
pub fn reawaken(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.reawaken(self);
}
/// Yields control of this task to another task. This function will
/// eventually return, but possibly not immediately. This is used as an
/// opportunity to allow other tasks a chance to run.
pub fn yield_now(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.yield_now(self);
}
/// Similar to `yield_now`, except that this function may immediately return
/// without yielding (depending on what the runtime decides to do).
pub fn maybe_yield(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.maybe_yield(self);
}
/// Acquires a handle to the I/O factory that this task contains, normally
/// stored in the task's runtime. This factory may not always be available,
/// which is why the return type is `Option`
pub fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>> {
self.imp.as_mut().unwrap().local_io()
}
/// Returns the stack bounds for this task in (lo, hi) format. The stack
/// bounds may not be known for all tasks.
pub fn stack_bounds(&self) -> (uint, uint) {
self.imp.as_ref().unwrap().stack_bounds()
}
/// Returns whether it is legal for this task to block the OS thread that it
/// is running on.
pub fn can_block(&self) -> bool {
self.imp.as_ref().unwrap().can_block()
}
/// Consume this task, flagging it as a candidate for destruction.
///
/// This function is required to be invoked to destroy a task. A task
/// destroyed through a normal drop will abort.
pub fn drop(mut self) {
self.state = Destroyed;
}
}
impl Drop for Task {
fn drop(&mut self) {
rtdebug!("called drop for a task: {}", self as *mut Task as uint);
rtassert!(self.state != Armed);
}
}
impl TaskOpts {
pub fn new() -> TaskOpts {
TaskOpts { on_exit: None, name: None, stack_size: None }
}
}
impl Iterator<BlockedTask> for BlockedTasks {
fn next(&mut self) -> Option<BlockedTask> {
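// Every call yields another shared handle to the same wake flag; callers
// bound the otherwise endless stream with `take`, as `make_selectable` does.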
Some(Shared(self.inner.clone()))
}
}
impl BlockedTask {
/// Returns Some if the task was successfully woken; None if already killed.
pub fn wake(self) -> Option<Box<Task>> {
match self {
Owned(task) => Some(task),
Shared(arc) => {
match arc.swap(0, SeqCst) {
0 => None,
n => Some(unsafe { mem::transmute(n) }),
}
}
}
}
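    // Illustrative reading of the Shared arm above: the AtomicUint holds
    // either 0 (task already woken or killed) or the transmuted Box<Task>
    // pointer. Swapping in 0 both claims ownership and marks the flag dead,
    // so at most one caller ever rebuilds the task.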
/// Reawakens this task if ownership is acquired. If finer-grained control
/// is desired, use `wake` instead.
pub fn reawaken(self) {
self.wake().map(|t| t.reawaken());
}
// This assertion has two flavours because the wake involves an atomic op.
// In the faster version, destructors will fail dramatically instead.
#[cfg(not(test))] pub fn trash(self) { }
#[cfg(test)] pub fn trash(self) |
/// Create a blocked task, unless the task was already killed.
pub fn block(task: Box<Task>) -> BlockedTask {
Owned(task)
}
/// Converts one blocked task handle to a list of many handles to the same.
pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTasks> {
let arc = match self {
Owned(task) => {
let flag = unsafe { AtomicUint::new(mem::transmute(task)) };
Arc::new(flag)
}
Shared(arc) => arc.clone(),
};
BlockedTasks{ inner: arc }.take(num_handles)
}
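    // Illustrative note: `make_selectable(n)` yields up to `n` Shared handles
    // backed by a single AtomicUint, so whichever select arm calls `wake`
    // first wins the swap and the remaining handles observe 0 and do nothing.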
/// Convert to an unsafe uint value. Useful for storing in a pipe's state
/// flag.
#[inline]
pub unsafe fn cast_to_uint(self) -> uint {
match self {
Owned(task) => {
let blocked_task_ptr: uint = mem::transmute(task);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr
}
Shared(arc) => {
let blocked_task_ptr: uint = mem::transmute(box arc);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr | 0x1
}
}
}
/// Convert from an unsafe uint value. Useful for retrieving a pipe's state
/// flag.
#[inline]
pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
if blocked_task_ptr & 0x1 == 0 {
Owned(mem::transmute(blocked_task_ptr))
} else {
let ptr: Box<Arc<AtomicUint>> =
                mem::transmute(blocked_task_ptr & !1);
Shared(*ptr)
}
}
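    // Worked example (hypothetical address): a Shared handle whose box sits
    // at 0x7f00_0000_1000 is stored as 0x7f00_0000_1001; `cast_from_uint`
    // sees the set low bit, masks it off with `& !1`, and rebuilds the
    // Box<Arc<AtomicUint>>. An Owned task keeps bit 0 clear and round-trips
    // unchanged, which is why both arms of `cast_to_uint` assert
    // `blocked_task_ptr & 0x1 == 0` before tagging.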
}
impl Death {
pub fn new() -> Death {
Death { on_exit: None, marker: marker::NoCopy }
}
}
#[cfg(test)]
mod test {
use super::*;
use std::prelude::*;
use std::task;
use std::gc::{Gc, GC};
#[test]
fn local_heap() {
let a = box(GC) 5i;
let b = a;
assert!(*a == 5);
assert!(*b == 5);
}
#[test]
fn tls() {
local_data_key!(key: Gc<String>)
key.replace(Some(box(GC) "data".to_string()));
assert_eq!(key.get().unwrap().as_slice(), "data");
local_data_key!(key2: Gc<String>)
key2.replace(Some(box(GC) "data".to_string()));
assert_eq!(key2.get().unwrap().as_slice(), "data");
}
#[test]
fn unwind() {
let result = task::try(proc()());
rtdebug!("trying first assert");
assert!(result.is_ok());
let result = task::try::<()>(proc() fail!());
rtdebug!("trying second assert");
| { assert!(self.wake().is_none()); } | identifier_body |
htmlmodelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLModElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLModElementDerived; | use dom::document::Document;
use dom::element::HTMLModElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[dom_struct]
pub struct HTMLModElement {
htmlelement: HTMLElement
}
impl HTMLModElementDerived for EventTarget {
fn is_htmlmodelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLModElementTypeId))
}
}
impl HTMLModElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLModElement {
HTMLModElement {
htmlelement: HTMLElement::new_inherited(HTMLModElementTypeId, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLModElement> {
let element = HTMLModElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLModElementBinding::Wrap)
}
}
impl Reflectable for HTMLModElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
} | use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector}; | random_line_split |
htmlmodelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLModElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLModElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLModElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[dom_struct]
pub struct HTMLModElement {
htmlelement: HTMLElement
}
impl HTMLModElementDerived for EventTarget {
fn is_htmlmodelement(&self) -> bool |
}
impl HTMLModElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLModElement {
HTMLModElement {
htmlelement: HTMLElement::new_inherited(HTMLModElementTypeId, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLModElement> {
let element = HTMLModElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLModElementBinding::Wrap)
}
}
impl Reflectable for HTMLModElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
| {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLModElementTypeId))
} | identifier_body |
htmlmodelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLModElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLModElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLModElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[dom_struct]
pub struct HTMLModElement {
htmlelement: HTMLElement
}
impl HTMLModElementDerived for EventTarget {
fn is_htmlmodelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLModElementTypeId))
}
}
impl HTMLModElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLModElement {
HTMLModElement {
htmlelement: HTMLElement::new_inherited(HTMLModElementTypeId, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn | (localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLModElement> {
let element = HTMLModElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLModElementBinding::Wrap)
}
}
impl Reflectable for HTMLModElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
| new | identifier_name |
ui.rs | info.enable();
shader.texture_offset.enable();
shader.color.enable();
shader.position.vertex_pointer_int(3, gl::SHORT, 28, 0);
shader.texture_info.vertex_pointer(4, gl::UNSIGNED_SHORT, false, 28, 8);
shader.texture_offset.vertex_pointer_int(3, gl::SHORT, 28, 16);
shader.color.vertex_pointer(4, gl::UNSIGNED_BYTE, true, 28, 24);
let index_buffer = gl::Buffer::new();
index_buffer.bind(gl::ELEMENT_ARRAY_BUFFER);
let mut pages = Vec::with_capacity(0x100);
for _ in 0..0x100 {
pages.push(Option::None);
}
let mut char_map = HashMap::new();
let ascii_chars = "ÀÁÂÈÊËÍÓÔÕÚßãõğİıŒœŞşŴŵžȇ \
!\"#$%&'()*+,-./0123456789:;\
<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~ \
ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜø£Ø×ƒáíóúñѪº¿®¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞\
╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αβΓπΣσμτΦΘΩδ∞∅∈∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■";
for (pos, c) in ascii_chars.chars().enumerate() {
char_map.insert(c, ::std::char::from_u32(pos as u32).unwrap());
}
let mut state = UIState {
textures: textures,
resources: res,
version: 0xFFFF,
data: Vec::new(),
count: 0,
prev_size: 0,
index_type: gl::UNSIGNED_BYTE,
array: array,
buffer: buffer,
index_buffer: index_buffer,
max_index: 0,
shader: shader,
// Font
font_pages: pages,
font_character_info: vec![(0, 0); 0x10000],
char_map: char_map,
page_width: 0.0,
page_height: 0.0,
};
state.load_font();
state
}
pub fn tick(&mut self, width: u32, height: u32) {
{
let version = self.resources.read().unwrap().version();
            if self.version != version {
self.version = version;
self.load_font();
}
}
// Prevent clipping with the world
gl::clear(gl::ClearFlags::Depth);
gl::enable(gl::BLEND);
gl::blend_func(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
self.shader.program.use_program();
self.shader.texture.set_int(0);
if self.count > 0 {
self.array.bind();
if self.max_index < self.count {
let (data, ty) = render::generate_element_buffer(self.count);
self.index_type = ty;
self.index_buffer.bind(gl::ELEMENT_ARRAY_BUFFER);
self.index_buffer.set_data(gl::ELEMENT_ARRAY_BUFFER, &data, gl::DYNAMIC_DRAW);
self.max_index = self.count;
}
self.shader.screensize.set_float2(width as f32, height as f32);
self.buffer.bind(gl::ARRAY_BUFFER);
self.index_buffer.bind(gl::ELEMENT_ARRAY_BUFFER);
if self.data.len() > self.prev_size {
self.prev_size = self.data.len();
self.buffer.set_data(gl::ARRAY_BUFFER, &self.data, gl::STREAM_DRAW);
} else {
self.buffer.re_set_data(gl::ARRAY_BUFFER, &self.data);
}
gl::draw_elements(gl::TRIANGLES, self.count as i32, self.index_type, 0);
}
gl::disable(gl::BLEND);
self.data.clear();
self.count = 0;
}
pub fn add_bytes(&mut self, data: &[u8]) {
self.data.extend_from_slice(data);
self.count += (data.len() / (28 * 4)) * 6;
}
pub fn character_texture(&mut self, c: char) -> render::Texture {
let raw = c as u32;
let page = raw >> 8;
        // Lazy load fonts to save memory
if self.font_pages[page as usize].is_none() {
let name = if page == 0 {
"font/ascii".to_owned()
} else {
format!("font/unicode_page_{:02X}", page)
};
let textures = self.textures.clone();
self.font_pages[page as usize] = Some(render::Renderer::get_texture(&textures, &name));
}
let p = self.font_pages[page as usize].clone().unwrap();
let raw = if page == 0 {
(*self.char_map.get(&c).unwrap_or(&c)) as u32
} else {
raw
};
let ch = raw & 0xFF;
let cx = ch & 0xF;
let cy = ch >> 4;
let info = self.font_character_info[raw as usize];
if page == 0 {
let sw = (self.page_width / 16.0) as u32;
let sh = (self.page_height / 16.0) as u32;
return p.relative((cx * sw + info.0 as u32) as f32 / (self.page_width as f32),
(cy * sh) as f32 / (self.page_height as f32),
(info.1 - info.0) as f32 / (self.page_width as f32),
(sh as f32) / (self.page_height as f32))
}
p.relative((cx * 16 + info.0 as u32) as f32 / 256.0,
(cy * 16) as f32 / 256.0,
(info.1 - info.0) as f32 / 256.0,
16.0 / 256.0)
}
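    // Worked example (hypothetical code point): for U+0100 the page is
    // 0x0100 >> 8 = 1, so "font/unicode_page_01" is loaded lazily; ch = 0x00
    // gives cx = 0 and cy = 0, and the glyph is sampled from a 16x16 cell of
    // the 256-pixel page assumed by the arithmetic above, trimmed
    // horizontally by the start/end columns in font_character_info.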
pub fn size_of_string(&self, val: &str) -> f64 {
let mut size = 0.0;
for c in val.chars() {
size += self.size_of_char(c) + 2.0;
}
size - 2.0
}
pub fn size_of_char(&self, c: char) -> f64 {
        if c == ' ' {
return 4.0;
}
let r = c as u32;
if r >> 8 == 0 {
let r = (*self.char_map.get(&c).unwrap_or(&c)) as u32;
let info = self.font_character_info[r as usize];
let sw = self.page_width / 16.0;
return (((info.1 - info.0) as f64) / sw) * 16.0;
}
let info = self.font_character_info[c as usize];
(info.1 - info.0) as f64
}
fn load_font(&mut self) {
for page in &mut self.font_pages {
*page = None;
}
let res = self.resources.read().unwrap();
if let Some(mut info) = res.open("minecraft", "font/glyph_sizes.bin") {
let mut data = Vec::with_capacity(0x10000);
info.read_to_end(&mut data).unwrap();
for (i, info) in self.font_character_info.iter_mut().enumerate() {
// Top nibble - start position
// Bottom nibble - end position
info.0 = (data[i] >> 4) as i32;
info.1 = (data[i] & 0xF) as i32 + 1;
}
}
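        // Worked example (hypothetical byte): a glyph_sizes.bin entry of 0x2C
        // unpacks to start = 0x2 and end = 0xC + 1 = 13, i.e. that glyph is
        // treated as spanning columns 2..13 of its 16-pixel-wide cell.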
if let Some(mut val) = res.open("minecraft", "textures/font/ascii.png") {
let mut data = Vec::new();
val.read_to_end(&mut data).unwrap();
if let Ok(img) = image::load_from_memory(&data) {
let (width, height) = img.dimensions();
self.page_width = width as f64;
self.page_height = height as f64;
let sw = width / 16;
let sh = height / 16;
for i in 0..256 {
let cx = (i & 0xF) * sw;
let cy = (i >> 4) * sh;
let mut start = true;
'x_loop: for x in 0..sw {
for y in 0..sh {
let a = img.get_pixel(cx + x, cy + y).data[3];
                            if start && a != 0 {
self.font_character_info[i as usize].0 = x as i32;
start = false;
continue 'x_loop;
                            } else if !start && a != 0 {
continue 'x_loop;
}
}
                        if !start {
self.font_character_info[i as usize].1 = x as i32;
break;
}
}
}
}
}
}
pub fn new_text(&mut self, val: &str, x: f64, y: f64, r: u8, g: u8, b: u8) -> UIText {
self.new_text_scaled(val, x, y, 1.0, 1.0, r, g, b)
}
pub fn new_text_scaled(&mut self,
val: &str,
x: f64,
y: f64,
sx: f64,
sy: f64,
| r: u8,
g: u8,
b: u8)
-> UIText {
self.create_text(val, x, y, sx, sy, 0.0, r, g, b)
}
pub fn new_text_rotated(&mut self,
val: &str,
x: f64,
y: f64,
sx: f64,
sy: f64,
rotation: f64,
r: u8,
g: u8,
b: u8)
-> UIText {
self.create_text(val, x, y, sx, sy, rotation, r, g, b)
}
fn create_text(&mut self,
val: &str,
x: f64,
y: f64,
sx: f64,
sy: f64,
rotation: f64,
r: u8,
g: u8,
b: u8)
-> UIText {
let mut elements = Vec::new();
let mut offset = 0.0;
for ch in val.chars() {
            if ch == ' ' {
offset += 6.0;
continue;
}
let texture = self.character_texture(ch);
let w = self.size_of_char(ch);
let mut dsx = offset + 2.0;
let mut dsy = 2.0;
let mut dx = offset;
let mut dy = 0.0;
            if rotation != 0.0 {
let c = rotation.cos();
let s = rotation.sin();
let tmpx = dsx - (w * 0.5);
let tmpy = dsy - (16.0 * 0.5);
dsx = (w * 0.5) + (tmpx * c - tmpy * s);
dsy = (16.0 * 0.5) + (tmpy * c + tmpx * s);
let tmpx = dx - (w * 0.5);
let tmpy = dy - (16.0 * 0.5);
dx = (w * 0.5) + (tmpx * c - tmpy * s);
dy = (16.0 * 0.5) + (tmpy * c + tmpx * s);
}
let mut shadow = UIElement::new(&texture,
x + dsx * sx,
y + dsy * sy,
w * sx,
16.0 * sy,
0.0,
0.0,
1.0,
1.0);
shadow.r = ((r as f64) * 0.25) as u8;
shadow.g = ((g as f64) * 0.25) as u8;
shadow.b = ((b as f64) * 0.25) as u8;
shadow.rotation = rotation;
elements.push(shadow);
let mut text = UIElement::new(&texture,
x + dx * sx,
y + dy * sy,
w * sx,
16.0 * sy,
0.0,
0.0,
1.0,
1.0);
text.r = r;
text.g = g;
text.b = b;
text.rotation = rotation;
elements.push(text);
offset += w + 2.0;
}
UIText {
elements: elements,
width: (offset - 2.0) * sx,
}
}
}
pub struct UIText {
pub elements: Vec<UIElement>,
pub width: f64,
}
impl UIText {
pub fn bytes(&self, width: f64, height: f64) -> Vec<u8> {
let mut buf = Vec::with_capacity(28 * 4 * self.elements.len());
for e in &self.elements {
buf.extend(e.bytes(width, height));
}
buf
}
}
pub struct UIElement {
pub x: f64,
pub y: f64,
pub w: f64,
pub h: f64,
pub layer: isize,
pub t_x: u16,
pub t_y: u16,
pub t_w: u16,
pub t_h: u16,
pub t_offsetx: i16,
pub t_offsety: i16,
pub t_atlas: i16,
pub t_sizew: i16,
pub t_sizeh: i16,
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
pub rotation: f64,
}
impl UIElement {
pub fn new(tex: &render::Texture,
x: f64,
y: f64,
width: f64,
height: f64,
tx: f64,
ty: f64,
tw: f64,
th: f64)
-> UIElement {
let twidth = tex.get_width();
let theight = tex.get_height();
UIElement {
x: x / UI_WIDTH,
y: y / UI_HEIGHT,
w: width / UI_WIDTH,
h: height / UI_HEIGHT,
layer: 0,
t_x: tex.get_x() as u16,
t_y: tex.get_y() as u16,
t_w: twidth as u16,
t_h: theight as u16,
t_atlas: tex.atlas as i16,
| identifier_name |
|
ui.rs | _info.enable();
shader.texture_offset.enable();
shader.color.enable();
shader.position.vertex_pointer_int(3, gl::SHORT, 28, 0);
shader.texture_info.vertex_pointer(4, gl::UNSIGNED_SHORT, false, 28, 8);
shader.texture_offset.vertex_pointer_int(3, gl::SHORT, 28, 16);
shader.color.vertex_pointer(4, gl::UNSIGNED_BYTE, true, 28, 24);
let index_buffer = gl::Buffer::new();
index_buffer.bind(gl::ELEMENT_ARRAY_BUFFER);
let mut pages = Vec::with_capacity(0x100);
for _ in 0..0x100 {
pages.push(Option::None);
}
let mut char_map = HashMap::new();
let ascii_chars = "ÀÁÂÈÊËÍÓÔÕÚßãõğİıŒœŞşŴŵžȇ \
!\"#$%&'()*+,-./0123456789:;\
<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~ \
ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜø£Ø×ƒáíóúñѪº¿®¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞\
╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αβΓπΣσμτΦΘΩδ∞∅∈∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■";
for (pos, c) in ascii_chars.chars().enumerate() {
char_map.insert(c, ::std::char::from_u32(pos as u32).unwrap());
}
let mut state = UIState {
textures: textures,
resources: res,
version: 0xFFFF,
data: Vec::new(),
count: 0,
prev_size: 0,
index_type: gl::UNSIGNED_BYTE,
array: array,
buffer: buffer,
index_buffer: index_buffer,
max_index: 0,
shader: shader,
// Font
font_pages: pages,
font_character_info: vec![(0, 0); 0x10000],
char_map: char_map,
page_width: 0.0,
page_height: 0.0,
};
state.load_font();
state
}
pub fn tick(&mut self, width: u32, height: u32) {
{
let version = self.resources.read().unwrap().version();
            if self.version != version {
self.version = version;
self.load_font();
}
}
// Prevent clipping with the world
gl::clear(gl::ClearFlags::Depth);
gl::enable(gl::BLEND);
gl::blend_func(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
self.shader.program.use_program();
self.shader.texture.set_int(0);
if self.count > 0 {
self.array.bind();
if self.max_index < self.count {
let (data, ty) = render::generate_element_buffer(self.count);
self.index_type = ty;
self.index_buffer.bind(gl::ELEMENT_ARRAY_BUFFER);
self.index_buffer.set_data(gl::ELEMENT_ARRAY_BUFFER, &data, gl::DYNAMIC_DRAW);
self.max_index = self.count;
}
self.shader.screensize.set_float2(width as f32, height as f32);
self.buffer.bind(gl::ARRAY_BUFFER);
self.index_buffer.bind(gl::ELEMENT_ARRAY_BUFFER);
if self.data.len() > self.prev_size {
self.prev_size = self.data.len();
self.buffer.set_data(gl::ARRAY_BUFFER, &self.data, gl::STREAM_DRAW);
} else {
self.buffer.re_set_data(gl::ARRAY_BUFFER, &self.data);
}
gl::draw_elements(gl::TRIANGLES, self.count as i32, self.index_type, 0);
}
gl::disable(gl::BLEND);
self.data.clear();
self.count = 0;
}
pub fn add_bytes(&mut self, data: &[u8]) {
self.data.extend_from_slice(data);
self.count += (data.len() / (28 * 4)) * 6;
}
pub fn character_texture(&mut self, c: char) -> render::Texture {
let raw = c as u32;
let page = raw >> 8;
        // Lazy load fonts to save memory
if self.font_pages[page as usize].is_none() {
let name = if page == 0 {
"font/ascii".to_owned()
} else {
format!("font/unicode_page_{:02X}", page)
};
let textures = self.textures.clone();
self.font_pages[page as usize] = Some(render::Renderer::get_texture(&textures, &name));
}
let p = self.font_pages[page as usize].clone().unwrap();
let raw = if page == 0 {
(*self.char_map.get(&c).unwrap_or(&c)) as u32
} else {
raw
};
let ch = raw & 0xFF;
let cx = ch & 0xF;
let cy = ch >> 4;
let info = self.font_character_info[raw as usize];
if page == 0 {
let sw = (self.page_width / 16.0) as u32;
let sh = (self.page_height / 16.0) as u32;
return p.relative((cx * sw + info.0 as u32) as f32 / (self.page_width as f32),
(cy * sh) as f32 / (self.page_height as f32),
(info.1 - info.0) as f32 / (self.page_width as f32),
(sh as f32) / (self.page_height as f32))
}
p.relative((cx * 16 + info.0 as u32) as f32 / 256.0,
(cy * 16) as f32 / 256.0,
(info.1 - info.0) as f32 / 256.0,
16.0 / 256.0)
}
pub fn size_of_string(&self, val: &str) -> f64 {
let mut size = 0.0;
for c in val.chars() {
size += self.size_of_char(c) + 2.0;
}
size - 2.0
}
pub fn size_of_char(&self, c: char) -> f64 {
        if c == ' ' {
return 4.0;
}
let r = c as u32;
if r >> 8 == 0 {
let r = (*self.char_map.get(&c).unwrap_or(&c)) as u32;
let info = self.font_character_info[r as usize];
let sw = self.page_width / 16.0;
return (((info.1 - info.0) as f64) / sw) * 16.0;
}
let info = self.font_character_info[c as usize];
(info.1 - info.0) as f64
}
fn load_font(&mut self) {
for page in &mut self.font_pages {
*page = None;
}
let res = self.resources.read().unwrap();
if let Some(mut info) = res.open("minecraft", "font/glyph_sizes.bin") {
let mut data = Vec::with_capacity(0x10000);
info.read_to_end(&mut data).unwrap();
for (i, info) in self.font_character_info.iter_mut().enumerate() {
// Top nibble - start position
// Bottom nibble - end position
info.0 = (data[i] >> 4) as i32;
info.1 = (data[i] & 0xF) as i32 + 1;
}
}
if let Some(mut val) = res.open("minecraft", "textures/font/ascii.png") {
let mut data = Vec::new();
val.read_to_end(&mut data).unwrap();
if let Ok(img) = image::load_from_memory(&data) {
let (width, height) = img.dimensions();
self.page_width = width as f64;
self.page_height = height as f64;
let sw = width / 16;
let sh = height / 16;
for i in 0..256 {
let cx = (i & 0xF) * sw;
let cy = (i >> 4) * sh;
let mut start = true;
'x_loop: for x in 0..sw {
for y in 0..sh {
let a = img.get_pixel(cx + x, cy + y).data[3];
                            if start && a != 0 {
self.font_character_info[i as usize].0 = x as i32;
start = false;
continue 'x_loop;
                            } else if !start && a != 0 {
continue 'x_loop;
}
}
                        if !start {
self.font_character_info[i as usize].1 = x as i32;
break;
}
}
}
}
}
}
pub fn new_text(&mut self, val: &str, x: f64, y: f64, r: u8, g: u8, b: u8) -> UIText {
self.new_text_scaled(val, x, y, 1.0, 1.0, r, g, b)
}
pub fn new_text_scaled(&mut self,
val: &str,
x: f64,
y: f64,
sx: f64,
sy: f64,
r: u8,
g: u8,
b: u8)
-> UIText {
self.create_text(val, x, y, sx, sy, 0.0, r, g, b)
}
pub fn new_text_rotated(&mut self,
val: &str,
x: f64,
y: f64,
sx: f64,
sy: f64,
rotation: f64,
r: u8, | g: u8,
b: u8)
-> UIText {
self.create_text(val, x, y, sx, sy, rotation, r, g, b)
}
fn create_text(&mut self,
val: &str,
x: f64,
y: f64,
sx: f64,
sy: f64,
rotation: f64,
r: u8,
g: u8,
b: u8)
-> UIText {
let mut elements = Vec::new();
let mut offset = 0.0;
for ch in val.chars() {
            if ch == ' ' {
offset += 6.0;
continue;
}
let texture = self.character_texture(ch);
let w = self.size_of_char(ch);
let mut dsx = offset + 2.0;
let mut dsy = 2.0;
let mut dx = offset;
let mut dy = 0.0;
            if rotation != 0.0 {
let c = rotation.cos();
let s = rotation.sin();
let tmpx = dsx - (w * 0.5);
let tmpy = dsy - (16.0 * 0.5);
dsx = (w * 0.5) + (tmpx * c - tmpy * s);
dsy = (16.0 * 0.5) + (tmpy * c + tmpx * s);
let tmpx = dx - (w * 0.5);
let tmpy = dy - (16.0 * 0.5);
dx = (w * 0.5) + (tmpx * c - tmpy * s);
dy = (16.0 * 0.5) + (tmpy * c + tmpx * s);
}
let mut shadow = UIElement::new(&texture,
x + dsx * sx,
y + dsy * sy,
w * sx,
16.0 * sy,
0.0,
0.0,
1.0,
1.0);
shadow.r = ((r as f64) * 0.25) as u8;
shadow.g = ((g as f64) * 0.25) as u8;
shadow.b = ((b as f64) * 0.25) as u8;
shadow.rotation = rotation;
elements.push(shadow);
let mut text = UIElement::new(&texture,
x + dx * sx,
y + dy * sy,
w * sx,
16.0 * sy,
0.0,
0.0,
1.0,
1.0);
text.r = r;
text.g = g;
text.b = b;
text.rotation = rotation;
elements.push(text);
offset += w + 2.0;
}
UIText {
elements: elements,
width: (offset - 2.0) * sx,
}
}
}
pub struct UIText {
pub elements: Vec<UIElement>,
pub width: f64,
}
impl UIText {
pub fn bytes(&self, width: f64, height: f64) -> Vec<u8> {
let mut buf = Vec::with_capacity(28 * 4 * self.elements.len());
for e in &self.elements {
buf.extend(e.bytes(width, height));
}
buf
}
}
pub struct UIElement {
pub x: f64,
pub y: f64,
pub w: f64,
pub h: f64,
pub layer: isize,
pub t_x: u16,
pub t_y: u16,
pub t_w: u16,
pub t_h: u16,
pub t_offsetx: i16,
pub t_offsety: i16,
pub t_atlas: i16,
pub t_sizew: i16,
pub t_sizeh: i16,
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
pub rotation: f64,
}
impl UIElement {
pub fn new(tex: &render::Texture,
x: f64,
y: f64,
width: f64,
height: f64,
tx: f64,
ty: f64,
tw: f64,
th: f64)
-> UIElement {
let twidth = tex.get_width();
let theight = tex.get_height();
UIElement {
x: x / UI_WIDTH,
y: y / UI_HEIGHT,
w: width / UI_WIDTH,
h: height / UI_HEIGHT,
layer: 0,
t_x: tex.get_x() as u16,
t_y: tex.get_y() as u16,
t_w: twidth as u16,
t_h: theight as u16,
t_atlas: tex.atlas as i16,
| random_line_split |
|
ui.rs | info.enable();
shader.texture_offset.enable();
shader.color.enable();
shader.position.vertex_pointer_int(3, gl::SHORT, 28, 0);
shader.texture_info.vertex_pointer(4, gl::UNSIGNED_SHORT, false, 28, 8);
shader.texture_offset.vertex_pointer_int(3, gl::SHORT, 28, 16);
shader.color.vertex_pointer(4, gl::UNSIGNED_BYTE, true, 28, 24);
let index_buffer = gl::Buffer::new();
index_buffer.bind(gl::ELEMENT_ARRAY_BUFFER);
let mut pages = Vec::with_capacity(0x100);
for _ in 0..0x100 {
pages.push(Option::None);
}
let mut char_map = HashMap::new();
let ascii_chars = "ÀÁÂÈÊËÍÓÔÕÚßãõğİıŒœŞşŴŵžȇ \
!\"#$%&'()*+,-./0123456789:;\
<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~ \
ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜø£Ø×ƒáíóúñѪº¿®¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞\
╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αβΓπΣσμτΦΘΩδ∞∅∈∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■";
for (pos, c) in ascii_chars.chars().enumerate() {
char_map.insert(c, ::std::char::from_u32(pos as u32).unwrap());
}
let mut state = UIState {
textures: textures,
resources: res,
version: 0xFFFF,
data: Vec::new(),
count: 0,
prev_size: 0,
index_type: gl::UNSIGNED_BYTE,
array: array,
buffer: buffer,
index_buffer: index_buffer,
max_index: 0,
shader: shader,
// Font
font_pages: pages,
font_character_info: vec![(0, 0); 0x10000],
char_map: char_map,
page_width: 0.0,
page_height: 0.0,
};
state.load_font();
state
}
pub fn tick(&mut self, width: u32, height: u32) {
{
let version = self.resources.read().unwrap().version();
            if self.version != version {
self.version = version;
self.load_font();
}
}
// Prevent clipping with the world
gl::clear(gl::ClearFlags::Depth);
gl::enable(gl::BLEND);
gl::blend_func(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
self.shader.program.use_program();
self.shader.texture.set_int(0);
if self.count > 0 {
self.array.bind();
if self.max_index < self.count {
let (data, ty) = render::generate_element_buffer(self.count);
self.index_type = ty;
self.index_buffer.bind(gl::ELEMENT_ARRAY_BUFFER);
self.index_buffer.set_data(gl::ELEMENT_ARRAY_BUFFER, &data, gl::DYNAMIC_DRAW);
self.max_index = self.count;
}
self.shader.screensize.set_float2(width as f32, height as f32);
self.buffer.bind(gl::ARRAY_BUFFER);
self.index_buffer.bind(gl::ELEMENT_ARRAY_BUFFER);
if self.data.len() > self.prev_size {
self.prev_size = self.data.len();
self.buffer.set_data(gl::ARRAY_BUFFER, &self.data, gl::STREAM_DRAW);
} else {
self.buffer.re_set_data(gl::ARRAY_BUFFER, &self.data);
}
gl::draw_elements(gl::TRIANGLES, self.count as i32, self.index_type, 0);
}
gl::disable(gl::BLEND);
self.data.clear();
self.count = 0;
}
pub fn add_bytes(&mut self, data: &[u8]) {
self.data.extend_from_slice(data);
self.count += (data.len() / (28 * 4)) * 6;
}
pub fn character_texture(&mut self, c: char) -> render::Texture {
let raw = c as u32;
let page = raw >> 8;
        // Lazy load fonts to save memory
if self.font_pages[page as usize].is_none() {
let name = if page == 0 {
"font/ascii".to_owned()
} else {
format!("font/unicode_page_{:02X}", page)
};
let textures = self.textures.clone();
self.font_pages[page as usize] = Some(render::Renderer::get_texture(&textures, &name));
}
let p = self.font_pages[page as usize].clone().unwrap();
let raw = if page == 0 {
(*self.char_map.get(&c).unwrap_or(&c)) as u32
} else {
raw
};
let ch = raw & 0xFF;
let cx = ch & 0xF;
let cy = ch >> 4;
let info = self.font | sw = (self.page_width / 16.0) as u32;
let sh = (self.page_height / 16.0) as u32;
return p.relative((cx * sw + info.0 as u32) as f32 / (self.page_width as f32),
(cy * sh) as f32 / (self.page_height as f32),
(info.1 - info.0) as f32 / (self.page_width as f32),
(sh as f32) / (self.page_height as f32))
}
p.relative((cx * 16 + info.0 as u32) as f32 / 256.0,
(cy * 16) as f32 / 256.0,
(info.1 - info.0) as f32 / 256.0,
16.0 / 256.0)
}
pub fn size_of_string(&self, val: &str) -> f64 {
let mut size = 0.0;
for c in val.chars() {
size += self.size_of_char(c) + 2.0;
}
size - 2.0
}
pub fn size_of_char(&self, c: char) -> f64 {
        if c == ' ' {
return 4.0;
}
let r = c as u32;
if r >> 8 == 0 {
let r = (*self.char_map.get(&c).unwrap_or(&c)) as u32;
let info = self.font_character_info[r as usize];
let sw = self.page_width / 16.0;
return (((info.1 - info.0) as f64) / sw) * 16.0;
}
let info = self.font_character_info[c as usize];
(info.1 - info.0) as f64
}
fn load_font(&mut self) {
for page in &mut self.font_pages {
*page = None;
}
let res = self.resources.read().unwrap();
if let Some(mut info) = res.open("minecraft", "font/glyph_sizes.bin") {
let mut data = Vec::with_capacity(0x10000);
info.read_to_end(&mut data).unwrap();
for (i, info) in self.font_character_info.iter_mut().enumerate() {
// Top nibble - start position
// Bottom nibble - end position
info.0 = (data[i] >> 4) as i32;
info.1 = (data[i] & 0xF) as i32 + 1;
}
}
if let Some(mut val) = res.open("minecraft", "textures/font/ascii.png") {
let mut data = Vec::new();
val.read_to_end(&mut data).unwrap();
if let Ok(img) = image::load_from_memory(&data) {
let (width, height) = img.dimensions();
self.page_width = width as f64;
self.page_height = height as f64;
let sw = width / 16;
let sh = height / 16;
for i in 0..256 {
let cx = (i & 0xF) * sw;
let cy = (i >> 4) * sh;
let mut start = true;
'x_loop: for x in 0..sw {
for y in 0..sh {
let a = img.get_pixel(cx + x, cy + y).data[3];
                            if start && a != 0 {
self.font_character_info[i as usize].0 = x as i32;
start = false;
continue 'x_loop;
                            } else if !start && a != 0 {
continue 'x_loop;
}
}
                        if !start {
self.font_character_info[i as usize].1 = x as i32;
break;
}
}
}
}
}
}
pub fn new_text(&mut self, val: &str, x: f64, y: f64, r: u8, g: u8, b: u8) -> UIText {
self.new_text_scaled(val, x, y, 1.0, 1.0, r, g, b)
}
pub fn new_text_scaled(&mut self,
val: &str,
x: f64,
y: f64,
sx: f64,
sy: f64,
r: u8,
g: u8,
b: u8)
-> UIText {
self.create_text(val, x, y, sx, sy, 0.0, r, g, b)
}
pub fn new_text_rotated(&mut self,
val: &str,
x: f64,
y: f64,
sx: f64,
sy: f64,
rotation: f64,
r: u8,
g: u8,
b: u8)
-> UIText {
self.create_text(val, x, y, sx, sy, rotation, r, g, b)
}
fn create_text(&mut self,
val: &str,
x: f64,
y: f64,
sx: f64,
sy: f64,
rotation: f64,
r: u8,
g: u8,
b: u8)
-> UIText {
let mut elements = Vec::new();
let mut offset = 0.0;
for ch in val.chars() {
            if ch == ' ' {
offset += 6.0;
continue;
}
let texture = self.character_texture(ch);
let w = self.size_of_char(ch);
let mut dsx = offset + 2.0;
let mut dsy = 2.0;
let mut dx = offset;
let mut dy = 0.0;
            if rotation != 0.0 {
let c = rotation.cos();
let s = rotation.sin();
let tmpx = dsx - (w * 0.5);
let tmpy = dsy - (16.0 * 0.5);
dsx = (w * 0.5) + (tmpx * c - tmpy * s);
dsy = (16.0 * 0.5) + (tmpy * c + tmpx * s);
let tmpx = dx - (w * 0.5);
let tmpy = dy - (16.0 * 0.5);
dx = (w * 0.5) + (tmpx * c - tmpy * s);
dy = (16.0 * 0.5) + (tmpy * c + tmpx * s);
}
let mut shadow = UIElement::new(&texture,
x + dsx * sx,
y + dsy * sy,
w * sx,
16.0 * sy,
0.0,
0.0,
1.0,
1.0);
shadow.r = ((r as f64) * 0.25) as u8;
shadow.g = ((g as f64) * 0.25) as u8;
shadow.b = ((b as f64) * 0.25) as u8;
shadow.rotation = rotation;
elements.push(shadow);
let mut text = UIElement::new(&texture,
x + dx * sx,
y + dy * sy,
w * sx,
16.0 * sy,
0.0,
0.0,
1.0,
1.0);
text.r = r;
text.g = g;
text.b = b;
text.rotation = rotation;
elements.push(text);
offset += w + 2.0;
}
UIText {
elements: elements,
width: (offset - 2.0) * sx,
}
}
}
pub struct UIText {
pub elements: Vec<UIElement>,
pub width: f64,
}
impl UIText {
pub fn bytes(&self, width: f64, height: f64) -> Vec<u8> {
let mut buf = Vec::with_capacity(28 * 4 * self.elements.len());
for e in &self.elements {
buf.extend(e.bytes(width, height));
}
buf
}
}
pub struct UIElement {
pub x: f64,
pub y: f64,
pub w: f64,
pub h: f64,
pub layer: isize,
pub t_x: u16,
pub t_y: u16,
pub t_w: u16,
pub t_h: u16,
pub t_offsetx: i16,
pub t_offsety: i16,
pub t_atlas: i16,
pub t_sizew: i16,
pub t_sizeh: i16,
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
pub rotation: f64,
}
impl UIElement {
pub fn new(tex: &render::Texture,
x: f64,
y: f64,
width: f64,
height: f64,
tx: f64,
ty: f64,
tw: f64,
th: f64)
-> UIElement {
let twidth = tex.get_width();
let theight = tex.get_height();
UIElement {
x: x / UI_WIDTH,
y: y / UI_HEIGHT,
w: width / UI_WIDTH,
h: height / UI_HEIGHT,
layer: 0,
t_x: tex.get_x() as u16,
t_y: tex.get_y() as u16,
t_w: twidth as u16,
t_h: theight as u16,
t_atlas: tex.atlas as i16,
| _character_info[raw as usize];
if page == 0 {
let | conditional_block |
font_context.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use freetype::freetype::FT_Add_Default_Modules;
use freetype::freetype::FT_Done_Library;
use freetype::freetype::FT_Library;
use freetype::freetype::FT_Memory;
use freetype::freetype::FT_MemoryRec_;
use freetype::freetype::FT_New_Library;
use freetype::succeeded;
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use servo_allocator::libc_compat::{malloc, realloc, free};
use servo_allocator::usable_size;
use std::os::raw::{c_long, c_void};
use std::ptr;
use std::rc::Rc;
// We pass a |User| struct -- via an opaque |void*| -- to FreeType each time a new instance is
// created. FreeType passes it back to the ft_alloc/ft_realloc/ft_free callbacks. We use it to
// record the memory usage of each FreeType instance.
pub struct User {
size: usize,
}
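// Sketch of the accounting (sizes are illustrative): if FreeType requests 100
// bytes and `usable_size` reports 112 for the returned pointer, ft_alloc adds
// 112 to `size`; the matching ft_free subtracts the same usable size, so
// `size` always reflects this instance's live FreeType heap usage.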
extern "C" fn ft_alloc(mem: FT_Memory, req_size: c_long) -> *mut c_void {
unsafe {
let ptr = malloc(req_size as usize);
let ptr = ptr as *mut c_void; // libc::c_void vs std::os::raw::c_void
let actual_size = usable_size(ptr);
let user = (*mem).user as *mut User;
(*user).size += actual_size;
ptr
}
}
extern "C" fn ft_free(mem: FT_Memory, ptr: *mut c_void) {
unsafe {
let actual_size = usable_size(ptr);
let user = (*mem).user as *mut User;
(*user).size -= actual_size;
free(ptr as *mut _);
}
}
extern "C" fn ft_realloc(
mem: FT_Memory,
_old_size: c_long,
new_req_size: c_long,
old_ptr: *mut c_void,
) -> *mut c_void {
unsafe {
let old_actual_size = usable_size(old_ptr);
let new_ptr = realloc(old_ptr as *mut _, new_req_size as usize);
let new_ptr = new_ptr as *mut c_void;
let new_actual_size = usable_size(new_ptr);
let user = (*mem).user as *mut User;
(*user).size += new_actual_size;
(*user).size -= old_actual_size;
new_ptr
}
}
// A |*mut User| field in a struct triggers a "use of `#[derive]` with a raw pointer" warning from
// rustc. But using a typedef avoids this, so...
pub type UserPtr = *mut User;
// WARNING: We need to be careful how we use this struct. See the comment about Rc<> in
// FontContextHandle.
#[derive(Clone, Debug)]
pub struct FreeTypeLibraryHandle {
pub ctx: FT_Library,
mem: FT_Memory,
user: UserPtr,
}
impl Drop for FreeTypeLibraryHandle {
fn drop(&mut self) {
assert!(!self.ctx.is_null());
unsafe {
FT_Done_Library(self.ctx);
Box::from_raw(self.mem);
Box::from_raw(self.user);
}
}
}
impl MallocSizeOf for FreeTypeLibraryHandle {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe {
(*self.user).size +
ops.malloc_size_of(self.ctx as *const _) +
ops.malloc_size_of(self.mem as *const _) +
ops.malloc_size_of(self.user as *const _)
}
}
}
#[derive(Clone, Debug)]
pub struct FontContextHandle {
// WARNING: FreeTypeLibraryHandle contains raw pointers, is clonable, and also implements
// `Drop`. This field needs to be Rc<> to make sure that the `drop` function is only called
// once, otherwise we'll get crashes. Yuk.
pub ctx: Rc<FreeTypeLibraryHandle>,
}
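// Usage sketch (hypothetical caller): cloning a FontContextHandle only bumps
// the Rc count, so every clone shares one FT_Library and FT_Done_Library runs
// exactly once, when the last clone is dropped:
//
//     let a = FontContextHandle::new();
//     let b = a.clone();   // same FreeTypeLibraryHandle, refcount now 2
//     drop(a);             // library still alive
//     drop(b);             // FreeTypeLibraryHandle::drop frees it here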
impl MallocSizeOf for FontContextHandle {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.ctx.size_of(ops)
}
}
impl FontContextHandle {
pub fn new() -> FontContextHandle {
let user = Box::into_raw(Box::new(User { size: 0 }));
let mem = Box::into_raw(Box::new(FT_MemoryRec_ {
user: user as *mut c_void,
alloc: Some(ft_alloc),
free: Some(ft_free),
realloc: Some(ft_realloc),
}));
unsafe {
let mut ctx: FT_Library = ptr::null_mut();
let result = FT_New_Library(mem, &mut ctx);
if!succeeded(result) |
FT_Add_Default_Modules(ctx);
FontContextHandle {
ctx: Rc::new(FreeTypeLibraryHandle {
ctx: ctx,
mem: mem,
user: user,
}),
}
}
}
}
| {
panic!("Unable to initialize FreeType library");
} | conditional_block |
font_context.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use freetype::freetype::FT_Add_Default_Modules;
use freetype::freetype::FT_Done_Library;
use freetype::freetype::FT_Library;
use freetype::freetype::FT_Memory;
use freetype::freetype::FT_MemoryRec_;
use freetype::freetype::FT_New_Library;
use freetype::succeeded;
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use servo_allocator::libc_compat::{malloc, realloc, free};
use servo_allocator::usable_size;
use std::os::raw::{c_long, c_void};
use std::ptr;
use std::rc::Rc;
// We pass a |User| struct -- via an opaque |void*| -- to FreeType each time a new instance is
// created. FreeType passes it back to the ft_alloc/ft_realloc/ft_free callbacks. We use it to
// record the memory usage of each FreeType instance.
pub struct User {
size: usize,
}
extern "C" fn ft_alloc(mem: FT_Memory, req_size: c_long) -> *mut c_void {
unsafe {
let ptr = malloc(req_size as usize);
let ptr = ptr as *mut c_void; // libc::c_void vs std::os::raw::c_void
let actual_size = usable_size(ptr);
let user = (*mem).user as *mut User;
(*user).size += actual_size;
ptr
}
}
extern "C" fn ft_free(mem: FT_Memory, ptr: *mut c_void) {
unsafe {
let actual_size = usable_size(ptr);
let user = (*mem).user as *mut User;
(*user).size -= actual_size;
free(ptr as *mut _);
}
}
extern "C" fn ft_realloc(
mem: FT_Memory,
_old_size: c_long,
new_req_size: c_long,
old_ptr: *mut c_void,
) -> *mut c_void {
unsafe {
let old_actual_size = usable_size(old_ptr);
let new_ptr = realloc(old_ptr as *mut _, new_req_size as usize);
let new_ptr = new_ptr as *mut c_void;
let new_actual_size = usable_size(new_ptr);
let user = (*mem).user as *mut User;
(*user).size += new_actual_size;
(*user).size -= old_actual_size;
new_ptr
}
}
// A |*mut User| field in a struct triggers a "use of `#[derive]` with a raw pointer" warning from
// rustc. But using a typedef avoids this, so...
pub type UserPtr = *mut User;
// WARNING: We need to be careful how we use this struct. See the comment about Rc<> in
// FontContextHandle.
#[derive(Clone, Debug)]
pub struct FreeTypeLibraryHandle {
pub ctx: FT_Library,
mem: FT_Memory,
user: UserPtr,
}
impl Drop for FreeTypeLibraryHandle {
fn drop(&mut self) {
assert!(!self.ctx.is_null());
unsafe {
FT_Done_Library(self.ctx);
Box::from_raw(self.mem);
Box::from_raw(self.user);
}
}
}
impl MallocSizeOf for FreeTypeLibraryHandle {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe {
(*self.user).size +
ops.malloc_size_of(self.ctx as *const _) +
ops.malloc_size_of(self.mem as *const _) +
ops.malloc_size_of(self.user as *const _)
}
}
}
#[derive(Clone, Debug)]
pub struct FontContextHandle {
// WARNING: FreeTypeLibraryHandle contains raw pointers, is clonable, and also implements
// `Drop`. This field needs to be Rc<> to make sure that the `drop` function is only called
// once, otherwise we'll get crashes. Yuk.
pub ctx: Rc<FreeTypeLibraryHandle>,
}
impl MallocSizeOf for FontContextHandle {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.ctx.size_of(ops) | }
impl FontContextHandle {
pub fn new() -> FontContextHandle {
let user = Box::into_raw(Box::new(User { size: 0 }));
let mem = Box::into_raw(Box::new(FT_MemoryRec_ {
user: user as *mut c_void,
alloc: Some(ft_alloc),
free: Some(ft_free),
realloc: Some(ft_realloc),
}));
unsafe {
let mut ctx: FT_Library = ptr::null_mut();
let result = FT_New_Library(mem, &mut ctx);
            if !succeeded(result) {
panic!("Unable to initialize FreeType library");
}
FT_Add_Default_Modules(ctx);
FontContextHandle {
ctx: Rc::new(FreeTypeLibraryHandle {
ctx: ctx,
mem: mem,
user: user,
}),
}
}
}
} | } | random_line_split |
font_context.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use freetype::freetype::FT_Add_Default_Modules;
use freetype::freetype::FT_Done_Library;
use freetype::freetype::FT_Library;
use freetype::freetype::FT_Memory;
use freetype::freetype::FT_MemoryRec_;
use freetype::freetype::FT_New_Library;
use freetype::succeeded;
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use servo_allocator::libc_compat::{malloc, realloc, free};
use servo_allocator::usable_size;
use std::os::raw::{c_long, c_void};
use std::ptr;
use std::rc::Rc;
// We pass a |User| struct -- via an opaque |void*| -- to FreeType each time a new instance is
// created. FreeType passes it back to the ft_alloc/ft_realloc/ft_free callbacks. We use it to
// record the memory usage of each FreeType instance.
pub struct User {
size: usize,
}
extern "C" fn ft_alloc(mem: FT_Memory, req_size: c_long) -> *mut c_void {
unsafe {
let ptr = malloc(req_size as usize);
let ptr = ptr as *mut c_void; // libc::c_void vs std::os::raw::c_void
let actual_size = usable_size(ptr);
let user = (*mem).user as *mut User;
(*user).size += actual_size;
ptr
}
}
extern "C" fn ft_free(mem: FT_Memory, ptr: *mut c_void) {
unsafe {
let actual_size = usable_size(ptr);
let user = (*mem).user as *mut User;
(*user).size -= actual_size;
free(ptr as *mut _);
}
}
extern "C" fn ft_realloc(
mem: FT_Memory,
_old_size: c_long,
new_req_size: c_long,
old_ptr: *mut c_void,
) -> *mut c_void {
unsafe {
let old_actual_size = usable_size(old_ptr);
let new_ptr = realloc(old_ptr as *mut _, new_req_size as usize);
let new_ptr = new_ptr as *mut c_void;
let new_actual_size = usable_size(new_ptr);
let user = (*mem).user as *mut User;
(*user).size += new_actual_size;
(*user).size -= old_actual_size;
new_ptr
}
}
// A |*mut User| field in a struct triggers a "use of `#[derive]` with a raw pointer" warning from
// rustc. But using a typedef avoids this, so...
pub type UserPtr = *mut User;
// WARNING: We need to be careful how we use this struct. See the comment about Rc<> in
// FontContextHandle.
#[derive(Clone, Debug)]
pub struct FreeTypeLibraryHandle {
pub ctx: FT_Library,
mem: FT_Memory,
user: UserPtr,
}
impl Drop for FreeTypeLibraryHandle {
fn drop(&mut self) {
assert!(!self.ctx.is_null());
unsafe {
FT_Done_Library(self.ctx);
Box::from_raw(self.mem);
Box::from_raw(self.user);
}
}
}
impl MallocSizeOf for FreeTypeLibraryHandle {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe {
(*self.user).size +
ops.malloc_size_of(self.ctx as *const _) +
ops.malloc_size_of(self.mem as *const _) +
ops.malloc_size_of(self.user as *const _)
}
}
}
#[derive(Clone, Debug)]
pub struct FontContextHandle {
// WARNING: FreeTypeLibraryHandle contains raw pointers, is clonable, and also implements
// `Drop`. This field needs to be Rc<> to make sure that the `drop` function is only called
// once, otherwise we'll get crashes. Yuk.
pub ctx: Rc<FreeTypeLibraryHandle>,
}
impl MallocSizeOf for FontContextHandle {
fn | (&self, ops: &mut MallocSizeOfOps) -> usize {
self.ctx.size_of(ops)
}
}
impl FontContextHandle {
pub fn new() -> FontContextHandle {
let user = Box::into_raw(Box::new(User { size: 0 }));
let mem = Box::into_raw(Box::new(FT_MemoryRec_ {
user: user as *mut c_void,
alloc: Some(ft_alloc),
free: Some(ft_free),
realloc: Some(ft_realloc),
}));
unsafe {
let mut ctx: FT_Library = ptr::null_mut();
let result = FT_New_Library(mem, &mut ctx);
            if !succeeded(result) {
panic!("Unable to initialize FreeType library");
}
FT_Add_Default_Modules(ctx);
FontContextHandle {
ctx: Rc::new(FreeTypeLibraryHandle {
ctx: ctx,
mem: mem,
user: user,
}),
}
}
}
}
| size_of | identifier_name |
config.rs | use std::io::{Read, Write};
use std::fs::{self, File};
use std::path::PathBuf;
use std::env::home_dir;
use error::*;
use serde_json;
lazy_static! {
pub static ref CONFIG: Config = read_config().expect("Failed to read configuration.");
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
pub host: String,
pub key: String
}
fn config_path() -> PathBuf {
home_dir()
.expect("Failed to locate home directory.")
.join(".config")
.join("ComfyPush")
.join("client-config.json")
}
pub fn read_config() -> ComfyResult<Config> {
let mut body = String::new();
let mut file = File::open(config_path())?;
file.read_to_string(&mut body)?;
serde_json::from_str(&body)
.map_err(ComfyError::from)
}
pub fn write_config(host: &str, key: &str) -> ComfyResult<Config> | {
let path = config_path();
fs::create_dir_all(&path.parent().unwrap())?; // path.parent() never panics
    let mut file = File::create(path)?;
let body = serde_json::to_string(&Config {
host: host.to_string(),
key: key.to_string()
})?;
    file.write_all(body.as_bytes())?;
    serde_json::from_str(&body)
.map_err(ComfyError::from)
} | identifier_body |
|
config.rs | use std::io::{Read, Write};
use std::fs::{self, File};
use std::path::PathBuf;
use std::env::home_dir;
use error::*;
use serde_json;
lazy_static! {
pub static ref CONFIG: Config = read_config().expect("Failed to read configuration.");
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
pub host: String,
pub key: String
}
fn config_path() -> PathBuf {
home_dir()
.expect("Failed to locate home directory.")
.join(".config")
.join("ComfyPush")
.join("client-config.json")
}
pub fn read_config() -> ComfyResult<Config> {
let mut body = String::new();
let mut file = File::open(config_path())?;
file.read_to_string(&mut body)?;
serde_json::from_str(&body) | pub fn write_config(host: &str, key: &str) -> ComfyResult<Config> {
let path = config_path();
fs::create_dir_all(&path.parent().unwrap())?; // path.parent() never panics
let mut file = File::open(path)?;
let body = serde_json::to_string(&Config {
host: host.to_string(),
key: key.to_string()
})?;
serde_json::from_str(&body)
.map_err(ComfyError::from)
} | .map_err(ComfyError::from)
}
| random_line_split |
config.rs | use std::io::{Read, Write};
use std::fs::{self, File};
use std::path::PathBuf;
use std::env::home_dir;
use error::*;
use serde_json;
lazy_static! {
pub static ref CONFIG: Config = read_config().expect("Failed to read configuration.");
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
pub host: String,
pub key: String
}
fn config_path() -> PathBuf {
home_dir()
.expect("Failed to locate home directory.")
.join(".config")
.join("ComfyPush")
.join("client-config.json")
}
pub fn read_config() -> ComfyResult<Config> {
let mut body = String::new();
let mut file = File::open(config_path())?;
file.read_to_string(&mut body)?;
serde_json::from_str(&body)
.map_err(ComfyError::from)
}
pub fn | (host: &str, key: &str) -> ComfyResult<Config> {
let path = config_path();
fs::create_dir_all(&path.parent().unwrap())?; // path.parent() never panics
    let mut file = File::create(path)?;
let body = serde_json::to_string(&Config {
host: host.to_string(),
key: key.to_string()
})?;
    file.write_all(body.as_bytes())?;
    serde_json::from_str(&body)
.map_err(ComfyError::from)
}
| write_config | identifier_name |
buddy.rs | //! An implementation of a buddy allocator.
use core::ptr;
use core::mem;
use core::cmp;
use intrusive::link::Link;
use intrusive::list::{List, Node};
use Allocator;
pub type FreeBlock = PhantomNode;
pub type FreeList = List<ptr::Unique<FreeBlock>, FreeBlock>;
#[allow(dead_code)]
pub struct Buddy<'a> {
min_block_size: usize,
min_block_order: u32,
max_order: u32,
free_lists: &'a mut [FreeList]
}
impl<'a> Buddy<'a> {
pub fn new(min_block_size: usize, max_order: u32,
free_lists: &'a mut [FreeList]) -> Self {
assert!(min_block_size >= ::core::mem::size_of::<FreeBlock>());
Buddy {
min_block_size: min_block_size,
min_block_order: Buddy::log2(min_block_size) as u32,
max_order: max_order,
free_lists: free_lists,
}
}
#[allow(exceeding_bitshifts)]
pub fn next_power_of_two(mut num: usize) -> usize {
if num == 0 {
return 1;
}
num -= 1;
num |= num >> 1;
num |= num >> 2;
num |= num >> 4;
num |= num >> 8;
num |= num >> 16;
if mem::size_of::<usize>() == mem::size_of::<u64>() {
num |= num >> 32;
}
num + 1
}
pub fn log2(mut num: usize) -> usize {
let mut res = 0;
loop {
num >>= 1;
if num == 0 {
break;
}
res += 1;
}
res
}
/// Add a block of max order
pub unsafe fn add_block(&mut self, start: *mut u8) {
let order = self.max_order;
self.add_block_order(order, start);
}
unsafe fn add_block_order(&mut self, order: u32, start: *mut u8) {
let link = ptr::Unique::new(start as *mut FreeBlock);
self.free_lists[order as usize].push_front(link);
}
unsafe fn split_block(&mut self, block: *mut u8, mut order: u32,
target_order: u32) {
while order > target_order {
order -= 1;
let buddy_offset = self.get_size_from_order(order);
let buddy_ptr = block.offset(buddy_offset as isize);
self.add_block_order(order, buddy_ptr);
}
}
unsafe fn find_and_pop_buddy(&mut self, ptr: *mut u8,
order: u32) -> *mut u8 {
// Max order blocks are not merged
if order == self.max_order {
return ptr::null_mut();
}
let size = self.get_size_from_order(order);
let buddy_ptr = (ptr as usize ^ size) as *mut u8;
let mut cursor = self.free_lists[order as usize].cursor();
let mut found = false;
loop {
match cursor.next_peek() {
None => break,
Some(blk) => {
if blk as *const FreeBlock as *const u8 == buddy_ptr |
}
}
cursor.next();
}
if found {
cursor.remove();
return buddy_ptr
}
ptr::null_mut()
}
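    // Worked example (offsets assumed region-relative): with min_block_size =
    // 32, an order-1 block spans 64 bytes, so the block at offset 0x40 has its
    // buddy at 0x40 ^ 0x40 = 0x00 and vice versa -- the XOR above is its own
    // inverse.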
fn get_order_from_size(&self, mut size: usize, _align: usize) -> u32 {
size = Buddy::next_power_of_two(size);
size = cmp::max(size, self.min_block_size);
Buddy::log2(size) as u32 - self.min_block_order
}
fn get_size_from_order(&self, order: u32) -> usize {
self.min_block_size * 2usize.pow(order)
}
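    // Worked example (using min_block_size = 32, as in the tests below): a
    // 48-byte request rounds up to next_power_of_two(48) = 64, so
    // get_order_from_size returns log2(64) - log2(32) = 1, and
    // get_size_from_order(1) = 32 * 2^1 = 64.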
}
impl<'a> Allocator for Buddy<'a> {
unsafe fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
let order = self.get_order_from_size(size, align);
if order > self.max_order {
return ptr::null_mut();
}
for i in order..(self.max_order + 1) {
let mut tmp = self.free_lists[i as usize].pop_front();
if let Some(block) = tmp.as_mut() {
let ptr = block.get_mut() as *mut FreeBlock as *mut u8;
if i > order {
self.split_block(ptr, i, order);
}
return ptr
}
}
ptr::null_mut()
}
unsafe fn deallocate(&mut self, mut ptr: *mut u8, old_size: usize,
align: usize) {
let mut order = self.get_order_from_size(old_size, align);
loop {
let buddy = self.find_and_pop_buddy(ptr, order);
if buddy == ptr::null_mut() {
break;
}
ptr = cmp::min(ptr, buddy);
order += 1;
}
self.add_block_order(order, ptr);
}
}
pub struct PhantomNode {
prev: Link<PhantomNode>,
next: Link<PhantomNode>,
}
impl Node for PhantomNode {
fn prev(&self) -> &Link<PhantomNode> {
&self.prev
}
fn next(&self) -> &Link<PhantomNode> {
&self.next
}
fn prev_mut(&mut self) -> &mut Link<PhantomNode> {
&mut self.prev
}
fn next_mut(&mut self) -> &mut Link<PhantomNode> {
&mut self.next
}
}
#[cfg(test)]
mod test {
use core::mem;
use core::ptr;
use Allocator;
use super::{Buddy, FreeList};
const HEAP_ALIGN: usize = 4096;
const HEAP_SIZE: usize = 4096;
// HEAP_ORDER = 5 & MIN_BLOCK_SIZE = 32 => max alloc 1024
const HEAP_ORDER: u32 = 5;
const MIN_BLOCK_SIZE: usize = 32;
extern "C" {
fn memalign(alignment: usize, size: usize) -> *mut u8;
fn free(ptr: *mut u8);
}
#[test]
fn test_buddy_next_power_of_two() {
assert_eq!(Buddy::next_power_of_two(0), 1);
assert_eq!(Buddy::next_power_of_two(2), 2);
assert_eq!(Buddy::next_power_of_two(3), 4);
assert_eq!(Buddy::next_power_of_two(5678), 8192);
assert_eq!(Buddy::next_power_of_two(8192), 8192);
}
#[test]
fn test_buddy_log2() {
assert_eq!(Buddy::log2(0), 0);
assert_eq!(Buddy::log2(1), 0);
assert_eq!(Buddy::log2(2), 1);
assert_eq!(Buddy::log2(4), 2);
assert_eq!(Buddy::log2(8), 3);
assert_eq!(Buddy::log2(16), 4);
assert_eq!(Buddy::log2(0x87654321), 31);
}
#[test]
fn test_buddy_alloc_dealloc() {
unsafe {
let heap = memalign(HEAP_ALIGN, HEAP_SIZE);
let mut free_lists: [_; (HEAP_ORDER + 1) as usize];
free_lists = mem::uninitialized();
for i in 0..(HEAP_ORDER + 1) {
free_lists[i as usize] = FreeList::new();
}
let max_size = MIN_BLOCK_SIZE * 2usize.pow(HEAP_ORDER);
let mut alloc = Buddy::new(MIN_BLOCK_SIZE, HEAP_ORDER,
&mut free_lists[..]);
// Heap is 4Kb in 4 * 1Kb
alloc.add_block(heap.offset(0));
alloc.add_block(heap.offset(1024));
alloc.add_block(heap.offset(2048));
alloc.add_block(heap.offset(3072));
// Allocation is too big
assert_eq!(alloc.allocate(max_size + 1, 1), ptr::null_mut());
{
let max_blk = alloc.allocate(max_size, 1);
let last_blk_offset = ((HEAP_SIZE / max_size) - 1) * max_size;
            // Because add_block_order pushes onto the front of the free list,
            // the first block allocated here is the last one that was added.
assert_eq!(max_blk, heap.offset(last_blk_offset as isize));
alloc.deallocate(max_blk, max_size, 1);
}
let blk_32_1 = alloc.allocate(32, 1);
let blk_32_2 = alloc.allocate(32, 1);
assert_eq!(blk_32_1.offset(32), blk_32_2);
let blk_64_1 = alloc.allocate(64, 1);
assert_eq!(blk_32_1.offset(64), blk_64_1);
alloc.deallocate(blk_32_1, 32, 1);
alloc.deallocate(blk_32_2, 32, 1);
let blk_32_1_1 = alloc.allocate(32, 1);
let blk_32_2_1 = alloc.allocate(32, 1);
assert_eq!(blk_32_1_1, blk_32_1);
assert_eq!(blk_32_2_1, blk_32_2);
alloc.deallocate(blk_32_2_1, 32, 1);
alloc.deallocate(blk_32_1_1, 32, 1);
let blk_64_2 = alloc.allocate(64, 1);
assert_eq!(blk_64_2, blk_32_1);
let blk_128 = alloc.allocate(128, 1);
assert_eq!(blk_128, blk_64_1.offset(64));
alloc.deallocate(blk_64_1, 64, 1);
alloc.deallocate(blk_64_2, 64, 1);
alloc.deallocate(blk_128, 128, 1);
free(heap);
}
}
}
| {
found = true;
break;
} | conditional_block |
buddy.rs | //! An implementation of a buddy allocator.
use core::ptr;
use core::mem;
use core::cmp;
use intrusive::link::Link;
use intrusive::list::{List, Node};
use Allocator;
pub type FreeBlock = PhantomNode;
pub type FreeList = List<ptr::Unique<FreeBlock>, FreeBlock>;
#[allow(dead_code)]
pub struct Buddy<'a> {
min_block_size: usize,
min_block_order: u32,
max_order: u32,
free_lists: &'a mut [FreeList]
}
impl<'a> Buddy<'a> {
pub fn new(min_block_size: usize, max_order: u32,
free_lists: &'a mut [FreeList]) -> Self {
assert!(min_block_size >= ::core::mem::size_of::<FreeBlock>());
Buddy {
min_block_size: min_block_size,
min_block_order: Buddy::log2(min_block_size) as u32,
max_order: max_order,
free_lists: free_lists,
} | }
#[allow(exceeding_bitshifts)]
pub fn next_power_of_two(mut num: usize) -> usize {
if num == 0 {
return 1;
}
num -= 1;
num |= num >> 1;
num |= num >> 2;
num |= num >> 4;
num |= num >> 8;
num |= num >> 16;
if mem::size_of::<usize>() == mem::size_of::<u64>() {
num |= num >> 32;
}
num + 1
}
pub fn log2(mut num: usize) -> usize {
let mut res = 0;
loop {
num >>= 1;
if num == 0 {
break;
}
res += 1;
}
res
}
/// Add a block of max order
pub unsafe fn add_block(&mut self, start: *mut u8) {
let order = self.max_order;
self.add_block_order(order, start);
}
unsafe fn add_block_order(&mut self, order: u32, start: *mut u8) {
let link = ptr::Unique::new(start as *mut FreeBlock);
self.free_lists[order as usize].push_front(link);
}
unsafe fn split_block(&mut self, block: *mut u8, mut order: u32,
target_order: u32) {
while order > target_order {
order -= 1;
let buddy_offset = self.get_size_from_order(order);
let buddy_ptr = block.offset(buddy_offset as isize);
self.add_block_order(order, buddy_ptr);
}
}
unsafe fn find_and_pop_buddy(&mut self, ptr: *mut u8,
order: u32) -> *mut u8 {
// Max order blocks are not merged
if order == self.max_order {
return ptr::null_mut();
}
let size = self.get_size_from_order(order);
let buddy_ptr = (ptr as usize ^ size) as *mut u8;
let mut cursor = self.free_lists[order as usize].cursor();
let mut found = false;
loop {
match cursor.next_peek() {
None => break,
Some(blk) => {
if blk as *const FreeBlock as *const u8 == buddy_ptr {
found = true;
break;
}
}
}
cursor.next();
}
if found {
cursor.remove();
return buddy_ptr
}
ptr::null_mut()
}
fn get_order_from_size(&self, mut size: usize, _align: usize) -> u32 {
size = Buddy::next_power_of_two(size);
size = cmp::max(size, self.min_block_size);
Buddy::log2(size) as u32 - self.min_block_order
}
fn get_size_from_order(&self, order: u32) -> usize {
self.min_block_size * 2usize.pow(order)
}
}
impl<'a> Allocator for Buddy<'a> {
unsafe fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
let order = self.get_order_from_size(size, align);
if order > self.max_order {
return ptr::null_mut();
}
for i in order..(self.max_order + 1) {
let mut tmp = self.free_lists[i as usize].pop_front();
if let Some(block) = tmp.as_mut() {
let ptr = block.get_mut() as *mut FreeBlock as *mut u8;
if i > order {
self.split_block(ptr, i, order);
}
return ptr
}
}
ptr::null_mut()
}
unsafe fn deallocate(&mut self, mut ptr: *mut u8, old_size: usize,
align: usize) {
let mut order = self.get_order_from_size(old_size, align);
loop {
let buddy = self.find_and_pop_buddy(ptr, order);
if buddy == ptr::null_mut() {
break;
}
ptr = cmp::min(ptr, buddy);
order += 1;
}
self.add_block_order(order, ptr);
}
}
pub struct PhantomNode {
prev: Link<PhantomNode>,
next: Link<PhantomNode>,
}
impl Node for PhantomNode {
fn prev(&self) -> &Link<PhantomNode> {
&self.prev
}
fn next(&self) -> &Link<PhantomNode> {
&self.next
}
fn prev_mut(&mut self) -> &mut Link<PhantomNode> {
&mut self.prev
}
fn next_mut(&mut self) -> &mut Link<PhantomNode> {
&mut self.next
}
}
#[cfg(test)]
mod test {
use core::mem;
use core::ptr;
use Allocator;
use super::{Buddy, FreeList};
const HEAP_ALIGN: usize = 4096;
const HEAP_SIZE: usize = 4096;
// HEAP_ORDER = 5 & MIN_BLOCK_SIZE = 32 => max alloc 1024
const HEAP_ORDER: u32 = 5;
const MIN_BLOCK_SIZE: usize = 32;
extern "C" {
fn memalign(alignment: usize, size: usize) -> *mut u8;
fn free(ptr: *mut u8);
}
#[test]
fn test_buddy_next_power_of_two() {
assert_eq!(Buddy::next_power_of_two(0), 1);
assert_eq!(Buddy::next_power_of_two(2), 2);
assert_eq!(Buddy::next_power_of_two(3), 4);
assert_eq!(Buddy::next_power_of_two(5678), 8192);
assert_eq!(Buddy::next_power_of_two(8192), 8192);
}
#[test]
fn test_buddy_log2() {
assert_eq!(Buddy::log2(0), 0);
assert_eq!(Buddy::log2(1), 0);
assert_eq!(Buddy::log2(2), 1);
assert_eq!(Buddy::log2(4), 2);
assert_eq!(Buddy::log2(8), 3);
assert_eq!(Buddy::log2(16), 4);
assert_eq!(Buddy::log2(0x87654321), 31);
}
#[test]
fn test_buddy_alloc_dealloc() {
unsafe {
let heap = memalign(HEAP_ALIGN, HEAP_SIZE);
let mut free_lists: [_; (HEAP_ORDER + 1) as usize];
free_lists = mem::uninitialized();
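// Relies on the old mem::uninitialized() API; every element is written by
// the loop below before the array is used.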
for i in 0..(HEAP_ORDER + 1) {
free_lists[i as usize] = FreeList::new();
}
let max_size = MIN_BLOCK_SIZE * 2usize.pow(HEAP_ORDER);
let mut alloc = Buddy::new(MIN_BLOCK_SIZE, HEAP_ORDER,
&mut free_lists[..]);
// Heap is 4Kb in 4 * 1Kb
alloc.add_block(heap.offset(0));
alloc.add_block(heap.offset(1024));
alloc.add_block(heap.offset(2048));
alloc.add_block(heap.offset(3072));
// Allocation is too big
assert_eq!(alloc.allocate(max_size + 1, 1), ptr::null_mut());
{
let max_blk = alloc.allocate(max_size, 1);
let last_blk_offset = ((HEAP_SIZE / max_size) - 1) * max_size;
// Because add_block pushes each block onto the front of the free list,
// the first allocated block is the last one that was added
assert_eq!(max_blk, heap.offset(last_blk_offset as isize));
alloc.deallocate(max_blk, max_size, 1);
}
let blk_32_1 = alloc.allocate(32, 1);
let blk_32_2 = alloc.allocate(32, 1);
assert_eq!(blk_32_1.offset(32), blk_32_2);
let blk_64_1 = alloc.allocate(64, 1);
assert_eq!(blk_32_1.offset(64), blk_64_1);
alloc.deallocate(blk_32_1, 32, 1);
alloc.deallocate(blk_32_2, 32, 1);
let blk_32_1_1 = alloc.allocate(32, 1);
let blk_32_2_1 = alloc.allocate(32, 1);
assert_eq!(blk_32_1_1, blk_32_1);
assert_eq!(blk_32_2_1, blk_32_2);
alloc.deallocate(blk_32_2_1, 32, 1);
alloc.deallocate(blk_32_1_1, 32, 1);
let blk_64_2 = alloc.allocate(64, 1);
assert_eq!(blk_64_2, blk_32_1);
let blk_128 = alloc.allocate(128, 1);
assert_eq!(blk_128, blk_64_1.offset(64));
alloc.deallocate(blk_64_1, 64, 1);
alloc.deallocate(blk_64_2, 64, 1);
alloc.deallocate(blk_128, 128, 1);
free(heap);
}
}
} | random_line_split |
|
buddy.rs | //! An implementation of a buddy allocator.
use core::ptr;
use core::mem;
use core::cmp;
use intrusive::link::Link;
use intrusive::list::{List, Node};
use Allocator;
pub type FreeBlock = PhantomNode;
pub type FreeList = List<ptr::Unique<FreeBlock>, FreeBlock>;
#[allow(dead_code)]
pub struct Buddy<'a> {
min_block_size: usize,
min_block_order: u32,
max_order: u32,
free_lists: &'a mut [FreeList]
}
impl<'a> Buddy<'a> {
pub fn new(min_block_size: usize, max_order: u32,
free_lists: &'a mut [FreeList]) -> Self |
#[allow(exceeding_bitshifts)]
pub fn next_power_of_two(mut num: usize) -> usize {
if num == 0 {
return 1;
}
num -= 1;
num |= num >> 1;
num |= num >> 2;
num |= num >> 4;
num |= num >> 8;
num |= num >> 16;
if mem::size_of::<usize>() == mem::size_of::<u64>() {
num |= num >> 32;
}
num + 1
}
pub fn log2(mut num: usize) -> usize {
let mut res = 0;
loop {
num >>= 1;
if num == 0 {
break;
}
res += 1;
}
res
}
/// Add a block of max order
pub unsafe fn add_block(&mut self, start: *mut u8) {
let order = self.max_order;
self.add_block_order(order, start);
}
unsafe fn add_block_order(&mut self, order: u32, start: *mut u8) {
let link = ptr::Unique::new(start as *mut FreeBlock);
self.free_lists[order as usize].push_front(link);
}
unsafe fn split_block(&mut self, block: *mut u8, mut order: u32,
target_order: u32) {
while order > target_order {
order -= 1;
let buddy_offset = self.get_size_from_order(order);
let buddy_ptr = block.offset(buddy_offset as isize);
self.add_block_order(order, buddy_ptr);
}
}
unsafe fn find_and_pop_buddy(&mut self, ptr: *mut u8,
order: u32) -> *mut u8 {
// Max order blocks are not merged
if order == self.max_order {
return ptr::null_mut();
}
let size = self.get_size_from_order(order);
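// A block's buddy lives at the address obtained by flipping the bit
// corresponding to the block size, hence the XOR below.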
let buddy_ptr = (ptr as usize ^ size) as *mut u8;
let mut cursor = self.free_lists[order as usize].cursor();
let mut found = false;
loop {
match cursor.next_peek() {
None => break,
Some(blk) => {
if blk as *const FreeBlock as *const u8 == buddy_ptr {
found = true;
break;
}
}
}
cursor.next();
}
if found {
cursor.remove();
return buddy_ptr
}
ptr::null_mut()
}
fn get_order_from_size(&self, mut size: usize, _align: usize) -> u32 {
size = Buddy::next_power_of_two(size);
size = cmp::max(size, self.min_block_size);
Buddy::log2(size) as u32 - self.min_block_order
}
fn get_size_from_order(&self, order: u32) -> usize {
self.min_block_size * 2usize.pow(order)
}
}
impl<'a> Allocator for Buddy<'a> {
unsafe fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
let order = self.get_order_from_size(size, align);
if order > self.max_order {
return ptr::null_mut();
}
for i in order..(self.max_order + 1) {
let mut tmp = self.free_lists[i as usize].pop_front();
if let Some(block) = tmp.as_mut() {
let ptr = block.get_mut() as *mut FreeBlock as *mut u8;
if i > order {
self.split_block(ptr, i, order);
}
return ptr
}
}
ptr::null_mut()
}
unsafe fn deallocate(&mut self, mut ptr: *mut u8, old_size: usize,
align: usize) {
let mut order = self.get_order_from_size(old_size, align);
loop {
let buddy = self.find_and_pop_buddy(ptr, order);
if buddy == ptr::null_mut() {
break;
}
ptr = cmp::min(ptr, buddy);
order += 1;
}
self.add_block_order(order, ptr);
}
}
pub struct PhantomNode {
prev: Link<PhantomNode>,
next: Link<PhantomNode>,
}
impl Node for PhantomNode {
fn prev(&self) -> &Link<PhantomNode> {
&self.prev
}
fn next(&self) -> &Link<PhantomNode> {
&self.next
}
fn prev_mut(&mut self) -> &mut Link<PhantomNode> {
&mut self.prev
}
fn next_mut(&mut self) -> &mut Link<PhantomNode> {
&mut self.next
}
}
#[cfg(test)]
mod test {
use core::mem;
use core::ptr;
use Allocator;
use super::{Buddy, FreeList};
const HEAP_ALIGN: usize = 4096;
const HEAP_SIZE: usize = 4096;
// HEAP_ORDER = 5 & MIN_BLOCK_SIZE = 32 => max alloc 1024
const HEAP_ORDER: u32 = 5;
const MIN_BLOCK_SIZE: usize = 32;
extern "C" {
fn memalign(alignment: usize, size: usize) -> *mut u8;
fn free(ptr: *mut u8);
}
#[test]
fn test_buddy_next_power_of_two() {
assert_eq!(Buddy::next_power_of_two(0), 1);
assert_eq!(Buddy::next_power_of_two(2), 2);
assert_eq!(Buddy::next_power_of_two(3), 4);
assert_eq!(Buddy::next_power_of_two(5678), 8192);
assert_eq!(Buddy::next_power_of_two(8192), 8192);
}
#[test]
fn test_buddy_log2() {
assert_eq!(Buddy::log2(0), 0);
assert_eq!(Buddy::log2(1), 0);
assert_eq!(Buddy::log2(2), 1);
assert_eq!(Buddy::log2(4), 2);
assert_eq!(Buddy::log2(8), 3);
assert_eq!(Buddy::log2(16), 4);
assert_eq!(Buddy::log2(0x87654321), 31);
}
#[test]
fn test_buddy_alloc_dealloc() {
unsafe {
let heap = memalign(HEAP_ALIGN, HEAP_SIZE);
let mut free_lists: [_; (HEAP_ORDER + 1) as usize];
free_lists = mem::uninitialized();
for i in 0..(HEAP_ORDER + 1) {
free_lists[i as usize] = FreeList::new();
}
let max_size = MIN_BLOCK_SIZE * 2usize.pow(HEAP_ORDER);
let mut alloc = Buddy::new(MIN_BLOCK_SIZE, HEAP_ORDER,
&mut free_lists[..]);
// Heap is 4Kb in 4 * 1Kb
alloc.add_block(heap.offset(0));
alloc.add_block(heap.offset(1024));
alloc.add_block(heap.offset(2048));
alloc.add_block(heap.offset(3072));
// Allocation is too big
assert_eq!(alloc.allocate(max_size + 1, 1), ptr::null_mut());
{
let max_blk = alloc.allocate(max_size, 1);
let last_blk_offset = ((HEAP_SIZE / max_size) - 1) * max_size;
// Because add_block pushes each block onto the front of the free list,
// the first allocated block is the last one that was added
assert_eq!(max_blk, heap.offset(last_blk_offset as isize));
alloc.deallocate(max_blk, max_size, 1);
}
let blk_32_1 = alloc.allocate(32, 1);
let blk_32_2 = alloc.allocate(32, 1);
assert_eq!(blk_32_1.offset(32), blk_32_2);
let blk_64_1 = alloc.allocate(64, 1);
assert_eq!(blk_32_1.offset(64), blk_64_1);
alloc.deallocate(blk_32_1, 32, 1);
alloc.deallocate(blk_32_2, 32, 1);
let blk_32_1_1 = alloc.allocate(32, 1);
let blk_32_2_1 = alloc.allocate(32, 1);
assert_eq!(blk_32_1_1, blk_32_1);
assert_eq!(blk_32_2_1, blk_32_2);
alloc.deallocate(blk_32_2_1, 32, 1);
alloc.deallocate(blk_32_1_1, 32, 1);
let blk_64_2 = alloc.allocate(64, 1);
assert_eq!(blk_64_2, blk_32_1);
let blk_128 = alloc.allocate(128, 1);
assert_eq!(blk_128, blk_64_1.offset(64));
alloc.deallocate(blk_64_1, 64, 1);
alloc.deallocate(blk_64_2, 64, 1);
alloc.deallocate(blk_128, 128, 1);
free(heap);
}
}
}
| {
assert!(min_block_size >= ::core::mem::size_of::<FreeBlock>());
Buddy {
min_block_size: min_block_size,
min_block_order: Buddy::log2(min_block_size) as u32,
max_order: max_order,
free_lists: free_lists,
}
} | identifier_body |
buddy.rs | //! An implementation of a buddy allocator.
use core::ptr;
use core::mem;
use core::cmp;
use intrusive::link::Link;
use intrusive::list::{List, Node};
use Allocator;
pub type FreeBlock = PhantomNode;
pub type FreeList = List<ptr::Unique<FreeBlock>, FreeBlock>;
#[allow(dead_code)]
pub struct Buddy<'a> {
min_block_size: usize,
min_block_order: u32,
max_order: u32,
free_lists: &'a mut [FreeList]
}
impl<'a> Buddy<'a> {
pub fn new(min_block_size: usize, max_order: u32,
free_lists: &'a mut [FreeList]) -> Self {
assert!(min_block_size >= ::core::mem::size_of::<FreeBlock>());
Buddy {
min_block_size: min_block_size,
min_block_order: Buddy::log2(min_block_size) as u32,
max_order: max_order,
free_lists: free_lists,
}
}
#[allow(exceeding_bitshifts)]
pub fn next_power_of_two(mut num: usize) -> usize {
if num == 0 {
return 1;
}
num -= 1;
num |= num >> 1;
num |= num >> 2;
num |= num >> 4;
num |= num >> 8;
num |= num >> 16;
if mem::size_of::<usize>() == mem::size_of::<u64>() {
num |= num >> 32;
}
num + 1
}
pub fn log2(mut num: usize) -> usize {
let mut res = 0;
loop {
num >>= 1;
if num == 0 {
break;
}
res += 1;
}
res
}
/// Add a block of max order
pub unsafe fn add_block(&mut self, start: *mut u8) {
let order = self.max_order;
self.add_block_order(order, start);
}
unsafe fn | (&mut self, order: u32, start: *mut u8) {
let link = ptr::Unique::new(start as *mut FreeBlock);
self.free_lists[order as usize].push_front(link);
}
unsafe fn split_block(&mut self, block: *mut u8, mut order: u32,
target_order: u32) {
while order > target_order {
order -= 1;
let buddy_offset = self.get_size_from_order(order);
let buddy_ptr = block.offset(buddy_offset as isize);
self.add_block_order(order, buddy_ptr);
}
}
unsafe fn find_and_pop_buddy(&mut self, ptr: *mut u8,
order: u32) -> *mut u8 {
// Max order blocks are not merged
if order == self.max_order {
return ptr::null_mut();
}
let size = self.get_size_from_order(order);
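// A block's buddy lives at the address obtained by flipping the bit
// corresponding to the block size, hence the XOR below.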
let buddy_ptr = (ptr as usize ^ size) as *mut u8;
let mut cursor = self.free_lists[order as usize].cursor();
let mut found = false;
loop {
match cursor.next_peek() {
None => break,
Some(blk) => {
if blk as *const FreeBlock as *const u8 == buddy_ptr {
found = true;
break;
}
}
}
cursor.next();
}
if found {
cursor.remove();
return buddy_ptr
}
ptr::null_mut()
}
fn get_order_from_size(&self, mut size: usize, _align: usize) -> u32 {
size = Buddy::next_power_of_two(size);
size = cmp::max(size, self.min_block_size);
Buddy::log2(size) as u32 - self.min_block_order
}
fn get_size_from_order(&self, order: u32) -> usize {
self.min_block_size * 2usize.pow(order)
}
}
impl<'a> Allocator for Buddy<'a> {
unsafe fn allocate(&mut self, size: usize, align: usize) -> *mut u8 {
let order = self.get_order_from_size(size, align);
if order > self.max_order {
return ptr::null_mut();
}
for i in order..(self.max_order + 1) {
let mut tmp = self.free_lists[i as usize].pop_front();
if let Some(block) = tmp.as_mut() {
let ptr = block.get_mut() as *mut FreeBlock as *mut u8;
if i > order {
self.split_block(ptr, i, order);
}
return ptr
}
}
ptr::null_mut()
}
unsafe fn deallocate(&mut self, mut ptr: *mut u8, old_size: usize,
align: usize) {
let mut order = self.get_order_from_size(old_size, align);
loop {
let buddy = self.find_and_pop_buddy(ptr, order);
if buddy == ptr::null_mut() {
break;
}
ptr = cmp::min(ptr, buddy);
order += 1;
}
self.add_block_order(order, ptr);
}
}
pub struct PhantomNode {
prev: Link<PhantomNode>,
next: Link<PhantomNode>,
}
impl Node for PhantomNode {
fn prev(&self) -> &Link<PhantomNode> {
&self.prev
}
fn next(&self) -> &Link<PhantomNode> {
&self.next
}
fn prev_mut(&mut self) -> &mut Link<PhantomNode> {
&mut self.prev
}
fn next_mut(&mut self) -> &mut Link<PhantomNode> {
&mut self.next
}
}
#[cfg(test)]
mod test {
use core::mem;
use core::ptr;
use Allocator;
use super::{Buddy, FreeList};
const HEAP_ALIGN: usize = 4096;
const HEAP_SIZE: usize = 4096;
// HEAP_ORDER = 5 & MIN_BLOCK_SIZE = 32 => max alloc 1024
const HEAP_ORDER: u32 = 5;
const MIN_BLOCK_SIZE: usize = 32;
extern "C" {
fn memalign(alignment: usize, size: usize) -> *mut u8;
fn free(ptr: *mut u8);
}
#[test]
fn test_buddy_next_power_of_two() {
assert_eq!(Buddy::next_power_of_two(0), 1);
assert_eq!(Buddy::next_power_of_two(2), 2);
assert_eq!(Buddy::next_power_of_two(3), 4);
assert_eq!(Buddy::next_power_of_two(5678), 8192);
assert_eq!(Buddy::next_power_of_two(8192), 8192);
}
#[test]
fn test_buddy_log2() {
assert_eq!(Buddy::log2(0), 0);
assert_eq!(Buddy::log2(1), 0);
assert_eq!(Buddy::log2(2), 1);
assert_eq!(Buddy::log2(4), 2);
assert_eq!(Buddy::log2(8), 3);
assert_eq!(Buddy::log2(16), 4);
assert_eq!(Buddy::log2(0x87654321), 31);
}
#[test]
fn test_buddy_alloc_dealloc() {
unsafe {
let heap = memalign(HEAP_ALIGN, HEAP_SIZE);
let mut free_lists: [_; (HEAP_ORDER + 1) as usize];
free_lists = mem::uninitialized();
for i in 0..(HEAP_ORDER + 1) {
free_lists[i as usize] = FreeList::new();
}
let max_size = MIN_BLOCK_SIZE * 2usize.pow(HEAP_ORDER);
let mut alloc = Buddy::new(MIN_BLOCK_SIZE, HEAP_ORDER,
&mut free_lists[..]);
// Heap is 4Kb in 4 * 1Kb
alloc.add_block(heap.offset(0));
alloc.add_block(heap.offset(1024));
alloc.add_block(heap.offset(2048));
alloc.add_block(heap.offset(3072));
// Allocation is too big
assert_eq!(alloc.allocate(max_size + 1, 1), ptr::null_mut());
{
let max_blk = alloc.allocate(max_size, 1);
let last_blk_offset = ((HEAP_SIZE / max_size) - 1) * max_size;
// Because add_block pushes each block onto the front of the free list,
// the first allocated block is the last one that was added
assert_eq!(max_blk, heap.offset(last_blk_offset as isize));
alloc.deallocate(max_blk, max_size, 1);
}
let blk_32_1 = alloc.allocate(32, 1);
let blk_32_2 = alloc.allocate(32, 1);
assert_eq!(blk_32_1.offset(32), blk_32_2);
let blk_64_1 = alloc.allocate(64, 1);
assert_eq!(blk_32_1.offset(64), blk_64_1);
alloc.deallocate(blk_32_1, 32, 1);
alloc.deallocate(blk_32_2, 32, 1);
let blk_32_1_1 = alloc.allocate(32, 1);
let blk_32_2_1 = alloc.allocate(32, 1);
assert_eq!(blk_32_1_1, blk_32_1);
assert_eq!(blk_32_2_1, blk_32_2);
alloc.deallocate(blk_32_2_1, 32, 1);
alloc.deallocate(blk_32_1_1, 32, 1);
let blk_64_2 = alloc.allocate(64, 1);
assert_eq!(blk_64_2, blk_32_1);
let blk_128 = alloc.allocate(128, 1);
assert_eq!(blk_128, blk_64_1.offset(64));
alloc.deallocate(blk_64_1, 64, 1);
alloc.deallocate(blk_64_2, 64, 1);
alloc.deallocate(blk_128, 128, 1);
free(heap);
}
}
}
| add_block_order | identifier_name |
mod.rs | // Copyright 2014 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use middle::typing::inference::*;
use middle::typing::bottom_up_unit::*;
use middle::typing::top_down_unit::*;
use middle::typing::bottom_up_tuple::*;
// use middle::typing::printer::*;
use middle::typing::ast::*;
use middle::typing::recursive_type::*;
use monad::partial::Partial;
pub mod ast;
mod inference;
mod bottom_up_tuple; |
pub fn type_inference(cx: &ExtCtxt, agrammar: AGrammar) -> Partial<Grammar> {
let mut grammar = Grammar {
name: agrammar.name,
rules: HashMap::with_capacity(agrammar.rules.len()),
rust_functions: agrammar.rust_functions,
rust_items: agrammar.rust_items,
attributes: agrammar.attributes
};
InferenceEngine::infer(&mut grammar, agrammar.rules);
bottom_up_unit_inference(&mut grammar);
top_down_unit_inference(&mut grammar);
// print_annotated_rules(&grammar);
recursive_type_analysis(cx, grammar)
.and_then(|grammar| bottom_up_tuple_inference(grammar))
} | mod bottom_up_unit;
mod top_down_unit;
mod recursive_type;
// mod printer; | random_line_split |
mod.rs | // Copyright 2014 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use middle::typing::inference::*;
use middle::typing::bottom_up_unit::*;
use middle::typing::top_down_unit::*;
use middle::typing::bottom_up_tuple::*;
// use middle::typing::printer::*;
use middle::typing::ast::*;
use middle::typing::recursive_type::*;
use monad::partial::Partial;
pub mod ast;
mod inference;
mod bottom_up_tuple;
mod bottom_up_unit;
mod top_down_unit;
mod recursive_type;
// mod printer;
pub fn type_inference(cx: &ExtCtxt, agrammar: AGrammar) -> Partial<Grammar> | {
let mut grammar = Grammar {
name: agrammar.name,
rules: HashMap::with_capacity(agrammar.rules.len()),
rust_functions: agrammar.rust_functions,
rust_items: agrammar.rust_items,
attributes: agrammar.attributes
};
InferenceEngine::infer(&mut grammar, agrammar.rules);
bottom_up_unit_inference(&mut grammar);
top_down_unit_inference(&mut grammar);
// print_annotated_rules(&grammar);
recursive_type_analysis(cx, grammar)
.and_then(|grammar| bottom_up_tuple_inference(grammar))
} | identifier_body |
|
mod.rs | // Copyright 2014 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use middle::typing::inference::*;
use middle::typing::bottom_up_unit::*;
use middle::typing::top_down_unit::*;
use middle::typing::bottom_up_tuple::*;
// use middle::typing::printer::*;
use middle::typing::ast::*;
use middle::typing::recursive_type::*;
use monad::partial::Partial;
pub mod ast;
mod inference;
mod bottom_up_tuple;
mod bottom_up_unit;
mod top_down_unit;
mod recursive_type;
// mod printer;
pub fn | (cx: &ExtCtxt, agrammar: AGrammar) -> Partial<Grammar> {
let mut grammar = Grammar {
name: agrammar.name,
rules: HashMap::with_capacity(agrammar.rules.len()),
rust_functions: agrammar.rust_functions,
rust_items: agrammar.rust_items,
attributes: agrammar.attributes
};
InferenceEngine::infer(&mut grammar, agrammar.rules);
bottom_up_unit_inference(&mut grammar);
top_down_unit_inference(&mut grammar);
// print_annotated_rules(&grammar);
recursive_type_analysis(cx, grammar)
.and_then(|grammar| bottom_up_tuple_inference(grammar))
}
| type_inference | identifier_name |
buffer.rs | use std::io;
use crossbeam::sync::MsQueue;
// Ensures that we have enough buffers to keep workers busy.
const TOTAL_BUFFERS_MULTIPLICATIVE: usize = 2;
const TOTAL_BUFFERS_ADDITIVE: usize = 0;
/// Stores a set number of piece buffers to be used and re-used.
pub struct PieceBuffers {
piece_queue: MsQueue<PieceBuffer>,
}
impl PieceBuffers {
/// Create a new queue filled with a number of piece buffers based on the number of workers.
pub fn new(piece_length: usize, num_workers: usize) -> PieceBuffers {
let piece_queue = MsQueue::new();
let total_buffers = calculate_total_buffers(num_workers);
for _ in 0..total_buffers {
piece_queue.push(PieceBuffer::new(piece_length));
}
PieceBuffers { piece_queue: piece_queue }
}
/// Checkin the given piece buffer to be re-used.
pub fn checkin(&self, mut buffer: PieceBuffer) {
buffer.bytes_read = 0;
self.piece_queue.push(buffer);
}
/// Checkout a piece buffer (possibly blocking) to be used.
pub fn checkout(&self) -> PieceBuffer {
self.piece_queue.pop()
}
}
/// Calculates the optimal number of piece buffers given the number of workers.
fn | (num_workers: usize) -> usize {
num_workers * TOTAL_BUFFERS_MULTIPLICATIVE + TOTAL_BUFFERS_ADDITIVE
}
// ----------------------------------------------------------------------------//
/// Piece buffer that can be filled up until it contains a full piece.
#[derive(PartialEq, Eq)]
pub struct PieceBuffer {
buffer: Vec<u8>,
bytes_read: usize,
}
impl PieceBuffer {
/// Create a new piece buffer.
fn new(piece_length: usize) -> PieceBuffer {
PieceBuffer {
buffer: vec![0u8; piece_length],
bytes_read: 0,
}
}
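// Fills the unwritten tail of the internal buffer via the callback and
// returns how many bytes were appended.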
pub fn write_bytes<C>(&mut self, mut callback: C) -> io::Result<usize>
where C: FnMut(&mut [u8]) -> io::Result<usize>
{
let new_bytes_read = try!(callback(&mut self.buffer[self.bytes_read..]));
self.bytes_read += new_bytes_read;
Ok(new_bytes_read)
}
/// Whether or not the given piece buffer is full.
pub fn is_whole(&self) -> bool {
self.bytes_read == self.buffer.len()
}
/// Whether or not the given piece buffer is empty.
pub fn is_empty(&self) -> bool {
self.bytes_read == 0
}
/// Access the piece buffer as a byte slice.
pub fn as_slice(&self) -> &[u8] {
&self.buffer[..self.bytes_read]
}
}
| calculate_total_buffers | identifier_name |
buffer.rs | use std::io;
use crossbeam::sync::MsQueue;
// Ensures that we have enough buffers to keep workers busy.
const TOTAL_BUFFERS_MULTIPLICATIVE: usize = 2;
const TOTAL_BUFFERS_ADDITIVE: usize = 0;
/// Stores a set number of piece buffers to be used and re-used.
pub struct PieceBuffers {
piece_queue: MsQueue<PieceBuffer>,
}
impl PieceBuffers {
/// Create a new queue filled with a number of piece buffers based on the number of workers.
pub fn new(piece_length: usize, num_workers: usize) -> PieceBuffers {
let piece_queue = MsQueue::new();
let total_buffers = calculate_total_buffers(num_workers);
for _ in 0..total_buffers {
piece_queue.push(PieceBuffer::new(piece_length));
}
PieceBuffers { piece_queue: piece_queue }
}
/// Checkin the given piece buffer to be re-used.
pub fn checkin(&self, mut buffer: PieceBuffer) {
buffer.bytes_read = 0;
self.piece_queue.push(buffer);
}
/// Checkout a piece buffer (possibly blocking) to be used.
pub fn checkout(&self) -> PieceBuffer |
}
/// Calculates the optimal number of piece buffers given the number of workers.
fn calculate_total_buffers(num_workers: usize) -> usize {
num_workers * TOTAL_BUFFERS_MULTIPLICATIVE + TOTAL_BUFFERS_ADDITIVE
}
// ----------------------------------------------------------------------------//
/// Piece buffer that can be filled up until it contains a full piece.
#[derive(PartialEq, Eq)]
pub struct PieceBuffer {
buffer: Vec<u8>,
bytes_read: usize,
}
impl PieceBuffer {
/// Create a new piece buffer.
fn new(piece_length: usize) -> PieceBuffer {
PieceBuffer {
buffer: vec![0u8; piece_length],
bytes_read: 0,
}
}
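// Fills the unwritten tail of the internal buffer via the callback and
// returns how many bytes were appended.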
pub fn write_bytes<C>(&mut self, mut callback: C) -> io::Result<usize>
where C: FnMut(&mut [u8]) -> io::Result<usize>
{
let new_bytes_read = try!(callback(&mut self.buffer[self.bytes_read..]));
self.bytes_read += new_bytes_read;
Ok(new_bytes_read)
}
/// Whether or not the given piece buffer is full.
pub fn is_whole(&self) -> bool {
self.bytes_read == self.buffer.len()
}
/// Whether or not the given piece buffer is empty.
pub fn is_empty(&self) -> bool {
self.bytes_read == 0
}
/// Access the piece buffer as a byte slice.
pub fn as_slice(&self) -> &[u8] {
&self.buffer[..self.bytes_read]
}
}
| {
self.piece_queue.pop()
} | identifier_body |
buffer.rs | use std::io;
use crossbeam::sync::MsQueue;
// Ensures that we have enough buffers to keep workers busy.
const TOTAL_BUFFERS_MULTIPLICATIVE: usize = 2;
const TOTAL_BUFFERS_ADDITIVE: usize = 0;
/// Stores a set number of piece buffers to be used and re-used.
pub struct PieceBuffers {
piece_queue: MsQueue<PieceBuffer>,
}
impl PieceBuffers {
/// Create a new queue filled with a number of piece buffers based on the number of workers.
pub fn new(piece_length: usize, num_workers: usize) -> PieceBuffers {
let piece_queue = MsQueue::new();
let total_buffers = calculate_total_buffers(num_workers);
for _ in 0..total_buffers {
piece_queue.push(PieceBuffer::new(piece_length)); | PieceBuffers { piece_queue: piece_queue }
}
/// Checkin the given piece buffer to be re-used.
pub fn checkin(&self, mut buffer: PieceBuffer) {
buffer.bytes_read = 0;
self.piece_queue.push(buffer);
}
/// Checkout a piece buffer (possibly blocking) to be used.
pub fn checkout(&self) -> PieceBuffer {
self.piece_queue.pop()
}
}
/// Calculates the optimal number of piece buffers given the number of workers.
fn calculate_total_buffers(num_workers: usize) -> usize {
num_workers * TOTAL_BUFFERS_MULTIPLICATIVE + TOTAL_BUFFERS_ADDITIVE
}
// ----------------------------------------------------------------------------//
/// Piece buffer that can be filled up until it contains a full piece.
#[derive(PartialEq, Eq)]
pub struct PieceBuffer {
buffer: Vec<u8>,
bytes_read: usize,
}
impl PieceBuffer {
/// Create a new piece buffer.
fn new(piece_length: usize) -> PieceBuffer {
PieceBuffer {
buffer: vec![0u8; piece_length],
bytes_read: 0,
}
}
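// Fills the unwritten tail of the internal buffer via the callback and
// returns how many bytes were appended.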
pub fn write_bytes<C>(&mut self, mut callback: C) -> io::Result<usize>
where C: FnMut(&mut [u8]) -> io::Result<usize>
{
let new_bytes_read = try!(callback(&mut self.buffer[self.bytes_read..]));
self.bytes_read += new_bytes_read;
Ok(new_bytes_read)
}
/// Whether or not the given piece buffer is full.
pub fn is_whole(&self) -> bool {
self.bytes_read == self.buffer.len()
}
/// Whether or not the given piece buffer is empty.
pub fn is_empty(&self) -> bool {
self.bytes_read == 0
}
/// Access the piece buffer as a byte slice.
pub fn as_slice(&self) -> &[u8] {
&self.buffer[..self.bytes_read]
}
} | }
| random_line_split |
day_10.rs | pub use tdd_kata::stack_kata::day_10::Stack;
describe! stack_tests {
before_each {
let mut stack: Stack<i32> = Stack::new(20);
}
it "should create a new empty stack" {
assert_eq!(stack.size(), 0);
assert!(stack.is_empty());
}
it "should increase stack size when push into it" {
let old_size = stack.size();
stack.push(10);
assert_eq!(stack.size(), old_size + 1);
assert!(!stack.is_empty()); |
it "should decrease stack size when pop from it" {
stack.push(20);
let old_size = stack.size();
stack.pop();
assert_eq!(stack.size(), old_size - 1);
}
it "should pop value that was pushed into the stack" {
stack.push(20);
assert_eq!(stack.pop(), Some(20));
stack.push(10);
assert_eq!(stack.pop(), Some(10));
}
it "should pop value last that was pushed first into the stack" {
stack.push(10);
stack.push(20);
stack.push(30);
stack.push(40);
stack.push(50);
assert_eq!(stack.pop(), Some(50));
assert_eq!(stack.pop(), Some(40));
assert_eq!(stack.pop(), Some(30));
assert_eq!(stack.pop(), Some(20));
assert_eq!(stack.pop(), Some(10));
}
} | } | random_line_split |
commands.rs | #![allow(dead_code)]
extern crate jam;
extern crate cgmath;
extern crate time;
extern crate glutin;
extern crate image;
extern crate gfx_device_gl;
#[macro_use]
extern crate aphid;
use std::f64::consts::PI;
use std::path::{Path, PathBuf};
use cgmath::Rad;
use aphid::{HashSet, Seconds};
use jam::color;
use jam::{Vec3, Vec2, JamResult, Dimensions, Color, rgb, Camera, InputState, FontDirectory};
use jam::render::*;
use jam::render::gfx::{Renderer, GeometryBuffer, OpenGLRenderer, construct_opengl_renderer};
use aphid::HashMap;
fn main() {
let resources_path = PathBuf::from("resources");
let shader_pair = ShaderPair::for_paths("resources/shader/fat.vert", "resources/shader/fat.frag");
let texture_dir = TextureDirectory::for_path("resources/textures", hashset!["png".into()]);
let font_dir = FontDirectory::for_path("resources/fonts");
let file_resources = FileResources {
resources: resources_path,
shader_pair : shader_pair,
texture_directory: texture_dir,
font_directory: font_dir,
};
println!("creating renderer");
let renderer = construct_opengl_renderer(file_resources, (800, 600), true, "commands example".into()).expect("a renderer");
println!("done creating renderer");
let mut app = App {
name: "mixalot".into(),
camera: Camera {
at: Vec3::new(0.0, 0.0, 0.0),
pitch: Rad(PI / 4.0_f64),
viewport: Dimensions {
pixels: (800,600),
points: (800,600),
},
points_per_unit: 16.0 * 1.0,
},
zoom: 1.0,
points_per_unit: 16.0,
n: 0, // frame counter
renderer: renderer,
geometry: HashMap::default(),
};
app.run();
}
struct App {
name : String,
camera : Camera,
zoom : f64,
points_per_unit : f64,
n : u64,
renderer: OpenGLRenderer,
geometry : HashMap<String, GeometryBuffer<gfx_device_gl::Resources>>,
}
impl App {
fn run(&mut self) {
let mut last_time = time::precise_time_ns();
'main: loop {
let (dimensions, input_state) = self.renderer.begin_frame(rgb(132, 193, 255));
if input_state.close {
break;
}
// println!("dimensions -> {:?}", dimensions);
let time = time::precise_time_ns();
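// precise_time_ns gives nanoseconds; the delta below is in milliseconds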
let delta_time = ((time - last_time) as f64) / 1_000_000.0;
self.update(&input_state, dimensions, delta_time);
let res = self.render().expect("successful rendering");
last_time = time;
}
}
fn units_per_point(&self) -> f64 {
1.0 / self.points_per_unit
}
fn | (&self) -> GeometryTesselator {
let upp = self.units_per_point();
let tesselator_scale = Vec3::new(upp, upp, upp);
GeometryTesselator::new(tesselator_scale)
}
fn update(&mut self, input_state:&InputState, dimensions:Dimensions, delta_time: Seconds) {
use glutin::VirtualKeyCode;
self.n += 1;
self.camera.at = Vec3::new(17.0, 0.0, 17.0);
// self.camera.at = Vec3::new(8.0, 0.0, 8.0);
self.camera.points_per_unit = self.points_per_unit * self.zoom;
self.camera.viewport = dimensions;
// println!("Camera viewpoinrt -> {:?}", self.camera.viewport);
// let (mx, my) = input_state.mouse.at;
// let mouse_at = self.camera.ui_line_segment_for_mouse_position(mx, my);
if input_state.keys.pushed.contains(&VirtualKeyCode::P) {
// println!("take a screenshot!");
// let image = self.renderer.screenshot();
// let mut output = std::fs::File::create(&Path::new("screenshot.png")).unwrap();
// image.save(&mut output, image::ImageFormat::PNG).unwrap();
}
}
fn render(&mut self) -> JamResult<()> {
// use jam::font::FontDescription;
// let font_description = FontDescription { family: "Roboto-Medium".into(), pixel_size: (32f64 * self.camera.viewport.scale()) as u32 };
// let loaded = self.renderer.load_font(&font_description);
// match loaded {
// Err(e) => println!("font load error -> {:?}", e),
// Ok(_) => (),
// }
// println!("render with delta -> {:?}", delta_time);
let colors = vec![color::WHITE, color::BLUE, color::RED];
// let (w, h) = self.camera.viewport.pixels;
// let line = self.camera.ray_for_mouse_position((w / 2) as i32, (h / 2) as i32);
// println!("forward line -> {:?}", line);
let an = self.n / 60;
let on_second = (self.n % 60) == 0;
let raster_color = colors[(((an / 16) % 16) % 3) as usize]; // cycles every 16 seconds
if on_second && an % 5 == 0 { // every fifth second
let column = (an / 4) % 4;
let name : String = format!("zone_{}", column);
println!("delete {}", name);
let pred : Box<Fn(&String) -> bool> = Box::new(move |key| key.starts_with(&name));
let keys_to_delete : Vec<_> = self.geometry.keys().filter(|e| pred(e)).cloned().collect();
for key in keys_to_delete.iter() {
self.geometry.remove(key);
}
}
// k.starts_with(&prefix)
let n = (((an % 16) as f64) / 16.0 * 255.0) as u8;
let mut t = self.tesselator();
let mut vertices = Vec::new();
let cache = &mut self.geometry;
let camera = &self.camera;
// note: the closure captures and the cache/camera borrows here are easy to get wrong
// render a grid of various bits of geo
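// each second one cell re-tesselates, draws and caches its geometry; the cell
// offset by half a cycle only re-uploads; every other cell redraws its cached
// geometry with a blend mode picked from its grid position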
for i in 0..16 {
let xo = i % 4;
let zo = i / 4;
let name : String = format!("zone_{}_{}", xo, zo);
if (an % 16) == i && on_second {
raster(&mut t, &mut vertices, raster_color, (xo * 9) as f64, (zo * 9) as f64);
let geo = self.renderer.draw_vertices(&vertices, Uniforms {
transform : down_size_m4(camera.view_projection().into()),
color: color::WHITE,
}, Blend::None)?;
cache.insert(name, geo);
} else if ((an+8) % 16) == i && on_second {
raster(&mut t, &mut vertices, raster_color, (xo * 9) as f64, (zo * 9) as f64);
cache.insert(name, self.renderer.upload(&vertices));
} else {
let rem = (xo + zo) % 3;
let color = match rem {
0 => color::rgba(255,255,255, 128),
1 => color::rgba(255,255,255, 50),
_ => color::WHITE,
};
if let Some(geo) = cache.get(&name) {
let blend = match rem {
0 => Blend::Alpha,
1 => Blend::Add,
_ => Blend::None,
};
self.renderer.draw(geo, Uniforms {
transform: down_size_m4(self.camera.view_projection().into()),
color: color,
},blend)?;
}
}
}
// draw ui text
// if let Some((font, layer)) = self.renderer.get_font(&font_description) {
// // println!("ok we got a font to use to draw layer -> {:?}", layer);
// let scale = 1.0 / self.camera.viewport.scale();
// let mut t = GeometryTesselator::new(Vec3::new(1.0, 1.0, 1.0));
//
// let texture_region = TextureRegion {
// u_min: 0,
// u_max: 128,
// v_min: 0,
// v_max: 128,
// texture_size: 1024,
// };
// t.color = color::WHITE.float_raw();
// t.draw_ui(&mut vertices, &texture_region, 0, 20.0, 20.0, 0.0, 1.0);
//
// let at = Vec2::new(0.0, 400.0);
// t.color = color::BLACK.float_raw();
// text::render_text(
// "Why oh why does a silly cow fly, you idiot.\n\nGo die in a pie.\n\nPls.",
// font,
// layer,
// at,
// -1.0, // i assume this is because our coordinate system is hosed...
// scale,
// &t,
// &mut vertices,
// Some(300.0)
// );
//
// frame.draw_vertices(&vertices, Uniforms {
// transform : down_size_m4(self.camera.ui_projection().into()),
// color: color::WHITE,
// }, Blend::Alpha);
// }
self.renderer.finish_frame().expect("a finished frame");
Ok(())
}
}
fn raster(t: &mut GeometryTesselator, vertices: &mut Vec<Vertex>, color:Color, x:f64, z:f64) {
vertices.clear();
let texture_region = TextureRegion {
u_min: 0,
u_max: 128,
v_min: 0,
v_max: 128,
layer: 0,
texture_size: 1024,
};
let texture_region_small = TextureRegion {
u_min: 16,
u_max: 32,
v_min: 16,
v_max: 32,
layer: 0,
texture_size: 1024,
};
t.color = color.float_raw();
//.h_flip().v_flip()
t.draw_floor_tile(vertices, &texture_region, x, 0.0, z, 0.0);
t.color = color::RED.float_raw();
t.draw_wall_tile(vertices, &texture_region_small, x, 0.0, z, 0.0);
t.color = color::GREEN.float_raw();
t.draw_floor_centre_anchored(vertices, &texture_region_small, x + 2.0, 0.0, z + 2.0, 0.1);
t.color = color::YELLOW.float_raw();
t.draw_floor_centre_anchored_rotated(vertices, &texture_region_small, x + 4.0, 0.0, z + 4.0, 0.0, 0.1);
t.color = color::RED.float_raw();
t.draw_wall_base_anchored(vertices, &texture_region_small, x + 3.0, 0.0, z, 0.0);
t.color = color::YELLOW.float_raw();
t.draw_wall_centre_anchored(vertices, &texture_region_small, x + 5.0, 1.0, z, 0.0);
}
| tesselator | identifier_name |
commands.rs | #![allow(dead_code)]
extern crate jam;
extern crate cgmath;
extern crate time;
extern crate glutin;
extern crate image;
extern crate gfx_device_gl;
#[macro_use]
extern crate aphid;
use std::f64::consts::PI;
use std::path::{Path, PathBuf};
use cgmath::Rad;
use aphid::{HashSet, Seconds};
use jam::color;
use jam::{Vec3, Vec2, JamResult, Dimensions, Color, rgb, Camera, InputState, FontDirectory};
use jam::render::*;
use jam::render::gfx::{Renderer, GeometryBuffer, OpenGLRenderer, construct_opengl_renderer};
use aphid::HashMap;
fn main() {
let resources_path = PathBuf::from("resources");
let shader_pair = ShaderPair::for_paths("resources/shader/fat.vert", "resources/shader/fat.frag");
let texture_dir = TextureDirectory::for_path("resources/textures", hashset!["png".into()]);
let font_dir = FontDirectory::for_path("resources/fonts");
let file_resources = FileResources {
resources: resources_path,
shader_pair : shader_pair,
texture_directory: texture_dir,
font_directory: font_dir,
};
println!("creating renderer");
let renderer = construct_opengl_renderer(file_resources, (800, 600), true, "commands example".into()).expect("a renderer");
println!("done creating renderer");
let mut app = App {
name: "mixalot".into(),
camera: Camera {
at: Vec3::new(0.0, 0.0, 0.0),
pitch: Rad(PI / 4.0_f64),
viewport: Dimensions {
pixels: (800,600),
points: (800,600),
},
points_per_unit: 16.0 * 1.0,
},
zoom: 1.0,
points_per_unit: 16.0,
n: 0, // frame counter
renderer: renderer,
geometry: HashMap::default(),
};
app.run();
}
struct App {
name : String,
camera : Camera,
zoom : f64,
points_per_unit : f64,
n : u64,
renderer: OpenGLRenderer,
geometry : HashMap<String, GeometryBuffer<gfx_device_gl::Resources>>,
}
impl App {
fn run(&mut self) | }
fn units_per_point(&self) -> f64 {
1.0 / self.points_per_unit
}
fn tesselator(&self) -> GeometryTesselator {
let upp = self.units_per_point();
let tesselator_scale = Vec3::new(upp, upp, upp);
GeometryTesselator::new(tesselator_scale)
}
fn update(&mut self, input_state:&InputState, dimensions:Dimensions, delta_time: Seconds) {
use glutin::VirtualKeyCode;
self.n += 1;
self.camera.at = Vec3::new(17.0, 0.0, 17.0);
// self.camera.at = Vec3::new(8.0, 0.0, 8.0);
self.camera.points_per_unit = self.points_per_unit * self.zoom;
self.camera.viewport = dimensions;
// println!("Camera viewpoinrt -> {:?}", self.camera.viewport);
// let (mx, my) = input_state.mouse.at;
// let mouse_at = self.camera.ui_line_segment_for_mouse_position(mx, my);
if input_state.keys.pushed.contains(&VirtualKeyCode::P) {
// println!("take a screenshot!");
// let image = self.renderer.screenshot();
// let mut output = std::fs::File::create(&Path::new("screenshot.png")).unwrap();
// image.save(&mut output, image::ImageFormat::PNG).unwrap();
}
}
fn render(&mut self) -> JamResult<()> {
// use jam::font::FontDescription;
// let font_description = FontDescription { family: "Roboto-Medium".into(), pixel_size: (32f64 * self.camera.viewport.scale()) as u32 };
// let loaded = self.renderer.load_font(&font_description);
// match loaded {
// Err(e) => println!("font load error -> {:?}", e),
// Ok(_) => (),
// }
// println!("render with delta -> {:?}", delta_time);
let colors = vec![color::WHITE, color::BLUE, color::RED];
// let (w, h) = self.camera.viewport.pixels;
// let line = self.camera.ray_for_mouse_position((w / 2) as i32, (h / 2) as i32);
// println!("forward line -> {:?}", line);
let an = self.n / 60;
let on_second = (self.n % 60) == 0;
let raster_color = colors[(((an / 16) % 16) % 3) as usize]; // cycles every 16 seconds
if on_second && an % 5 == 0 { // every fifth second
let column = (an / 4) % 4;
let name : String = format!("zone_{}", column);
println!("delete {}", name);
let pred : Box<Fn(&String) -> bool> = Box::new(move |key| key.starts_with(&name));
let keys_to_delete : Vec<_> = self.geometry.keys().filter(|e| pred(e)).cloned().collect();
for key in keys_to_delete.iter() {
self.geometry.remove(key);
}
}
// k.starts_with(&prefix)
let n = (((an % 16) as f64) / 16.0 * 255.0) as u8;
let mut t = self.tesselator();
let mut vertices = Vec::new();
let cache = &mut self.geometry;
let camera = &self.camera;
// note: the closure captures and the cache/camera borrows here are easy to get wrong
// render a grid of various bits of geo
for i in 0..16 {
let xo = i % 4;
let zo = i / 4;
let name : String = format!("zone_{}_{}", xo, zo);
if (an % 16) == i && on_second {
raster(&mut t, &mut vertices, raster_color, (xo * 9) as f64, (zo * 9) as f64);
let geo = self.renderer.draw_vertices(&vertices, Uniforms {
transform : down_size_m4(camera.view_projection().into()),
color: color::WHITE,
}, Blend::None)?;
cache.insert(name, geo);
} else if ((an+8) % 16) == i && on_second {
raster(&mut t, &mut vertices, raster_color, (xo * 9) as f64, (zo * 9) as f64);
cache.insert(name, self.renderer.upload(&vertices));
} else {
let rem = (xo + zo) % 3;
let color = match rem {
0 => color::rgba(255,255,255, 128),
1 => color::rgba(255,255,255, 50),
_ => color::WHITE,
};
if let Some(geo) = cache.get(&name) {
let blend = match rem {
0 => Blend::Alpha,
1 => Blend::Add,
_ => Blend::None,
};
self.renderer.draw(geo, Uniforms {
transform: down_size_m4(self.camera.view_projection().into()),
color: color,
},blend)?;
}
}
}
// draw ui text
// if let Some((font, layer)) = self.renderer.get_font(&font_description) {
// // println!("ok we got a font to use to draw layer -> {:?}", layer);
// let scale = 1.0 / self.camera.viewport.scale();
// let mut t = GeometryTesselator::new(Vec3::new(1.0, 1.0, 1.0));
//
// let texture_region = TextureRegion {
// u_min: 0,
// u_max: 128,
// v_min: 0,
// v_max: 128,
// texture_size: 1024,
// };
// t.color = color::WHITE.float_raw();
// t.draw_ui(&mut vertices, &texture_region, 0, 20.0, 20.0, 0.0, 1.0);
//
// let at = Vec2::new(0.0, 400.0);
// t.color = color::BLACK.float_raw();
// text::render_text(
// "Why oh why does a silly cow fly, you idiot.\n\nGo die in a pie.\n\nPls.",
// font,
// layer,
// at,
// -1.0, // i assume this is because our coordinate system is hosed...
// scale,
// &t,
// &mut vertices,
// Some(300.0)
// );
//
// frame.draw_vertices(&vertices, Uniforms {
// transform : down_size_m4(self.camera.ui_projection().into()),
// color: color::WHITE,
// }, Blend::Alpha);
// }
self.renderer.finish_frame().expect("a finished frame");
Ok(())
}
}
fn raster(t: &mut GeometryTesselator, vertices: &mut Vec<Vertex>, color:Color, x:f64, z:f64) {
vertices.clear();
let texture_region = TextureRegion {
u_min: 0,
u_max: 128,
v_min: 0,
v_max: 128,
layer: 0,
texture_size: 1024,
};
let texture_region_small = TextureRegion {
u_min: 16,
u_max: 32,
v_min: 16,
v_max: 32,
layer: 0,
texture_size: 1024,
};
t.color = color.float_raw();
//.h_flip().v_flip()
t.draw_floor_tile(vertices, &texture_region, x, 0.0, z, 0.0);
t.color = color::RED.float_raw();
t.draw_wall_tile(vertices, &texture_region_small, x, 0.0, z, 0.0);
t.color = color::GREEN.float_raw();
t.draw_floor_centre_anchored(vertices, &texture_region_small, x + 2.0, 0.0, z + 2.0, 0.1);
t.color = color::YELLOW.float_raw();
t.draw_floor_centre_anchored_rotated(vertices, &texture_region_small, x + 4.0, 0.0, z + 4.0, 0.0, 0.1);
t.color = color::RED.float_raw();
t.draw_wall_base_anchored(vertices, &texture_region_small, x + 3.0, 0.0, z, 0.0);
t.color = color::YELLOW.float_raw();
t.draw_wall_centre_anchored(vertices, &texture_region_small, x + 5.0, 1.0, z, 0.0);
}
| {
let mut last_time = time::precise_time_ns();
'main: loop {
let (dimensions, input_state) = self.renderer.begin_frame(rgb(132, 193, 255));
if input_state.close {
break;
}
// println!("dimensions -> {:?}", dimensions);
let time = time::precise_time_ns();
let delta_time = ((time - last_time) as f64) / 1_000_000.0;
self.update(&input_state, dimensions, delta_time);
let res = self.render().expect("successful rendering");
last_time = time;
} | identifier_body |
commands.rs | #![allow(dead_code)]
extern crate jam;
extern crate cgmath;
extern crate time;
extern crate glutin;
extern crate image;
extern crate gfx_device_gl;
#[macro_use]
extern crate aphid;
use std::f64::consts::PI;
use std::path::{Path, PathBuf};
use cgmath::Rad;
use aphid::{HashSet, Seconds};
use jam::color;
use jam::{Vec3, Vec2, JamResult, Dimensions, Color, rgb, Camera, InputState, FontDirectory};
use jam::render::*;
use jam::render::gfx::{Renderer, GeometryBuffer, OpenGLRenderer, construct_opengl_renderer};
use aphid::HashMap;
fn main() {
let resources_path = PathBuf::from("resources");
let shader_pair = ShaderPair::for_paths("resources/shader/fat.vert", "resources/shader/fat.frag");
let texture_dir = TextureDirectory::for_path("resources/textures", hashset!["png".into()]);
let font_dir = FontDirectory::for_path("resources/fonts");
let file_resources = FileResources {
resources: resources_path,
shader_pair : shader_pair,
texture_directory: texture_dir,
font_directory: font_dir,
};
println!("creating renderer");
let renderer = construct_opengl_renderer(file_resources, (800, 600), true, "commands example".into()).expect("a renderer");
println!("done creating renderer");
let mut app = App {
name: "mixalot".into(),
camera: Camera {
at: Vec3::new(0.0, 0.0, 0.0),
pitch: Rad(PI / 4.0_f64),
viewport: Dimensions {
pixels: (800,600),
points: (800,600),
},
points_per_unit: 16.0 * 1.0,
},
zoom: 1.0,
points_per_unit: 16.0,
n: 0, // frame counter
renderer: renderer,
geometry: HashMap::default(),
};
app.run();
}
struct App {
name : String,
camera : Camera,
zoom : f64,
points_per_unit : f64,
n : u64,
renderer: OpenGLRenderer,
geometry : HashMap<String, GeometryBuffer<gfx_device_gl::Resources>>,
}
impl App {
fn run(&mut self) {
let mut last_time = time::precise_time_ns();
'main: loop {
let (dimensions, input_state) = self.renderer.begin_frame(rgb(132, 193, 255));
if input_state.close {
break;
}
// println!("dimensions -> {:?}", dimensions);
let time = time::precise_time_ns();
let delta_time = ((time - last_time) as f64) / 1_000_000.0;
self.update(&input_state, dimensions, delta_time);
let res = self.render().expect("successful rendering");
last_time = time;
}
}
fn units_per_point(&self) -> f64 {
1.0 / self.points_per_unit
}
fn tesselator(&self) -> GeometryTesselator {
let upp = self.units_per_point();
let tesselator_scale = Vec3::new(upp, upp, upp);
GeometryTesselator::new(tesselator_scale)
}
fn update(&mut self, input_state:&InputState, dimensions:Dimensions, delta_time: Seconds) {
use glutin::VirtualKeyCode;
self.n += 1;
self.camera.at = Vec3::new(17.0, 0.0, 17.0);
// self.camera.at = Vec3::new(8.0, 0.0, 8.0);
self.camera.points_per_unit = self.points_per_unit * self.zoom;
self.camera.viewport = dimensions;
// println!("Camera viewpoinrt -> {:?}", self.camera.viewport);
// let (mx, my) = input_state.mouse.at;
// let mouse_at = self.camera.ui_line_segment_for_mouse_position(mx, my);
if input_state.keys.pushed.contains(&VirtualKeyCode::P) {
// println!("take a screenshot!");
// let image = self.renderer.screenshot();
// let mut output = std::fs::File::create(&Path::new("screenshot.png")).unwrap();
// image.save(&mut output, image::ImageFormat::PNG).unwrap();
}
}
fn render(&mut self) -> JamResult<()> {
// use jam::font::FontDescription;
// let font_description = FontDescription { family: "Roboto-Medium".into(), pixel_size: (32f64 * self.camera.viewport.scale()) as u32 };
// let loaded = self.renderer.load_font(&font_description);
// match loaded {
// Err(e) => println!("font load error -> {:?}", e),
// Ok(_) => (),
// }
// println!("render with delta -> {:?}", delta_time);
let colors = vec![color::WHITE, color::BLUE, color::RED];
// let (w, h) = self.camera.viewport.pixels;
// let line = self.camera.ray_for_mouse_position((w / 2) as i32, (h / 2) as i32);
// println!("forward line -> {:?}", line);
let an = self.n / 60;
let on_second = (self.n % 60) == 0;
let raster_color = colors[(((an / 16) % 16) % 3) as usize]; // cycles every 16 seconds
if on_second && an % 5 == 0 { // every fifth second
let column = (an / 4) % 4;
let name : String = format!("zone_{}", column);
println!("delete {}", name);
let pred : Box<Fn(&String) -> bool> = Box::new(move |key| key.starts_with(&name));
let keys_to_delete : Vec<_> = self.geometry.keys().filter(|e| pred(e)).cloned().collect();
for key in keys_to_delete.iter() {
self.geometry.remove(key);
}
}
// k.starts_with(&prefix)
let n = (((an % 16) as f64) / 16.0 * 255.0) as u8;
let mut t = self.tesselator();
let mut vertices = Vec::new();
let cache = &mut self.geometry;
let camera = &self.camera;
// note: the closure captures and the cache/camera borrows here are easy to get wrong
// render a grid of various bits of geo
for i in 0..16 {
let xo = i % 4;
let zo = i / 4;
let name : String = format!("zone_{}_{}", xo, zo);
if (an % 16) == i && on_second {
raster(&mut t, &mut vertices, raster_color, (xo * 9) as f64, (zo * 9) as f64);
let geo = self.renderer.draw_vertices(&vertices, Uniforms {
transform : down_size_m4(camera.view_projection().into()),
color: color::WHITE,
}, Blend::None)?;
cache.insert(name, geo);
} else if ((an+8) % 16) == i && on_second {
raster(&mut t, &mut vertices, raster_color, (xo * 9) as f64, (zo * 9) as f64);
cache.insert(name, self.renderer.upload(&vertices));
} else {
let rem = (xo + zo) % 3;
let color = match rem {
0 => color::rgba(255,255,255, 128),
1 => color::rgba(255,255,255, 50),
_ => color::WHITE,
};
if let Some(geo) = cache.get(&name) |
}
}
// draw ui text
// if let Some((font, layer)) = self.renderer.get_font(&font_description) {
// // println!("ok we got a font to use to draw layer -> {:?}", layer);
// let scale = 1.0 / self.camera.viewport.scale();
// let mut t = GeometryTesselator::new(Vec3::new(1.0, 1.0, 1.0));
//
// let texture_region = TextureRegion {
// u_min: 0,
// u_max: 128,
// v_min: 0,
// v_max: 128,
// texture_size: 1024,
// };
// t.color = color::WHITE.float_raw();
// t.draw_ui(&mut vertices, &texture_region, 0, 20.0, 20.0, 0.0, 1.0);
//
// let at = Vec2::new(0.0, 400.0);
// t.color = color::BLACK.float_raw();
// text::render_text(
// "Why oh why does a silly cow fly, you idiot.\n\nGo die in a pie.\n\nPls.",
// font,
// layer,
// at,
// -1.0, // i assume this is because our coordinate system is hosed...
// scale,
// &t,
// &mut vertices,
// Some(300.0)
// );
//
// frame.draw_vertices(&vertices, Uniforms {
// transform : down_size_m4(self.camera.ui_projection().into()),
// color: color::WHITE,
// }, Blend::Alpha);
// }
self.renderer.finish_frame().expect("a finished frame");
Ok(())
}
}
fn raster(t: &mut GeometryTesselator, vertices: &mut Vec<Vertex>, color:Color, x:f64, z:f64) {
vertices.clear();
let texture_region = TextureRegion {
u_min: 0,
u_max: 128,
v_min: 0,
v_max: 128,
layer: 0,
texture_size: 1024,
};
let texture_region_small = TextureRegion {
u_min: 16,
u_max: 32,
v_min: 16,
v_max: 32,
layer: 0,
texture_size: 1024,
};
t.color = color.float_raw();
//.h_flip().v_flip()
t.draw_floor_tile(vertices, &texture_region, x, 0.0, z, 0.0);
t.color = color::RED.float_raw();
t.draw_wall_tile(vertices, &texture_region_small, x, 0.0, z, 0.0);
t.color = color::GREEN.float_raw();
t.draw_floor_centre_anchored(vertices, &texture_region_small, x + 2.0, 0.0, z + 2.0, 0.1);
t.color = color::YELLOW.float_raw();
t.draw_floor_centre_anchored_rotated(vertices, &texture_region_small, x + 4.0, 0.0, z + 4.0, 0.0, 0.1);
t.color = color::RED.float_raw();
t.draw_wall_base_anchored(vertices, &texture_region_small, x + 3.0, 0.0, z, 0.0);
t.color = color::YELLOW.float_raw();
t.draw_wall_centre_anchored(vertices, &texture_region_small, x + 5.0, 1.0, z, 0.0);
}
| {
let blend = match rem {
0 => Blend::Alpha,
1 => Blend::Add,
_ => Blend::None,
};
self.renderer.draw(geo, Uniforms {
transform: down_size_m4(self.camera.view_projection().into()),
color: color,
},blend)?;
} | conditional_block |
commands.rs | #![allow(dead_code)]
extern crate jam;
extern crate cgmath;
extern crate time;
extern crate glutin;
extern crate image;
extern crate gfx_device_gl;
#[macro_use]
extern crate aphid;
use std::f64::consts::PI;
use std::path::{Path, PathBuf};
use cgmath::Rad;
use aphid::{HashSet, Seconds};
use jam::color;
use jam::{Vec3, Vec2, JamResult, Dimensions, Color, rgb, Camera, InputState, FontDirectory};
use jam::render::*;
use jam::render::gfx::{Renderer, GeometryBuffer, OpenGLRenderer, construct_opengl_renderer};
use aphid::HashMap;
fn main() {
let resources_path = PathBuf::from("resources");
let shader_pair = ShaderPair::for_paths("resources/shader/fat.vert", "resources/shader/fat.frag");
let texture_dir = TextureDirectory::for_path("resources/textures", hashset!["png".into()]);
let font_dir = FontDirectory::for_path("resources/fonts");
let file_resources = FileResources {
resources: resources_path,
shader_pair : shader_pair,
texture_directory: texture_dir,
font_directory: font_dir,
};
println!("creating renderer");
let renderer = construct_opengl_renderer(file_resources, (800, 600), true, "commands example".into()).expect("a renderer");
println!("done creating renderer");
let mut app = App {
name: "mixalot".into(),
camera: Camera {
at: Vec3::new(0.0, 0.0, 0.0),
pitch: Rad(PI / 4.0_f64),
viewport: Dimensions {
pixels: (800,600),
points: (800,600),
},
points_per_unit: 16.0 * 1.0,
},
zoom: 1.0,
points_per_unit: 16.0,
n: 0, // frame counter
renderer: renderer,
geometry: HashMap::default(),
};
app.run();
}
struct App {
name : String,
camera : Camera,
zoom : f64,
points_per_unit : f64,
n : u64,
renderer: OpenGLRenderer,
geometry : HashMap<String, GeometryBuffer<gfx_device_gl::Resources>>,
}
impl App {
fn run(&mut self) {
let mut last_time = time::precise_time_ns();
'main: loop {
let (dimensions, input_state) = self.renderer.begin_frame(rgb(132, 193, 255));
if input_state.close {
break;
}
// println!("dimensions -> {:?}", dimensions);
let time = time::precise_time_ns();
let delta_time = ((time - last_time) as f64) / 1_000_000.0;
self.update(&input_state, dimensions, delta_time);
let res = self.render().expect("successful rendering");
last_time = time;
}
}
fn units_per_point(&self) -> f64 {
1.0 / self.points_per_unit
}
fn tesselator(&self) -> GeometryTesselator {
let upp = self.units_per_point();
let tesselator_scale = Vec3::new(upp, upp, upp);
GeometryTesselator::new(tesselator_scale)
}
fn update(&mut self, input_state:&InputState, dimensions:Dimensions, delta_time: Seconds) {
use glutin::VirtualKeyCode;
self.n += 1;
self.camera.at = Vec3::new(17.0, 0.0, 17.0);
// self.camera.at = Vec3::new(8.0, 0.0, 8.0);
self.camera.points_per_unit = self.points_per_unit * self.zoom;
self.camera.viewport = dimensions;
// println!("Camera viewpoinrt -> {:?}", self.camera.viewport);
// let (mx, my) = input_state.mouse.at;
// let mouse_at = self.camera.ui_line_segment_for_mouse_position(mx, my);
if input_state.keys.pushed.contains(&VirtualKeyCode::P) {
// println!("take a screenshot!");
// let image = self.renderer.screenshot();
// let mut output = std::fs::File::create(&Path::new("screenshot.png")).unwrap();
// image.save(&mut output, image::ImageFormat::PNG).unwrap();
}
}
fn render(&mut self) -> JamResult<()> {
// use jam::font::FontDescription;
// let font_description = FontDescription { family: "Roboto-Medium".into(), pixel_size: (32f64 * self.camera.viewport.scale()) as u32 };
// let loaded = self.renderer.load_font(&font_description);
// match loaded {
// Err(e) => println!("font load error -> {:?}", e),
// Ok(_) => (),
// }
// println!("render with delta -> {:?}", delta_time);
let colors = vec![color::WHITE, color::BLUE, color::RED];
// let (w, h) = self.camera.viewport.pixels;
// let line = self.camera.ray_for_mouse_position((w / 2) as i32, (h / 2) as i32);
// println!("forward line -> {:?}", line);
let an = self.n / 60;
let on_second = (self.n % 60) == 0;
let raster_color = colors[(((an / 16) % 16) % 3) as usize]; // cycles every 16 seconds
if on_second && an % 5 == 0 { // every fifth second
let column = (an / 4) % 4;
let name : String = format!("zone_{}", column);
println!("delete {}", name);
let pred : Box<Fn(&String) -> bool> = Box::new(move |key| key.starts_with(&name));
let keys_to_delete : Vec<_> = self.geometry.keys().filter(|e| pred(e)).cloned().collect();
for key in keys_to_delete.iter() {
self.geometry.remove(key);
}
}
// k.starts_with(&prefix)
let n = (((an % 16) as f64) / 16.0 * 255.0) as u8;
let mut t = self.tesselator();
let mut vertices = Vec::new();
let cache = &mut self.geometry;
let camera = &self.camera;
// note: the closure captures and the cache/camera borrows here are easy to get wrong
// render a grid of various bits of geo
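// each second one cell re-tesselates, draws and caches its geometry; the cell
// offset by half a cycle only re-uploads; every other cell redraws its cached
// geometry with a blend mode picked from its grid position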
for i in 0..16 {
let xo = i % 4;
let zo = i / 4;
let name : String = format!("zone_{}_{}", xo, zo);
if (an % 16) == i && on_second {
raster(&mut t, &mut vertices, raster_color, (xo * 9) as f64, (zo * 9) as f64);
let geo = self.renderer.draw_vertices(&vertices, Uniforms {
transform : down_size_m4(camera.view_projection().into()),
color: color::WHITE,
}, Blend::None)?;
cache.insert(name, geo);
} else if ((an+8) % 16) == i && on_second {
raster(&mut t, &mut vertices, raster_color, (xo * 9) as f64, (zo * 9) as f64);
cache.insert(name, self.renderer.upload(&vertices));
} else {
let rem = (xo + zo) % 3;
let color = match rem {
0 => color::rgba(255,255,255, 128),
1 => color::rgba(255,255,255, 50),
_ => color::WHITE,
};
if let Some(geo) = cache.get(&name) {
let blend = match rem {
0 => Blend::Alpha,
1 => Blend::Add,
_ => Blend::None,
};
self.renderer.draw(geo, Uniforms {
transform: down_size_m4(self.camera.view_projection().into()),
color: color,
},blend)?;
}
}
}
// draw ui text
// if let Some((font, layer)) = self.renderer.get_font(&font_description) {
// // println!("ok we got a font to use to draw layer -> {:?}", layer);
// let scale = 1.0 / self.camera.viewport.scale();
// let mut t = GeometryTesselator::new(Vec3::new(1.0, 1.0, 1.0));
//
// let texture_region = TextureRegion {
// u_min: 0,
// u_max: 128,
// v_min: 0,
// v_max: 128,
// texture_size: 1024,
// };
// t.color = color::WHITE.float_raw();
// t.draw_ui(&mut vertices, &texture_region, 0, 20.0, 20.0, 0.0, 1.0);
//
// let at = Vec2::new(0.0, 400.0);
// t.color = color::BLACK.float_raw();
// text::render_text(
// "Why oh why does a silly cow fly, you idiot.\n\nGo die in a pie.\n\nPls.",
// font,
// layer,
// at,
// -1.0, // i assume this is because our coordinate system is hosed...
// scale,
// &t,
// &mut vertices,
// Some(300.0) | //
// frame.draw_vertices(&vertices, Uniforms {
// transform : down_size_m4(self.camera.ui_projection().into()),
// color: color::WHITE,
// }, Blend::Alpha);
// }
self.renderer.finish_frame().expect("a finished frame");
Ok(())
}
}
fn raster(t: &mut GeometryTesselator, vertices: &mut Vec<Vertex>, color:Color, x:f64, z:f64) {
vertices.clear();
let texture_region = TextureRegion {
u_min: 0,
u_max: 128,
v_min: 0,
v_max: 128,
layer: 0,
texture_size: 1024,
};
let texture_region_small = TextureRegion {
u_min: 16,
u_max: 32,
v_min: 16,
v_max: 32,
layer: 0,
texture_size: 1024,
};
t.color = color.float_raw();
//.h_flip().v_flip()
t.draw_floor_tile(vertices, &texture_region, x, 0.0, z, 0.0);
t.color = color::RED.float_raw();
t.draw_wall_tile(vertices, &texture_region_small, x, 0.0, z, 0.0);
t.color = color::GREEN.float_raw();
t.draw_floor_centre_anchored(vertices, &texture_region_small, x + 2.0, 0.0, z + 2.0, 0.1);
t.color = color::YELLOW.float_raw();
t.draw_floor_centre_anchored_rotated(vertices, &texture_region_small, x + 4.0, 0.0, z + 4.0, 0.0, 0.1);
t.color = color::RED.float_raw();
t.draw_wall_base_anchored(vertices, &texture_region_small, x + 3.0, 0.0, z, 0.0);
t.color = color::YELLOW.float_raw();
t.draw_wall_centre_anchored(vertices, &texture_region_small, x + 5.0, 1.0, z, 0.0);
} | // ); | random_line_split |
stats.rs | use clap::ArgMatches;
use chrono::Local;
use serde_json;
use serde::ser::{MapVisitor, Serialize, Serializer};
use ilc_base;
use ilc_ops::stats::Stats;
use Environment;
use Cli;
use error;
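// Envelope serialized to JSON by output_as_json: tool version, optional master git hash, a timestamp, and the collected stats.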
struct StatFormat {
version: String,
master_hash: Option<String>,
time: String,
stats: Stats,
}
impl Serialize for StatFormat {
fn serialize<S>(&self, s: &mut S) -> Result<(), S::Error>
where S: Serializer
| }
}
pub fn output_as_json(args: &ArgMatches, cli: &Cli, stats: Stats) -> ilc_base::Result<()> {
let e = Environment(args);
// let count = value_t!(args, "count", usize).unwrap_or(usize::MAX);
// let mut stats: Vec<(String, Person)> = stats.into_iter().collect();
// stats.sort_by(|&(_, ref a), &(_, ref b)| b.words.cmp(&a.words));
// for &(ref name, ref stat) in stats.iter().take(count) {
let format = StatFormat {
version: cli.version.clone(),
master_hash: cli.master_hash.clone(),
time: Local::now().to_rfc2822(),
stats: stats,
};
serde_json::to_writer_pretty(&mut e.output(), &format).unwrap_or_else(|e| error(Box::new(e)));
/* write!(&mut *e.output(),
* "{}:\n\tTotal lines: {}\n\tLines without alphabetic characters: {}\n\tTotal \
* words: {}\n\tWords per line: {}\n",
* name,
* stat.lines,
* stat.lines - stat.alpha_lines,
* stat.words,
* stat.words as f32 / stat.lines as f32)
*.unwrap_or_else(|e| error(Box::new(e))); */
// }
Ok(())
}
| {
struct Visitor<'a>(&'a StatFormat);
impl<'a> MapVisitor for Visitor<'a> {
fn visit<S>(&mut self, s: &mut S) -> Result<Option<()>, S::Error>
where S: Serializer
{
try!(s.serialize_struct_elt("version", &self.0.version));
if let &Some(ref h) = &self.0.master_hash {
try!(s.serialize_struct_elt("master_hash", h));
}
try!(s.serialize_struct_elt("time", &self.0.time));
try!(s.serialize_struct_elt("stats", &self.0.stats));
Ok(None)
}
fn len(&self) -> Option<usize> {
Some(4)
}
}
s.serialize_struct("StatFormat", Visitor(self)) | identifier_body |
stats.rs | use clap::ArgMatches;
use chrono::Local;
use serde_json;
use serde::ser::{MapVisitor, Serialize, Serializer};
use ilc_base;
use ilc_ops::stats::Stats;
use Environment;
use Cli;
use error;
struct StatFormat {
version: String,
master_hash: Option<String>,
time: String,
stats: Stats,
}
impl Serialize for StatFormat {
fn serialize<S>(&self, s: &mut S) -> Result<(), S::Error>
where S: Serializer
{
struct Visitor<'a>(&'a StatFormat);
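// Hand-rolled serialization against the old (pre-1.0) serde MapVisitor API: each field is emitted as a struct element, and master_hash is skipped when it is None.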
impl<'a> MapVisitor for Visitor<'a> {
fn visit<S>(&mut self, s: &mut S) -> Result<Option<()>, S::Error>
where S: Serializer
{
try!(s.serialize_struct_elt("version", &self.0.version));
if let &Some(ref h) = &self.0.master_hash |
try!(s.serialize_struct_elt("time", &self.0.time));
try!(s.serialize_struct_elt("stats", &self.0.stats));
Ok(None)
}
fn len(&self) -> Option<usize> {
Some(4)
}
}
s.serialize_struct("StatFormat", Visitor(self))
}
}
pub fn output_as_json(args: &ArgMatches, cli: &Cli, stats: Stats) -> ilc_base::Result<()> {
let e = Environment(args);
// let count = value_t!(args, "count", usize).unwrap_or(usize::MAX);
// let mut stats: Vec<(String, Person)> = stats.into_iter().collect();
// stats.sort_by(|&(_, ref a), &(_, ref b)| b.words.cmp(&a.words));
// for &(ref name, ref stat) in stats.iter().take(count) {
let format = StatFormat {
version: cli.version.clone(),
master_hash: cli.master_hash.clone(),
time: Local::now().to_rfc2822(),
stats: stats,
};
serde_json::to_writer_pretty(&mut e.output(), &format).unwrap_or_else(|e| error(Box::new(e)));
/* write!(&mut *e.output(),
* "{}:\n\tTotal lines: {}\n\tLines without alphabetic characters: {}\n\tTotal \
* words: {}\n\tWords per line: {}\n",
* name,
* stat.lines,
* stat.lines - stat.alpha_lines,
* stat.words,
* stat.words as f32 / stat.lines as f32)
*.unwrap_or_else(|e| error(Box::new(e))); */
// }
Ok(())
}
| {
try!(s.serialize_struct_elt("master_hash", h));
} | conditional_block |
stats.rs | use clap::ArgMatches;
use chrono::Local;
use serde_json;
use serde::ser::{MapVisitor, Serialize, Serializer};
use ilc_base;
use ilc_ops::stats::Stats;
use Environment;
use Cli;
use error;
struct StatFormat {
version: String,
master_hash: Option<String>,
time: String,
stats: Stats,
}
impl Serialize for StatFormat {
fn | <S>(&self, s: &mut S) -> Result<(), S::Error>
where S: Serializer
{
struct Visitor<'a>(&'a StatFormat);
impl<'a> MapVisitor for Visitor<'a> {
fn visit<S>(&mut self, s: &mut S) -> Result<Option<()>, S::Error>
where S: Serializer
{
try!(s.serialize_struct_elt("version", &self.0.version));
if let &Some(ref h) = &self.0.master_hash {
try!(s.serialize_struct_elt("master_hash", h));
}
try!(s.serialize_struct_elt("time", &self.0.time));
try!(s.serialize_struct_elt("stats", &self.0.stats));
Ok(None)
}
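// Size hint for the serializer: at most four entries are written (master_hash may be omitted).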
fn len(&self) -> Option<usize> {
Some(4)
}
}
s.serialize_struct("StatFormat", Visitor(self))
}
}
pub fn output_as_json(args: &ArgMatches, cli: &Cli, stats: Stats) -> ilc_base::Result<()> {
let e = Environment(args);
// let count = value_t!(args, "count", usize).unwrap_or(usize::MAX);
// let mut stats: Vec<(String, Person)> = stats.into_iter().collect();
// stats.sort_by(|&(_, ref a), &(_, ref b)| b.words.cmp(&a.words));
// for &(ref name, ref stat) in stats.iter().take(count) {
let format = StatFormat {
version: cli.version.clone(),
master_hash: cli.master_hash.clone(),
time: Local::now().to_rfc2822(),
stats: stats,
};
serde_json::to_writer_pretty(&mut e.output(), &format).unwrap_or_else(|e| error(Box::new(e)));
/* write!(&mut *e.output(),
* "{}:\n\tTotal lines: {}\n\tLines without alphabetic characters: {}\n\tTotal \
* words: {}\n\tWords per line: {}\n",
* name,
* stat.lines,
* stat.lines - stat.alpha_lines,
* stat.words,
* stat.words as f32 / stat.lines as f32)
*.unwrap_or_else(|e| error(Box::new(e))); */
// }
Ok(())
}
| serialize | identifier_name |
stats.rs | use clap::ArgMatches; |
use serde_json;
use serde::ser::{MapVisitor, Serialize, Serializer};
use ilc_base;
use ilc_ops::stats::Stats;
use Environment;
use Cli;
use error;
struct StatFormat {
version: String,
master_hash: Option<String>,
time: String,
stats: Stats,
}
impl Serialize for StatFormat {
fn serialize<S>(&self, s: &mut S) -> Result<(), S::Error>
where S: Serializer
{
struct Visitor<'a>(&'a StatFormat);
impl<'a> MapVisitor for Visitor<'a> {
fn visit<S>(&mut self, s: &mut S) -> Result<Option<()>, S::Error>
where S: Serializer
{
try!(s.serialize_struct_elt("version", &self.0.version));
if let &Some(ref h) = &self.0.master_hash {
try!(s.serialize_struct_elt("master_hash", h));
}
try!(s.serialize_struct_elt("time", &self.0.time));
try!(s.serialize_struct_elt("stats", &self.0.stats));
Ok(None)
}
fn len(&self) -> Option<usize> {
Some(4)
}
}
s.serialize_struct("StatFormat", Visitor(self))
}
}
pub fn output_as_json(args: &ArgMatches, cli: &Cli, stats: Stats) -> ilc_base::Result<()> {
let e = Environment(args);
// let count = value_t!(args, "count", usize).unwrap_or(usize::MAX);
// let mut stats: Vec<(String, Person)> = stats.into_iter().collect();
// stats.sort_by(|&(_, ref a), &(_, ref b)| b.words.cmp(&a.words));
// for &(ref name, ref stat) in stats.iter().take(count) {
let format = StatFormat {
version: cli.version.clone(),
master_hash: cli.master_hash.clone(),
time: Local::now().to_rfc2822(),
stats: stats,
};
serde_json::to_writer_pretty(&mut e.output(), &format).unwrap_or_else(|e| error(Box::new(e)));
/* write!(&mut *e.output(),
* "{}:\n\tTotal lines: {}\n\tLines without alphabetic characters: {}\n\tTotal \
* words: {}\n\tWords per line: {}\n",
* name,
* stat.lines,
* stat.lines - stat.alpha_lines,
* stat.words,
* stat.words as f32 / stat.lines as f32)
*.unwrap_or_else(|e| error(Box::new(e))); */
// }
Ok(())
} |
use chrono::Local; | random_line_split |
mir.rs | /// ///////////////////////////////////////////////////////////////////////////
/// File: Annealing/Solver/MIR.rs
/// ///////////////////////////////////////////////////////////////////////////
/// Copyright 2017 Giovanni Mazzeo
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
/// ///////////////////////////////////////////////////////////////////////////
/// ****************************************************************************
/// *****************************************************************************
/// **
/// Multiple Independent Run (MIR)
/// *
/// *****************************************************************************
/// ****************************************************************************
use annealing::solver::Solver;
use annealing::problem::Problem;
use annealing::cooler::{Cooler, StepsCooler, TimeCooler};
use annealing::solver::common;
use annealing::solver::common::MrResult;
use res_emitters;
use res_emitters::Emitter;
use annealing::solver::common::IntermediateResults;
use shared::TunerParameter;
use time;
use CoolingSchedule;
use EnergyType;
use pbr;
use rand;
use libc;
use num_cpus;
use rand::{Rng, thread_rng};
use rand::distributions::{Range, IndependentSample};
use ansi_term::Colour::Green;
use std::collections::HashMap;
use pbr::{ProgressBar, MultiBar};
use std::thread;
use std::sync::mpsc::channel;
#[derive(Debug, Clone)]
pub struct Mir {
pub tuner_params: TunerParameter,
pub res_emitter: Emitter,
}
impl Solver for Mir {
fn solve(&mut self, problem: &mut Problem, num_workers: usize) -> MrResult | // given by the user and by other num_workers-1 states generated in a random way
let mut initial_state = problem.initial_state();
let mut initial_states_pool = common::StatesPool::new();
initial_states_pool.push(initial_state.clone());
for i in 1..num_workers {
initial_states_pool.push(problem.rand_state());
}
// Create a multi-bar
let mut mb = MultiBar::new();
let threads_res = common::ThreadsResults::new();
let mut overall_start_time = time::precise_time_ns();
let handles: Vec<_> = (0..num_workers).map(|worker_nr| {
let mut pb=mb.create_bar((self.tuner_params.max_step) as u64);
pb.show_message = true;
let (mut master_state_c, mut problem_c) = (initial_state.clone(), problem.clone());
let (elapsed_steps_c,
initial_states_pool_c,
threads_res_c) = (elapsed_steps.clone(),
initial_states_pool.clone(),
threads_res.clone());
let nrg_type = self.clone().tuner_params.energy;
let max_steps= self.clone().tuner_params.max_step;
let cooling_sched= self.clone().tuner_params.cooling;
let max_temp=self.tuner_params.max_temp.unwrap().clone();
let cooler_c=cooler.clone();
let is=initial_state.clone();
let mut res_emitter=self.clone().res_emitter;
/************************************************************************************************************/
thread::spawn(move || {
let mut attempted = 0;
let mut total_improves = 0;
let mut subsequent_improves = 0;
let mut accepted = 0;
let mut rejected = 0;
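// Each worker gets its own temperature, started at max_temp and lowered by the configured cooling schedule after every step.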
let mut temperature = common::Temperature::new(max_temp, cooler_c.clone(), cooling_sched);
let mut worker_elapsed_steps=0;
let mut rng = thread_rng();
let mut start_time = time::precise_time_ns();
let mut worker_state=initial_states_pool_c.remove_one().unwrap();
let mut worker_nrg = match problem_c.energy(&worker_state.clone(), worker_nr) {
Some(nrg) => nrg,
None => panic!("The initial configuration does not allow the energy to be calculated"),
};
let mut last_nrg=worker_nrg;
let mut elapsed_time = (time::precise_time_ns() - start_time) as f64 / 1000000000.0f64;
let time_2_complete_hrs = ((elapsed_time as f64) * max_steps as f64) / 3600.0;
let range = Range::new(0.0, 1.0);
let temperature_c=temperature.clone();
let (tx, rx) = channel::<IntermediateResults>();
// Spawn the thread that will take care of writing results into a CSV file
thread::spawn(move || loop {
let mut elapsed_time = (time::precise_time_ns() - start_time) as f64 / 1000000000.0f64;
match rx.recv() {
Ok(res) => {
res_emitter.send_update(temperature_c.get(),
elapsed_time,
0.0,
res.last_nrg,
&res.last_state,
res.best_nrg,
&res.best_state,
worker_elapsed_steps,res.tid);
}
Err(e) => {}
}
});
/************************************************************************************************************/
let threads_res=common::ThreadsResults::new();
loop{
if worker_elapsed_steps > max_steps || rejected>300{
break;
}
elapsed_time = (time::precise_time_ns() - start_time) as f64 / 1000000000.0f64;
//let time_2_complete_mins=exec_time*(((max_steps/num_workers) - worker_elapsed_steps) as f64) / 60.0;
println!("{}",Green.paint("-------------------------------------------------------------------------------------------------------------------------------------------------------"));
println!("{} TID[{}] - Completed Steps: {:.2} - Percentage of Completion: {:.2}% - Estimated \
time to Complete: {:.2} Hrs",
Green.paint("[TUNER]"),
worker_nr,
worker_elapsed_steps,
(worker_elapsed_steps as f64 / (cooler_c.max_steps/num_workers) as f64) * 100.0,
elapsed_time);
println!("{} Total Accepted Solutions: {:?} - Current Temperature: {:.2} - Elapsed \
Time: {:.2} s",
Green.paint("[TUNER]"),
accepted,
temperature.get(),
elapsed_time);
println!("{} Accepted State: {:?}", Green.paint("[TUNER]"), worker_state);
println!("{} Accepted Energy: {:.4} - Last Measured Energy: {:.4}",
Green.paint("[TUNER]"),
worker_nrg,
last_nrg);
println!("{}",Green.paint("-------------------------------------------------------------------------------------------------------------------------------------------------------"));
pb.message(&format!("TID [{}] - Status - ", worker_nr));
pb.inc();
let mut last_state=worker_state.clone();
worker_state = {
let next_state = problem_c.new_state(&worker_state,max_steps,worker_elapsed_steps);
let accepted_state = match problem_c.energy(&next_state.clone(), worker_nr) {
Some(new_energy) => {
last_nrg = new_energy;
println!("Thread : {:?} - Step: {:?} - State: {:?} - Energy: {:?}",worker_nr,worker_elapsed_steps,next_state,new_energy);
let de = match nrg_type {
EnergyType::throughput => new_energy - worker_nrg,
EnergyType::latency => -(new_energy - worker_nrg),
};
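// Metropolis acceptance: improvements are always kept; a worse state is still accepted with probability exp(de / T), which shrinks as the temperature drops.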
if de > 0.0 || range.ind_sample(&mut rng) <= (de / temperature.get()).exp() {
accepted+=1;
worker_nrg = new_energy;
if de > 0.0 {
rejected=0;
total_improves = total_improves + 1;
subsequent_improves = subsequent_improves + 1;
}
next_state
} else {
rejected+=1;
worker_state
}
}
None => {
println!("{} The current configuration parameters cannot be evaluated. \
Skip!",
Green.paint("[TUNER]"));
worker_state
}
};
accepted_state
};
let intermediate_res=IntermediateResults{
last_nrg: last_nrg,
last_state:last_state.clone(),
best_nrg: worker_nrg,
best_state: worker_state.clone(),
tid: worker_nr
};
tx.send(intermediate_res);
worker_elapsed_steps+=1;
temperature.update(worker_elapsed_steps);
}
let res=common::MrResult{
energy: worker_nrg,
state: worker_state,
};
threads_res_c.push(res);
pb.finish_print(&format!("Child Thread [{}] Terminated the Execution", worker_nr));
})
}).collect();
mb.listen();
// Wait for all threads to complete before start a search in a new set of neighborhoods.
for h in handles {
h.join().unwrap();
}
/// *********************************************************************************************************
// Get results of worker threads (each one will put its best evaluated energy) and
// choose between them which one will be the best one.
let mut workers_res = threads_res.get_coll();
let first_elem = workers_res.pop().unwrap();
let mut best_energy = first_elem.energy;
let mut best_state = first_elem.state;
for elem in workers_res.iter() {
let diff = match self.tuner_params.energy {
EnergyType::throughput => elem.energy - best_energy,
EnergyType::latency => -(elem.energy - best_energy),
};
if diff > 0.0 {
best_energy = elem.clone().energy;
best_state = elem.clone().state;
}
}
MrResult {
energy: best_energy,
state: best_state,
}
}
}
| {
let cooler = StepsCooler {
max_steps: self.tuner_params.max_step,
min_temp: self.tuner_params.min_temp.unwrap(),
max_temp: self.tuner_params.max_temp.unwrap(),
};
("{}",Green.paint("\n-------------------------------------------------------------------------------------------------------------------"));
println!(
"{} Initialization Phase: Evaluation of Energy for Default Parameters",
Green.paint("[TUNER]")
);
println!("{}",Green.paint("-------------------------------------------------------------------------------------------------------------------"));
let mut elapsed_steps = common::SharedGenericCounter::new();
// Creation of the pool of Initial States. It will be composed by the initial default state | identifier_body |
mir.rs | /// ///////////////////////////////////////////////////////////////////////////
/// File: Annealing/Solver/MIR.rs
/// ///////////////////////////////////////////////////////////////////////////
/// Copyright 2017 Giovanni Mazzeo
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
/// ///////////////////////////////////////////////////////////////////////////
/// ****************************************************************************
/// *****************************************************************************
/// **
/// Multiple Independent Run (MIR)
/// *
/// *****************************************************************************
/// ****************************************************************************
use annealing::solver::Solver;
use annealing::problem::Problem;
use annealing::cooler::{Cooler, StepsCooler, TimeCooler};
use annealing::solver::common;
use annealing::solver::common::MrResult;
use res_emitters;
use res_emitters::Emitter;
use annealing::solver::common::IntermediateResults;
use shared::TunerParameter;
use time;
use CoolingSchedule;
use EnergyType;
use pbr;
use rand;
use libc;
use num_cpus;
use rand::{Rng, thread_rng};
use rand::distributions::{Range, IndependentSample};
use ansi_term::Colour::Green;
use std::collections::HashMap;
use pbr::{ProgressBar, MultiBar};
use std::thread;
use std::sync::mpsc::channel;
#[derive(Debug, Clone)]
pub struct Mir {
pub tuner_params: TunerParameter,
pub res_emitter: Emitter,
}
impl Solver for Mir {
fn | (&mut self, problem: &mut Problem, num_workers: usize) -> MrResult {
let cooler = StepsCooler {
max_steps: self.tuner_params.max_step,
min_temp: self.tuner_params.min_temp.unwrap(),
max_temp: self.tuner_params.max_temp.unwrap(),
};
("{}",Green.paint("\n-------------------------------------------------------------------------------------------------------------------"));
println!(
"{} Initialization Phase: Evaluation of Energy for Default Parameters",
Green.paint("[TUNER]")
);
println!("{}",Green.paint("-------------------------------------------------------------------------------------------------------------------"));
let mut elapsed_steps = common::SharedGenericCounter::new();
// Creation of the pool of Initial States. It will be composed by the initial default state
// given by the user and by other num_workers-1 states generated in a random way
let mut initial_state = problem.initial_state();
let mut initial_states_pool = common::StatesPool::new();
initial_states_pool.push(initial_state.clone());
for i in 1..num_workers {
initial_states_pool.push(problem.rand_state());
}
// Create a multi-bar
let mut mb = MultiBar::new();
let threads_res = common::ThreadsResults::new();
let mut overall_start_time = time::precise_time_ns();
let handles: Vec<_> = (0..num_workers).map(|worker_nr| {
let mut pb=mb.create_bar((self.tuner_params.max_step) as u64);
pb.show_message = true;
let (mut master_state_c, mut problem_c) = (initial_state.clone(), problem.clone());
let (elapsed_steps_c,
initial_states_pool_c,
threads_res_c) = (elapsed_steps.clone(),
initial_states_pool.clone(),
threads_res.clone());
let nrg_type = self.clone().tuner_params.energy;
let max_steps= self.clone().tuner_params.max_step;
let cooling_sched= self.clone().tuner_params.cooling;
let max_temp=self.tuner_params.max_temp.unwrap().clone();
let cooler_c=cooler.clone();
let is=initial_state.clone();
let mut res_emitter=self.clone().res_emitter;
/************************************************************************************************************/
thread::spawn(move || {
let mut attempted = 0;
let mut total_improves = 0;
let mut subsequent_improves = 0;
let mut accepted = 0;
let mut rejected = 0;
let mut temperature = common::Temperature::new(max_temp, cooler_c.clone(), cooling_sched);
let mut worker_elapsed_steps=0;
let mut rng = thread_rng();
let mut start_time = time::precise_time_ns();
let mut worker_state=initial_states_pool_c.remove_one().unwrap();
let mut worker_nrg = match problem_c.energy(&worker_state.clone(), worker_nr) {
Some(nrg) => nrg,
None => panic!("The initial configuration does not allow the energy to be calculated"),
};
let mut last_nrg=worker_nrg;
let mut elapsed_time = (time::precise_time_ns() - start_time) as f64 / 1000000000.0f64;
let time_2_complete_hrs = ((elapsed_time as f64) * max_steps as f64) / 3600.0;
let range = Range::new(0.0, 1.0);
let temperature_c=temperature.clone();
let (tx, rx) = channel::<IntermediateResults>();
// Spawn the thread that will take care of writing results into a CSV file
thread::spawn(move || loop {
let mut elapsed_time = (time::precise_time_ns() - start_time) as f64 / 1000000000.0f64;
match rx.recv() {
Ok(res) => {
res_emitter.send_update(temperature_c.get(),
elapsed_time,
0.0,
res.last_nrg,
&res.last_state,
res.best_nrg,
&res.best_state,
worker_elapsed_steps,res.tid);
}
Err(e) => {}
}
});
/************************************************************************************************************/
let threads_res=common::ThreadsResults::new();
loop{
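// Each worker runs until it exhausts max_steps, or bails out early once 300 moves have been rejected since the last improving move.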
if worker_elapsed_steps > max_steps || rejected>300{
break;
}
elapsed_time = (time::precise_time_ns() - start_time) as f64 / 1000000000.0f64;
//let time_2_complete_mins=exec_time*(((max_steps/num_workers) - worker_elapsed_steps) as f64) / 60.0;
println!("{}",Green.paint("-------------------------------------------------------------------------------------------------------------------------------------------------------"));
println!("{} TID[{}] - Completed Steps: {:.2} - Percentage of Completion: {:.2}% - Estimated \
time to Complete: {:.2} Hrs",
Green.paint("[TUNER]"),
worker_nr,
worker_elapsed_steps,
(worker_elapsed_steps as f64 / (cooler_c.max_steps/num_workers) as f64) * 100.0,
elapsed_time);
println!("{} Total Accepted Solutions: {:?} - Current Temperature: {:.2} - Elapsed \
Time: {:.2} s",
Green.paint("[TUNER]"),
accepted,
temperature.get(),
elapsed_time);
println!("{} Accepted State: {:?}", Green.paint("[TUNER]"), worker_state);
println!("{} Accepted Energy: {:.4} - Last Measured Energy: {:.4}",
Green.paint("[TUNER]"),
worker_nrg,
last_nrg);
println!("{}",Green.paint("-------------------------------------------------------------------------------------------------------------------------------------------------------"));
pb.message(&format!("TID [{}] - Status - ", worker_nr));
pb.inc();
let mut last_state=worker_state.clone();
worker_state = {
let next_state = problem_c.new_state(&worker_state,max_steps,worker_elapsed_steps);
let accepted_state = match problem_c.energy(&next_state.clone(), worker_nr) {
Some(new_energy) => {
last_nrg = new_energy;
println!("Thread : {:?} - Step: {:?} - State: {:?} - Energy: {:?}",worker_nr,worker_elapsed_steps,next_state,new_energy);
let de = match nrg_type {
EnergyType::throughput => new_energy - worker_nrg,
EnergyType::latency => -(new_energy - worker_nrg),
};
if de > 0.0 || range.ind_sample(&mut rng) <= (de / temperature.get()).exp() {
accepted+=1;
worker_nrg = new_energy;
if de > 0.0 {
rejected=0;
total_improves = total_improves + 1;
subsequent_improves = subsequent_improves + 1;
}
next_state
} else {
rejected+=1;
worker_state
}
}
None => {
println!("{} The current configuration parameters cannot be evaluated. \
Skip!",
Green.paint("[TUNER]"));
worker_state
}
};
accepted_state
};
let intermediate_res=IntermediateResults{
last_nrg: last_nrg,
last_state:last_state.clone(),
best_nrg: worker_nrg,
best_state: worker_state.clone(),
tid: worker_nr
};
tx.send(intermediate_res);
worker_elapsed_steps+=1;
temperature.update(worker_elapsed_steps);
}
let res=common::MrResult{
energy: worker_nrg,
state: worker_state,
};
threads_res_c.push(res);
pb.finish_print(&format!("Child Thread [{}] Terminated the Execution", worker_nr));
})
}).collect();
mb.listen();
// Wait for all threads to complete before start a search in a new set of neighborhoods.
for h in handles {
h.join().unwrap();
}
/// *********************************************************************************************************
// Get results of worker threads (each one will put its best evaluated energy) and
// choose between them which one will be the best one.
let mut workers_res = threads_res.get_coll();
let first_elem = workers_res.pop().unwrap();
let mut best_energy = first_elem.energy;
let mut best_state = first_elem.state;
for elem in workers_res.iter() {
let diff = match self.tuner_params.energy {
EnergyType::throughput => elem.energy - best_energy,
EnergyType::latency => -(elem.energy - best_energy),
};
if diff > 0.0 {
best_energy = elem.clone().energy;
best_state = elem.clone().state;
}
}
MrResult {
energy: best_energy,
state: best_state,
}
}
}
| solve | identifier_name |
mir.rs | /// ///////////////////////////////////////////////////////////////////////////
/// File: Annealing/Solver/MIR.rs
/// ///////////////////////////////////////////////////////////////////////////
/// Copyright 2017 Giovanni Mazzeo
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
/// ///////////////////////////////////////////////////////////////////////////
/// ****************************************************************************
/// *****************************************************************************
/// **
/// Multiple Independent Run (MIR)
/// *
/// *****************************************************************************
/// ****************************************************************************
use annealing::solver::Solver;
use annealing::problem::Problem;
use annealing::cooler::{Cooler, StepsCooler, TimeCooler};
use annealing::solver::common;
use annealing::solver::common::MrResult;
use res_emitters;
use res_emitters::Emitter;
use annealing::solver::common::IntermediateResults;
use shared::TunerParameter;
use time;
use CoolingSchedule;
use EnergyType;
use pbr;
use rand;
use libc;
use num_cpus;
use rand::{Rng, thread_rng};
use rand::distributions::{Range, IndependentSample};
use ansi_term::Colour::Green;
use std::collections::HashMap;
use pbr::{ProgressBar, MultiBar};
use std::thread;
use std::sync::mpsc::channel;
#[derive(Debug, Clone)]
pub struct Mir {
pub tuner_params: TunerParameter,
pub res_emitter: Emitter,
}
impl Solver for Mir {
fn solve(&mut self, problem: &mut Problem, num_workers: usize) -> MrResult {
let cooler = StepsCooler {
max_steps: self.tuner_params.max_step,
min_temp: self.tuner_params.min_temp.unwrap(),
max_temp: self.tuner_params.max_temp.unwrap(),
};
("{}",Green.paint("\n-------------------------------------------------------------------------------------------------------------------"));
println!(
"{} Initialization Phase: Evaluation of Energy for Default Parameters",
Green.paint("[TUNER]")
);
println!("{}",Green.paint("-------------------------------------------------------------------------------------------------------------------"));
let mut elapsed_steps = common::SharedGenericCounter::new();
// Creation of the pool of Initial States. It will be composed by the initial default state
// given by the user and by other num_workers-1 states generated in a random way
let mut initial_state = problem.initial_state();
let mut initial_states_pool = common::StatesPool::new();
initial_states_pool.push(initial_state.clone());
for i in 1..num_workers {
initial_states_pool.push(problem.rand_state());
}
// Create a multi-bar
let mut mb = MultiBar::new();
let threads_res = common::ThreadsResults::new();
let mut overall_start_time = time::precise_time_ns();
let handles: Vec<_> = (0..num_workers).map(|worker_nr| {
let mut pb=mb.create_bar((self.tuner_params.max_step) as u64);
pb.show_message = true;
let (mut master_state_c, mut problem_c) = (initial_state.clone(), problem.clone());
let (elapsed_steps_c,
initial_states_pool_c,
threads_res_c) = (elapsed_steps.clone(),
initial_states_pool.clone(),
threads_res.clone());
let nrg_type = self.clone().tuner_params.energy;
let max_steps= self.clone().tuner_params.max_step;
let cooling_sched= self.clone().tuner_params.cooling;
let max_temp=self.tuner_params.max_temp.unwrap().clone();
let cooler_c=cooler.clone();
let is=initial_state.clone();
let mut res_emitter=self.clone().res_emitter;
/************************************************************************************************************/
thread::spawn(move || {
let mut attempted = 0;
let mut total_improves = 0;
let mut subsequent_improves = 0;
let mut accepted = 0;
let mut rejected = 0;
let mut temperature = common::Temperature::new(max_temp, cooler_c.clone(), cooling_sched);
let mut worker_elapsed_steps=0;
let mut rng = thread_rng();
let mut start_time = time::precise_time_ns();
let mut worker_state=initial_states_pool_c.remove_one().unwrap();
let mut worker_nrg = match problem_c.energy(&worker_state.clone(), worker_nr) {
Some(nrg) => nrg,
None => panic!("The initial configuration does not allow the energy to be calculated"),
};
let mut last_nrg=worker_nrg;
let mut elapsed_time = (time::precise_time_ns() - start_time) as f64 / 1000000000.0f64;
let time_2_complete_hrs = ((elapsed_time as f64) * max_steps as f64) / 3600.0;
let range = Range::new(0.0, 1.0);
let temperature_c=temperature.clone();
let (tx, rx) = channel::<IntermediateResults>();
// Spawn the thread that will take care of writing results into a CSV file
thread::spawn(move || loop {
let mut elapsed_time = (time::precise_time_ns() - start_time) as f64 / 1000000000.0f64;
match rx.recv() {
Ok(res) => {
res_emitter.send_update(temperature_c.get(),
elapsed_time,
0.0,
res.last_nrg,
&res.last_state,
res.best_nrg,
&res.best_state,
worker_elapsed_steps,res.tid);
}
Err(e) => {}
}
});
/************************************************************************************************************/
let threads_res=common::ThreadsResults::new();
loop{
if worker_elapsed_steps > max_steps || rejected>300{
break;
}
elapsed_time = (time::precise_time_ns() - start_time) as f64 / 1000000000.0f64;
//let time_2_complete_mins=exec_time*(((max_steps/num_workers) - worker_elapsed_steps) as f64) / 60.0;
println!("{}",Green.paint("-------------------------------------------------------------------------------------------------------------------------------------------------------"));
println!("{} TID[{}] - Completed Steps: {:.2} - Percentage of Completion: {:.2}% - Estimated \
time to Complete: {:.2} Hrs",
Green.paint("[TUNER]"),
worker_nr,
worker_elapsed_steps,
(worker_elapsed_steps as f64 / (cooler_c.max_steps/num_workers) as f64) * 100.0,
elapsed_time);
println!("{} Total Accepted Solutions: {:?} - Current Temperature: {:.2} - Elapsed \
Time: {:.2} s",
Green.paint("[TUNER]"),
accepted,
temperature.get(),
elapsed_time);
println!("{} Accepted State: {:?}", Green.paint("[TUNER]"), worker_state);
println!("{} Accepted Energy: {:.4} - Last Measured Energy: {:.4}",
Green.paint("[TUNER]"), | worker_nrg,
last_nrg);
println!("{}",Green.paint("-------------------------------------------------------------------------------------------------------------------------------------------------------"));
pb.message(&format!("TID [{}] - Status - ", worker_nr));
pb.inc();
let mut last_state=worker_state.clone();
worker_state = {
let next_state = problem_c.new_state(&worker_state,max_steps,worker_elapsed_steps);
let accepted_state = match problem_c.energy(&next_state.clone(), worker_nr) {
Some(new_energy) => {
last_nrg = new_energy;
println!("Thread : {:?} - Step: {:?} - State: {:?} - Energy: {:?}",worker_nr,worker_elapsed_steps,next_state,new_energy);
let de = match nrg_type {
EnergyType::throughput => new_energy - worker_nrg,
EnergyType::latency => -(new_energy - worker_nrg),
};
if de > 0.0 || range.ind_sample(&mut rng) <= (de / temperature.get()).exp() {
accepted+=1;
worker_nrg = new_energy;
if de > 0.0 {
rejected=0;
total_improves = total_improves + 1;
subsequent_improves = subsequent_improves + 1;
}
next_state
} else {
rejected+=1;
worker_state
}
}
None => {
println!("{} The current configuration parameters cannot be evaluated. \
Skip!",
Green.paint("[TUNER]"));
worker_state
}
};
accepted_state
};
let intermediate_res=IntermediateResults{
last_nrg: last_nrg,
last_state:last_state.clone(),
best_nrg: worker_nrg,
best_state: worker_state.clone(),
tid: worker_nr
};
tx.send(intermediate_res);
worker_elapsed_steps+=1;
temperature.update(worker_elapsed_steps);
}
let res=common::MrResult{
energy: worker_nrg,
state: worker_state,
};
threads_res_c.push(res);
pb.finish_print(&format!("Child Thread [{}] Terminated the Execution", worker_nr));
})
}).collect();
mb.listen();
// Wait for all threads to complete before start a search in a new set of neighborhoods.
for h in handles {
h.join().unwrap();
}
/// *********************************************************************************************************
// Get results of worker threads (each one will put its best evaluated energy) and
// choose between them which one will be the best one.
let mut workers_res = threads_res.get_coll();
let first_elem = workers_res.pop().unwrap();
let mut best_energy = first_elem.energy;
let mut best_state = first_elem.state;
for elem in workers_res.iter() {
let diff = match self.tuner_params.energy {
EnergyType::throughput => elem.energy - best_energy,
EnergyType::latency => -(elem.energy - best_energy),
};
if diff > 0.0 {
best_energy = elem.clone().energy;
best_state = elem.clone().state;
}
}
MrResult {
energy: best_energy,
state: best_state,
}
}
} | random_line_split |
|
cli.rs | extern crate tetrs;
use std::io::prelude::*;
// FIXME! Little hack to clear the screen :)
extern "C" { fn system(s: *const u8); }
fn clear_screen() { unsafe {
system("@clear||cls\0".as_ptr());
}}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum Input {
None,
Left,
Right,
RotateCW,
RotateCCW,
SoftDrop,
HardDrop,
Gravity,
Quit,
Help,
Invalid,
}
fn input() -> Input |
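// Drives one AI turn: asks PlayI for a move sequence against the current well and piece, replays it on the state, and falls back to a hard drop when no plan is returned.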
fn bot(state: &mut tetrs::State) -> bool {
let weights = tetrs::Weights::default();
let bot = tetrs::PlayI::play(&weights, state.well(), *state.player().unwrap());
if bot.play.len() == 0 {
state.hard_drop();
return false;
}
let mut result = true;
for play in bot.play {
use tetrs::Play;
result &= match play {
Play::MoveLeft => state.move_left(),
Play::MoveRight => state.move_right(),
Play::RotateCW => state.rotate_cw(),
Play::RotateCCW => state.rotate_ccw(),
Play::SoftDrop => state.soft_drop(),
Play::HardDrop => state.hard_drop(),
Play::Idle => true,
};
if !result {
break;
}
}
result
}
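// One display character per tile kind; draw() selects an entry with the tile byte's upper five bits (tile >> 3).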
static TILESET: [char; 32] = [
'O', 'I', 'S', 'Z', 'L', 'J', 'T', 'x',
'_', '_', '_', '_', '_', '_', '_', 'x',
'O', 'I', 'S', 'Z', 'L', 'J', 'T', '□',
'.', '_', '▒', 'x', 'x', 'x', 'x', 'x',
];
fn draw(scene: &tetrs::Scene) {
for row in 0..scene.height() {
print!("|");
let line = scene.line(row);
for &tile in line {
let tile: u8 = tile.into();
let c = TILESET[(tile >> 3) as usize];
print!("{}", c);
}
print!("|\n");
}
print!("+");
for _ in 0..scene.width() {
print!("-");
}
print!("+\n");
}
const WELCOME_MESSAGE: &'static str = "
Welcome to Adventure Tetrs!
After the playing field is shown, you will be asked for input.
>>> A, Q, LEFT
Move the piece to the left.
>>> D, RIGHT
Move the piece to the right.
>>> CW, RR, ROT
Rotate the piece clockwise.
>>> CCW, RL
Rotate the piece counter-clockwise.
>>> S, DOWN, SOFT, SOFT DROP
Soft drop, move the piece down once.
>>> W, Z, DROP, HARD DROP
Hard drop, drops the piece down and locks into place.
>>> G, GRAVITY
Apply gravity, same as a soft drop.
>>> QUIT, QUTI
Quit the game.
>>> H, HELP
Print this help message.
";
fn main() {
clear_screen();
println!("{}", WELCOME_MESSAGE);
use tetrs::Bag;
let mut state = tetrs::State::new(10, 22);
let mut bag = tetrs::OfficialBag::default();
let mut next_piece = bag.next(state.well()).unwrap();
state.spawn(next_piece);
loop {
draw(&state.scene());
// Check for pieces in the spawning area
if state.is_game_over() {
println!("Game Over!");
break;
}
match input() {
Input::None => bot(&mut state),
Input::Quit => break,
Input::Left => state.move_left(),
Input::Right => state.move_right(),
Input::RotateCW => state.rotate_cw(),
Input::RotateCCW => state.rotate_ccw(),
Input::SoftDrop => state.soft_drop(),
Input::HardDrop => state.hard_drop(),
Input::Gravity => state.gravity(),
_ => true,
};
// Spawn a new piece as needed
if state.player().is_none() {
next_piece = bag.next(state.well()).unwrap();
if state.spawn(next_piece) {
println!("Game Over!");
break;
}
}
state.clear_lines(|_| ());
clear_screen();
}
println!("Thanks for playing!");
}
| {
print!(">>> ");
std::io::stdout().flush().unwrap();
let mut action = String::new();
std::io::stdin().read_line(&mut action).unwrap();
match &*action.trim().to_uppercase() {
"" => Input::None,
"A" | "Q" | "LEFT" => Input::Left,
"D" | "RIGHT" => Input::Right,
"CW" | "RR" | "ROT" => Input::RotateCW,
"CCW" | "RL" => Input::RotateCCW,
"S" | "DOWN" | "SOFT" | "SOFT DROP" => Input::SoftDrop,
"W" | "Z" | "DROP" | "HARD DROP" => Input::HardDrop,
"G" | "GRAVITY" => Input::Gravity,
"QUIT" | "QUTI" => Input::Quit,
"H" | "HELP" => Input::Help,
_ => Input::Invalid,
}
} | identifier_body |
cli.rs | extern crate tetrs;
use std::io::prelude::*;
// FIXME! Little hack to clear the screen :)
extern "C" { fn system(s: *const u8); }
fn clear_screen() { unsafe {
system("@clear||cls\0".as_ptr());
}}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum | {
None,
Left,
Right,
RotateCW,
RotateCCW,
SoftDrop,
HardDrop,
Gravity,
Quit,
Help,
Invalid,
}
fn input() -> Input {
print!(">>> ");
std::io::stdout().flush().unwrap();
let mut action = String::new();
std::io::stdin().read_line(&mut action).unwrap();
match &*action.trim().to_uppercase() {
"" => Input::None,
"A" | "Q" | "LEFT" => Input::Left,
"D" | "RIGHT" => Input::Right,
"CW" | "RR" | "ROT" => Input::RotateCW,
"CCW" | "RL" => Input::RotateCCW,
"S" | "DOWN" | "SOFT" | "SOFT DROP" => Input::SoftDrop,
"W" | "Z" | "DROP" | "HARD DROP" => Input::HardDrop,
"G" | "GRAVITY" => Input::Gravity,
"QUIT" | "QUTI" => Input::Quit,
"H" | "HELP" => Input::Help,
_ => Input::Invalid,
}
}
fn bot(state: &mut tetrs::State) -> bool {
let weights = tetrs::Weights::default();
let bot = tetrs::PlayI::play(&weights, state.well(), *state.player().unwrap());
if bot.play.len() == 0 {
state.hard_drop();
return false;
}
let mut result = true;
for play in bot.play {
use tetrs::Play;
result &= match play {
Play::MoveLeft => state.move_left(),
Play::MoveRight => state.move_right(),
Play::RotateCW => state.rotate_cw(),
Play::RotateCCW => state.rotate_ccw(),
Play::SoftDrop => state.soft_drop(),
Play::HardDrop => state.hard_drop(),
Play::Idle => true,
};
if !result {
break;
}
}
result
}
static TILESET: [char; 32] = [
'O', 'I', 'S', 'Z', 'L', 'J', 'T', 'x',
'_', '_', '_', '_', '_', '_', '_', 'x',
'O', 'I', 'S', 'Z', 'L', 'J', 'T', '□',
'.', '_', '▒', 'x', 'x', 'x', 'x', 'x',
];
fn draw(scene: &tetrs::Scene) {
for row in 0..scene.height() {
print!("|");
let line = scene.line(row);
for &tile in line {
let tile: u8 = tile.into();
let c = TILESET[(tile >> 3) as usize];
print!("{}", c);
}
print!("|\n");
}
print!("+");
for _ in 0..scene.width() {
print!("-");
}
print!("+\n");
}
const WELCOME_MESSAGE: &'static str = "
Welcome to Adventure Tetrs!
After the playing field is shown, you will be asked for input.
>>> A, Q, LEFT
Move the piece to the left.
>>> D, RIGHT
Move the piece to the right.
>>> CW, RR, ROT
Rotate the piece clockwise.
>>> CCW, RL
Rotate the piece counter-clockwise.
>>> S, DOWN, SOFT, SOFT DROP
Soft drop, move the piece down once.
>>> W, Z, DROP, HARD DROP
Hard drop, drops the piece down and locks into place.
>>> G, GRAVITY
Apply gravity, same as a soft drop.
>>> QUIT, QUTI
Quit the game.
>>> H, HELP
Print this help message.
";
fn main() {
clear_screen();
println!("{}", WELCOME_MESSAGE);
use tetrs::Bag;
let mut state = tetrs::State::new(10, 22);
let mut bag = tetrs::OfficialBag::default();
let mut next_piece = bag.next(state.well()).unwrap();
state.spawn(next_piece);
loop {
draw(&state.scene());
// Check for pieces in the spawning area
if state.is_game_over() {
println!("Game Over!");
break;
}
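// An empty line hands the turn to the bot; any other command is applied directly to the game state.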
match input() {
Input::None => bot(&mut state),
Input::Quit => break,
Input::Left => state.move_left(),
Input::Right => state.move_right(),
Input::RotateCW => state.rotate_cw(),
Input::RotateCCW => state.rotate_ccw(),
Input::SoftDrop => state.soft_drop(),
Input::HardDrop => state.hard_drop(),
Input::Gravity => state.gravity(),
_ => true,
};
// Spawn a new piece as needed
if state.player().is_none() {
next_piece = bag.next(state.well()).unwrap();
if state.spawn(next_piece) {
println!("Game Over!");
break;
}
}
state.clear_lines(|_| ());
clear_screen();
}
println!("Thanks for playing!");
}
| Input | identifier_name |