file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
sdiv.rs
|
use num::Num;
/// Divides every element of the 3x3 matrix `a` by the scalar `s`,
/// writing the results into `out` and returning `out` for call chaining.
///
/// When `s` is zero every output element is set to `T::zero()` instead of
/// dividing, so integer element types never hit a divide-by-zero panic.
#[inline]
pub fn sdiv<'a, 'b, T: Copy + Num>(out: &'a mut [T; 9], a: &'b [T; 9], s: T) -> &'a mut [T; 9] {
    // Hoist the zero test out of the per-element work: the original code
    // re-evaluated the same branch for all nine elements.
    if s == T::zero() {
        for slot in out.iter_mut() {
            *slot = T::zero();
        }
    } else {
        for (slot, &value) in out.iter_mut().zip(a.iter()) {
            *slot = value / s;
        }
    }
    out
}
#[test]
fn test_sdiv()
|
{
let mut v = [0, 0, 0, 0, 0, 0, 0, 0, 0];
sdiv(&mut v, &[1, 0, 0, 0, 1, 0, 0, 0, 1], 1);
assert!(v == [1, 0, 0, 0, 1, 0, 0, 0, 1]);
}
|
identifier_body
|
|
sdiv.rs
|
use num::Num;
#[inline]
pub fn sdiv<'a, 'b, T: Copy + Num>(out: &'a mut [T; 9], a: &'b [T; 9], s: T) -> &'a mut [T; 9] {
let not_zero = s!= T::zero();
out[0] = if not_zero {a[0] / s} else {T::zero()};
out[1] = if not_zero {a[1] / s} else {T::zero()};
out[2] = if not_zero {a[2] / s} else {T::zero()};
out[3] = if not_zero {a[3] / s} else {T::zero()};
out[4] = if not_zero {a[4] / s} else {T::zero()};
out[5] = if not_zero {a[5] / s} else {T::zero()};
out[6] = if not_zero {a[6] / s} else {T::zero()};
out[7] = if not_zero {a[7] / s} else {T::zero()};
out[8] = if not_zero {a[8] / s} else {T::zero()};
out
}
#[test]
fn
|
() {
let mut v = [0, 0, 0, 0, 0, 0, 0, 0, 0];
sdiv(&mut v, &[1, 0, 0, 0, 1, 0, 0, 0, 1], 1);
assert!(v == [1, 0, 0, 0, 1, 0, 0, 0, 1]);
}
|
test_sdiv
|
identifier_name
|
response.rs
|
// Copyright (c) 2016
// Jeff Nettleton
//
// Licensed under the MIT license (http://opensource.org/licenses/MIT). This
// file may not be copied, modified, or distributed except according to those
// terms
use std::collections::BTreeMap;
use chrono::Utc;
use serde_json;
use serde::Serialize;
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// A trait that converts data from the handler function to a u8 slice.
///
/// Implemented for the common body types (`str`, `String`, `Vec<u8>`) so
/// handlers can hand back whichever is most convenient.
pub trait ToOutput {
    fn to_output(&self) -> &[u8];
}

impl ToOutput for str {
    fn to_output(&self) -> &[u8] {
        str::as_bytes(self)
    }
}

impl ToOutput for &'static str {
    fn to_output(&self) -> &[u8] {
        (*self).as_bytes()
    }
}

impl ToOutput for String {
    fn to_output(&self) -> &[u8] {
        self.as_str().as_bytes()
    }
}

impl ToOutput for Vec<u8> {
    fn to_output(&self) -> &[u8] {
        &self[..]
    }
}
/// This struct represents the response to an HTTP client.
#[derive(Debug, Default)]
pub struct Response {
    status: u16,                       // numeric status code, e.g. 200
    cmsg: String,                      // reason phrase matching `status`
    ctype: String,                     // Content-Type header value
    headers: BTreeMap<String, String>, // additional response headers
    payload: Vec<u8>,                  // response body bytes
}
impl Response {
/// Create a new, empty Response.
pub fn new() -> Response {
let mut res = Response {
status: 200,
cmsg: String::from("OK"),
ctype: String::from("text/plain"),
headers: BTreeMap::new(),
payload: Vec::with_capacity(2048),
};
let now = Utc::now().format("%a, %d %b %Y, %H:%M:%S %Z").to_string();
res.add_header("Connection", "close");
res.add_header("Server", &format!("canteen/{}", VERSION));
res.add_header("Date", &now);
res
}
/// Creates a Response with a JSON body
///
/// # Examples
///
/// ```rust,ignore
/// use canteen::Response;
/// user serde::Serialize;
///
/// #[derive(Serialize)]
/// struct Foo {
/// item: i32,
/// }
///
/// let foo = Foo { item: 12345 };
/// let res = Response::as_json(&foo);
/// ```
pub fn as_json<T: Serialize>(data: &T) -> Response {
let mut res = Response::new();
res.set_content_type("application/json");
res.append(serde_json::to_string(data).unwrap());
res
}
/// Gets the HTTP message for a given status.
fn get_http_message(status: u16) -> String {
let msg = match status {
100 => "Continue",
101 => "Switching Protocols",
200 => "OK",
201 => "Created",
202 => "Accepted",
203 => "Non-Authoritative Information",
204 => "No Content",
205 => "Reset Content",
206 => "Partial Content",
300 => "Multiple Choices",
301 => "Moved Permanently",
302 => "Found",
303 => "See Other",
304 => "Not Modified",
305 => "Use Proxy",
307 => "Temporary Redirect",
400 => "Bad Request",
401 => "Unauthorized",
402 => "Payment Required",
403 => "Forbidden",
404 => "Not Found",
405 => "Method Not Allowed",
406 => "Not Acceptable",
407 => "Proxy Authentication Required",
408 => "Request Time Out",
409 => "Conflict",
410 => "Gone",
411 => "Length Required",
412 => "Precondition Failed",
413 => "Request Entity Too Large",
414 => "Request-URI Too Large",
415 => "Unsupported Media Type",
416 => "Requested Range Not Satisfiable",
417 => "Expectation Failed",
500 => "Internal Server Error",
501 => "Not Implemented",
502 => "Bad Gateway",
503 => "Service Unavailable",
504 => "Gateway Time-out",
505 => "HTTP Version Not Supported",
_ => "OK",
};
String::from(msg)
}
    /// Sets the response status for the HTTP response.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use canteen::Response;
    ///
    /// let mut res = Response::new();
    /// res.set_status(200);
    /// ```
    pub fn set_status(&mut self, status: u16) {
        self.status = status;
        // Keep the reason phrase in sync with the numeric code.
        self.cmsg = Response::get_http_message(status);
    }
/// Sets the Content-Type header for the HTTP response.
///
/// # Examples
///
/// ```rust
/// use canteen::Response;
///
/// let mut res = Response::new();
/// res.set_content_type("text/html");
/// ```
pub fn
|
(&mut self, ctype: &str) {
self.ctype = String::from(ctype);
}
/// Adds a header to the HTTP response.
///
/// # Examples
///
/// ```rust
/// use canteen::Response;
///
/// let mut res = Response::new();
/// res.add_header("Content-Type", "text/html");
/// ```
pub fn add_header(&mut self, key: &str, value: &str) {
if!self.headers.contains_key(key) {
self.headers.insert(String::from(key), String::from(value));
}
}
    /// Appends data to the body of the HTTP response. The trait ToOutput must
    /// be implemented for the type passed.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use canteen::Response;
    ///
    /// let mut res = Response::new();
    /// let data = "{ message: \"Hello, world!\" }";
    /// res.append(data);
    /// ```
    pub fn append<T: ToOutput>(&mut self, payload: T) {
        // Copies the byte view of `payload` onto the end of the body buffer.
        self.payload.extend(payload.to_output().iter());
    }
/// Returns a byte array containing the full contents of the HTTP response,
/// for use by the Canteen struct.
pub fn gen_output(&self) -> Vec<u8> {
let mut output: Vec<u8> = Vec::with_capacity(self.payload.len() + 500);
let mut inter = String::new();
inter.push_str(&format!("HTTP/1.1 {} {}\r\n", self.status, self.cmsg));
for (key, value) in &self.headers {
inter.push_str(&format!("{}: {}\r\n", key, value));
}
inter.push_str(&format!("Content-Type: {}\r\n", self.ctype));
inter.push_str(&format!("Content-Length: {}\r\n", self.payload.len()));
inter.push_str("\r\n");
output.extend(inter.as_bytes());
output.extend(self.payload.iter());
output
}
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Serialize)]
struct Foo {
item: i32,
}
#[test]
fn test_response_as_json() {
let foo = Foo { item: 12345 };
let res_j = Response::as_json(&foo);
let mut res_r = Response::new();
res_r.set_content_type("application/json");
res_r.append(serde_json::to_string(&foo).unwrap());
assert_eq!(res_r.gen_output(), res_j.gen_output());
}
#[test]
fn test_response_http_message() {
assert_eq!("OK", Response::get_http_message(200));
}
#[test]
fn test_tooutput_trait_static_str() {
let ar: [u8; 3] = [97, 98, 99];
assert_eq!(ar, "abc".to_output());
}
#[test]
fn test_tooutput_trait_str() {
let ar: [u8; 3] = [97, 98, 99];
let st = "abc";
assert_eq!(ar, st.to_output());
}
#[test]
fn test_tooutput_trait_string() {
let ar: [u8; 3] = [97, 98, 99];
let st = String::from("abc");
assert_eq!(ar, st.to_output());
}
#[test]
fn test_tooutput_trait_vec() {
let ar: [u8; 5] = [1, 2, 3, 4, 5];
let vc: Vec<u8> = vec![1, 2, 3, 4, 5];
assert_eq!(ar, vc.to_output());
}
}
|
set_content_type
|
identifier_name
|
response.rs
|
// Copyright (c) 2016
// Jeff Nettleton
//
// Licensed under the MIT license (http://opensource.org/licenses/MIT). This
// file may not be copied, modified, or distributed except according to those
// terms
use std::collections::BTreeMap;
use chrono::Utc;
use serde_json;
use serde::Serialize;
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// A trait that converts data from the handler function to a u8 slice.
pub trait ToOutput {
fn to_output(&self) -> &[u8];
}
impl ToOutput for str {
fn to_output(&self) -> &[u8] {
self.as_bytes()
}
}
impl ToOutput for &'static str {
fn to_output(&self) -> &[u8] {
self.as_bytes()
}
}
impl ToOutput for String {
fn to_output(&self) -> &[u8] {
self.as_bytes()
}
}
impl ToOutput for Vec<u8> {
fn to_output(&self) -> &[u8] {
self.as_slice()
}
}
/// This struct represents the response to an HTTP client.
#[derive(Debug, Default)]
pub struct Response {
status: u16,
cmsg: String,
ctype: String,
headers: BTreeMap<String, String>,
payload: Vec<u8>,
}
impl Response {
/// Create a new, empty Response.
pub fn new() -> Response {
let mut res = Response {
status: 200,
cmsg: String::from("OK"),
ctype: String::from("text/plain"),
headers: BTreeMap::new(),
payload: Vec::with_capacity(2048),
};
let now = Utc::now().format("%a, %d %b %Y, %H:%M:%S %Z").to_string();
res.add_header("Connection", "close");
res.add_header("Server", &format!("canteen/{}", VERSION));
res.add_header("Date", &now);
res
}
/// Creates a Response with a JSON body
///
/// # Examples
///
/// ```rust,ignore
/// use canteen::Response;
/// use serde::Serialize;
///
/// #[derive(Serialize)]
/// struct Foo {
/// item: i32,
/// }
///
/// let foo = Foo { item: 12345 };
/// let res = Response::as_json(&foo);
/// ```
pub fn as_json<T: Serialize>(data: &T) -> Response {
let mut res = Response::new();
res.set_content_type("application/json");
res.append(serde_json::to_string(data).unwrap());
res
}
/// Gets the HTTP message for a given status.
fn get_http_message(status: u16) -> String {
let msg = match status {
100 => "Continue",
101 => "Switching Protocols",
200 => "OK",
201 => "Created",
202 => "Accepted",
203 => "Non-Authoritative Information",
204 => "No Content",
205 => "Reset Content",
206 => "Partial Content",
300 => "Multiple Choices",
301 => "Moved Permanently",
302 => "Found",
303 => "See Other",
304 => "Not Modified",
305 => "Use Proxy",
307 => "Temporary Redirect",
400 => "Bad Request",
401 => "Unauthorized",
402 => "Payment Required",
403 => "Forbidden",
404 => "Not Found",
405 => "Method Not Allowed",
406 => "Not Acceptable",
407 => "Proxy Authentication Required",
408 => "Request Time Out",
409 => "Conflict",
410 => "Gone",
411 => "Length Required",
412 => "Precondition Failed",
413 => "Request Entity Too Large",
414 => "Request-URI Too Large",
415 => "Unsupported Media Type",
416 => "Requested Range Not Satisfiable",
417 => "Expectation Failed",
500 => "Internal Server Error",
501 => "Not Implemented",
502 => "Bad Gateway",
503 => "Service Unavailable",
504 => "Gateway Time-out",
505 => "HTTP Version Not Supported",
_ => "OK",
};
String::from(msg)
}
/// Sets the response status for the HTTP response.
///
/// # Examples
///
/// ```rust
/// use canteen::Response;
///
/// let mut res = Response::new();
/// res.set_status(200);
/// ```
pub fn set_status(&mut self, status: u16) {
self.status = status;
self.cmsg = Response::get_http_message(status);
}
/// Sets the Content-Type header for the HTTP response.
///
/// # Examples
///
/// ```rust
/// use canteen::Response;
///
/// let mut res = Response::new();
/// res.set_content_type("text/html");
/// ```
pub fn set_content_type(&mut self, ctype: &str)
|
/// Adds a header to the HTTP response.
///
/// # Examples
///
/// ```rust
/// use canteen::Response;
///
/// let mut res = Response::new();
/// res.add_header("Content-Type", "text/html");
/// ```
pub fn add_header(&mut self, key: &str, value: &str) {
if!self.headers.contains_key(key) {
self.headers.insert(String::from(key), String::from(value));
}
}
/// Appends data to the body of the HTTP response. The trait ToOutput must
/// be implemented for the type passed.
///
/// # Examples
///
/// ```rust
/// use canteen::Response;
///
/// let mut res = Response::new();
/// let data = "{ message: \"Hello, world!\" }";
/// res.append(data);
/// ```
pub fn append<T: ToOutput>(&mut self, payload: T) {
self.payload.extend(payload.to_output().iter());
}
/// Returns a byte array containing the full contents of the HTTP response,
/// for use by the Canteen struct.
pub fn gen_output(&self) -> Vec<u8> {
let mut output: Vec<u8> = Vec::with_capacity(self.payload.len() + 500);
let mut inter = String::new();
inter.push_str(&format!("HTTP/1.1 {} {}\r\n", self.status, self.cmsg));
for (key, value) in &self.headers {
inter.push_str(&format!("{}: {}\r\n", key, value));
}
inter.push_str(&format!("Content-Type: {}\r\n", self.ctype));
inter.push_str(&format!("Content-Length: {}\r\n", self.payload.len()));
inter.push_str("\r\n");
output.extend(inter.as_bytes());
output.extend(self.payload.iter());
output
}
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Serialize)]
struct Foo {
item: i32,
}
#[test]
fn test_response_as_json() {
let foo = Foo { item: 12345 };
let res_j = Response::as_json(&foo);
let mut res_r = Response::new();
res_r.set_content_type("application/json");
res_r.append(serde_json::to_string(&foo).unwrap());
assert_eq!(res_r.gen_output(), res_j.gen_output());
}
#[test]
fn test_response_http_message() {
assert_eq!("OK", Response::get_http_message(200));
}
#[test]
fn test_tooutput_trait_static_str() {
let ar: [u8; 3] = [97, 98, 99];
assert_eq!(ar, "abc".to_output());
}
#[test]
fn test_tooutput_trait_str() {
let ar: [u8; 3] = [97, 98, 99];
let st = "abc";
assert_eq!(ar, st.to_output());
}
#[test]
fn test_tooutput_trait_string() {
let ar: [u8; 3] = [97, 98, 99];
let st = String::from("abc");
assert_eq!(ar, st.to_output());
}
#[test]
fn test_tooutput_trait_vec() {
let ar: [u8; 5] = [1, 2, 3, 4, 5];
let vc: Vec<u8> = vec![1, 2, 3, 4, 5];
assert_eq!(ar, vc.to_output());
}
}
|
{
self.ctype = String::from(ctype);
}
|
identifier_body
|
response.rs
|
// Copyright (c) 2016
// Jeff Nettleton
//
// Licensed under the MIT license (http://opensource.org/licenses/MIT). This
// file may not be copied, modified, or distributed except according to those
// terms
use std::collections::BTreeMap;
use chrono::Utc;
use serde_json;
use serde::Serialize;
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// A trait that converts data from the handler function to a u8 slice.
pub trait ToOutput {
fn to_output(&self) -> &[u8];
}
impl ToOutput for str {
fn to_output(&self) -> &[u8] {
self.as_bytes()
}
}
impl ToOutput for &'static str {
fn to_output(&self) -> &[u8] {
self.as_bytes()
}
}
impl ToOutput for String {
fn to_output(&self) -> &[u8] {
self.as_bytes()
}
}
impl ToOutput for Vec<u8> {
fn to_output(&self) -> &[u8] {
self.as_slice()
}
|
status: u16,
cmsg: String,
ctype: String,
headers: BTreeMap<String, String>,
payload: Vec<u8>,
}
impl Response {
/// Create a new, empty Response.
pub fn new() -> Response {
let mut res = Response {
status: 200,
cmsg: String::from("OK"),
ctype: String::from("text/plain"),
headers: BTreeMap::new(),
payload: Vec::with_capacity(2048),
};
let now = Utc::now().format("%a, %d %b %Y, %H:%M:%S %Z").to_string();
res.add_header("Connection", "close");
res.add_header("Server", &format!("canteen/{}", VERSION));
res.add_header("Date", &now);
res
}
/// Creates a Response with a JSON body
///
/// # Examples
///
/// ```rust,ignore
/// use canteen::Response;
/// use serde::Serialize;
///
/// #[derive(Serialize)]
/// struct Foo {
/// item: i32,
/// }
///
/// let foo = Foo { item: 12345 };
/// let res = Response::as_json(&foo);
/// ```
pub fn as_json<T: Serialize>(data: &T) -> Response {
let mut res = Response::new();
res.set_content_type("application/json");
res.append(serde_json::to_string(data).unwrap());
res
}
/// Gets the HTTP message for a given status.
fn get_http_message(status: u16) -> String {
let msg = match status {
100 => "Continue",
101 => "Switching Protocols",
200 => "OK",
201 => "Created",
202 => "Accepted",
203 => "Non-Authoritative Information",
204 => "No Content",
205 => "Reset Content",
206 => "Partial Content",
300 => "Multiple Choices",
301 => "Moved Permanently",
302 => "Found",
303 => "See Other",
304 => "Not Modified",
305 => "Use Proxy",
307 => "Temporary Redirect",
400 => "Bad Request",
401 => "Unauthorized",
402 => "Payment Required",
403 => "Forbidden",
404 => "Not Found",
405 => "Method Not Allowed",
406 => "Not Acceptable",
407 => "Proxy Authentication Required",
408 => "Request Time Out",
409 => "Conflict",
410 => "Gone",
411 => "Length Required",
412 => "Precondition Failed",
413 => "Request Entity Too Large",
414 => "Request-URI Too Large",
415 => "Unsupported Media Type",
416 => "Requested Range Not Satisfiable",
417 => "Expectation Failed",
500 => "Internal Server Error",
501 => "Not Implemented",
502 => "Bad Gateway",
503 => "Service Unavailable",
504 => "Gateway Time-out",
505 => "HTTP Version Not Supported",
_ => "OK",
};
String::from(msg)
}
/// Sets the response status for the HTTP response.
///
/// # Examples
///
/// ```rust
/// use canteen::Response;
///
/// let mut res = Response::new();
/// res.set_status(200);
/// ```
pub fn set_status(&mut self, status: u16) {
self.status = status;
self.cmsg = Response::get_http_message(status);
}
    /// Sets the Content-Type header for the HTTP response.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use canteen::Response;
    ///
    /// let mut res = Response::new();
    /// res.set_content_type("text/html");
    /// ```
    pub fn set_content_type(&mut self, ctype: &str) {
        // Stored separately from `headers`; gen_output() emits it.
        self.ctype = String::from(ctype);
    }
/// Adds a header to the HTTP response.
///
/// # Examples
///
/// ```rust
/// use canteen::Response;
///
/// let mut res = Response::new();
/// res.add_header("Content-Type", "text/html");
/// ```
pub fn add_header(&mut self, key: &str, value: &str) {
if!self.headers.contains_key(key) {
self.headers.insert(String::from(key), String::from(value));
}
}
/// Appends data to the body of the HTTP response. The trait ToOutput must
/// be implemented for the type passed.
///
/// # Examples
///
/// ```rust
/// use canteen::Response;
///
/// let mut res = Response::new();
/// let data = "{ message: \"Hello, world!\" }";
/// res.append(data);
/// ```
pub fn append<T: ToOutput>(&mut self, payload: T) {
self.payload.extend(payload.to_output().iter());
}
/// Returns a byte array containing the full contents of the HTTP response,
/// for use by the Canteen struct.
pub fn gen_output(&self) -> Vec<u8> {
let mut output: Vec<u8> = Vec::with_capacity(self.payload.len() + 500);
let mut inter = String::new();
inter.push_str(&format!("HTTP/1.1 {} {}\r\n", self.status, self.cmsg));
for (key, value) in &self.headers {
inter.push_str(&format!("{}: {}\r\n", key, value));
}
inter.push_str(&format!("Content-Type: {}\r\n", self.ctype));
inter.push_str(&format!("Content-Length: {}\r\n", self.payload.len()));
inter.push_str("\r\n");
output.extend(inter.as_bytes());
output.extend(self.payload.iter());
output
}
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Serialize)]
struct Foo {
item: i32,
}
#[test]
fn test_response_as_json() {
let foo = Foo { item: 12345 };
let res_j = Response::as_json(&foo);
let mut res_r = Response::new();
res_r.set_content_type("application/json");
res_r.append(serde_json::to_string(&foo).unwrap());
assert_eq!(res_r.gen_output(), res_j.gen_output());
}
#[test]
fn test_response_http_message() {
assert_eq!("OK", Response::get_http_message(200));
}
#[test]
fn test_tooutput_trait_static_str() {
let ar: [u8; 3] = [97, 98, 99];
assert_eq!(ar, "abc".to_output());
}
#[test]
fn test_tooutput_trait_str() {
let ar: [u8; 3] = [97, 98, 99];
let st = "abc";
assert_eq!(ar, st.to_output());
}
#[test]
fn test_tooutput_trait_string() {
let ar: [u8; 3] = [97, 98, 99];
let st = String::from("abc");
assert_eq!(ar, st.to_output());
}
#[test]
fn test_tooutput_trait_vec() {
let ar: [u8; 5] = [1, 2, 3, 4, 5];
let vc: Vec<u8> = vec![1, 2, 3, 4, 5];
assert_eq!(ar, vc.to_output());
}
}
|
}
/// This struct represents the response to an HTTP client.
#[derive(Debug, Default)]
pub struct Response {
|
random_line_split
|
lock.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::fs;
use std::fs::File;
use std::io;
use std::path::Path;
use fs2::FileExt;
/// RAII lock on a filesystem path.
pub struct PathLock {
    file: File, // handle to the lock file; the OS lock is held until drop
}
impl PathLock {
    /// Take an exclusive lock on `path`. The lock file will be created on
    /// demand.
    ///
    /// Blocks until the OS-level exclusive lock is acquired; the lock is
    /// released when the returned guard is dropped.
    pub fn exclusive(path: &Path) -> io::Result<Self> {
        let file = fs::OpenOptions::new()
            .write(true)
            .create(true)
            // Explicitly never truncate: other processes may be holding or
            // inspecting the lock file. (false is the default; spelled out
            // to make the intent clear.)
            .truncate(false)
            .open(path)?; // was `.open(&path)` — needless extra borrow
        file.lock_exclusive()?;
        Ok(PathLock { file })
    }
}
impl Drop for PathLock {
fn
|
(&mut self) {
self.file.unlock().expect("unlock");
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::mpsc::channel;
    use std::thread;

    /// N threads each take the path lock and send their id twice; because
    /// both sends happen while the lock is held, the receiver must observe
    /// the two copies of every id back to back.
    #[test]
    fn test_path_lock() -> io::Result<()> {
        let dir = tempfile::tempdir()?;
        let path = dir.path().join("a");
        let (tx, rx) = channel();
        const N: usize = 50;
        let threads: Vec<_> = (0..N)
            .map(|i| {
                let path = path.clone();
                let tx = tx.clone();
                thread::spawn(move || {
                    // Write 2 values that are the same, protected by the lock.
                    // The original ignored this Result, so a failed lock
                    // silently removed the mutual exclusion being tested.
                    let _locked = PathLock::exclusive(&path).expect("lock");
                    tx.send(i).unwrap();
                    tx.send(i).unwrap();
                })
            })
            .collect();
        for thread in threads {
            thread.join().expect("joined");
        }
        for _ in 0..N {
            // Read 2 values. They should be the same.
            let v1 = rx.recv().unwrap();
            let v2 = rx.recv().unwrap();
            assert_eq!(v1, v2);
        }
        Ok(())
    }
}
|
drop
|
identifier_name
|
lock.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::fs;
use std::fs::File;
use std::io;
use std::path::Path;
use fs2::FileExt;
/// RAII lock on a filesystem path.
pub struct PathLock {
file: File,
}
impl PathLock {
/// Take an exclusive lock on `path`. The lock file will be created on
/// demand.
pub fn exclusive(path: &Path) -> io::Result<Self> {
let file = fs::OpenOptions::new()
.write(true)
.create(true)
.open(&path)?;
|
Ok(PathLock { file })
}
}
impl Drop for PathLock {
    fn drop(&mut self) {
        // Release the OS-level lock when the guard goes out of scope.
        // NOTE(review): a failed unlock panics here; panicking in drop
        // aborts the process if a panic is already unwinding — confirm
        // this is acceptable for callers.
        self.file.unlock().expect("unlock");
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::mpsc::channel;
use std::thread;
#[test]
fn test_path_lock() -> io::Result<()> {
let dir = tempfile::tempdir()?;
let path = dir.path().join("a");
let (tx, rx) = channel();
const N: usize = 50;
let threads: Vec<_> = (0..N)
.map(|i| {
let path = path.clone();
let tx = tx.clone();
thread::spawn(move || {
// Write 2 values that are the same, protected by the lock.
let _locked = PathLock::exclusive(&path);
tx.send(i).unwrap();
tx.send(i).unwrap();
})
})
.collect();
for thread in threads {
thread.join().expect("joined");
}
for _ in 0..N {
// Read 2 values. They should be the same.
let v1 = rx.recv().unwrap();
let v2 = rx.recv().unwrap();
assert_eq!(v1, v2);
}
Ok(())
}
}
|
file.lock_exclusive()?;
|
random_line_split
|
check_static_recursion.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This compiler pass detects static items that refer to themselves
// recursively.
use session::Session;
use middle::def::{DefStatic, DefConst, DefAssociatedConst, DefMap};
use syntax::ast;
use syntax::{ast_util, ast_map};
use syntax::codemap::Span;
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'ast: 'a> {
sess: &'a Session,
def_map: &'a DefMap,
ast_map: &'a ast_map::Map<'ast>
}
impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
    // For each static or const item, run a CheckItemRecursionVisitor over
    // it to detect self-reference, then continue walking the initializer
    // expression with this visitor as usual.
    fn visit_item(&mut self, it: &ast::Item) {
        match it.node {
            ast::ItemStatic(_, _, ref expr) |
            ast::ItemConst(_, ref expr) => {
                let mut recursion_visitor =
                    CheckItemRecursionVisitor::new(self, &it.span);
                recursion_visitor.visit_item(it);
                visit::walk_expr(self, &*expr)
            },
            _ => visit::walk_item(self, it)
        }
    }
    // Associated consts in traits are only checked when they carry a
    // default value — without one there is no initializer to recurse into.
    fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
        match ti.node {
            ast::ConstTraitItem(_, ref default) => {
                if let Some(ref expr) = *default {
                    let mut recursion_visitor =
                        CheckItemRecursionVisitor::new(self, &ti.span);
                    recursion_visitor.visit_trait_item(ti);
                    visit::walk_expr(self, &*expr)
                }
            }
            _ => visit::walk_trait_item(self, ti)
        }
    }
    // Same treatment for associated consts defined in impl blocks.
    fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
        match ii.node {
            ast::ConstImplItem(_, ref expr) => {
                let mut recursion_visitor =
                    CheckItemRecursionVisitor::new(self, &ii.span);
                recursion_visitor.visit_impl_item(ii);
                visit::walk_expr(self, &*expr)
            }
            _ => visit::walk_impl_item(self, ii)
        }
    }
}
/// Entry point for the pass: walks the whole crate looking for statics and
/// consts that refer to themselves recursively, then aborts compilation if
/// any error was emitted.
pub fn check_crate<'ast>(sess: &Session,
                         krate: &ast::Crate,
                         def_map: &DefMap,
                         ast_map: &ast_map::Map<'ast>) {
    let mut visitor = CheckCrateVisitor {
        sess: sess,
        def_map: def_map,
        ast_map: ast_map
    };
    visit::walk_crate(&mut visitor, krate);
    sess.abort_if_errors();
}
struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
root_span: &'a Span,
sess: &'a Session,
ast_map: &'a ast_map::Map<'ast>,
def_map: &'a DefMap,
idstack: Vec<ast::NodeId>
}
impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> {
    // Builds a recursion checker rooted at `span` (the item under check),
    // borrowing the session and maps from the crate-level visitor.
    fn new(v: &CheckCrateVisitor<'a, 'ast>, span: &'a Span)
           -> CheckItemRecursionVisitor<'a, 'ast> {
        CheckItemRecursionVisitor {
            root_span: span,
            sess: v.sess,
            ast_map: v.ast_map,
            def_map: v.def_map,
            idstack: Vec::new()
        }
    }
    // Cycle detection: if `id` is already on the stack, a chain of
    // references has led back to an item still being visited, so emit
    // E0265 and stop descending; otherwise push `id`, recurse via `f`,
    // and pop on the way out.
    fn with_item_id_pushed<F>(&mut self, id: ast::NodeId, f: F)
        where F: Fn(&mut Self) {
        if self.idstack.iter().any(|x| x == &(id)) {
            span_err!(self.sess, *self.root_span, E0265, "recursive constant");
            return;
        }
        self.idstack.push(id);
        f(self);
        self.idstack.pop();
    }
}
impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
self.with_item_id_pushed(it.id, |v| visit::walk_item(v, it));
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.with_item_id_pushed(ti.id, |v| visit::walk_trait_item(v, ti));
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.with_item_id_pushed(ii.id, |v| visit::walk_impl_item(v, ii));
}
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
ast::ExprPath(..) => {
match self.def_map.borrow().get(&e.id).map(|d| d.base_def) {
Some(DefStatic(def_id, _)) |
|
ast_util::is_local(def_id) => {
match self.ast_map.get(def_id.node) {
ast_map::NodeItem(item) =>
self.visit_item(item),
ast_map::NodeTraitItem(item) =>
self.visit_trait_item(item),
ast_map::NodeImplItem(item) =>
self.visit_impl_item(item),
ast_map::NodeForeignItem(_) => {},
_ => {
span_err!(self.sess, e.span, E0266,
"expected item, found {}",
self.ast_map.node_to_string(def_id.node));
return;
},
}
}
_ => ()
}
},
_ => ()
}
visit::walk_expr(self, e);
}
}
|
Some(DefAssociatedConst(def_id, _)) |
Some(DefConst(def_id)) if
|
random_line_split
|
check_static_recursion.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This compiler pass detects static items that refer to themselves
// recursively.
use session::Session;
use middle::def::{DefStatic, DefConst, DefAssociatedConst, DefMap};
use syntax::ast;
use syntax::{ast_util, ast_map};
use syntax::codemap::Span;
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'ast: 'a> {
sess: &'a Session,
def_map: &'a DefMap,
ast_map: &'a ast_map::Map<'ast>
}
impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
match it.node {
ast::ItemStatic(_, _, ref expr) |
ast::ItemConst(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &it.span);
recursion_visitor.visit_item(it);
visit::walk_expr(self, &*expr)
},
_ => visit::walk_item(self, it)
}
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
match ti.node {
ast::ConstTraitItem(_, ref default) => {
if let Some(ref expr) = *default {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ti.span);
recursion_visitor.visit_trait_item(ti);
visit::walk_expr(self, &*expr)
}
}
_ => visit::walk_trait_item(self, ti)
}
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
match ii.node {
ast::ConstImplItem(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ii.span);
recursion_visitor.visit_impl_item(ii);
visit::walk_expr(self, &*expr)
}
_ => visit::walk_impl_item(self, ii)
}
}
}
pub fn check_crate<'ast>(sess: &Session,
krate: &ast::Crate,
def_map: &DefMap,
ast_map: &ast_map::Map<'ast>) {
let mut visitor = CheckCrateVisitor {
sess: sess,
def_map: def_map,
ast_map: ast_map
};
visit::walk_crate(&mut visitor, krate);
sess.abort_if_errors();
}
struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
root_span: &'a Span,
sess: &'a Session,
ast_map: &'a ast_map::Map<'ast>,
def_map: &'a DefMap,
idstack: Vec<ast::NodeId>
}
impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> {
fn
|
(v: &CheckCrateVisitor<'a, 'ast>, span: &'a Span)
-> CheckItemRecursionVisitor<'a, 'ast> {
CheckItemRecursionVisitor {
root_span: span,
sess: v.sess,
ast_map: v.ast_map,
def_map: v.def_map,
idstack: Vec::new()
}
}
fn with_item_id_pushed<F>(&mut self, id: ast::NodeId, f: F)
where F: Fn(&mut Self) {
if self.idstack.iter().any(|x| x == &(id)) {
span_err!(self.sess, *self.root_span, E0265, "recursive constant");
return;
}
self.idstack.push(id);
f(self);
self.idstack.pop();
}
}
impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
self.with_item_id_pushed(it.id, |v| visit::walk_item(v, it));
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.with_item_id_pushed(ti.id, |v| visit::walk_trait_item(v, ti));
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.with_item_id_pushed(ii.id, |v| visit::walk_impl_item(v, ii));
}
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
ast::ExprPath(..) => {
match self.def_map.borrow().get(&e.id).map(|d| d.base_def) {
Some(DefStatic(def_id, _)) |
Some(DefAssociatedConst(def_id, _)) |
Some(DefConst(def_id)) if
ast_util::is_local(def_id) => {
match self.ast_map.get(def_id.node) {
ast_map::NodeItem(item) =>
self.visit_item(item),
ast_map::NodeTraitItem(item) =>
self.visit_trait_item(item),
ast_map::NodeImplItem(item) =>
self.visit_impl_item(item),
ast_map::NodeForeignItem(_) => {},
_ => {
span_err!(self.sess, e.span, E0266,
"expected item, found {}",
self.ast_map.node_to_string(def_id.node));
return;
},
}
}
_ => ()
}
},
_ => ()
}
visit::walk_expr(self, e);
}
}
|
new
|
identifier_name
|
check_static_recursion.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This compiler pass detects static items that refer to themselves
// recursively.
use session::Session;
use middle::def::{DefStatic, DefConst, DefAssociatedConst, DefMap};
use syntax::ast;
use syntax::{ast_util, ast_map};
use syntax::codemap::Span;
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'ast: 'a> {
sess: &'a Session,
def_map: &'a DefMap,
ast_map: &'a ast_map::Map<'ast>
}
impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
match it.node {
ast::ItemStatic(_, _, ref expr) |
ast::ItemConst(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &it.span);
recursion_visitor.visit_item(it);
visit::walk_expr(self, &*expr)
},
_ => visit::walk_item(self, it)
}
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
match ti.node {
ast::ConstTraitItem(_, ref default) => {
if let Some(ref expr) = *default {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ti.span);
recursion_visitor.visit_trait_item(ti);
visit::walk_expr(self, &*expr)
}
}
_ => visit::walk_trait_item(self, ti)
}
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
match ii.node {
ast::ConstImplItem(_, ref expr) => {
let mut recursion_visitor =
CheckItemRecursionVisitor::new(self, &ii.span);
recursion_visitor.visit_impl_item(ii);
visit::walk_expr(self, &*expr)
}
_ => visit::walk_impl_item(self, ii)
}
}
}
pub fn check_crate<'ast>(sess: &Session,
krate: &ast::Crate,
def_map: &DefMap,
ast_map: &ast_map::Map<'ast>)
|
struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
root_span: &'a Span,
sess: &'a Session,
ast_map: &'a ast_map::Map<'ast>,
def_map: &'a DefMap,
idstack: Vec<ast::NodeId>
}
impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> {
fn new(v: &CheckCrateVisitor<'a, 'ast>, span: &'a Span)
-> CheckItemRecursionVisitor<'a, 'ast> {
CheckItemRecursionVisitor {
root_span: span,
sess: v.sess,
ast_map: v.ast_map,
def_map: v.def_map,
idstack: Vec::new()
}
}
fn with_item_id_pushed<F>(&mut self, id: ast::NodeId, f: F)
where F: Fn(&mut Self) {
if self.idstack.iter().any(|x| x == &(id)) {
span_err!(self.sess, *self.root_span, E0265, "recursive constant");
return;
}
self.idstack.push(id);
f(self);
self.idstack.pop();
}
}
impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &ast::Item) {
self.with_item_id_pushed(it.id, |v| visit::walk_item(v, it));
}
fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
self.with_item_id_pushed(ti.id, |v| visit::walk_trait_item(v, ti));
}
fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
self.with_item_id_pushed(ii.id, |v| visit::walk_impl_item(v, ii));
}
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
ast::ExprPath(..) => {
match self.def_map.borrow().get(&e.id).map(|d| d.base_def) {
Some(DefStatic(def_id, _)) |
Some(DefAssociatedConst(def_id, _)) |
Some(DefConst(def_id)) if
ast_util::is_local(def_id) => {
match self.ast_map.get(def_id.node) {
ast_map::NodeItem(item) =>
self.visit_item(item),
ast_map::NodeTraitItem(item) =>
self.visit_trait_item(item),
ast_map::NodeImplItem(item) =>
self.visit_impl_item(item),
ast_map::NodeForeignItem(_) => {},
_ => {
span_err!(self.sess, e.span, E0266,
"expected item, found {}",
self.ast_map.node_to_string(def_id.node));
return;
},
}
}
_ => ()
}
},
_ => ()
}
visit::walk_expr(self, e);
}
}
|
{
let mut visitor = CheckCrateVisitor {
sess: sess,
def_map: def_map,
ast_map: ast_map
};
visit::walk_crate(&mut visitor, krate);
sess.abort_if_errors();
}
|
identifier_body
|
linux.rs
|
use crate::{error::{Error,
Result},
os::system::Uname};
use errno::errno;
use std::{ffi::CStr,
mem};
pub fn uname() -> Result<Uname> { unsafe { uname_libc() } }
unsafe fn uname_libc() -> Result<Uname> {
let mut utsname = mem::MaybeUninit::uninit();
let rv = libc::uname(utsname.as_mut_ptr());
let utsname = utsname.assume_init();
if rv < 0 {
let errno = errno();
|
code, errno)));
}
Ok(Uname { sys_name: CStr::from_ptr(utsname.sysname.as_ptr()).to_string_lossy()
.into_owned(),
node_name: CStr::from_ptr(utsname.nodename.as_ptr()).to_string_lossy()
.into_owned(),
release: CStr::from_ptr(utsname.release.as_ptr()).to_string_lossy()
.into_owned(),
version: CStr::from_ptr(utsname.version.as_ptr()).to_string_lossy()
.into_owned(),
machine: CStr::from_ptr(utsname.machine.as_ptr()).to_string_lossy()
.into_owned(), })
}
|
let code = errno.0 as i32;
return Err(Error::UnameFailed(format!("Error {} when calling uname: \
{}",
|
random_line_split
|
linux.rs
|
use crate::{error::{Error,
Result},
os::system::Uname};
use errno::errno;
use std::{ffi::CStr,
mem};
pub fn uname() -> Result<Uname> { unsafe { uname_libc() } }
unsafe fn
|
() -> Result<Uname> {
let mut utsname = mem::MaybeUninit::uninit();
let rv = libc::uname(utsname.as_mut_ptr());
let utsname = utsname.assume_init();
if rv < 0 {
let errno = errno();
let code = errno.0 as i32;
return Err(Error::UnameFailed(format!("Error {} when calling uname: \
{}",
code, errno)));
}
Ok(Uname { sys_name: CStr::from_ptr(utsname.sysname.as_ptr()).to_string_lossy()
.into_owned(),
node_name: CStr::from_ptr(utsname.nodename.as_ptr()).to_string_lossy()
.into_owned(),
release: CStr::from_ptr(utsname.release.as_ptr()).to_string_lossy()
.into_owned(),
version: CStr::from_ptr(utsname.version.as_ptr()).to_string_lossy()
.into_owned(),
machine: CStr::from_ptr(utsname.machine.as_ptr()).to_string_lossy()
.into_owned(), })
}
|
uname_libc
|
identifier_name
|
linux.rs
|
use crate::{error::{Error,
Result},
os::system::Uname};
use errno::errno;
use std::{ffi::CStr,
mem};
pub fn uname() -> Result<Uname>
|
unsafe fn uname_libc() -> Result<Uname> {
let mut utsname = mem::MaybeUninit::uninit();
let rv = libc::uname(utsname.as_mut_ptr());
let utsname = utsname.assume_init();
if rv < 0 {
let errno = errno();
let code = errno.0 as i32;
return Err(Error::UnameFailed(format!("Error {} when calling uname: \
{}",
code, errno)));
}
Ok(Uname { sys_name: CStr::from_ptr(utsname.sysname.as_ptr()).to_string_lossy()
.into_owned(),
node_name: CStr::from_ptr(utsname.nodename.as_ptr()).to_string_lossy()
.into_owned(),
release: CStr::from_ptr(utsname.release.as_ptr()).to_string_lossy()
.into_owned(),
version: CStr::from_ptr(utsname.version.as_ptr()).to_string_lossy()
.into_owned(),
machine: CStr::from_ptr(utsname.machine.as_ptr()).to_string_lossy()
.into_owned(), })
}
|
{ unsafe { uname_libc() } }
|
identifier_body
|
linux.rs
|
use crate::{error::{Error,
Result},
os::system::Uname};
use errno::errno;
use std::{ffi::CStr,
mem};
pub fn uname() -> Result<Uname> { unsafe { uname_libc() } }
unsafe fn uname_libc() -> Result<Uname> {
let mut utsname = mem::MaybeUninit::uninit();
let rv = libc::uname(utsname.as_mut_ptr());
let utsname = utsname.assume_init();
if rv < 0
|
Ok(Uname { sys_name: CStr::from_ptr(utsname.sysname.as_ptr()).to_string_lossy()
.into_owned(),
node_name: CStr::from_ptr(utsname.nodename.as_ptr()).to_string_lossy()
.into_owned(),
release: CStr::from_ptr(utsname.release.as_ptr()).to_string_lossy()
.into_owned(),
version: CStr::from_ptr(utsname.version.as_ptr()).to_string_lossy()
.into_owned(),
machine: CStr::from_ptr(utsname.machine.as_ptr()).to_string_lossy()
.into_owned(), })
}
|
{
let errno = errno();
let code = errno.0 as i32;
return Err(Error::UnameFailed(format!("Error {} when calling uname: \
{}",
code, errno)));
}
|
conditional_block
|
regex.rs
|
// Copyright 2015-2016 Joe Neeman.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use error::Error;
use nfa::{Nfa, NoLooks};
use runner::anchored::AnchoredEngine;
use runner::forward_backward::{ForwardBackwardEngine, Prefix};
use runner::Engine;
use std;
use std::fmt::Debug;
#[derive(Debug)]
pub struct Regex {
engine: Box<Engine<u8>>,
}
// An engine that doesn't match anything.
#[derive(Clone, Debug)]
struct EmptyEngine;
impl<Ret: Debug> Engine<Ret> for EmptyEngine {
fn find(&self, _: &str) -> Option<(usize, usize, Ret)> { None }
fn clone_box(&self) -> Box<Engine<Ret>> { Box::new(EmptyEngine) }
}
impl Clone for Regex {
fn clone(&self) -> Regex {
Regex {
engine: self.engine.clone_box(),
}
}
}
impl Regex {
/// Creates a new `Regex` from a regular expression string.
pub fn new(re: &str) -> ::Result<Regex> {
Regex::new_bounded(re, std::usize::MAX)
}
/// Creates a new `Regex` from a regular expression string, but only if it doesn't require too
/// many states.
pub fn new_bounded(re: &str, max_states: usize) -> ::Result<Regex> {
let nfa = try!(Nfa::from_regex(re));
let nfa = nfa.remove_looks();
let eng = if nfa.is_empty() {
Box::new(EmptyEngine) as Box<Engine<u8>>
} else if nfa.is_anchored() {
Box::new(try!(Regex::make_anchored(nfa, max_states))) as Box<Engine<u8>>
} else {
Box::new(try!(Regex::make_forward_backward(nfa, max_states))) as Box<Engine<u8>>
};
Ok(Regex { engine: eng })
}
fn make_anchored(nfa: Nfa<u32, NoLooks>, max_states: usize)
-> ::Result<AnchoredEngine<u8>> {
let nfa = try!(nfa.byte_me(max_states));
let dfa = try!(nfa.determinize(max_states))
.optimize()
.map_ret(|(_, bytes)| bytes);
let prog = dfa.compile();
Ok(AnchoredEngine::new(prog))
}
fn make_forward_backward(nfa: Nfa<u32, NoLooks>, max_states: usize)
-> ::Result<ForwardBackwardEngine<u8>> {
if nfa.is_anchored() {
return Err(Error::InvalidEngine("anchors rule out the forward-backward engine"));
}
let f_nfa = try!(try!(nfa.clone().byte_me(max_states)).anchor(max_states));
let b_nfa = try!(try!(nfa.byte_me(max_states)).reverse(max_states));
let f_dfa = try!(f_nfa.determinize(max_states)).optimize();
let b_dfa = try!(b_nfa.determinize_longest(max_states)).optimize();
let b_dfa = b_dfa.map_ret(|(_, bytes)| bytes);
let b_prog = b_dfa.compile();
let f_dfa = f_dfa.map_ret(|(look, bytes)| {
let b_dfa_state = b_dfa.init[look.as_usize()].expect("BUG: back dfa must have this init");
(b_dfa_state, bytes)
});
let mut f_prog = f_dfa.compile();
let prefix = Prefix::from_parts(f_dfa.prefix_strings());
match prefix {
Prefix::Empty => {},
_ => {
// If there is a non-trivial prefix, we can usually speed up matching by deleting
// transitions that return to the start state. That way, instead of returning to
// the start state, we will just fail to match. Then we get to search for the
// prefix before trying to match again.
let f_dfa = f_dfa.cut_loop_to_init().optimize();
f_prog = f_dfa.compile();
},
}
Ok(ForwardBackwardEngine::new(f_prog, prefix, b_prog))
}
/// Returns the index range of the first match, if there is a match. The indices returned are
/// byte indices of the string. The first index is inclusive; the second is exclusive.
pub fn find(&self, s: &str) -> Option<(usize, usize)> {
if let Some((start, end, look_behind)) = self.engine.find(s) {
Some((start + look_behind as usize, end))
} else
|
}
pub fn is_match(&self, s: &str) -> bool {
// TODO: for the forward-backward engine, this could be faster because we don't need
// to run backward.
self.find(s).is_some()
}
}
|
{
None
}
|
conditional_block
|
regex.rs
|
// Copyright 2015-2016 Joe Neeman.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use error::Error;
use nfa::{Nfa, NoLooks};
use runner::anchored::AnchoredEngine;
use runner::forward_backward::{ForwardBackwardEngine, Prefix};
use runner::Engine;
use std;
use std::fmt::Debug;
#[derive(Debug)]
pub struct Regex {
engine: Box<Engine<u8>>,
}
// An engine that doesn't match anything.
#[derive(Clone, Debug)]
struct EmptyEngine;
impl<Ret: Debug> Engine<Ret> for EmptyEngine {
fn find(&self, _: &str) -> Option<(usize, usize, Ret)> { None }
fn clone_box(&self) -> Box<Engine<Ret>> { Box::new(EmptyEngine) }
}
impl Clone for Regex {
fn clone(&self) -> Regex {
Regex {
engine: self.engine.clone_box(),
}
}
}
impl Regex {
/// Creates a new `Regex` from a regular expression string.
|
Regex::new_bounded(re, std::usize::MAX)
}
/// Creates a new `Regex` from a regular expression string, but only if it doesn't require too
/// many states.
pub fn new_bounded(re: &str, max_states: usize) -> ::Result<Regex> {
let nfa = try!(Nfa::from_regex(re));
let nfa = nfa.remove_looks();
let eng = if nfa.is_empty() {
Box::new(EmptyEngine) as Box<Engine<u8>>
} else if nfa.is_anchored() {
Box::new(try!(Regex::make_anchored(nfa, max_states))) as Box<Engine<u8>>
} else {
Box::new(try!(Regex::make_forward_backward(nfa, max_states))) as Box<Engine<u8>>
};
Ok(Regex { engine: eng })
}
fn make_anchored(nfa: Nfa<u32, NoLooks>, max_states: usize)
-> ::Result<AnchoredEngine<u8>> {
let nfa = try!(nfa.byte_me(max_states));
let dfa = try!(nfa.determinize(max_states))
.optimize()
.map_ret(|(_, bytes)| bytes);
let prog = dfa.compile();
Ok(AnchoredEngine::new(prog))
}
fn make_forward_backward(nfa: Nfa<u32, NoLooks>, max_states: usize)
-> ::Result<ForwardBackwardEngine<u8>> {
if nfa.is_anchored() {
return Err(Error::InvalidEngine("anchors rule out the forward-backward engine"));
}
let f_nfa = try!(try!(nfa.clone().byte_me(max_states)).anchor(max_states));
let b_nfa = try!(try!(nfa.byte_me(max_states)).reverse(max_states));
let f_dfa = try!(f_nfa.determinize(max_states)).optimize();
let b_dfa = try!(b_nfa.determinize_longest(max_states)).optimize();
let b_dfa = b_dfa.map_ret(|(_, bytes)| bytes);
let b_prog = b_dfa.compile();
let f_dfa = f_dfa.map_ret(|(look, bytes)| {
let b_dfa_state = b_dfa.init[look.as_usize()].expect("BUG: back dfa must have this init");
(b_dfa_state, bytes)
});
let mut f_prog = f_dfa.compile();
let prefix = Prefix::from_parts(f_dfa.prefix_strings());
match prefix {
Prefix::Empty => {},
_ => {
// If there is a non-trivial prefix, we can usually speed up matching by deleting
// transitions that return to the start state. That way, instead of returning to
// the start state, we will just fail to match. Then we get to search for the
// prefix before trying to match again.
let f_dfa = f_dfa.cut_loop_to_init().optimize();
f_prog = f_dfa.compile();
},
}
Ok(ForwardBackwardEngine::new(f_prog, prefix, b_prog))
}
/// Returns the index range of the first match, if there is a match. The indices returned are
/// byte indices of the string. The first index is inclusive; the second is exclusive.
pub fn find(&self, s: &str) -> Option<(usize, usize)> {
if let Some((start, end, look_behind)) = self.engine.find(s) {
Some((start + look_behind as usize, end))
} else {
None
}
}
pub fn is_match(&self, s: &str) -> bool {
// TODO: for the forward-backward engine, this could be faster because we don't need
// to run backward.
self.find(s).is_some()
}
}
|
pub fn new(re: &str) -> ::Result<Regex> {
|
random_line_split
|
regex.rs
|
// Copyright 2015-2016 Joe Neeman.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use error::Error;
use nfa::{Nfa, NoLooks};
use runner::anchored::AnchoredEngine;
use runner::forward_backward::{ForwardBackwardEngine, Prefix};
use runner::Engine;
use std;
use std::fmt::Debug;
#[derive(Debug)]
pub struct Regex {
engine: Box<Engine<u8>>,
}
// An engine that doesn't match anything.
#[derive(Clone, Debug)]
struct EmptyEngine;
impl<Ret: Debug> Engine<Ret> for EmptyEngine {
fn find(&self, _: &str) -> Option<(usize, usize, Ret)> { None }
fn clone_box(&self) -> Box<Engine<Ret>> { Box::new(EmptyEngine) }
}
impl Clone for Regex {
fn clone(&self) -> Regex {
Regex {
engine: self.engine.clone_box(),
}
}
}
impl Regex {
/// Creates a new `Regex` from a regular expression string.
pub fn new(re: &str) -> ::Result<Regex> {
Regex::new_bounded(re, std::usize::MAX)
}
/// Creates a new `Regex` from a regular expression string, but only if it doesn't require too
/// many states.
pub fn new_bounded(re: &str, max_states: usize) -> ::Result<Regex> {
let nfa = try!(Nfa::from_regex(re));
let nfa = nfa.remove_looks();
let eng = if nfa.is_empty() {
Box::new(EmptyEngine) as Box<Engine<u8>>
} else if nfa.is_anchored() {
Box::new(try!(Regex::make_anchored(nfa, max_states))) as Box<Engine<u8>>
} else {
Box::new(try!(Regex::make_forward_backward(nfa, max_states))) as Box<Engine<u8>>
};
Ok(Regex { engine: eng })
}
fn make_anchored(nfa: Nfa<u32, NoLooks>, max_states: usize)
-> ::Result<AnchoredEngine<u8>> {
let nfa = try!(nfa.byte_me(max_states));
let dfa = try!(nfa.determinize(max_states))
.optimize()
.map_ret(|(_, bytes)| bytes);
let prog = dfa.compile();
Ok(AnchoredEngine::new(prog))
}
fn make_forward_backward(nfa: Nfa<u32, NoLooks>, max_states: usize)
-> ::Result<ForwardBackwardEngine<u8>> {
if nfa.is_anchored() {
return Err(Error::InvalidEngine("anchors rule out the forward-backward engine"));
}
let f_nfa = try!(try!(nfa.clone().byte_me(max_states)).anchor(max_states));
let b_nfa = try!(try!(nfa.byte_me(max_states)).reverse(max_states));
let f_dfa = try!(f_nfa.determinize(max_states)).optimize();
let b_dfa = try!(b_nfa.determinize_longest(max_states)).optimize();
let b_dfa = b_dfa.map_ret(|(_, bytes)| bytes);
let b_prog = b_dfa.compile();
let f_dfa = f_dfa.map_ret(|(look, bytes)| {
let b_dfa_state = b_dfa.init[look.as_usize()].expect("BUG: back dfa must have this init");
(b_dfa_state, bytes)
});
let mut f_prog = f_dfa.compile();
let prefix = Prefix::from_parts(f_dfa.prefix_strings());
match prefix {
Prefix::Empty => {},
_ => {
// If there is a non-trivial prefix, we can usually speed up matching by deleting
// transitions that return to the start state. That way, instead of returning to
// the start state, we will just fail to match. Then we get to search for the
// prefix before trying to match again.
let f_dfa = f_dfa.cut_loop_to_init().optimize();
f_prog = f_dfa.compile();
},
}
Ok(ForwardBackwardEngine::new(f_prog, prefix, b_prog))
}
/// Returns the index range of the first match, if there is a match. The indices returned are
/// byte indices of the string. The first index is inclusive; the second is exclusive.
pub fn find(&self, s: &str) -> Option<(usize, usize)> {
if let Some((start, end, look_behind)) = self.engine.find(s) {
Some((start + look_behind as usize, end))
} else {
None
}
}
pub fn is_match(&self, s: &str) -> bool
|
}
|
{
// TODO: for the forward-backward engine, this could be faster because we don't need
// to run backward.
self.find(s).is_some()
}
|
identifier_body
|
regex.rs
|
// Copyright 2015-2016 Joe Neeman.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use error::Error;
use nfa::{Nfa, NoLooks};
use runner::anchored::AnchoredEngine;
use runner::forward_backward::{ForwardBackwardEngine, Prefix};
use runner::Engine;
use std;
use std::fmt::Debug;
#[derive(Debug)]
pub struct Regex {
engine: Box<Engine<u8>>,
}
// An engine that doesn't match anything.
#[derive(Clone, Debug)]
struct EmptyEngine;
impl<Ret: Debug> Engine<Ret> for EmptyEngine {
fn find(&self, _: &str) -> Option<(usize, usize, Ret)> { None }
fn clone_box(&self) -> Box<Engine<Ret>> { Box::new(EmptyEngine) }
}
impl Clone for Regex {
fn clone(&self) -> Regex {
Regex {
engine: self.engine.clone_box(),
}
}
}
impl Regex {
/// Creates a new `Regex` from a regular expression string.
pub fn new(re: &str) -> ::Result<Regex> {
Regex::new_bounded(re, std::usize::MAX)
}
/// Creates a new `Regex` from a regular expression string, but only if it doesn't require too
/// many states.
pub fn new_bounded(re: &str, max_states: usize) -> ::Result<Regex> {
let nfa = try!(Nfa::from_regex(re));
let nfa = nfa.remove_looks();
let eng = if nfa.is_empty() {
Box::new(EmptyEngine) as Box<Engine<u8>>
} else if nfa.is_anchored() {
Box::new(try!(Regex::make_anchored(nfa, max_states))) as Box<Engine<u8>>
} else {
Box::new(try!(Regex::make_forward_backward(nfa, max_states))) as Box<Engine<u8>>
};
Ok(Regex { engine: eng })
}
fn make_anchored(nfa: Nfa<u32, NoLooks>, max_states: usize)
-> ::Result<AnchoredEngine<u8>> {
let nfa = try!(nfa.byte_me(max_states));
let dfa = try!(nfa.determinize(max_states))
.optimize()
.map_ret(|(_, bytes)| bytes);
let prog = dfa.compile();
Ok(AnchoredEngine::new(prog))
}
fn
|
(nfa: Nfa<u32, NoLooks>, max_states: usize)
-> ::Result<ForwardBackwardEngine<u8>> {
if nfa.is_anchored() {
return Err(Error::InvalidEngine("anchors rule out the forward-backward engine"));
}
let f_nfa = try!(try!(nfa.clone().byte_me(max_states)).anchor(max_states));
let b_nfa = try!(try!(nfa.byte_me(max_states)).reverse(max_states));
let f_dfa = try!(f_nfa.determinize(max_states)).optimize();
let b_dfa = try!(b_nfa.determinize_longest(max_states)).optimize();
let b_dfa = b_dfa.map_ret(|(_, bytes)| bytes);
let b_prog = b_dfa.compile();
let f_dfa = f_dfa.map_ret(|(look, bytes)| {
let b_dfa_state = b_dfa.init[look.as_usize()].expect("BUG: back dfa must have this init");
(b_dfa_state, bytes)
});
let mut f_prog = f_dfa.compile();
let prefix = Prefix::from_parts(f_dfa.prefix_strings());
match prefix {
Prefix::Empty => {},
_ => {
// If there is a non-trivial prefix, we can usually speed up matching by deleting
// transitions that return to the start state. That way, instead of returning to
// the start state, we will just fail to match. Then we get to search for the
// prefix before trying to match again.
let f_dfa = f_dfa.cut_loop_to_init().optimize();
f_prog = f_dfa.compile();
},
}
Ok(ForwardBackwardEngine::new(f_prog, prefix, b_prog))
}
/// Returns the index range of the first match, if there is a match. The indices returned are
/// byte indices of the string. The first index is inclusive; the second is exclusive.
pub fn find(&self, s: &str) -> Option<(usize, usize)> {
if let Some((start, end, look_behind)) = self.engine.find(s) {
Some((start + look_behind as usize, end))
} else {
None
}
}
pub fn is_match(&self, s: &str) -> bool {
// TODO: for the forward-backward engine, this could be faster because we don't need
// to run backward.
self.find(s).is_some()
}
}
|
make_forward_backward
|
identifier_name
|
chip.rs
|
use common::{RingBuffer,Queue};
use ast;
//use adc;
use dma;
use nvic;
use usart;
use spi;
use gpio;
pub struct Sam4l;
const IQ_SIZE: usize = 100;
static mut IQ_BUF : [nvic::NvicIdx; IQ_SIZE] =
[nvic::NvicIdx::HFLASHC; IQ_SIZE];
pub static mut INTERRUPT_QUEUE : Option<RingBuffer<'static, nvic::NvicIdx>> = None;
impl Sam4l {
#[inline(never)]
pub unsafe fn new() -> Sam4l {
INTERRUPT_QUEUE = Some(RingBuffer::new(&mut IQ_BUF));
usart::USART3.set_dma(&mut dma::DMAChannels[0]);
dma::DMAChannels[0].client = Some(&mut usart::USART3);
spi::SPI.set_dma(&mut dma::DMAChannels[1], &mut dma::DMAChannels[2]);
dma::DMAChannels[1].client = Some(&mut spi::SPI);
dma::DMAChannels[2].client = Some(&mut spi::SPI);
Sam4l
}
pub unsafe fn service_pending_interrupts(&mut self) {
use nvic::NvicIdx::*;
INTERRUPT_QUEUE.as_mut().unwrap().dequeue().map(|interrupt| {
match interrupt {
ASTALARM => ast::AST.handle_interrupt(),
USART3 => usart::USART3.handle_interrupt(),
PDCA0 => dma::DMAChannels[0].handle_interrupt(),
PDCA1 => dma::DMAChannels[1].handle_interrupt(),
PDCA2 => dma::DMAChannels[2].handle_interrupt(),
|
GPIO0 => gpio::PA.handle_interrupt(),
GPIO1 => gpio::PA.handle_interrupt(),
GPIO2 => gpio::PA.handle_interrupt(),
GPIO3 => gpio::PA.handle_interrupt(),
GPIO4 => gpio::PB.handle_interrupt(),
GPIO5 => gpio::PB.handle_interrupt(),
GPIO6 => gpio::PB.handle_interrupt(),
GPIO7 => gpio::PB.handle_interrupt(),
GPIO8 => gpio::PC.handle_interrupt(),
GPIO9 => gpio::PC.handle_interrupt(),
GPIO10 => gpio::PC.handle_interrupt(),
GPIO11 => gpio::PC.handle_interrupt(),
//NvicIdx::ADCIFE => self.adc.handle_interrupt(),
_ => {}
}
nvic::enable(interrupt);
});
}
pub unsafe fn has_pending_interrupts(&mut self) -> bool {
INTERRUPT_QUEUE.as_mut().unwrap().has_elements()
}
}
|
random_line_split
|
|
chip.rs
|
use common::{RingBuffer,Queue};
use ast;
//use adc;
use dma;
use nvic;
use usart;
use spi;
use gpio;
pub struct
|
;
const IQ_SIZE: usize = 100;
static mut IQ_BUF : [nvic::NvicIdx; IQ_SIZE] =
[nvic::NvicIdx::HFLASHC; IQ_SIZE];
pub static mut INTERRUPT_QUEUE : Option<RingBuffer<'static, nvic::NvicIdx>> = None;
impl Sam4l {
#[inline(never)]
pub unsafe fn new() -> Sam4l {
INTERRUPT_QUEUE = Some(RingBuffer::new(&mut IQ_BUF));
usart::USART3.set_dma(&mut dma::DMAChannels[0]);
dma::DMAChannels[0].client = Some(&mut usart::USART3);
spi::SPI.set_dma(&mut dma::DMAChannels[1], &mut dma::DMAChannels[2]);
dma::DMAChannels[1].client = Some(&mut spi::SPI);
dma::DMAChannels[2].client = Some(&mut spi::SPI);
Sam4l
}
pub unsafe fn service_pending_interrupts(&mut self) {
use nvic::NvicIdx::*;
INTERRUPT_QUEUE.as_mut().unwrap().dequeue().map(|interrupt| {
match interrupt {
ASTALARM => ast::AST.handle_interrupt(),
USART3 => usart::USART3.handle_interrupt(),
PDCA0 => dma::DMAChannels[0].handle_interrupt(),
PDCA1 => dma::DMAChannels[1].handle_interrupt(),
PDCA2 => dma::DMAChannels[2].handle_interrupt(),
GPIO0 => gpio::PA.handle_interrupt(),
GPIO1 => gpio::PA.handle_interrupt(),
GPIO2 => gpio::PA.handle_interrupt(),
GPIO3 => gpio::PA.handle_interrupt(),
GPIO4 => gpio::PB.handle_interrupt(),
GPIO5 => gpio::PB.handle_interrupt(),
GPIO6 => gpio::PB.handle_interrupt(),
GPIO7 => gpio::PB.handle_interrupt(),
GPIO8 => gpio::PC.handle_interrupt(),
GPIO9 => gpio::PC.handle_interrupt(),
GPIO10 => gpio::PC.handle_interrupt(),
GPIO11 => gpio::PC.handle_interrupt(),
//NvicIdx::ADCIFE => self.adc.handle_interrupt(),
_ => {}
}
nvic::enable(interrupt);
});
}
pub unsafe fn has_pending_interrupts(&mut self) -> bool {
INTERRUPT_QUEUE.as_mut().unwrap().has_elements()
}
}
|
Sam4l
|
identifier_name
|
exceptions.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Helpers for generated code using exceptions
use crate::thrift_protocol::MessageType;
/// This trait should be implemented for each individual exception type. It will typically be generated.
pub trait ExceptionInfo {
/// Exception name
fn exn_name(&self) -> &'static str {
std::any::type_name::<Self>()
}
// Exception value
fn exn_value(&self) -> String;
/// Is a declared exception
fn exn_is_declared(&self) -> bool;
}
/// An extension of ExceptionInfo that also includes successful results.
/// This is implemented on generated *Exn types.
pub trait ResultInfo: ExceptionInfo {
fn result_type(&self) -> ResultType;
}
/// Classify a result from a specific method call.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ResultType {
/// A successful return
Return,
/// A declared exception
Error,
/// Some other exception (eg ApplicationException)
Exception,
}
impl ResultType {
pub fn
|
(&self) -> MessageType {
match self {
ResultType::Return | ResultType::Error => MessageType::Reply,
ResultType::Exception => MessageType::Exception,
}
}
}
|
message_type
|
identifier_name
|
exceptions.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Helpers for generated code using exceptions
use crate::thrift_protocol::MessageType;
/// This trait should be implemented for each individual exception type. It will typically be generated.
pub trait ExceptionInfo {
/// Exception name
fn exn_name(&self) -> &'static str {
std::any::type_name::<Self>()
}
// Exception value
fn exn_value(&self) -> String;
/// Is a declared exception
fn exn_is_declared(&self) -> bool;
}
/// An extension of ExceptionInfo that also includes successful results.
/// This is implemented on generated *Exn types.
pub trait ResultInfo: ExceptionInfo {
fn result_type(&self) -> ResultType;
}
/// Classify a result from a specific method call.
|
Return,
/// A declared exception
Error,
/// Some other exception (eg ApplicationException)
Exception,
}
impl ResultType {
pub fn message_type(&self) -> MessageType {
match self {
ResultType::Return | ResultType::Error => MessageType::Reply,
ResultType::Exception => MessageType::Exception,
}
}
}
|
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ResultType {
/// A successful return
|
random_line_split
|
cargo_compile.rs
|
Shell out to `--do get` for each source, and build up the list of paths
//! to pass to rustc -L
//! 5. Call `cargo-rustc` with the results of the resolver zipped together with
//! the results of the `get`
//!
//! a. Topologically sort the dependencies
//! b. Compile each dependency in order, passing in the -L's pointing at each
//! previously compiled dependency
//!
use std::collections::HashMap;
use std::default::Default;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use core::registry::PackageRegistry;
use core::{Source, SourceId, PackageSet, Package, Target, PackageId};
use core::{Profile, TargetKind};
use core::resolver::Method;
use ops::{self, BuildOutput, ExecEngine};
use sources::{PathSource};
use util::config::{ConfigValue, Config};
use util::{CargoResult, internal, human, ChainError, profile};
/// Contains information about how a package should be compiled.
pub struct CompileOptions<'a> {
pub config: &'a Config,
/// Number of concurrent jobs to use.
pub jobs: Option<u32>,
/// The target platform to compile for (example: `i686-unknown-linux-gnu`).
pub target: Option<&'a str>,
/// Extra features to build for the root package
pub features: &'a [String],
/// Flag if the default feature should be built for the root package
pub no_default_features: bool,
/// Root package to build (if None it's the current one)
pub spec: Option<&'a str>,
/// Filter to apply to the root package to select which targets will be
/// built.
pub filter: CompileFilter<'a>,
/// Engine which drives compilation
pub exec_engine: Option<Arc<Box<ExecEngine>>>,
/// Whether this is a release build or not
pub release: bool,
/// Mode for this compile.
pub mode: CompileMode,
/// The specified target will be compiled with all the available arguments,
/// note that this only accounts for the *final* invocation of rustc
pub target_rustc_args: Option<&'a [String]>,
}
#[derive(Clone, Copy, PartialEq)]
pub enum CompileMode {
Test,
Build,
Bench,
Doc { deps: bool },
}
pub enum CompileFilter<'a> {
Everything,
Only {
lib: bool,
bins: &'a [String],
examples: &'a [String],
tests: &'a [String],
benches: &'a [String],
}
}
pub fn compile<'a>(manifest_path: &Path,
options: &CompileOptions<'a>)
-> CargoResult<ops::Compilation<'a>> {
debug!("compile; manifest-path={}", manifest_path.display());
let mut source = try!(PathSource::for_path(manifest_path.parent().unwrap(),
options.config));
try!(source.update());
// TODO: Move this into PathSource
let package = try!(source.root_package());
debug!("loaded package; package={}", package);
for key in package.manifest().warnings().iter() {
try!(options.config.shell().warn(key))
}
compile_pkg(&package, Some(Box::new(source)), options)
}
pub fn compile_pkg<'a>(package: &Package,
source: Option<Box<Source + 'a>>,
options: &CompileOptions<'a>)
-> CargoResult<ops::Compilation<'a>> {
let CompileOptions { config, jobs, target, spec, features,
no_default_features, release, mode,
ref filter, ref exec_engine,
ref target_rustc_args } = *options;
let target = target.map(|s| s.to_string());
let features = features.iter().flat_map(|s| {
s.split(' ')
}).map(|s| s.to_string()).collect::<Vec<String>>();
if spec.is_some() && (no_default_features || features.len() > 0) {
return Err(human("features cannot be modified when the main package \
is not being built"))
}
if jobs == Some(0) {
return Err(human("jobs must be at least 1"))
}
let override_ids = try!(source_ids_from_config(config, package.root()));
let (packages, resolve_with_overrides, sources) = {
let mut registry = PackageRegistry::new(config);
if let Some(source) = source {
registry.preload(package.package_id().source_id(), source);
} else {
try!(registry.add_sources(&[package.package_id().source_id()
.clone()]));
}
// First, resolve the package's *listed* dependencies, as well as
// downloading and updating all remotes and such.
let resolve = try!(ops::resolve_pkg(&mut registry, package));
// Second, resolve with precisely what we're doing. Filter out
// transitive dependencies if necessary, specify features, handle
// overrides, etc.
let _p = profile::start("resolving w/ overrides...");
try!(registry.add_overrides(override_ids));
let method = Method::Required {
dev_deps: true, // TODO: remove this option?
features: &features,
uses_default_features:!no_default_features,
};
let resolved_with_overrides =
try!(ops::resolve_with_previous(&mut registry, package, method,
Some(&resolve), None));
let req: Vec<PackageId> = resolved_with_overrides.iter().map(|r| {
r.clone()
}).collect();
let packages = try!(registry.get(&req).chain_error(|| {
human("Unable to get packages from source")
}));
(packages, resolved_with_overrides, registry.move_sources())
};
let pkgid = match spec {
Some(spec) => try!(resolve_with_overrides.query(spec)),
None => package.package_id(),
};
let to_build = packages.iter().find(|p| p.package_id() == pkgid).unwrap();
let targets = try!(generate_targets(to_build, mode, filter, release));
let target_with_args = match *target_rustc_args {
Some(args) if targets.len() == 1 => {
let (target, profile) = targets[0];
let mut profile = profile.clone();
profile.rustc_args = Some(args.to_vec());
Some((target, profile))
}
Some(_) => {
return Err(human("extra arguments to `rustc` can only be passed to \
one target, consider filtering\nthe package by \
passing e.g. `--lib` or `--bin NAME` to specify \
a single target"))
}
None => None,
};
let targets = target_with_args.as_ref().map(|&(t, ref p)| vec![(t, p)])
.unwrap_or(targets);
let ret = {
let _p = profile::start("compiling");
let mut build_config = try!(scrape_build_config(config, jobs, target));
build_config.exec_engine = exec_engine.clone();
build_config.release = release;
if let CompileMode::Doc { deps } = mode {
build_config.doc_all = deps;
}
try!(ops::compile_targets(&targets, to_build,
&PackageSet::new(&packages),
&resolve_with_overrides,
&sources,
config,
build_config,
to_build.manifest().profiles()))
};
return Ok(ret);
}
impl<'a> CompileFilter<'a> {
pub fn new(lib_only: bool,
bins: &'a [String],
tests: &'a [String],
examples: &'a [String],
benches: &'a [String]) -> CompileFilter<'a> {
if lib_only ||!bins.is_empty() ||!tests.is_empty() ||
!examples.is_empty() ||!benches.is_empty() {
CompileFilter::Only {
lib: lib_only, bins: bins, examples: examples, benches: benches,
tests: tests,
}
} else {
CompileFilter::Everything
}
}
pub fn matches(&self, target: &Target) -> bool {
match *self {
CompileFilter::Everything => true,
CompileFilter::Only { lib, bins, examples, tests, benches } => {
let list = match *target.kind() {
TargetKind::Bin => bins,
TargetKind::Test => tests,
TargetKind::Bench => benches,
TargetKind::Example => examples,
TargetKind::Lib(..) => return lib,
TargetKind::CustomBuild => return false,
};
list.iter().any(|x| *x == target.name())
}
}
}
}
/// Given the configuration for a build, this function will generate all
/// target/profile combinations needed to be built.
fn generate_targets<'a>(pkg: &'a Package,
mode: CompileMode,
filter: &CompileFilter,
release: bool)
-> CargoResult<Vec<(&'a Target, &'a Profile)>> {
let profiles = pkg.manifest().profiles();
let build = if release {&profiles.release} else {&profiles.dev};
let test = if release {&profiles.bench} else {&profiles.test};
let profile = match mode {
CompileMode::Test => test,
CompileMode::Bench => &profiles.bench,
CompileMode::Build => build,
CompileMode::Doc {.. } => &profiles.doc,
};
return match *filter {
CompileFilter::Everything => {
match mode {
CompileMode::Bench => {
Ok(pkg.targets().iter().filter(|t| t.benched()).map(|t| {
(t, profile)
}).collect::<Vec<_>>())
}
CompileMode::Test => {
let mut base = pkg.targets().iter().filter(|t| {
t.tested()
}).map(|t| {
(t, if t.is_example() {build} else {profile})
}).collect::<Vec<_>>();
// Always compile the library if we're testing everything as
// it'll be needed for doctests
if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) {
if t.doctested() {
base.push((t, build));
}
}
Ok(base)
}
CompileMode::Build => {
Ok(pkg.targets().iter().filter(|t| {
t.is_bin() || t.is_lib()
}).map(|t| (t, profile)).collect())
}
CompileMode::Doc {.. } => {
Ok(pkg.targets().iter().filter(|t| t.documented())
.map(|t| (t, profile)).collect())
}
}
}
CompileFilter::Only { lib, bins, examples, tests, benches } => {
let mut targets = Vec::new();
if lib {
if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) {
targets.push((t, profile));
} else {
return Err(human(format!("no library targets found")))
}
}
{
let mut find = |names: &[String], desc, kind, profile| {
for name in names {
let target = pkg.targets().iter().find(|t| {
t.name() == *name && *t.kind() == kind
});
let t = match target {
Some(t) => t,
None => return Err(human(format!("no {} target \
named `{}`",
desc, name))),
};
debug!("found {} `{}`", desc, name);
targets.push((t, profile));
}
Ok(())
};
try!(find(bins, "bin", TargetKind::Bin, profile));
try!(find(examples, "example", TargetKind::Example, build));
try!(find(tests, "test", TargetKind::Test, test));
try!(find(benches, "bench", TargetKind::Bench, &profiles.bench));
}
Ok(targets)
}
};
}
/// Read the `paths` configuration variable to discover all path overrides that
/// have been configured.
fn source_ids_from_config(config: &Config, cur_path: &Path)
-> CargoResult<Vec<SourceId>> {
let configs = try!(config.values());
debug!("loaded config; configs={:?}", configs);
let config_paths = match configs.get("paths") {
Some(cfg) => cfg,
None => return Ok(Vec::new())
};
let paths = try!(config_paths.list().chain_error(|| {
internal("invalid configuration for the key `paths`")
}));
paths.iter().map(|&(ref s, ref p)| {
// The path listed next to the string is the config file in which the
// key was located, so we want to pop off the `.cargo/config` component
// to get the directory containing the `.cargo` folder.
p.parent().unwrap().parent().unwrap().join(s)
}).filter(|p| {
// Make sure we don't override the local package, even if it's in the
// list of override paths.
cur_path!= &**p
}).map(|p| SourceId::for_path(&p)).collect()
}
/// Parse all config files to learn about build configuration. Currently
/// configured options are:
///
/// * build.jobs
/// * target.$target.ar
/// * target.$target.linker
/// * target.$target.libfoo.metadata
fn scrape_build_config(config: &Config,
jobs: Option<u32>,
target: Option<String>)
-> CargoResult<ops::BuildConfig> {
let cfg_jobs = match try!(config.get_i64("build.jobs")) {
Some((n, p)) => {
if n <= 0 {
return Err(human(format!("build.jobs must be positive, \
but found {} in {:?}", n, p)));
} else if n >= u32::max_value() as i64 {
return Err(human(format!("build.jobs is too large: \
found {} in {:?}", n, p)));
} else {
Some(n as u32)
}
}
None => None,
};
let jobs = jobs.or(cfg_jobs).unwrap_or(::num_cpus::get() as u32);
let mut base = ops::BuildConfig {
jobs: jobs,
requested_target: target.clone(),
..Default::default()
};
base.host = try!(scrape_target_config(config, &config.rustc_info().host));
base.target = match target.as_ref() {
Some(triple) => try!(scrape_target_config(config, &triple)),
None => base.host.clone(),
};
Ok(base)
}
fn scrape_target_config(config: &Config, triple: &str)
-> CargoResult<ops::TargetConfig> {
let key = format!("target.{}", triple);
let mut ret = ops::TargetConfig {
ar: try!(config.get_path(&format!("{}.ar", key))),
linker: try!(config.get_path(&format!("{}.linker", key))),
overrides: HashMap::new(),
};
let table = match try!(config.get_table(&key)) {
Some((table, _)) => table,
None => return Ok(ret),
};
for (lib_name, _) in table.into_iter() {
if lib_name == "ar" || lib_name == "linker" { continue }
let mut output = BuildOutput {
library_paths: Vec::new(),
library_links: Vec::new(),
cfgs: Vec::new(),
metadata: Vec::new(),
};
|
ConfigValue::String(v, path) => {
if k == "rustc-flags" {
let whence = format!("in `{}` (in {})", key,
path.display());
let (paths, links) = try!(
BuildOutput::parse_rustc_flags(&v, &whence)
);
output.library_paths.extend(paths.into_iter());
output.library_links.extend(links.into_iter());
} else {
output.metadata.push((k, v));
}
},
ConfigValue::List(a, p) => {
if k == "rustc-link-lib" {
output.library_links.extend(a.into_iter().map(|v| v.0));
} else if k == "rustc-link-search" {
output.library_paths.extend(a.into_iter().map(|v| {
PathBuf::from(&v.0)
}));
} else if k == "rustc-cfg" {
output.cfgs.extend(a.into_iter
|
let key = format!("{}.{}", key, lib_name);
let table = try!(config.get_table(&key)).unwrap().0;
for (k, _) in table.into_iter() {
let key = format!("{}.{}", key, k);
match try!(config.get(&key)).unwrap() {
|
random_line_split
|
cargo_compile.rs
|
out to `--do get` for each source, and build up the list of paths
//! to pass to rustc -L
//! 5. Call `cargo-rustc` with the results of the resolver zipped together with
//! the results of the `get`
//!
//! a. Topologically sort the dependencies
//! b. Compile each dependency in order, passing in the -L's pointing at each
//! previously compiled dependency
//!
use std::collections::HashMap;
use std::default::Default;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use core::registry::PackageRegistry;
use core::{Source, SourceId, PackageSet, Package, Target, PackageId};
use core::{Profile, TargetKind};
use core::resolver::Method;
use ops::{self, BuildOutput, ExecEngine};
use sources::{PathSource};
use util::config::{ConfigValue, Config};
use util::{CargoResult, internal, human, ChainError, profile};
/// Contains information about how a package should be compiled.
pub struct CompileOptions<'a> {
pub config: &'a Config,
/// Number of concurrent jobs to use.
pub jobs: Option<u32>,
/// The target platform to compile for (example: `i686-unknown-linux-gnu`).
pub target: Option<&'a str>,
/// Extra features to build for the root package
pub features: &'a [String],
/// Flag if the default feature should be built for the root package
pub no_default_features: bool,
/// Root package to build (if None it's the current one)
pub spec: Option<&'a str>,
/// Filter to apply to the root package to select which targets will be
/// built.
pub filter: CompileFilter<'a>,
/// Engine which drives compilation
pub exec_engine: Option<Arc<Box<ExecEngine>>>,
/// Whether this is a release build or not
pub release: bool,
/// Mode for this compile.
pub mode: CompileMode,
/// The specified target will be compiled with all the available arguments,
/// note that this only accounts for the *final* invocation of rustc
pub target_rustc_args: Option<&'a [String]>,
}
#[derive(Clone, Copy, PartialEq)]
pub enum CompileMode {
Test,
Build,
Bench,
Doc { deps: bool },
}
pub enum CompileFilter<'a> {
Everything,
Only {
lib: bool,
bins: &'a [String],
examples: &'a [String],
tests: &'a [String],
benches: &'a [String],
}
}
pub fn compile<'a>(manifest_path: &Path,
options: &CompileOptions<'a>)
-> CargoResult<ops::Compilation<'a>> {
debug!("compile; manifest-path={}", manifest_path.display());
let mut source = try!(PathSource::for_path(manifest_path.parent().unwrap(),
options.config));
try!(source.update());
// TODO: Move this into PathSource
let package = try!(source.root_package());
debug!("loaded package; package={}", package);
for key in package.manifest().warnings().iter() {
try!(options.config.shell().warn(key))
}
compile_pkg(&package, Some(Box::new(source)), options)
}
pub fn compile_pkg<'a>(package: &Package,
source: Option<Box<Source + 'a>>,
options: &CompileOptions<'a>)
-> CargoResult<ops::Compilation<'a>> {
let CompileOptions { config, jobs, target, spec, features,
no_default_features, release, mode,
ref filter, ref exec_engine,
ref target_rustc_args } = *options;
let target = target.map(|s| s.to_string());
let features = features.iter().flat_map(|s| {
s.split(' ')
}).map(|s| s.to_string()).collect::<Vec<String>>();
if spec.is_some() && (no_default_features || features.len() > 0) {
return Err(human("features cannot be modified when the main package \
is not being built"))
}
if jobs == Some(0) {
return Err(human("jobs must be at least 1"))
}
let override_ids = try!(source_ids_from_config(config, package.root()));
let (packages, resolve_with_overrides, sources) = {
let mut registry = PackageRegistry::new(config);
if let Some(source) = source {
registry.preload(package.package_id().source_id(), source);
} else {
try!(registry.add_sources(&[package.package_id().source_id()
.clone()]));
}
// First, resolve the package's *listed* dependencies, as well as
// downloading and updating all remotes and such.
let resolve = try!(ops::resolve_pkg(&mut registry, package));
// Second, resolve with precisely what we're doing. Filter out
// transitive dependencies if necessary, specify features, handle
// overrides, etc.
let _p = profile::start("resolving w/ overrides...");
try!(registry.add_overrides(override_ids));
let method = Method::Required {
dev_deps: true, // TODO: remove this option?
features: &features,
uses_default_features:!no_default_features,
};
let resolved_with_overrides =
try!(ops::resolve_with_previous(&mut registry, package, method,
Some(&resolve), None));
let req: Vec<PackageId> = resolved_with_overrides.iter().map(|r| {
r.clone()
}).collect();
let packages = try!(registry.get(&req).chain_error(|| {
human("Unable to get packages from source")
}));
(packages, resolved_with_overrides, registry.move_sources())
};
let pkgid = match spec {
Some(spec) => try!(resolve_with_overrides.query(spec)),
None => package.package_id(),
};
let to_build = packages.iter().find(|p| p.package_id() == pkgid).unwrap();
let targets = try!(generate_targets(to_build, mode, filter, release));
let target_with_args = match *target_rustc_args {
Some(args) if targets.len() == 1 => {
let (target, profile) = targets[0];
let mut profile = profile.clone();
profile.rustc_args = Some(args.to_vec());
Some((target, profile))
}
Some(_) => {
return Err(human("extra arguments to `rustc` can only be passed to \
one target, consider filtering\nthe package by \
passing e.g. `--lib` or `--bin NAME` to specify \
a single target"))
}
None => None,
};
let targets = target_with_args.as_ref().map(|&(t, ref p)| vec![(t, p)])
.unwrap_or(targets);
let ret = {
let _p = profile::start("compiling");
let mut build_config = try!(scrape_build_config(config, jobs, target));
build_config.exec_engine = exec_engine.clone();
build_config.release = release;
if let CompileMode::Doc { deps } = mode {
build_config.doc_all = deps;
}
try!(ops::compile_targets(&targets, to_build,
&PackageSet::new(&packages),
&resolve_with_overrides,
&sources,
config,
build_config,
to_build.manifest().profiles()))
};
return Ok(ret);
}
impl<'a> CompileFilter<'a> {
pub fn new(lib_only: bool,
bins: &'a [String],
tests: &'a [String],
examples: &'a [String],
benches: &'a [String]) -> CompileFilter<'a> {
if lib_only ||!bins.is_empty() ||!tests.is_empty() ||
!examples.is_empty() ||!benches.is_empty() {
CompileFilter::Only {
lib: lib_only, bins: bins, examples: examples, benches: benches,
tests: tests,
}
} else {
CompileFilter::Everything
}
}
pub fn matches(&self, target: &Target) -> bool {
match *self {
CompileFilter::Everything => true,
CompileFilter::Only { lib, bins, examples, tests, benches } => {
let list = match *target.kind() {
TargetKind::Bin => bins,
TargetKind::Test => tests,
TargetKind::Bench => benches,
TargetKind::Example => examples,
TargetKind::Lib(..) => return lib,
TargetKind::CustomBuild => return false,
};
list.iter().any(|x| *x == target.name())
}
}
}
}
/// Given the configuration for a build, this function will generate all
/// target/profile combinations needed to be built.
fn generate_targets<'a>(pkg: &'a Package,
mode: CompileMode,
filter: &CompileFilter,
release: bool)
-> CargoResult<Vec<(&'a Target, &'a Profile)>> {
let profiles = pkg.manifest().profiles();
let build = if release {&profiles.release} else {&profiles.dev};
let test = if release {&profiles.bench} else {&profiles.test};
let profile = match mode {
CompileMode::Test => test,
CompileMode::Bench => &profiles.bench,
CompileMode::Build => build,
CompileMode::Doc {.. } => &profiles.doc,
};
return match *filter {
CompileFilter::Everything => {
match mode {
CompileMode::Bench => {
Ok(pkg.targets().iter().filter(|t| t.benched()).map(|t| {
(t, profile)
}).collect::<Vec<_>>())
}
CompileMode::Test => {
let mut base = pkg.targets().iter().filter(|t| {
t.tested()
}).map(|t| {
(t, if t.is_example() {build} else {profile})
}).collect::<Vec<_>>();
// Always compile the library if we're testing everything as
// it'll be needed for doctests
if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) {
if t.doctested() {
base.push((t, build));
}
}
Ok(base)
}
CompileMode::Build => {
Ok(pkg.targets().iter().filter(|t| {
t.is_bin() || t.is_lib()
}).map(|t| (t, profile)).collect())
}
CompileMode::Doc {.. } => {
Ok(pkg.targets().iter().filter(|t| t.documented())
.map(|t| (t, profile)).collect())
}
}
}
CompileFilter::Only { lib, bins, examples, tests, benches } => {
let mut targets = Vec::new();
if lib {
if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) {
targets.push((t, profile));
} else {
return Err(human(format!("no library targets found")))
}
}
{
let mut find = |names: &[String], desc, kind, profile| {
for name in names {
let target = pkg.targets().iter().find(|t| {
t.name() == *name && *t.kind() == kind
});
let t = match target {
Some(t) => t,
None => return Err(human(format!("no {} target \
named `{}`",
desc, name))),
};
debug!("found {} `{}`", desc, name);
targets.push((t, profile));
}
Ok(())
};
try!(find(bins, "bin", TargetKind::Bin, profile));
try!(find(examples, "example", TargetKind::Example, build));
try!(find(tests, "test", TargetKind::Test, test));
try!(find(benches, "bench", TargetKind::Bench, &profiles.bench));
}
Ok(targets)
}
};
}
/// Read the `paths` configuration variable to discover all path overrides that
/// have been configured.
fn source_ids_from_config(config: &Config, cur_path: &Path)
-> CargoResult<Vec<SourceId>> {
let configs = try!(config.values());
debug!("loaded config; configs={:?}", configs);
let config_paths = match configs.get("paths") {
Some(cfg) => cfg,
None => return Ok(Vec::new())
};
let paths = try!(config_paths.list().chain_error(|| {
internal("invalid configuration for the key `paths`")
}));
paths.iter().map(|&(ref s, ref p)| {
// The path listed next to the string is the config file in which the
// key was located, so we want to pop off the `.cargo/config` component
// to get the directory containing the `.cargo` folder.
p.parent().unwrap().parent().unwrap().join(s)
}).filter(|p| {
// Make sure we don't override the local package, even if it's in the
// list of override paths.
cur_path!= &**p
}).map(|p| SourceId::for_path(&p)).collect()
}
/// Parse all config files to learn about build configuration. Currently
/// configured options are:
///
/// * build.jobs
/// * target.$target.ar
/// * target.$target.linker
/// * target.$target.libfoo.metadata
fn scrape_build_config(config: &Config,
jobs: Option<u32>,
target: Option<String>)
-> CargoResult<ops::BuildConfig> {
let cfg_jobs = match try!(config.get_i64("build.jobs")) {
Some((n, p)) => {
if n <= 0 {
return Err(human(format!("build.jobs must be positive, \
but found {} in {:?}", n, p)));
} else if n >= u32::max_value() as i64 {
return Err(human(format!("build.jobs is too large: \
found {} in {:?}", n, p)));
} else
|
}
None => None,
};
let jobs = jobs.or(cfg_jobs).unwrap_or(::num_cpus::get() as u32);
let mut base = ops::BuildConfig {
jobs: jobs,
requested_target: target.clone(),
..Default::default()
};
base.host = try!(scrape_target_config(config, &config.rustc_info().host));
base.target = match target.as_ref() {
Some(triple) => try!(scrape_target_config(config, &triple)),
None => base.host.clone(),
};
Ok(base)
}
fn scrape_target_config(config: &Config, triple: &str)
-> CargoResult<ops::TargetConfig> {
let key = format!("target.{}", triple);
let mut ret = ops::TargetConfig {
ar: try!(config.get_path(&format!("{}.ar", key))),
linker: try!(config.get_path(&format!("{}.linker", key))),
overrides: HashMap::new(),
};
let table = match try!(config.get_table(&key)) {
Some((table, _)) => table,
None => return Ok(ret),
};
for (lib_name, _) in table.into_iter() {
if lib_name == "ar" || lib_name == "linker" { continue }
let mut output = BuildOutput {
library_paths: Vec::new(),
library_links: Vec::new(),
cfgs: Vec::new(),
metadata: Vec::new(),
};
let key = format!("{}.{}", key, lib_name);
let table = try!(config.get_table(&key)).unwrap().0;
for (k, _) in table.into_iter() {
let key = format!("{}.{}", key, k);
match try!(config.get(&key)).unwrap() {
ConfigValue::String(v, path) => {
if k == "rustc-flags" {
let whence = format!("in `{}` (in {})", key,
path.display());
let (paths, links) = try!(
BuildOutput::parse_rustc_flags(&v, &whence)
);
output.library_paths.extend(paths.into_iter());
output.library_links.extend(links.into_iter());
} else {
output.metadata.push((k, v));
}
},
ConfigValue::List(a, p) => {
if k == "rustc-link-lib" {
output.library_links.extend(a.into_iter().map(|v| v.0));
} else if k == "rustc-link-search" {
output.library_paths.extend(a.into_iter().map(|v| {
PathBuf::from(&v.0)
}));
} else if k == "rustc-cfg" {
output.cfgs.extend(a.into
|
{
Some(n as u32)
}
|
conditional_block
|
cargo_compile.rs
|
out to `--do get` for each source, and build up the list of paths
//! to pass to rustc -L
//! 5. Call `cargo-rustc` with the results of the resolver zipped together with
//! the results of the `get`
//!
//! a. Topologically sort the dependencies
//! b. Compile each dependency in order, passing in the -L's pointing at each
//! previously compiled dependency
//!
use std::collections::HashMap;
use std::default::Default;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use core::registry::PackageRegistry;
use core::{Source, SourceId, PackageSet, Package, Target, PackageId};
use core::{Profile, TargetKind};
use core::resolver::Method;
use ops::{self, BuildOutput, ExecEngine};
use sources::{PathSource};
use util::config::{ConfigValue, Config};
use util::{CargoResult, internal, human, ChainError, profile};
/// Contains information about how a package should be compiled.
pub struct CompileOptions<'a> {
pub config: &'a Config,
/// Number of concurrent jobs to use.
pub jobs: Option<u32>,
/// The target platform to compile for (example: `i686-unknown-linux-gnu`).
pub target: Option<&'a str>,
/// Extra features to build for the root package
pub features: &'a [String],
/// Flag if the default feature should be built for the root package
pub no_default_features: bool,
/// Root package to build (if None it's the current one)
pub spec: Option<&'a str>,
/// Filter to apply to the root package to select which targets will be
/// built.
pub filter: CompileFilter<'a>,
/// Engine which drives compilation
pub exec_engine: Option<Arc<Box<ExecEngine>>>,
/// Whether this is a release build or not
pub release: bool,
/// Mode for this compile.
pub mode: CompileMode,
/// The specified target will be compiled with all the available arguments,
/// note that this only accounts for the *final* invocation of rustc
pub target_rustc_args: Option<&'a [String]>,
}
#[derive(Clone, Copy, PartialEq)]
pub enum CompileMode {
Test,
Build,
Bench,
Doc { deps: bool },
}
pub enum
|
<'a> {
Everything,
Only {
lib: bool,
bins: &'a [String],
examples: &'a [String],
tests: &'a [String],
benches: &'a [String],
}
}
pub fn compile<'a>(manifest_path: &Path,
options: &CompileOptions<'a>)
-> CargoResult<ops::Compilation<'a>> {
debug!("compile; manifest-path={}", manifest_path.display());
let mut source = try!(PathSource::for_path(manifest_path.parent().unwrap(),
options.config));
try!(source.update());
// TODO: Move this into PathSource
let package = try!(source.root_package());
debug!("loaded package; package={}", package);
for key in package.manifest().warnings().iter() {
try!(options.config.shell().warn(key))
}
compile_pkg(&package, Some(Box::new(source)), options)
}
pub fn compile_pkg<'a>(package: &Package,
source: Option<Box<Source + 'a>>,
options: &CompileOptions<'a>)
-> CargoResult<ops::Compilation<'a>> {
let CompileOptions { config, jobs, target, spec, features,
no_default_features, release, mode,
ref filter, ref exec_engine,
ref target_rustc_args } = *options;
let target = target.map(|s| s.to_string());
let features = features.iter().flat_map(|s| {
s.split(' ')
}).map(|s| s.to_string()).collect::<Vec<String>>();
if spec.is_some() && (no_default_features || features.len() > 0) {
return Err(human("features cannot be modified when the main package \
is not being built"))
}
if jobs == Some(0) {
return Err(human("jobs must be at least 1"))
}
let override_ids = try!(source_ids_from_config(config, package.root()));
let (packages, resolve_with_overrides, sources) = {
let mut registry = PackageRegistry::new(config);
if let Some(source) = source {
registry.preload(package.package_id().source_id(), source);
} else {
try!(registry.add_sources(&[package.package_id().source_id()
.clone()]));
}
// First, resolve the package's *listed* dependencies, as well as
// downloading and updating all remotes and such.
let resolve = try!(ops::resolve_pkg(&mut registry, package));
// Second, resolve with precisely what we're doing. Filter out
// transitive dependencies if necessary, specify features, handle
// overrides, etc.
let _p = profile::start("resolving w/ overrides...");
try!(registry.add_overrides(override_ids));
let method = Method::Required {
dev_deps: true, // TODO: remove this option?
features: &features,
uses_default_features:!no_default_features,
};
let resolved_with_overrides =
try!(ops::resolve_with_previous(&mut registry, package, method,
Some(&resolve), None));
let req: Vec<PackageId> = resolved_with_overrides.iter().map(|r| {
r.clone()
}).collect();
let packages = try!(registry.get(&req).chain_error(|| {
human("Unable to get packages from source")
}));
(packages, resolved_with_overrides, registry.move_sources())
};
let pkgid = match spec {
Some(spec) => try!(resolve_with_overrides.query(spec)),
None => package.package_id(),
};
let to_build = packages.iter().find(|p| p.package_id() == pkgid).unwrap();
let targets = try!(generate_targets(to_build, mode, filter, release));
let target_with_args = match *target_rustc_args {
Some(args) if targets.len() == 1 => {
let (target, profile) = targets[0];
let mut profile = profile.clone();
profile.rustc_args = Some(args.to_vec());
Some((target, profile))
}
Some(_) => {
return Err(human("extra arguments to `rustc` can only be passed to \
one target, consider filtering\nthe package by \
passing e.g. `--lib` or `--bin NAME` to specify \
a single target"))
}
None => None,
};
let targets = target_with_args.as_ref().map(|&(t, ref p)| vec![(t, p)])
.unwrap_or(targets);
let ret = {
let _p = profile::start("compiling");
let mut build_config = try!(scrape_build_config(config, jobs, target));
build_config.exec_engine = exec_engine.clone();
build_config.release = release;
if let CompileMode::Doc { deps } = mode {
build_config.doc_all = deps;
}
try!(ops::compile_targets(&targets, to_build,
&PackageSet::new(&packages),
&resolve_with_overrides,
&sources,
config,
build_config,
to_build.manifest().profiles()))
};
return Ok(ret);
}
impl<'a> CompileFilter<'a> {
pub fn new(lib_only: bool,
bins: &'a [String],
tests: &'a [String],
examples: &'a [String],
benches: &'a [String]) -> CompileFilter<'a> {
if lib_only ||!bins.is_empty() ||!tests.is_empty() ||
!examples.is_empty() ||!benches.is_empty() {
CompileFilter::Only {
lib: lib_only, bins: bins, examples: examples, benches: benches,
tests: tests,
}
} else {
CompileFilter::Everything
}
}
pub fn matches(&self, target: &Target) -> bool {
match *self {
CompileFilter::Everything => true,
CompileFilter::Only { lib, bins, examples, tests, benches } => {
let list = match *target.kind() {
TargetKind::Bin => bins,
TargetKind::Test => tests,
TargetKind::Bench => benches,
TargetKind::Example => examples,
TargetKind::Lib(..) => return lib,
TargetKind::CustomBuild => return false,
};
list.iter().any(|x| *x == target.name())
}
}
}
}
/// Given the configuration for a build, this function will generate all
/// target/profile combinations needed to be built.
fn generate_targets<'a>(pkg: &'a Package,
mode: CompileMode,
filter: &CompileFilter,
release: bool)
-> CargoResult<Vec<(&'a Target, &'a Profile)>> {
let profiles = pkg.manifest().profiles();
let build = if release {&profiles.release} else {&profiles.dev};
let test = if release {&profiles.bench} else {&profiles.test};
let profile = match mode {
CompileMode::Test => test,
CompileMode::Bench => &profiles.bench,
CompileMode::Build => build,
CompileMode::Doc {.. } => &profiles.doc,
};
return match *filter {
CompileFilter::Everything => {
match mode {
CompileMode::Bench => {
Ok(pkg.targets().iter().filter(|t| t.benched()).map(|t| {
(t, profile)
}).collect::<Vec<_>>())
}
CompileMode::Test => {
let mut base = pkg.targets().iter().filter(|t| {
t.tested()
}).map(|t| {
(t, if t.is_example() {build} else {profile})
}).collect::<Vec<_>>();
// Always compile the library if we're testing everything as
// it'll be needed for doctests
if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) {
if t.doctested() {
base.push((t, build));
}
}
Ok(base)
}
CompileMode::Build => {
Ok(pkg.targets().iter().filter(|t| {
t.is_bin() || t.is_lib()
}).map(|t| (t, profile)).collect())
}
CompileMode::Doc {.. } => {
Ok(pkg.targets().iter().filter(|t| t.documented())
.map(|t| (t, profile)).collect())
}
}
}
CompileFilter::Only { lib, bins, examples, tests, benches } => {
let mut targets = Vec::new();
if lib {
if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) {
targets.push((t, profile));
} else {
return Err(human(format!("no library targets found")))
}
}
{
let mut find = |names: &[String], desc, kind, profile| {
for name in names {
let target = pkg.targets().iter().find(|t| {
t.name() == *name && *t.kind() == kind
});
let t = match target {
Some(t) => t,
None => return Err(human(format!("no {} target \
named `{}`",
desc, name))),
};
debug!("found {} `{}`", desc, name);
targets.push((t, profile));
}
Ok(())
};
try!(find(bins, "bin", TargetKind::Bin, profile));
try!(find(examples, "example", TargetKind::Example, build));
try!(find(tests, "test", TargetKind::Test, test));
try!(find(benches, "bench", TargetKind::Bench, &profiles.bench));
}
Ok(targets)
}
};
}
/// Read the `paths` configuration variable to discover all path overrides that
/// have been configured.
fn source_ids_from_config(config: &Config, cur_path: &Path)
-> CargoResult<Vec<SourceId>> {
let configs = try!(config.values());
debug!("loaded config; configs={:?}", configs);
let config_paths = match configs.get("paths") {
Some(cfg) => cfg,
None => return Ok(Vec::new())
};
let paths = try!(config_paths.list().chain_error(|| {
internal("invalid configuration for the key `paths`")
}));
paths.iter().map(|&(ref s, ref p)| {
// The path listed next to the string is the config file in which the
// key was located, so we want to pop off the `.cargo/config` component
// to get the directory containing the `.cargo` folder.
p.parent().unwrap().parent().unwrap().join(s)
}).filter(|p| {
// Make sure we don't override the local package, even if it's in the
// list of override paths.
cur_path!= &**p
}).map(|p| SourceId::for_path(&p)).collect()
}
/// Parse all config files to learn about build configuration. Currently
/// configured options are:
///
/// * build.jobs
/// * target.$target.ar
/// * target.$target.linker
/// * target.$target.libfoo.metadata
fn scrape_build_config(config: &Config,
jobs: Option<u32>,
target: Option<String>)
-> CargoResult<ops::BuildConfig> {
let cfg_jobs = match try!(config.get_i64("build.jobs")) {
Some((n, p)) => {
if n <= 0 {
return Err(human(format!("build.jobs must be positive, \
but found {} in {:?}", n, p)));
} else if n >= u32::max_value() as i64 {
return Err(human(format!("build.jobs is too large: \
found {} in {:?}", n, p)));
} else {
Some(n as u32)
}
}
None => None,
};
let jobs = jobs.or(cfg_jobs).unwrap_or(::num_cpus::get() as u32);
let mut base = ops::BuildConfig {
jobs: jobs,
requested_target: target.clone(),
..Default::default()
};
base.host = try!(scrape_target_config(config, &config.rustc_info().host));
base.target = match target.as_ref() {
Some(triple) => try!(scrape_target_config(config, &triple)),
None => base.host.clone(),
};
Ok(base)
}
fn scrape_target_config(config: &Config, triple: &str)
-> CargoResult<ops::TargetConfig> {
let key = format!("target.{}", triple);
let mut ret = ops::TargetConfig {
ar: try!(config.get_path(&format!("{}.ar", key))),
linker: try!(config.get_path(&format!("{}.linker", key))),
overrides: HashMap::new(),
};
let table = match try!(config.get_table(&key)) {
Some((table, _)) => table,
None => return Ok(ret),
};
for (lib_name, _) in table.into_iter() {
if lib_name == "ar" || lib_name == "linker" { continue }
let mut output = BuildOutput {
library_paths: Vec::new(),
library_links: Vec::new(),
cfgs: Vec::new(),
metadata: Vec::new(),
};
let key = format!("{}.{}", key, lib_name);
let table = try!(config.get_table(&key)).unwrap().0;
for (k, _) in table.into_iter() {
let key = format!("{}.{}", key, k);
match try!(config.get(&key)).unwrap() {
ConfigValue::String(v, path) => {
if k == "rustc-flags" {
let whence = format!("in `{}` (in {})", key,
path.display());
let (paths, links) = try!(
BuildOutput::parse_rustc_flags(&v, &whence)
);
output.library_paths.extend(paths.into_iter());
output.library_links.extend(links.into_iter());
} else {
output.metadata.push((k, v));
}
},
ConfigValue::List(a, p) => {
if k == "rustc-link-lib" {
output.library_links.extend(a.into_iter().map(|v| v.0));
} else if k == "rustc-link-search" {
output.library_paths.extend(a.into_iter().map(|v| {
PathBuf::from(&v.0)
}));
} else if k == "rustc-cfg" {
output.cfgs.extend(a.into
|
CompileFilter
|
identifier_name
|
cargo_compile.rs
|
out to `--do get` for each source, and build up the list of paths
//! to pass to rustc -L
//! 5. Call `cargo-rustc` with the results of the resolver zipped together with
//! the results of the `get`
//!
//! a. Topologically sort the dependencies
//! b. Compile each dependency in order, passing in the -L's pointing at each
//! previously compiled dependency
//!
use std::collections::HashMap;
use std::default::Default;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use core::registry::PackageRegistry;
use core::{Source, SourceId, PackageSet, Package, Target, PackageId};
use core::{Profile, TargetKind};
use core::resolver::Method;
use ops::{self, BuildOutput, ExecEngine};
use sources::{PathSource};
use util::config::{ConfigValue, Config};
use util::{CargoResult, internal, human, ChainError, profile};
/// Contains information about how a package should be compiled.
pub struct CompileOptions<'a> {
pub config: &'a Config,
/// Number of concurrent jobs to use.
pub jobs: Option<u32>,
/// The target platform to compile for (example: `i686-unknown-linux-gnu`).
pub target: Option<&'a str>,
/// Extra features to build for the root package
pub features: &'a [String],
/// Flag if the default feature should be built for the root package
pub no_default_features: bool,
/// Root package to build (if None it's the current one)
pub spec: Option<&'a str>,
/// Filter to apply to the root package to select which targets will be
/// built.
pub filter: CompileFilter<'a>,
/// Engine which drives compilation
pub exec_engine: Option<Arc<Box<ExecEngine>>>,
/// Whether this is a release build or not
pub release: bool,
/// Mode for this compile.
pub mode: CompileMode,
/// The specified target will be compiled with all the available arguments,
/// note that this only accounts for the *final* invocation of rustc
pub target_rustc_args: Option<&'a [String]>,
}
#[derive(Clone, Copy, PartialEq)]
pub enum CompileMode {
Test,
Build,
Bench,
Doc { deps: bool },
}
pub enum CompileFilter<'a> {
Everything,
Only {
lib: bool,
bins: &'a [String],
examples: &'a [String],
tests: &'a [String],
benches: &'a [String],
}
}
pub fn compile<'a>(manifest_path: &Path,
options: &CompileOptions<'a>)
-> CargoResult<ops::Compilation<'a>> {
debug!("compile; manifest-path={}", manifest_path.display());
let mut source = try!(PathSource::for_path(manifest_path.parent().unwrap(),
options.config));
try!(source.update());
// TODO: Move this into PathSource
let package = try!(source.root_package());
debug!("loaded package; package={}", package);
for key in package.manifest().warnings().iter() {
try!(options.config.shell().warn(key))
}
compile_pkg(&package, Some(Box::new(source)), options)
}
pub fn compile_pkg<'a>(package: &Package,
source: Option<Box<Source + 'a>>,
options: &CompileOptions<'a>)
-> CargoResult<ops::Compilation<'a>> {
let CompileOptions { config, jobs, target, spec, features,
no_default_features, release, mode,
ref filter, ref exec_engine,
ref target_rustc_args } = *options;
let target = target.map(|s| s.to_string());
let features = features.iter().flat_map(|s| {
s.split(' ')
}).map(|s| s.to_string()).collect::<Vec<String>>();
if spec.is_some() && (no_default_features || features.len() > 0) {
return Err(human("features cannot be modified when the main package \
is not being built"))
}
if jobs == Some(0) {
return Err(human("jobs must be at least 1"))
}
let override_ids = try!(source_ids_from_config(config, package.root()));
let (packages, resolve_with_overrides, sources) = {
let mut registry = PackageRegistry::new(config);
if let Some(source) = source {
registry.preload(package.package_id().source_id(), source);
} else {
try!(registry.add_sources(&[package.package_id().source_id()
.clone()]));
}
// First, resolve the package's *listed* dependencies, as well as
// downloading and updating all remotes and such.
let resolve = try!(ops::resolve_pkg(&mut registry, package));
// Second, resolve with precisely what we're doing. Filter out
// transitive dependencies if necessary, specify features, handle
// overrides, etc.
let _p = profile::start("resolving w/ overrides...");
try!(registry.add_overrides(override_ids));
let method = Method::Required {
dev_deps: true, // TODO: remove this option?
features: &features,
uses_default_features:!no_default_features,
};
let resolved_with_overrides =
try!(ops::resolve_with_previous(&mut registry, package, method,
Some(&resolve), None));
let req: Vec<PackageId> = resolved_with_overrides.iter().map(|r| {
r.clone()
}).collect();
let packages = try!(registry.get(&req).chain_error(|| {
human("Unable to get packages from source")
}));
(packages, resolved_with_overrides, registry.move_sources())
};
let pkgid = match spec {
Some(spec) => try!(resolve_with_overrides.query(spec)),
None => package.package_id(),
};
let to_build = packages.iter().find(|p| p.package_id() == pkgid).unwrap();
let targets = try!(generate_targets(to_build, mode, filter, release));
let target_with_args = match *target_rustc_args {
Some(args) if targets.len() == 1 => {
let (target, profile) = targets[0];
let mut profile = profile.clone();
profile.rustc_args = Some(args.to_vec());
Some((target, profile))
}
Some(_) => {
return Err(human("extra arguments to `rustc` can only be passed to \
one target, consider filtering\nthe package by \
passing e.g. `--lib` or `--bin NAME` to specify \
a single target"))
}
None => None,
};
let targets = target_with_args.as_ref().map(|&(t, ref p)| vec![(t, p)])
.unwrap_or(targets);
let ret = {
let _p = profile::start("compiling");
let mut build_config = try!(scrape_build_config(config, jobs, target));
build_config.exec_engine = exec_engine.clone();
build_config.release = release;
if let CompileMode::Doc { deps } = mode {
build_config.doc_all = deps;
}
try!(ops::compile_targets(&targets, to_build,
&PackageSet::new(&packages),
&resolve_with_overrides,
&sources,
config,
build_config,
to_build.manifest().profiles()))
};
return Ok(ret);
}
impl<'a> CompileFilter<'a> {
pub fn new(lib_only: bool,
bins: &'a [String],
tests: &'a [String],
examples: &'a [String],
benches: &'a [String]) -> CompileFilter<'a> {
if lib_only ||!bins.is_empty() ||!tests.is_empty() ||
!examples.is_empty() ||!benches.is_empty() {
CompileFilter::Only {
lib: lib_only, bins: bins, examples: examples, benches: benches,
tests: tests,
}
} else {
CompileFilter::Everything
}
}
pub fn matches(&self, target: &Target) -> bool {
match *self {
CompileFilter::Everything => true,
CompileFilter::Only { lib, bins, examples, tests, benches } => {
let list = match *target.kind() {
TargetKind::Bin => bins,
TargetKind::Test => tests,
TargetKind::Bench => benches,
TargetKind::Example => examples,
TargetKind::Lib(..) => return lib,
TargetKind::CustomBuild => return false,
};
list.iter().any(|x| *x == target.name())
}
}
}
}
/// Given the configuration for a build, this function will generate all
/// target/profile combinations needed to be built.
fn generate_targets<'a>(pkg: &'a Package,
mode: CompileMode,
filter: &CompileFilter,
release: bool)
-> CargoResult<Vec<(&'a Target, &'a Profile)>>
|
t.tested()
}).map(|t| {
(t, if t.is_example() {build} else {profile})
}).collect::<Vec<_>>();
// Always compile the library if we're testing everything as
// it'll be needed for doctests
if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) {
if t.doctested() {
base.push((t, build));
}
}
Ok(base)
}
CompileMode::Build => {
Ok(pkg.targets().iter().filter(|t| {
t.is_bin() || t.is_lib()
}).map(|t| (t, profile)).collect())
}
CompileMode::Doc {.. } => {
Ok(pkg.targets().iter().filter(|t| t.documented())
.map(|t| (t, profile)).collect())
}
}
}
CompileFilter::Only { lib, bins, examples, tests, benches } => {
let mut targets = Vec::new();
if lib {
if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) {
targets.push((t, profile));
} else {
return Err(human(format!("no library targets found")))
}
}
{
let mut find = |names: &[String], desc, kind, profile| {
for name in names {
let target = pkg.targets().iter().find(|t| {
t.name() == *name && *t.kind() == kind
});
let t = match target {
Some(t) => t,
None => return Err(human(format!("no {} target \
named `{}`",
desc, name))),
};
debug!("found {} `{}`", desc, name);
targets.push((t, profile));
}
Ok(())
};
try!(find(bins, "bin", TargetKind::Bin, profile));
try!(find(examples, "example", TargetKind::Example, build));
try!(find(tests, "test", TargetKind::Test, test));
try!(find(benches, "bench", TargetKind::Bench, &profiles.bench));
}
Ok(targets)
}
};
}
/// Read the `paths` configuration variable to discover all path overrides that
/// have been configured.
fn source_ids_from_config(config: &Config, cur_path: &Path)
-> CargoResult<Vec<SourceId>> {
let configs = try!(config.values());
debug!("loaded config; configs={:?}", configs);
let config_paths = match configs.get("paths") {
Some(cfg) => cfg,
None => return Ok(Vec::new())
};
let paths = try!(config_paths.list().chain_error(|| {
internal("invalid configuration for the key `paths`")
}));
paths.iter().map(|&(ref s, ref p)| {
// The path listed next to the string is the config file in which the
// key was located, so we want to pop off the `.cargo/config` component
// to get the directory containing the `.cargo` folder.
p.parent().unwrap().parent().unwrap().join(s)
}).filter(|p| {
// Make sure we don't override the local package, even if it's in the
// list of override paths.
cur_path!= &**p
}).map(|p| SourceId::for_path(&p)).collect()
}
/// Parse all config files to learn about build configuration. Currently
/// configured options are:
///
/// * build.jobs
/// * target.$target.ar
/// * target.$target.linker
/// * target.$target.libfoo.metadata
fn scrape_build_config(config: &Config,
jobs: Option<u32>,
target: Option<String>)
-> CargoResult<ops::BuildConfig> {
let cfg_jobs = match try!(config.get_i64("build.jobs")) {
Some((n, p)) => {
if n <= 0 {
return Err(human(format!("build.jobs must be positive, \
but found {} in {:?}", n, p)));
} else if n >= u32::max_value() as i64 {
return Err(human(format!("build.jobs is too large: \
found {} in {:?}", n, p)));
} else {
Some(n as u32)
}
}
None => None,
};
let jobs = jobs.or(cfg_jobs).unwrap_or(::num_cpus::get() as u32);
let mut base = ops::BuildConfig {
jobs: jobs,
requested_target: target.clone(),
..Default::default()
};
base.host = try!(scrape_target_config(config, &config.rustc_info().host));
base.target = match target.as_ref() {
Some(triple) => try!(scrape_target_config(config, &triple)),
None => base.host.clone(),
};
Ok(base)
}
fn scrape_target_config(config: &Config, triple: &str)
-> CargoResult<ops::TargetConfig> {
let key = format!("target.{}", triple);
let mut ret = ops::TargetConfig {
ar: try!(config.get_path(&format!("{}.ar", key))),
linker: try!(config.get_path(&format!("{}.linker", key))),
overrides: HashMap::new(),
};
let table = match try!(config.get_table(&key)) {
Some((table, _)) => table,
None => return Ok(ret),
};
for (lib_name, _) in table.into_iter() {
if lib_name == "ar" || lib_name == "linker" { continue }
let mut output = BuildOutput {
library_paths: Vec::new(),
library_links: Vec::new(),
cfgs: Vec::new(),
metadata: Vec::new(),
};
let key = format!("{}.{}", key, lib_name);
let table = try!(config.get_table(&key)).unwrap().0;
for (k, _) in table.into_iter() {
let key = format!("{}.{}", key, k);
match try!(config.get(&key)).unwrap() {
ConfigValue::String(v, path) => {
if k == "rustc-flags" {
let whence = format!("in `{}` (in {})", key,
path.display());
let (paths, links) = try!(
BuildOutput::parse_rustc_flags(&v, &whence)
);
output.library_paths.extend(paths.into_iter());
output.library_links.extend(links.into_iter());
} else {
output.metadata.push((k, v));
}
},
ConfigValue::List(a, p) => {
if k == "rustc-link-lib" {
output.library_links.extend(a.into_iter().map(|v| v.0));
} else if k == "rustc-link-search" {
output.library_paths.extend(a.into_iter().map(|v| {
PathBuf::from(&v.0)
}));
} else if k == "rustc-cfg" {
output.cfgs.extend(a.into
|
{
let profiles = pkg.manifest().profiles();
let build = if release {&profiles.release} else {&profiles.dev};
let test = if release {&profiles.bench} else {&profiles.test};
let profile = match mode {
CompileMode::Test => test,
CompileMode::Bench => &profiles.bench,
CompileMode::Build => build,
CompileMode::Doc { .. } => &profiles.doc,
};
return match *filter {
CompileFilter::Everything => {
match mode {
CompileMode::Bench => {
Ok(pkg.targets().iter().filter(|t| t.benched()).map(|t| {
(t, profile)
}).collect::<Vec<_>>())
}
CompileMode::Test => {
let mut base = pkg.targets().iter().filter(|t| {
|
identifier_body
|
difference.rs
|
use super::Style;
/// When printing out one coloured string followed by another, use one of
/// these rules to figure out which *extra* control codes need to be sent.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Difference {
/// Print out the control codes specified by this style to end up looking
/// like the second string's styles.
ExtraStyles(Style),
/// Converting between these two is impossible, so just send a reset
/// command and then the second string's styles.
Reset,
/// The before style is exactly the same as the after style, so no further
/// control codes need to be printed.
NoDifference,
}
impl Difference {
/// Compute the'style difference' required to turn an existing style into
/// the given, second style.
///
/// For example, to turn green text into green bold text, it's redundant
/// to write a reset command then a second green+bold command, instead of
/// just writing one bold command. This method should see that both styles
/// use the foreground colour green, and reduce it to a single command.
///
/// This method returns an enum value because it's not actually always
/// possible to turn one style into another: for example, text could be
/// made bold and underlined, but you can't remove the bold property
/// without also removing the underline property. So when this has to
/// happen, this function returns None, meaning that the entire set of
/// styles should be reset and begun again.
pub fn between(first: &Style, next: &Style) -> Difference {
use self::Difference::*;
// XXX(Havvy): This algorithm is kind of hard to replicate without
// having the Plain/Foreground enum variants, so I'm just leaving
// it commented out for now, and defaulting to Reset.
if first == next {
return NoDifference;
}
// Cannot un-bold, so must Reset.
if first.is_bold &&!next.is_bold {
return Reset;
}
if first.is_dimmed &&!next.is_dimmed {
return Reset;
}
if first.is_italic &&!next.is_italic {
return Reset;
}
// Cannot un-underline, so must Reset.
if first.is_underline &&!next.is_underline {
return Reset;
}
if first.is_blink &&!next.is_blink {
return Reset;
}
if first.is_reverse &&!next.is_reverse {
return Reset;
}
if first.is_hidden &&!next.is_hidden
|
if first.is_strikethrough &&!next.is_strikethrough {
return Reset;
}
// Cannot go from foreground to no foreground, so must Reset.
if first.foreground.is_some() && next.foreground.is_none() {
return Reset;
}
// Cannot go from background to no background, so must Reset.
if first.background.is_some() && next.background.is_none() {
return Reset;
}
let mut extra_styles = Style::default();
if first.is_bold!= next.is_bold {
extra_styles.is_bold = true;
}
if first.is_dimmed!= next.is_dimmed {
extra_styles.is_dimmed = true;
}
if first.is_italic!= next.is_italic {
extra_styles.is_italic = true;
}
if first.is_underline!= next.is_underline {
extra_styles.is_underline = true;
}
if first.is_blink!= next.is_blink {
extra_styles.is_blink = true;
}
if first.is_reverse!= next.is_reverse {
extra_styles.is_reverse = true;
}
if first.is_hidden!= next.is_hidden {
extra_styles.is_hidden = true;
}
if first.is_strikethrough!= next.is_strikethrough {
extra_styles.is_strikethrough = true;
}
if first.foreground!= next.foreground {
extra_styles.foreground = next.foreground;
}
if first.background!= next.background {
extra_styles.background = next.background;
}
ExtraStyles(extra_styles)
}
}
#[cfg(test)]
mod test {
use super::*;
use super::Difference::*;
use style::Colour::*;
use style::Style;
fn style() -> Style {
Style::new()
}
macro_rules! test {
($name: ident: $first: expr; $next: expr => $result: expr) => {
#[test]
fn $name() {
assert_eq!($result, Difference::between(&$first, &$next));
}
};
}
test!(nothing: Green.normal(); Green.normal() => NoDifference);
test!(uppercase: Green.normal(); Green.bold() => ExtraStyles(style().bold()));
test!(lowercase: Green.bold(); Green.normal() => Reset);
test!(nothing2: Green.bold(); Green.bold() => NoDifference);
test!(colour_change: Red.normal(); Blue.normal() => ExtraStyles(Blue.normal()));
test!(addition_of_blink: style(); style().blink() => ExtraStyles(style().blink()));
test!(addition_of_dimmed: style(); style().dimmed() => ExtraStyles(style().dimmed()));
test!(addition_of_hidden: style(); style().hidden() => ExtraStyles(style().hidden()));
test!(addition_of_reverse: style(); style().reverse() => ExtraStyles(style().reverse()));
test!(addition_of_strikethrough: style(); style().strikethrough() => ExtraStyles(style().strikethrough()));
test!(removal_of_strikethrough: style().strikethrough(); style() => Reset);
test!(removal_of_reverse: style().reverse(); style() => Reset);
test!(removal_of_hidden: style().hidden(); style() => Reset);
test!(removal_of_dimmed: style().dimmed(); style() => Reset);
test!(removal_of_blink: style().blink(); style() => Reset);
}
|
{
return Reset;
}
|
conditional_block
|
difference.rs
|
use super::Style;
/// When printing out one coloured string followed by another, use one of
/// these rules to figure out which *extra* control codes need to be sent.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Difference {
/// Print out the control codes specified by this style to end up looking
/// like the second string's styles.
ExtraStyles(Style),
/// Converting between these two is impossible, so just send a reset
/// command and then the second string's styles.
Reset,
/// The before style is exactly the same as the after style, so no further
/// control codes need to be printed.
NoDifference,
}
impl Difference {
/// Compute the'style difference' required to turn an existing style into
/// the given, second style.
///
/// For example, to turn green text into green bold text, it's redundant
/// to write a reset command then a second green+bold command, instead of
/// just writing one bold command. This method should see that both styles
/// use the foreground colour green, and reduce it to a single command.
///
/// This method returns an enum value because it's not actually always
/// possible to turn one style into another: for example, text could be
/// made bold and underlined, but you can't remove the bold property
/// without also removing the underline property. So when this has to
/// happen, this function returns None, meaning that the entire set of
/// styles should be reset and begun again.
pub fn
|
(first: &Style, next: &Style) -> Difference {
use self::Difference::*;
// XXX(Havvy): This algorithm is kind of hard to replicate without
// having the Plain/Foreground enum variants, so I'm just leaving
// it commented out for now, and defaulting to Reset.
if first == next {
return NoDifference;
}
// Cannot un-bold, so must Reset.
if first.is_bold &&!next.is_bold {
return Reset;
}
if first.is_dimmed &&!next.is_dimmed {
return Reset;
}
if first.is_italic &&!next.is_italic {
return Reset;
}
// Cannot un-underline, so must Reset.
if first.is_underline &&!next.is_underline {
return Reset;
}
if first.is_blink &&!next.is_blink {
return Reset;
}
if first.is_reverse &&!next.is_reverse {
return Reset;
}
if first.is_hidden &&!next.is_hidden {
return Reset;
}
if first.is_strikethrough &&!next.is_strikethrough {
return Reset;
}
// Cannot go from foreground to no foreground, so must Reset.
if first.foreground.is_some() && next.foreground.is_none() {
return Reset;
}
// Cannot go from background to no background, so must Reset.
if first.background.is_some() && next.background.is_none() {
return Reset;
}
let mut extra_styles = Style::default();
if first.is_bold!= next.is_bold {
extra_styles.is_bold = true;
}
if first.is_dimmed!= next.is_dimmed {
extra_styles.is_dimmed = true;
}
if first.is_italic!= next.is_italic {
extra_styles.is_italic = true;
}
if first.is_underline!= next.is_underline {
extra_styles.is_underline = true;
}
if first.is_blink!= next.is_blink {
extra_styles.is_blink = true;
}
if first.is_reverse!= next.is_reverse {
extra_styles.is_reverse = true;
}
if first.is_hidden!= next.is_hidden {
extra_styles.is_hidden = true;
}
if first.is_strikethrough!= next.is_strikethrough {
extra_styles.is_strikethrough = true;
}
if first.foreground!= next.foreground {
extra_styles.foreground = next.foreground;
}
if first.background!= next.background {
extra_styles.background = next.background;
}
ExtraStyles(extra_styles)
}
}
#[cfg(test)]
mod test {
use super::*;
use super::Difference::*;
use style::Colour::*;
use style::Style;
fn style() -> Style {
Style::new()
}
macro_rules! test {
($name: ident: $first: expr; $next: expr => $result: expr) => {
#[test]
fn $name() {
assert_eq!($result, Difference::between(&$first, &$next));
}
};
}
test!(nothing: Green.normal(); Green.normal() => NoDifference);
test!(uppercase: Green.normal(); Green.bold() => ExtraStyles(style().bold()));
test!(lowercase: Green.bold(); Green.normal() => Reset);
test!(nothing2: Green.bold(); Green.bold() => NoDifference);
test!(colour_change: Red.normal(); Blue.normal() => ExtraStyles(Blue.normal()));
test!(addition_of_blink: style(); style().blink() => ExtraStyles(style().blink()));
test!(addition_of_dimmed: style(); style().dimmed() => ExtraStyles(style().dimmed()));
test!(addition_of_hidden: style(); style().hidden() => ExtraStyles(style().hidden()));
test!(addition_of_reverse: style(); style().reverse() => ExtraStyles(style().reverse()));
test!(addition_of_strikethrough: style(); style().strikethrough() => ExtraStyles(style().strikethrough()));
test!(removal_of_strikethrough: style().strikethrough(); style() => Reset);
test!(removal_of_reverse: style().reverse(); style() => Reset);
test!(removal_of_hidden: style().hidden(); style() => Reset);
test!(removal_of_dimmed: style().dimmed(); style() => Reset);
test!(removal_of_blink: style().blink(); style() => Reset);
}
|
between
|
identifier_name
|
difference.rs
|
use super::Style;
/// When printing out one coloured string followed by another, use one of
/// these rules to figure out which *extra* control codes need to be sent.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Difference {
/// Print out the control codes specified by this style to end up looking
/// like the second string's styles.
ExtraStyles(Style),
/// Converting between these two is impossible, so just send a reset
/// command and then the second string's styles.
Reset,
/// The before style is exactly the same as the after style, so no further
/// control codes need to be printed.
NoDifference,
}
impl Difference {
/// Compute the'style difference' required to turn an existing style into
/// the given, second style.
///
/// For example, to turn green text into green bold text, it's redundant
/// to write a reset command then a second green+bold command, instead of
/// just writing one bold command. This method should see that both styles
/// use the foreground colour green, and reduce it to a single command.
///
/// This method returns an enum value because it's not actually always
/// possible to turn one style into another: for example, text could be
/// made bold and underlined, but you can't remove the bold property
/// without also removing the underline property. So when this has to
/// happen, this function returns None, meaning that the entire set of
/// styles should be reset and begun again.
pub fn between(first: &Style, next: &Style) -> Difference
|
if first.is_italic &&!next.is_italic {
return Reset;
}
// Cannot un-underline, so must Reset.
if first.is_underline &&!next.is_underline {
return Reset;
}
if first.is_blink &&!next.is_blink {
return Reset;
}
if first.is_reverse &&!next.is_reverse {
return Reset;
}
if first.is_hidden &&!next.is_hidden {
return Reset;
}
if first.is_strikethrough &&!next.is_strikethrough {
return Reset;
}
// Cannot go from foreground to no foreground, so must Reset.
if first.foreground.is_some() && next.foreground.is_none() {
return Reset;
}
// Cannot go from background to no background, so must Reset.
if first.background.is_some() && next.background.is_none() {
return Reset;
}
let mut extra_styles = Style::default();
if first.is_bold!= next.is_bold {
extra_styles.is_bold = true;
}
if first.is_dimmed!= next.is_dimmed {
extra_styles.is_dimmed = true;
}
if first.is_italic!= next.is_italic {
extra_styles.is_italic = true;
}
if first.is_underline!= next.is_underline {
extra_styles.is_underline = true;
}
if first.is_blink!= next.is_blink {
extra_styles.is_blink = true;
}
if first.is_reverse!= next.is_reverse {
extra_styles.is_reverse = true;
}
if first.is_hidden!= next.is_hidden {
extra_styles.is_hidden = true;
}
if first.is_strikethrough!= next.is_strikethrough {
extra_styles.is_strikethrough = true;
}
if first.foreground!= next.foreground {
extra_styles.foreground = next.foreground;
}
if first.background!= next.background {
extra_styles.background = next.background;
}
ExtraStyles(extra_styles)
}
}
#[cfg(test)]
mod test {
use super::*;
use super::Difference::*;
use style::Colour::*;
use style::Style;
fn style() -> Style {
Style::new()
}
macro_rules! test {
($name: ident: $first: expr; $next: expr => $result: expr) => {
#[test]
fn $name() {
assert_eq!($result, Difference::between(&$first, &$next));
}
};
}
test!(nothing: Green.normal(); Green.normal() => NoDifference);
test!(uppercase: Green.normal(); Green.bold() => ExtraStyles(style().bold()));
test!(lowercase: Green.bold(); Green.normal() => Reset);
test!(nothing2: Green.bold(); Green.bold() => NoDifference);
test!(colour_change: Red.normal(); Blue.normal() => ExtraStyles(Blue.normal()));
test!(addition_of_blink: style(); style().blink() => ExtraStyles(style().blink()));
test!(addition_of_dimmed: style(); style().dimmed() => ExtraStyles(style().dimmed()));
test!(addition_of_hidden: style(); style().hidden() => ExtraStyles(style().hidden()));
test!(addition_of_reverse: style(); style().reverse() => ExtraStyles(style().reverse()));
test!(addition_of_strikethrough: style(); style().strikethrough() => ExtraStyles(style().strikethrough()));
test!(removal_of_strikethrough: style().strikethrough(); style() => Reset);
test!(removal_of_reverse: style().reverse(); style() => Reset);
test!(removal_of_hidden: style().hidden(); style() => Reset);
test!(removal_of_dimmed: style().dimmed(); style() => Reset);
test!(removal_of_blink: style().blink(); style() => Reset);
}
|
{
use self::Difference::*;
// XXX(Havvy): This algorithm is kind of hard to replicate without
// having the Plain/Foreground enum variants, so I'm just leaving
// it commented out for now, and defaulting to Reset.
if first == next {
return NoDifference;
}
// Cannot un-bold, so must Reset.
if first.is_bold && !next.is_bold {
return Reset;
}
if first.is_dimmed && !next.is_dimmed {
return Reset;
}
|
identifier_body
|
difference.rs
|
use super::Style;
/// When printing out one coloured string followed by another, use one of
/// these rules to figure out which *extra* control codes need to be sent.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Difference {
/// Print out the control codes specified by this style to end up looking
/// like the second string's styles.
ExtraStyles(Style),
/// Converting between these two is impossible, so just send a reset
/// command and then the second string's styles.
Reset,
/// The before style is exactly the same as the after style, so no further
/// control codes need to be printed.
NoDifference,
}
impl Difference {
/// Compute the'style difference' required to turn an existing style into
/// the given, second style.
///
/// For example, to turn green text into green bold text, it's redundant
/// to write a reset command then a second green+bold command, instead of
/// just writing one bold command. This method should see that both styles
/// use the foreground colour green, and reduce it to a single command.
///
/// This method returns an enum value because it's not actually always
/// possible to turn one style into another: for example, text could be
/// made bold and underlined, but you can't remove the bold property
/// without also removing the underline property. So when this has to
/// happen, this function returns None, meaning that the entire set of
/// styles should be reset and begun again.
pub fn between(first: &Style, next: &Style) -> Difference {
use self::Difference::*;
// XXX(Havvy): This algorithm is kind of hard to replicate without
// having the Plain/Foreground enum variants, so I'm just leaving
// it commented out for now, and defaulting to Reset.
if first == next {
return NoDifference;
}
// Cannot un-bold, so must Reset.
if first.is_bold &&!next.is_bold {
return Reset;
}
if first.is_dimmed &&!next.is_dimmed {
return Reset;
}
if first.is_italic &&!next.is_italic {
return Reset;
}
// Cannot un-underline, so must Reset.
if first.is_underline &&!next.is_underline {
return Reset;
}
if first.is_blink &&!next.is_blink {
return Reset;
}
if first.is_reverse &&!next.is_reverse {
return Reset;
}
if first.is_hidden &&!next.is_hidden {
return Reset;
}
if first.is_strikethrough &&!next.is_strikethrough {
return Reset;
}
// Cannot go from foreground to no foreground, so must Reset.
if first.foreground.is_some() && next.foreground.is_none() {
return Reset;
}
// Cannot go from background to no background, so must Reset.
if first.background.is_some() && next.background.is_none() {
return Reset;
}
let mut extra_styles = Style::default();
if first.is_bold!= next.is_bold {
extra_styles.is_bold = true;
}
if first.is_dimmed!= next.is_dimmed {
extra_styles.is_dimmed = true;
}
if first.is_italic!= next.is_italic {
extra_styles.is_italic = true;
}
if first.is_underline!= next.is_underline {
extra_styles.is_underline = true;
}
if first.is_blink!= next.is_blink {
extra_styles.is_blink = true;
}
if first.is_reverse!= next.is_reverse {
extra_styles.is_reverse = true;
}
if first.is_hidden!= next.is_hidden {
extra_styles.is_hidden = true;
}
if first.is_strikethrough!= next.is_strikethrough {
extra_styles.is_strikethrough = true;
}
if first.foreground!= next.foreground {
extra_styles.foreground = next.foreground;
}
if first.background!= next.background {
extra_styles.background = next.background;
}
ExtraStyles(extra_styles)
}
}
#[cfg(test)]
mod test {
use super::*;
use super::Difference::*;
|
use style::Colour::*;
use style::Style;
fn style() -> Style {
Style::new()
}
macro_rules! test {
($name: ident: $first: expr; $next: expr => $result: expr) => {
#[test]
fn $name() {
assert_eq!($result, Difference::between(&$first, &$next));
}
};
}
test!(nothing: Green.normal(); Green.normal() => NoDifference);
test!(uppercase: Green.normal(); Green.bold() => ExtraStyles(style().bold()));
test!(lowercase: Green.bold(); Green.normal() => Reset);
test!(nothing2: Green.bold(); Green.bold() => NoDifference);
test!(colour_change: Red.normal(); Blue.normal() => ExtraStyles(Blue.normal()));
test!(addition_of_blink: style(); style().blink() => ExtraStyles(style().blink()));
test!(addition_of_dimmed: style(); style().dimmed() => ExtraStyles(style().dimmed()));
test!(addition_of_hidden: style(); style().hidden() => ExtraStyles(style().hidden()));
test!(addition_of_reverse: style(); style().reverse() => ExtraStyles(style().reverse()));
test!(addition_of_strikethrough: style(); style().strikethrough() => ExtraStyles(style().strikethrough()));
test!(removal_of_strikethrough: style().strikethrough(); style() => Reset);
test!(removal_of_reverse: style().reverse(); style() => Reset);
test!(removal_of_hidden: style().hidden(); style() => Reset);
test!(removal_of_dimmed: style().dimmed(); style() => Reset);
test!(removal_of_blink: style().blink(); style() => Reset);
}
|
random_line_split
|
|
list_item.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Layout for elements with a CSS `display` property of `list-item`. These elements consist of a
//! block and an extra inline fragment for the marker.
#![deny(unsafe_code)]
use app_units::Au;
use block::BlockFlow;
use context::LayoutContext;
use display_list_builder::{DisplayListBuildState, ListItemFlowDisplayListBuilding};
use euclid::Point2D;
use floats::FloatKind;
use flow::{Flow, FlowClass, OpaqueFlow};
use fragment::Overflow;
use fragment::{CoordinateSystem, Fragment, FragmentBorderBoxIterator, GeneratedContentInfo};
use generated_content;
use gfx::display_list::StackingContext;
use gfx_traits::StackingContextId;
use inline::InlineMetrics;
use script_layout_interface::restyle_damage::RESOLVE_GENERATED_CONTENT;
use std::sync::Arc;
use style::computed_values::{list_style_type, position};
use style::logical_geometry::LogicalSize;
use style::properties::{ComputedValues, ServoComputedValues};
use style::servo::SharedStyleContext;
use text;
/// A block with the CSS `display` property equal to `list-item`.
#[derive(Debug)]
pub struct ListItemFlow {
/// Data common to all block flows.
pub block_flow: BlockFlow,
/// The marker, if outside. (Markers that are inside are instead just fragments on the interior
/// `InlineFlow`.)
pub marker_fragments: Vec<Fragment>,
}
impl ListItemFlow {
pub fn from_fragments_and_flotation(main_fragment: Fragment,
marker_fragments: Vec<Fragment>,
flotation: Option<FloatKind>)
-> ListItemFlow {
let mut this = ListItemFlow {
block_flow: BlockFlow::from_fragment(main_fragment, flotation),
marker_fragments: marker_fragments,
};
if let Some(ref marker) = this.marker_fragments.first() {
match marker.style().get_list().list_style_type {
list_style_type::T::disc |
list_style_type::T::none |
list_style_type::T::circle |
list_style_type::T::square |
list_style_type::T::disclosure_open |
list_style_type::T::disclosure_closed => {}
_ => this.block_flow.base.restyle_damage.insert(RESOLVE_GENERATED_CONTENT),
}
}
this
}
}
impl Flow for ListItemFlow {
fn class(&self) -> FlowClass {
FlowClass::ListItem
}
fn as_mut_block(&mut self) -> &mut BlockFlow {
&mut self.block_flow
}
fn as_block(&self) -> &BlockFlow
|
fn bubble_inline_sizes(&mut self) {
// The marker contributes no intrinsic inline-size, so…
self.block_flow.bubble_inline_sizes()
}
fn assign_inline_sizes(&mut self, shared_context: &SharedStyleContext) {
self.block_flow.assign_inline_sizes(shared_context);
let mut marker_inline_start = self.block_flow.fragment.border_box.start.i;
for marker in self.marker_fragments.iter_mut().rev() {
let containing_block_inline_size = self.block_flow.base.block_container_inline_size;
let container_block_size = self.block_flow.explicit_block_containing_size(shared_context);
marker.assign_replaced_inline_size_if_necessary(containing_block_inline_size, container_block_size);
// Do this now. There's no need to do this in bubble-widths, since markers do not
// contribute to the inline size of this flow.
let intrinsic_inline_sizes = marker.compute_intrinsic_inline_sizes();
marker.border_box.size.inline =
intrinsic_inline_sizes.content_intrinsic_sizes.preferred_inline_size;
marker_inline_start = marker_inline_start - marker.border_box.size.inline;
marker.border_box.start.i = marker_inline_start;
}
}
fn assign_block_size<'a>(&mut self, layout_context: &'a LayoutContext<'a>) {
self.block_flow.assign_block_size(layout_context);
for marker in &mut self.marker_fragments {
let containing_block_block_size =
self.block_flow.base.block_container_explicit_block_size;
marker.assign_replaced_block_size_if_necessary(containing_block_block_size);
let font_metrics =
text::font_metrics_for_style(&mut layout_context.font_context(),
marker.style.get_font_arc());
let line_height = text::line_height_from_style(&*marker.style, &font_metrics);
let item_inline_metrics = InlineMetrics::from_font_metrics(&font_metrics, line_height);
let marker_inline_metrics = marker.inline_metrics(layout_context);
marker.border_box.start.b = item_inline_metrics.block_size_above_baseline -
marker_inline_metrics.ascent;
marker.border_box.size.block = marker_inline_metrics.ascent +
marker_inline_metrics.depth_below_baseline;
}
}
fn compute_absolute_position(&mut self, layout_context: &LayoutContext) {
self.block_flow.compute_absolute_position(layout_context)
}
fn place_float_if_applicable<'a>(&mut self) {
self.block_flow.place_float_if_applicable()
}
fn is_absolute_containing_block(&self) -> bool {
self.block_flow.is_absolute_containing_block()
}
fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au) {
self.block_flow.update_late_computed_inline_position_if_necessary(inline_position)
}
fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au) {
self.block_flow.update_late_computed_block_position_if_necessary(block_position)
}
fn build_display_list(&mut self, state: &mut DisplayListBuildState) {
self.build_display_list_for_list_item(state);
}
fn collect_stacking_contexts(&mut self,
parent_id: StackingContextId,
contexts: &mut Vec<Box<StackingContext>>)
-> StackingContextId {
self.block_flow.collect_stacking_contexts(parent_id, contexts)
}
fn repair_style(&mut self, new_style: &Arc<ServoComputedValues>) {
self.block_flow.repair_style(new_style)
}
fn compute_overflow(&self) -> Overflow {
let mut overflow = self.block_flow.compute_overflow();
let flow_size = self.block_flow.base.position.size.to_physical(self.block_flow.base.writing_mode);
let relative_containing_block_size =
&self.block_flow.base.early_absolute_position_info.relative_containing_block_size;
for fragment in &self.marker_fragments {
overflow.union(&fragment.compute_overflow(&flow_size, &relative_containing_block_size))
}
overflow
}
fn generated_containing_block_size(&self, flow: OpaqueFlow) -> LogicalSize<Au> {
self.block_flow.generated_containing_block_size(flow)
}
/// The 'position' property of this flow.
fn positioning(&self) -> position::T {
self.block_flow.positioning()
}
fn iterate_through_fragment_border_boxes(&self,
iterator: &mut FragmentBorderBoxIterator,
level: i32,
stacking_context_position: &Point2D<Au>) {
self.block_flow.iterate_through_fragment_border_boxes(iterator,
level,
stacking_context_position);
for marker in &self.marker_fragments {
if iterator.should_process(marker) {
iterator.process(
marker,
level,
&marker.stacking_relative_border_box(&self.block_flow
.base
.stacking_relative_position,
&self.block_flow
.base
.early_absolute_position_info
.relative_containing_block_size,
self.block_flow
.base
.early_absolute_position_info
.relative_containing_block_mode,
CoordinateSystem::Own)
.translate(stacking_context_position));
}
}
}
fn mutate_fragments(&mut self, mutator: &mut FnMut(&mut Fragment)) {
self.block_flow.mutate_fragments(mutator);
for marker in &mut self.marker_fragments {
(*mutator)(marker)
}
}
}
/// The kind of content that `list-style-type` results in.
pub enum ListStyleTypeContent {
None,
StaticText(char),
GeneratedContent(Box<GeneratedContentInfo>),
}
impl ListStyleTypeContent {
/// Returns the content to be used for the given value of the `list-style-type` property.
pub fn from_list_style_type(list_style_type: list_style_type::T) -> ListStyleTypeContent {
// Just to keep things simple, use a nonbreaking space (Unicode 0xa0) to provide the marker
// separation.
match list_style_type {
list_style_type::T::none => ListStyleTypeContent::None,
list_style_type::T::disc | list_style_type::T::circle | list_style_type::T::square |
list_style_type::T::disclosure_open | list_style_type::T::disclosure_closed => {
let text = generated_content::static_representation(list_style_type);
ListStyleTypeContent::StaticText(text)
}
_ => ListStyleTypeContent::GeneratedContent(box GeneratedContentInfo::ListItem),
}
}
}
|
{
&self.block_flow
}
|
identifier_body
|
list_item.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Layout for elements with a CSS `display` property of `list-item`. These elements consist of a
//! block and an extra inline fragment for the marker.
#![deny(unsafe_code)]
use app_units::Au;
use block::BlockFlow;
use context::LayoutContext;
use display_list_builder::{DisplayListBuildState, ListItemFlowDisplayListBuilding};
use euclid::Point2D;
use floats::FloatKind;
use flow::{Flow, FlowClass, OpaqueFlow};
use fragment::Overflow;
use fragment::{CoordinateSystem, Fragment, FragmentBorderBoxIterator, GeneratedContentInfo};
use generated_content;
use gfx::display_list::StackingContext;
use gfx_traits::StackingContextId;
use inline::InlineMetrics;
use script_layout_interface::restyle_damage::RESOLVE_GENERATED_CONTENT;
use std::sync::Arc;
use style::computed_values::{list_style_type, position};
use style::logical_geometry::LogicalSize;
use style::properties::{ComputedValues, ServoComputedValues};
use style::servo::SharedStyleContext;
use text;
/// A block with the CSS `display` property equal to `list-item`.
#[derive(Debug)]
pub struct ListItemFlow {
/// Data common to all block flows.
pub block_flow: BlockFlow,
/// The marker, if outside. (Markers that are inside are instead just fragments on the interior
/// `InlineFlow`.)
pub marker_fragments: Vec<Fragment>,
}
impl ListItemFlow {
pub fn from_fragments_and_flotation(main_fragment: Fragment,
marker_fragments: Vec<Fragment>,
flotation: Option<FloatKind>)
-> ListItemFlow {
let mut this = ListItemFlow {
block_flow: BlockFlow::from_fragment(main_fragment, flotation),
marker_fragments: marker_fragments,
};
if let Some(ref marker) = this.marker_fragments.first() {
match marker.style().get_list().list_style_type {
list_style_type::T::disc |
list_style_type::T::none |
list_style_type::T::circle |
list_style_type::T::square |
list_style_type::T::disclosure_open |
list_style_type::T::disclosure_closed => {}
_ => this.block_flow.base.restyle_damage.insert(RESOLVE_GENERATED_CONTENT),
}
}
this
}
}
impl Flow for ListItemFlow {
fn class(&self) -> FlowClass {
FlowClass::ListItem
}
fn as_mut_block(&mut self) -> &mut BlockFlow {
&mut self.block_flow
}
fn as_block(&self) -> &BlockFlow {
&self.block_flow
}
fn bubble_inline_sizes(&mut self) {
// The marker contributes no intrinsic inline-size, so…
self.block_flow.bubble_inline_sizes()
}
fn assign_inline_sizes(&mut self, shared_context: &SharedStyleContext) {
self.block_flow.assign_inline_sizes(shared_context);
let mut marker_inline_start = self.block_flow.fragment.border_box.start.i;
for marker in self.marker_fragments.iter_mut().rev() {
let containing_block_inline_size = self.block_flow.base.block_container_inline_size;
let container_block_size = self.block_flow.explicit_block_containing_size(shared_context);
marker.assign_replaced_inline_size_if_necessary(containing_block_inline_size, container_block_size);
// Do this now. There's no need to do this in bubble-widths, since markers do not
// contribute to the inline size of this flow.
let intrinsic_inline_sizes = marker.compute_intrinsic_inline_sizes();
marker.border_box.size.inline =
intrinsic_inline_sizes.content_intrinsic_sizes.preferred_inline_size;
marker_inline_start = marker_inline_start - marker.border_box.size.inline;
marker.border_box.start.i = marker_inline_start;
}
}
fn assign_block_size<'a>(&mut self, layout_context: &'a LayoutContext<'a>) {
self.block_flow.assign_block_size(layout_context);
for marker in &mut self.marker_fragments {
let containing_block_block_size =
self.block_flow.base.block_container_explicit_block_size;
marker.assign_replaced_block_size_if_necessary(containing_block_block_size);
let font_metrics =
text::font_metrics_for_style(&mut layout_context.font_context(),
marker.style.get_font_arc());
let line_height = text::line_height_from_style(&*marker.style, &font_metrics);
let item_inline_metrics = InlineMetrics::from_font_metrics(&font_metrics, line_height);
let marker_inline_metrics = marker.inline_metrics(layout_context);
marker.border_box.start.b = item_inline_metrics.block_size_above_baseline -
marker_inline_metrics.ascent;
marker.border_box.size.block = marker_inline_metrics.ascent +
marker_inline_metrics.depth_below_baseline;
}
}
fn compute_absolute_position(&mut self, layout_context: &LayoutContext) {
self.block_flow.compute_absolute_position(layout_context)
}
fn place_float_if_applicable<'a>(&mut self) {
self.block_flow.place_float_if_applicable()
}
fn is_absolute_containing_block(&self) -> bool {
self.block_flow.is_absolute_containing_block()
}
fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au) {
self.block_flow.update_late_computed_inline_position_if_necessary(inline_position)
}
fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au) {
self.block_flow.update_late_computed_block_position_if_necessary(block_position)
}
fn bu
|
mut self, state: &mut DisplayListBuildState) {
self.build_display_list_for_list_item(state);
}
fn collect_stacking_contexts(&mut self,
parent_id: StackingContextId,
contexts: &mut Vec<Box<StackingContext>>)
-> StackingContextId {
self.block_flow.collect_stacking_contexts(parent_id, contexts)
}
fn repair_style(&mut self, new_style: &Arc<ServoComputedValues>) {
self.block_flow.repair_style(new_style)
}
fn compute_overflow(&self) -> Overflow {
let mut overflow = self.block_flow.compute_overflow();
let flow_size = self.block_flow.base.position.size.to_physical(self.block_flow.base.writing_mode);
let relative_containing_block_size =
&self.block_flow.base.early_absolute_position_info.relative_containing_block_size;
for fragment in &self.marker_fragments {
overflow.union(&fragment.compute_overflow(&flow_size, &relative_containing_block_size))
}
overflow
}
fn generated_containing_block_size(&self, flow: OpaqueFlow) -> LogicalSize<Au> {
self.block_flow.generated_containing_block_size(flow)
}
/// The 'position' property of this flow.
fn positioning(&self) -> position::T {
self.block_flow.positioning()
}
fn iterate_through_fragment_border_boxes(&self,
iterator: &mut FragmentBorderBoxIterator,
level: i32,
stacking_context_position: &Point2D<Au>) {
self.block_flow.iterate_through_fragment_border_boxes(iterator,
level,
stacking_context_position);
for marker in &self.marker_fragments {
if iterator.should_process(marker) {
iterator.process(
marker,
level,
&marker.stacking_relative_border_box(&self.block_flow
.base
.stacking_relative_position,
&self.block_flow
.base
.early_absolute_position_info
.relative_containing_block_size,
self.block_flow
.base
.early_absolute_position_info
.relative_containing_block_mode,
CoordinateSystem::Own)
.translate(stacking_context_position));
}
}
}
fn mutate_fragments(&mut self, mutator: &mut FnMut(&mut Fragment)) {
self.block_flow.mutate_fragments(mutator);
for marker in &mut self.marker_fragments {
(*mutator)(marker)
}
}
}
/// The kind of content that `list-style-type` results in.
pub enum ListStyleTypeContent {
None,
StaticText(char),
GeneratedContent(Box<GeneratedContentInfo>),
}
impl ListStyleTypeContent {
/// Returns the content to be used for the given value of the `list-style-type` property.
pub fn from_list_style_type(list_style_type: list_style_type::T) -> ListStyleTypeContent {
// Just to keep things simple, use a nonbreaking space (Unicode 0xa0) to provide the marker
// separation.
match list_style_type {
list_style_type::T::none => ListStyleTypeContent::None,
list_style_type::T::disc | list_style_type::T::circle | list_style_type::T::square |
list_style_type::T::disclosure_open | list_style_type::T::disclosure_closed => {
let text = generated_content::static_representation(list_style_type);
ListStyleTypeContent::StaticText(text)
}
_ => ListStyleTypeContent::GeneratedContent(box GeneratedContentInfo::ListItem),
}
}
}
|
ild_display_list(&
|
identifier_name
|
list_item.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Layout for elements with a CSS `display` property of `list-item`. These elements consist of a
//! block and an extra inline fragment for the marker.
#![deny(unsafe_code)]
use app_units::Au;
use block::BlockFlow;
use context::LayoutContext;
use display_list_builder::{DisplayListBuildState, ListItemFlowDisplayListBuilding};
use euclid::Point2D;
use floats::FloatKind;
use flow::{Flow, FlowClass, OpaqueFlow};
use fragment::Overflow;
use fragment::{CoordinateSystem, Fragment, FragmentBorderBoxIterator, GeneratedContentInfo};
use generated_content;
use gfx::display_list::StackingContext;
use gfx_traits::StackingContextId;
use inline::InlineMetrics;
use script_layout_interface::restyle_damage::RESOLVE_GENERATED_CONTENT;
use std::sync::Arc;
use style::computed_values::{list_style_type, position};
use style::logical_geometry::LogicalSize;
use style::properties::{ComputedValues, ServoComputedValues};
use style::servo::SharedStyleContext;
use text;
/// A block with the CSS `display` property equal to `list-item`.
#[derive(Debug)]
pub struct ListItemFlow {
/// Data common to all block flows.
pub block_flow: BlockFlow,
/// The marker, if outside. (Markers that are inside are instead just fragments on the interior
/// `InlineFlow`.)
pub marker_fragments: Vec<Fragment>,
}
impl ListItemFlow {
pub fn from_fragments_and_flotation(main_fragment: Fragment,
marker_fragments: Vec<Fragment>,
flotation: Option<FloatKind>)
-> ListItemFlow {
let mut this = ListItemFlow {
block_flow: BlockFlow::from_fragment(main_fragment, flotation),
marker_fragments: marker_fragments,
};
if let Some(ref marker) = this.marker_fragments.first() {
match marker.style().get_list().list_style_type {
list_style_type::T::disc |
list_style_type::T::none |
list_style_type::T::circle |
list_style_type::T::square |
list_style_type::T::disclosure_open |
list_style_type::T::disclosure_closed => {}
_ => this.block_flow.base.restyle_damage.insert(RESOLVE_GENERATED_CONTENT),
}
}
this
}
}
impl Flow for ListItemFlow {
fn class(&self) -> FlowClass {
FlowClass::ListItem
}
fn as_mut_block(&mut self) -> &mut BlockFlow {
&mut self.block_flow
}
fn as_block(&self) -> &BlockFlow {
&self.block_flow
}
fn bubble_inline_sizes(&mut self) {
// The marker contributes no intrinsic inline-size, so…
self.block_flow.bubble_inline_sizes()
}
fn assign_inline_sizes(&mut self, shared_context: &SharedStyleContext) {
self.block_flow.assign_inline_sizes(shared_context);
let mut marker_inline_start = self.block_flow.fragment.border_box.start.i;
for marker in self.marker_fragments.iter_mut().rev() {
let containing_block_inline_size = self.block_flow.base.block_container_inline_size;
let container_block_size = self.block_flow.explicit_block_containing_size(shared_context);
marker.assign_replaced_inline_size_if_necessary(containing_block_inline_size, container_block_size);
// Do this now. There's no need to do this in bubble-widths, since markers do not
// contribute to the inline size of this flow.
let intrinsic_inline_sizes = marker.compute_intrinsic_inline_sizes();
marker.border_box.size.inline =
intrinsic_inline_sizes.content_intrinsic_sizes.preferred_inline_size;
marker_inline_start = marker_inline_start - marker.border_box.size.inline;
marker.border_box.start.i = marker_inline_start;
}
}
fn assign_block_size<'a>(&mut self, layout_context: &'a LayoutContext<'a>) {
self.block_flow.assign_block_size(layout_context);
for marker in &mut self.marker_fragments {
let containing_block_block_size =
self.block_flow.base.block_container_explicit_block_size;
marker.assign_replaced_block_size_if_necessary(containing_block_block_size);
let font_metrics =
text::font_metrics_for_style(&mut layout_context.font_context(),
marker.style.get_font_arc());
let line_height = text::line_height_from_style(&*marker.style, &font_metrics);
let item_inline_metrics = InlineMetrics::from_font_metrics(&font_metrics, line_height);
let marker_inline_metrics = marker.inline_metrics(layout_context);
marker.border_box.start.b = item_inline_metrics.block_size_above_baseline -
marker_inline_metrics.ascent;
marker.border_box.size.block = marker_inline_metrics.ascent +
marker_inline_metrics.depth_below_baseline;
}
}
fn compute_absolute_position(&mut self, layout_context: &LayoutContext) {
self.block_flow.compute_absolute_position(layout_context)
}
fn place_float_if_applicable<'a>(&mut self) {
self.block_flow.place_float_if_applicable()
}
fn is_absolute_containing_block(&self) -> bool {
self.block_flow.is_absolute_containing_block()
}
fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au) {
self.block_flow.update_late_computed_inline_position_if_necessary(inline_position)
}
fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au) {
self.block_flow.update_late_computed_block_position_if_necessary(block_position)
}
fn build_display_list(&mut self, state: &mut DisplayListBuildState) {
self.build_display_list_for_list_item(state);
}
fn collect_stacking_contexts(&mut self,
parent_id: StackingContextId,
contexts: &mut Vec<Box<StackingContext>>)
-> StackingContextId {
self.block_flow.collect_stacking_contexts(parent_id, contexts)
}
fn repair_style(&mut self, new_style: &Arc<ServoComputedValues>) {
self.block_flow.repair_style(new_style)
|
let flow_size = self.block_flow.base.position.size.to_physical(self.block_flow.base.writing_mode);
let relative_containing_block_size =
&self.block_flow.base.early_absolute_position_info.relative_containing_block_size;
for fragment in &self.marker_fragments {
overflow.union(&fragment.compute_overflow(&flow_size, &relative_containing_block_size))
}
overflow
}
fn generated_containing_block_size(&self, flow: OpaqueFlow) -> LogicalSize<Au> {
self.block_flow.generated_containing_block_size(flow)
}
/// The 'position' property of this flow.
fn positioning(&self) -> position::T {
self.block_flow.positioning()
}
fn iterate_through_fragment_border_boxes(&self,
iterator: &mut FragmentBorderBoxIterator,
level: i32,
stacking_context_position: &Point2D<Au>) {
self.block_flow.iterate_through_fragment_border_boxes(iterator,
level,
stacking_context_position);
for marker in &self.marker_fragments {
if iterator.should_process(marker) {
iterator.process(
marker,
level,
&marker.stacking_relative_border_box(&self.block_flow
.base
.stacking_relative_position,
&self.block_flow
.base
.early_absolute_position_info
.relative_containing_block_size,
self.block_flow
.base
.early_absolute_position_info
.relative_containing_block_mode,
CoordinateSystem::Own)
.translate(stacking_context_position));
}
}
}
fn mutate_fragments(&mut self, mutator: &mut FnMut(&mut Fragment)) {
self.block_flow.mutate_fragments(mutator);
for marker in &mut self.marker_fragments {
(*mutator)(marker)
}
}
}
/// The kind of content that `list-style-type` results in.
pub enum ListStyleTypeContent {
None,
StaticText(char),
GeneratedContent(Box<GeneratedContentInfo>),
}
impl ListStyleTypeContent {
/// Returns the content to be used for the given value of the `list-style-type` property.
pub fn from_list_style_type(list_style_type: list_style_type::T) -> ListStyleTypeContent {
// Just to keep things simple, use a nonbreaking space (Unicode 0xa0) to provide the marker
// separation.
match list_style_type {
list_style_type::T::none => ListStyleTypeContent::None,
list_style_type::T::disc | list_style_type::T::circle | list_style_type::T::square |
list_style_type::T::disclosure_open | list_style_type::T::disclosure_closed => {
let text = generated_content::static_representation(list_style_type);
ListStyleTypeContent::StaticText(text)
}
_ => ListStyleTypeContent::GeneratedContent(box GeneratedContentInfo::ListItem),
}
}
}
|
}
fn compute_overflow(&self) -> Overflow {
let mut overflow = self.block_flow.compute_overflow();
|
random_line_split
|
error.rs
|
use std::error::Error as StdError;
use std::ffi::OsString;
use std::fmt;
use std::io::Error as IoError;
use std::io::ErrorKind as IoErrorKind;
use std::path::StripPrefixError;
/// A list specifying general categories of fs_extra error.
#[derive(Debug)]
pub enum ErrorKind {
/// An entity was not found.
NotFound,
/// The operation lacked the necessary privileges to complete.
PermissionDenied,
/// An entity already exists.
AlreadyExists,
/// This operation was interrupted.
Interrupted,
/// Path does not a directory.
InvalidFolder,
/// Path does not a file.
InvalidFile,
/// Invalid file name.
InvalidFileName,
/// Invalid path.
InvalidPath,
/// Any I/O error.
Io(IoError),
/// Any StripPrefix error.
StripPrefix(StripPrefixError),
/// Any OsString error.
OsString(OsString),
/// Any fs_extra error not part of this list.
Other,
}
impl ErrorKind {
fn as_str(&self) -> &str {
match *self {
ErrorKind::NotFound => "entity not found",
ErrorKind::PermissionDenied => "permission denied",
ErrorKind::AlreadyExists => "entity already exists",
ErrorKind::Interrupted => "operation interrupted",
ErrorKind::Other => "other os error",
ErrorKind::InvalidFolder => "invalid folder error",
ErrorKind::InvalidFile => "invalid file error",
ErrorKind::InvalidFileName => "invalid file name error",
ErrorKind::InvalidPath => "invalid path error",
ErrorKind::Io(_) => "Io error",
ErrorKind::StripPrefix(_) => "Strip prefix error",
ErrorKind::OsString(_) => "OsString error",
}
}
}
/// A specialized Result type for fs_extra operations.
///
/// This typedef is generally used to avoid writing out fs_extra::Error directly
/// and is otherwise a direct mapping to Result.
///
///#Examples
///
/// ```rust,ignore
/// extern crate fs_extra;
/// use fs_extra::dir::create;
///
///fn get_string() -> io::Result<()> {
///
/// create("test_dir")?;
///
/// Ok(())
/// }
/// ```
pub type Result<T> = ::std::result::Result<T, Error>;
/// The error type for fs_extra operations with files and folder.
///
/// Errors mostly originate from the underlying OS, but custom instances of
/// `Error` can be created with crafted error messages and a particular value of
/// [`ErrorKind`].
///
/// [`ErrorKind`]: enum.ErrorKind.html
#[derive(Debug)]
pub struct Error {
/// Type error
pub kind: ErrorKind,
message: String,
}
impl Error {
/// Create a new fs_extra error from a kind of error error as well as an arbitrary error payload.
///
///#Examples
/// ```rust,ignore
///
/// extern crate fs_extra;
/// use fs_extra::error::{Error, ErrorKind};
///
/// errors can be created from strings
/// let custom_error = Error::new(ErrorKind::Other, "Other Error!");
/// // errors can also be created from other errors
/// let custom_error2 = Error::new(ErrorKind::Interrupted, custom_error);
///
/// ```
pub fn new(kind: ErrorKind, message: &str) -> Error {
Error {
kind,
message: message.to_string(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
|
}
impl StdError for Error {
fn description(&self) -> &str {
self.kind.as_str()
}
}
impl From<StripPrefixError> for Error {
fn from(err: StripPrefixError) -> Error {
Error::new(
ErrorKind::StripPrefix(err),
"StripPrefixError. Look inside for more details",
)
}
}
impl From<OsString> for Error {
fn from(err: OsString) -> Error {
Error::new(
ErrorKind::OsString(err),
"OsString. Look inside for more details",
)
}
}
impl From<IoError> for Error {
fn from(err: IoError) -> Error {
let err_kind: ErrorKind;
match err.kind() {
IoErrorKind::NotFound => err_kind = ErrorKind::NotFound,
IoErrorKind::PermissionDenied => err_kind = ErrorKind::PermissionDenied,
IoErrorKind::AlreadyExists => err_kind = ErrorKind::AlreadyExists,
IoErrorKind::Interrupted => err_kind = ErrorKind::Interrupted,
IoErrorKind::Other => err_kind = ErrorKind::Other,
_ => {
err_kind = ErrorKind::Io(err);
return Error::new(err_kind, "Io error. Look inside err_kind for more details.");
}
}
Error::new(err_kind, &err.to_string())
}
}
|
{
write!(f, "{}", self.message)
}
|
identifier_body
|
error.rs
|
use std::error::Error as StdError;
use std::ffi::OsString;
use std::fmt;
use std::io::Error as IoError;
use std::io::ErrorKind as IoErrorKind;
use std::path::StripPrefixError;
/// A list specifying general categories of fs_extra error.
#[derive(Debug)]
pub enum ErrorKind {
/// An entity was not found.
NotFound,
/// The operation lacked the necessary privileges to complete.
PermissionDenied,
/// An entity already exists.
AlreadyExists,
/// This operation was interrupted.
Interrupted,
/// Path does not a directory.
InvalidFolder,
/// Path does not a file.
InvalidFile,
/// Invalid file name.
InvalidFileName,
/// Invalid path.
InvalidPath,
/// Any I/O error.
Io(IoError),
/// Any StripPrefix error.
|
StripPrefix(StripPrefixError),
/// Any OsString error.
OsString(OsString),
/// Any fs_extra error not part of this list.
Other,
}
impl ErrorKind {
fn as_str(&self) -> &str {
match *self {
ErrorKind::NotFound => "entity not found",
ErrorKind::PermissionDenied => "permission denied",
ErrorKind::AlreadyExists => "entity already exists",
ErrorKind::Interrupted => "operation interrupted",
ErrorKind::Other => "other os error",
ErrorKind::InvalidFolder => "invalid folder error",
ErrorKind::InvalidFile => "invalid file error",
ErrorKind::InvalidFileName => "invalid file name error",
ErrorKind::InvalidPath => "invalid path error",
ErrorKind::Io(_) => "Io error",
ErrorKind::StripPrefix(_) => "Strip prefix error",
ErrorKind::OsString(_) => "OsString error",
}
}
}
/// A specialized Result type for fs_extra operations.
///
/// This typedef is generally used to avoid writing out fs_extra::Error directly
/// and is otherwise a direct mapping to Result.
///
///#Examples
///
/// ```rust,ignore
/// extern crate fs_extra;
/// use fs_extra::dir::create;
///
///fn get_string() -> io::Result<()> {
///
/// create("test_dir")?;
///
/// Ok(())
/// }
/// ```
pub type Result<T> = ::std::result::Result<T, Error>;
/// The error type for fs_extra operations with files and folder.
///
/// Errors mostly originate from the underlying OS, but custom instances of
/// `Error` can be created with crafted error messages and a particular value of
/// [`ErrorKind`].
///
/// [`ErrorKind`]: enum.ErrorKind.html
#[derive(Debug)]
pub struct Error {
/// Type error
pub kind: ErrorKind,
message: String,
}
impl Error {
/// Create a new fs_extra error from a kind of error error as well as an arbitrary error payload.
///
///#Examples
/// ```rust,ignore
///
/// extern crate fs_extra;
/// use fs_extra::error::{Error, ErrorKind};
///
/// errors can be created from strings
/// let custom_error = Error::new(ErrorKind::Other, "Other Error!");
/// // errors can also be created from other errors
/// let custom_error2 = Error::new(ErrorKind::Interrupted, custom_error);
///
/// ```
pub fn new(kind: ErrorKind, message: &str) -> Error {
Error {
kind,
message: message.to_string(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.message)
}
}
impl StdError for Error {
fn description(&self) -> &str {
self.kind.as_str()
}
}
impl From<StripPrefixError> for Error {
fn from(err: StripPrefixError) -> Error {
Error::new(
ErrorKind::StripPrefix(err),
"StripPrefixError. Look inside for more details",
)
}
}
impl From<OsString> for Error {
fn from(err: OsString) -> Error {
Error::new(
ErrorKind::OsString(err),
"OsString. Look inside for more details",
)
}
}
impl From<IoError> for Error {
fn from(err: IoError) -> Error {
let err_kind: ErrorKind;
match err.kind() {
IoErrorKind::NotFound => err_kind = ErrorKind::NotFound,
IoErrorKind::PermissionDenied => err_kind = ErrorKind::PermissionDenied,
IoErrorKind::AlreadyExists => err_kind = ErrorKind::AlreadyExists,
IoErrorKind::Interrupted => err_kind = ErrorKind::Interrupted,
IoErrorKind::Other => err_kind = ErrorKind::Other,
_ => {
err_kind = ErrorKind::Io(err);
return Error::new(err_kind, "Io error. Look inside err_kind for more details.");
}
}
Error::new(err_kind, &err.to_string())
}
}
|
random_line_split
|
|
error.rs
|
use std::error::Error as StdError;
use std::ffi::OsString;
use std::fmt;
use std::io::Error as IoError;
use std::io::ErrorKind as IoErrorKind;
use std::path::StripPrefixError;
/// A list specifying general categories of fs_extra error.
#[derive(Debug)]
pub enum ErrorKind {
/// An entity was not found.
NotFound,
/// The operation lacked the necessary privileges to complete.
PermissionDenied,
/// An entity already exists.
AlreadyExists,
/// This operation was interrupted.
Interrupted,
/// Path does not a directory.
InvalidFolder,
/// Path does not a file.
InvalidFile,
/// Invalid file name.
InvalidFileName,
/// Invalid path.
InvalidPath,
/// Any I/O error.
Io(IoError),
/// Any StripPrefix error.
StripPrefix(StripPrefixError),
/// Any OsString error.
OsString(OsString),
/// Any fs_extra error not part of this list.
Other,
}
impl ErrorKind {
fn as_str(&self) -> &str {
match *self {
ErrorKind::NotFound => "entity not found",
ErrorKind::PermissionDenied => "permission denied",
ErrorKind::AlreadyExists => "entity already exists",
ErrorKind::Interrupted => "operation interrupted",
ErrorKind::Other => "other os error",
ErrorKind::InvalidFolder => "invalid folder error",
ErrorKind::InvalidFile => "invalid file error",
ErrorKind::InvalidFileName => "invalid file name error",
ErrorKind::InvalidPath => "invalid path error",
ErrorKind::Io(_) => "Io error",
ErrorKind::StripPrefix(_) => "Strip prefix error",
ErrorKind::OsString(_) => "OsString error",
}
}
}
/// A specialized Result type for fs_extra operations.
///
/// This typedef is generally used to avoid writing out fs_extra::Error directly
/// and is otherwise a direct mapping to Result.
///
///#Examples
///
/// ```rust,ignore
/// extern crate fs_extra;
/// use fs_extra::dir::create;
///
///fn get_string() -> io::Result<()> {
///
/// create("test_dir")?;
///
/// Ok(())
/// }
/// ```
pub type Result<T> = ::std::result::Result<T, Error>;
/// The error type for fs_extra operations with files and folder.
///
/// Errors mostly originate from the underlying OS, but custom instances of
/// `Error` can be created with crafted error messages and a particular value of
/// [`ErrorKind`].
///
/// [`ErrorKind`]: enum.ErrorKind.html
#[derive(Debug)]
pub struct Error {
/// Type error
pub kind: ErrorKind,
message: String,
}
impl Error {
/// Create a new fs_extra error from a kind of error error as well as an arbitrary error payload.
///
///#Examples
/// ```rust,ignore
///
/// extern crate fs_extra;
/// use fs_extra::error::{Error, ErrorKind};
///
/// errors can be created from strings
/// let custom_error = Error::new(ErrorKind::Other, "Other Error!");
/// // errors can also be created from other errors
/// let custom_error2 = Error::new(ErrorKind::Interrupted, custom_error);
///
/// ```
pub fn
|
(kind: ErrorKind, message: &str) -> Error {
Error {
kind,
message: message.to_string(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.message)
}
}
impl StdError for Error {
fn description(&self) -> &str {
self.kind.as_str()
}
}
impl From<StripPrefixError> for Error {
fn from(err: StripPrefixError) -> Error {
Error::new(
ErrorKind::StripPrefix(err),
"StripPrefixError. Look inside for more details",
)
}
}
impl From<OsString> for Error {
fn from(err: OsString) -> Error {
Error::new(
ErrorKind::OsString(err),
"OsString. Look inside for more details",
)
}
}
impl From<IoError> for Error {
fn from(err: IoError) -> Error {
let err_kind: ErrorKind;
match err.kind() {
IoErrorKind::NotFound => err_kind = ErrorKind::NotFound,
IoErrorKind::PermissionDenied => err_kind = ErrorKind::PermissionDenied,
IoErrorKind::AlreadyExists => err_kind = ErrorKind::AlreadyExists,
IoErrorKind::Interrupted => err_kind = ErrorKind::Interrupted,
IoErrorKind::Other => err_kind = ErrorKind::Other,
_ => {
err_kind = ErrorKind::Io(err);
return Error::new(err_kind, "Io error. Look inside err_kind for more details.");
}
}
Error::new(err_kind, &err.to_string())
}
}
|
new
|
identifier_name
|
build.rs
|
//! Generate a module with a custom `#[path=...]` for each of the files in our
//! libclang version-specific test expectations so that they get their layout
//! tests run. We need to do this because cargo doesn't automatically detect
//! tests subdirectories.
use std::env;
use std::fs;
use std::io::Write;
use std::path::Path;
const LIBCLANG_VERSION_DIRS: &'static [&'static str] = &[
"libclang-4",
"libclang-5",
"libclang-9",
];
fn main()
|
}
println!("cargo:rerun-if-changed={}", path.display());
let module_name: String = path
.display()
.to_string()
.chars()
.map(|c| match c {
'a'..='z' | 'A'..='Z' | '0'..='9' => c,
_ => '_',
})
.collect();
test_string.push_str(&format!(
r###"
#[path = "{}"]
mod {};
"###,
path.display(),
module_name,
));
}
}
let out_path = Path::new(&env::var_os("OUT_DIR").unwrap())
.join("libclang_version_specific_generated_tests.rs");
let mut test_file = fs::File::create(out_path).unwrap();
test_file.write_all(test_string.as_bytes()).unwrap();
}
|
{
println!("cargo:rerun-if-changed=build.rs");
let mut test_string = String::new();
for dir in LIBCLANG_VERSION_DIRS {
let dir = Path::new(&env::var_os("CARGO_MANIFEST_DIR").unwrap())
.join("tests")
.join(dir);
println!("cargo:rerun-if-changed={}", dir.display());
for entry in fs::read_dir(dir).unwrap() {
let entry = entry.unwrap();
let path = entry.path();
let path = path.canonicalize().unwrap_or_else(|_| path.into());
if path.extension().map(|e| e.to_string_lossy()) !=
Some("rs".into())
{
continue;
|
identifier_body
|
build.rs
|
//! Generate a module with a custom `#[path=...]` for each of the files in our
//! libclang version-specific test expectations so that they get their layout
//! tests run. We need to do this because cargo doesn't automatically detect
//! tests subdirectories.
use std::env;
use std::fs;
use std::io::Write;
use std::path::Path;
const LIBCLANG_VERSION_DIRS: &'static [&'static str] = &[
"libclang-4",
"libclang-5",
"libclang-9",
];
fn main() {
println!("cargo:rerun-if-changed=build.rs");
let mut test_string = String::new();
for dir in LIBCLANG_VERSION_DIRS {
let dir = Path::new(&env::var_os("CARGO_MANIFEST_DIR").unwrap())
.join("tests")
.join(dir);
println!("cargo:rerun-if-changed={}", dir.display());
for entry in fs::read_dir(dir).unwrap() {
let entry = entry.unwrap();
let path = entry.path();
let path = path.canonicalize().unwrap_or_else(|_| path.into());
if path.extension().map(|e| e.to_string_lossy())!=
Some("rs".into())
{
continue;
}
println!("cargo:rerun-if-changed={}", path.display());
let module_name: String = path
.display()
.to_string()
.chars()
.map(|c| match c {
'a'..='z' | 'A'..='Z' | '0'..='9' => c,
_ => '_',
})
|
test_string.push_str(&format!(
r###"
#[path = "{}"]
mod {};
"###,
path.display(),
module_name,
));
}
}
let out_path = Path::new(&env::var_os("OUT_DIR").unwrap())
.join("libclang_version_specific_generated_tests.rs");
let mut test_file = fs::File::create(out_path).unwrap();
test_file.write_all(test_string.as_bytes()).unwrap();
}
|
.collect();
|
random_line_split
|
build.rs
|
//! Generate a module with a custom `#[path=...]` for each of the files in our
//! libclang version-specific test expectations so that they get their layout
//! tests run. We need to do this because cargo doesn't automatically detect
//! tests subdirectories.
use std::env;
use std::fs;
use std::io::Write;
use std::path::Path;
const LIBCLANG_VERSION_DIRS: &'static [&'static str] = &[
"libclang-4",
"libclang-5",
"libclang-9",
];
fn
|
() {
println!("cargo:rerun-if-changed=build.rs");
let mut test_string = String::new();
for dir in LIBCLANG_VERSION_DIRS {
let dir = Path::new(&env::var_os("CARGO_MANIFEST_DIR").unwrap())
.join("tests")
.join(dir);
println!("cargo:rerun-if-changed={}", dir.display());
for entry in fs::read_dir(dir).unwrap() {
let entry = entry.unwrap();
let path = entry.path();
let path = path.canonicalize().unwrap_or_else(|_| path.into());
if path.extension().map(|e| e.to_string_lossy())!=
Some("rs".into())
{
continue;
}
println!("cargo:rerun-if-changed={}", path.display());
let module_name: String = path
.display()
.to_string()
.chars()
.map(|c| match c {
'a'..='z' | 'A'..='Z' | '0'..='9' => c,
_ => '_',
})
.collect();
test_string.push_str(&format!(
r###"
#[path = "{}"]
mod {};
"###,
path.display(),
module_name,
));
}
}
let out_path = Path::new(&env::var_os("OUT_DIR").unwrap())
.join("libclang_version_specific_generated_tests.rs");
let mut test_file = fs::File::create(out_path).unwrap();
test_file.write_all(test_string.as_bytes()).unwrap();
}
|
main
|
identifier_name
|
glyph.rs
|
u32 = 0x0000FFFF;
fn is_simple_glyph_id(id: GlyphId) -> bool {
((id as u32) & GLYPH_ID_MASK) == id
}
fn is_simple_advance(advance: Au) -> bool {
advance >= Au(0) && {
let unsigned_au = advance.0 as u32;
(unsigned_au & (GLYPH_ADVANCE_MASK >> GLYPH_ADVANCE_SHIFT)) == unsigned_au
}
}
type DetailedGlyphCount = u16;
// Getters and setters for GlyphEntry. Setter methods are functional,
// because GlyphEntry is immutable and only a u32 in size.
impl GlyphEntry {
#[inline(always)]
fn advance(&self) -> Au {
Au(((self.value & GLYPH_ADVANCE_MASK) >> GLYPH_ADVANCE_SHIFT) as i32)
}
#[inline]
fn id(&self) -> GlyphId {
self.value & GLYPH_ID_MASK
}
/// True if original char was normal (U+0020) space. Other chars may
/// map to space glyph, but this does not account for them.
fn char_is_space(&self) -> bool {
self.has_flag(FLAG_CHAR_IS_SPACE)
}
#[inline(always)]
fn set_char_is_space(&mut self) {
self.value |= FLAG_CHAR_IS_SPACE;
}
fn glyph_count(&self) -> u16 {
assert!(!self.is_simple());
(self.value & GLYPH_COUNT_MASK) as u16
}
#[inline(always)]
fn is_simple(&self) -> bool {
self.has_flag(FLAG_IS_SIMPLE_GLYPH)
}
#[inline(always)]
fn has_flag(&self, flag: u32) -> bool {
(self.value & flag)!= 0
}
}
// Stores data for a detailed glyph, in the case that several glyphs
// correspond to one character, or the glyph's data couldn't be packed.
#[derive(Clone, Debug, Copy, Deserialize, Serialize)]
struct DetailedGlyph {
id: GlyphId,
// glyph's advance, in the text's direction (LTR or RTL)
advance: Au,
// glyph's offset from the font's em-box (from top-left)
offset: Point2D<Au>,
}
impl DetailedGlyph {
fn new(id: GlyphId, advance: Au, offset: Point2D<Au>) -> DetailedGlyph {
DetailedGlyph {
id: id,
advance: advance,
offset: offset,
}
}
}
#[derive(PartialEq, Clone, Eq, Debug, Copy, Deserialize, Serialize)]
struct DetailedGlyphRecord {
// source string offset/GlyphEntry offset in the TextRun
entry_offset: ByteIndex,
// offset into the detailed glyphs buffer
detail_offset: usize,
}
impl PartialOrd for DetailedGlyphRecord {
fn partial_cmp(&self, other: &DetailedGlyphRecord) -> Option<Ordering> {
self.entry_offset.partial_cmp(&other.entry_offset)
}
}
impl Ord for DetailedGlyphRecord {
fn cmp(&self, other: &DetailedGlyphRecord) -> Ordering {
self.entry_offset.cmp(&other.entry_offset)
}
}
// Manages the lookup table for detailed glyphs. Sorting is deferred
// until a lookup is actually performed; this matches the expected
// usage pattern of setting/appending all the detailed glyphs, and
// then querying without setting.
#[derive(Clone, Deserialize, Serialize)]
struct DetailedGlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_buffer: Vec<DetailedGlyph>,
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_lookup: Vec<DetailedGlyphRecord>,
lookup_is_sorted: bool,
}
impl<'a> DetailedGlyphStore {
fn new() -> DetailedGlyphStore {
DetailedGlyphStore {
detail_buffer: vec!(), // TODO: default size?
detail_lookup: vec!(),
lookup_is_sorted: false,
}
}
fn add_detailed_glyphs_for_entry(&mut self, entry_offset: ByteIndex, glyphs: &[DetailedGlyph]) {
let entry = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: self.detail_buffer.len(),
};
debug!("Adding entry[off={:?}] for detailed glyphs: {:?}", entry_offset, glyphs);
/* TODO: don't actually assert this until asserts are compiled
in/out based on severity, debug/release, etc. This assertion
would wreck the complexity of the lookup.
See Rust Issue #3647, #2228, #3627 for related information.
do self.detail_lookup.borrow |arr| {
assert!arr.contains(entry)
}
*/
self.detail_lookup.push(entry);
self.detail_buffer.extend_from_slice(glyphs);
self.lookup_is_sorted = false;
}
fn detailed_glyphs_for_entry(&'a self, entry_offset: ByteIndex, count: u16)
-> &'a [DetailedGlyph] {
debug!("Requesting detailed glyphs[n={}] for entry[off={:?}]", count, entry_offset);
// FIXME: Is this right? --pcwalton
// TODO: should fix this somewhere else
if count == 0 {
return &self.detail_buffer[0..0];
}
assert!((count as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (count as usize) <= self.detail_buffer.len());
// return a slice into the buffer
&self.detail_buffer[i.. i + count as usize]
}
fn detailed_glyph_with_index(&'a self,
entry_offset: ByteIndex,
detail_offset: u16)
-> &'a DetailedGlyph {
assert!((detail_offset as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (detail_offset as usize) < self.detail_buffer.len());
&self.detail_buffer[i + (detail_offset as usize)]
}
fn ensure_sorted(&mut self) {
if self.lookup_is_sorted {
return;
}
// Sorting a unique vector is surprisingly hard. The following
// code is a good argument for using DVecs, but they require
// immutable locations thus don't play well with freezing.
// Thar be dragons here. You have been warned. (Tips accepted.)
let mut unsorted_records: Vec<DetailedGlyphRecord> = vec!();
mem::swap(&mut self.detail_lookup, &mut unsorted_records);
let mut mut_records: Vec<DetailedGlyphRecord> = unsorted_records;
mut_records.sort_by(|a, b| {
if a < b {
Ordering::Less
} else {
Ordering::Greater
}
});
let mut sorted_records = mut_records;
mem::swap(&mut self.detail_lookup, &mut sorted_records);
self.lookup_is_sorted = true;
}
}
// This struct is used by GlyphStore clients to provide new glyph data.
// It should be allocated on the stack and passed by reference to GlyphStore.
#[derive(Copy, Clone)]
pub struct GlyphData {
id: GlyphId,
advance: Au,
offset: Point2D<Au>,
cluster_start: bool,
ligature_start: bool,
}
impl GlyphData {
/// Creates a new entry for one glyph.
pub fn new(id: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
cluster_start: bool,
ligature_start: bool)
-> GlyphData {
GlyphData {
id: id,
advance: advance,
offset: offset.unwrap_or(Point2D::zero()),
cluster_start: cluster_start,
ligature_start: ligature_start,
}
}
}
// This enum is a proxy that's provided to GlyphStore clients when iterating
// through glyphs (either for a particular TextRun offset, or all glyphs).
// Rather than eagerly assembling and copying glyph data, it only retrieves
// values as they are needed from the GlyphStore, using provided offsets.
#[derive(Copy, Clone)]
pub enum GlyphInfo<'a> {
Simple(&'a GlyphStore, ByteIndex),
Detail(&'a GlyphStore, ByteIndex, u16),
}
impl<'a> GlyphInfo<'a> {
pub fn id(self) -> GlyphId {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].id(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.detailed_glyph_with_index(entry_i, detail_j).id
}
}
}
#[inline(always)]
// FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
pub fn advance(self) -> Au {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].advance(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.detailed_glyph_with_index(entry_i, detail_j).advance
}
}
}
#[inline]
pub fn offset(self) -> Option<Point2D<Au>> {
match self {
GlyphInfo::Simple(_, _) => None,
GlyphInfo::Detail(store, entry_i, detail_j) => {
Some(store.detail_store.detailed_glyph_with_index(entry_i, detail_j).offset)
}
}
}
pub fn char_is_space(self) -> bool {
let (store, entry_i) = match self {
GlyphInfo::Simple(store, entry_i) => (store, entry_i),
GlyphInfo::Detail(store, entry_i, _) => (store, entry_i),
};
store.char_is_space(entry_i)
}
}
/// Stores the glyph data belonging to a text run.
///
/// Simple glyphs are stored inline in the `entry_buffer`, detailed glyphs are
/// stored as pointers into the `detail_store`.
///
/// ~~~ignore
/// +- GlyphStore --------------------------------+
/// | +---+---+---+---+---+---+---+ |
/// | entry_buffer: | | s | | s | | s | s | | d = detailed
/// | +-|-+---+-|-+---+-|-+---+---+ | s = simple
/// | | | | |
/// | | +---+-------+ |
/// | | | |
/// | +-V-+-V-+ |
/// | detail_store: | d | d | |
/// | +---+---+ |
/// +---------------------------------------------+
/// ~~~
#[derive(Clone, Deserialize, Serialize)]
pub struct GlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
/// A buffer of glyphs within the text run, in the order in which they
/// appear in the input text.
/// Any changes will also need to be reflected in
/// transmute_entry_buffer_to_u32_buffer().
entry_buffer: Vec<GlyphEntry>,
/// A store of the detailed glyph data. Detailed glyphs contained in the
/// `entry_buffer` point to locations in this data structure.
detail_store: DetailedGlyphStore,
/// A cache of the advance of the entire glyph store.
total_advance: Au,
/// A cache of the number of spaces in the entire glyph store.
total_spaces: i32,
/// Used to check if fast path should be used in glyph iteration.
has_detailed_glyphs: bool,
is_whitespace: bool,
is_rtl: bool,
}
int_range_index! {
#[derive(Deserialize, Serialize, RustcEncodable)]
#[doc = "An index that refers to a byte offset in a text run. This could \
point to the middle of a glyph."]
#[derive(HeapSizeOf)]
struct ByteIndex(isize)
}
impl<'a> GlyphStore {
/// Initializes the glyph store, but doesn't actually shape anything.
///
/// Use the `add_*` methods to store glyph data.
pub fn new(length: usize, is_whitespace: bool, is_rtl: bool) -> GlyphStore {
assert!(length > 0);
GlyphStore {
entry_buffer: vec![GlyphEntry::initial(); length],
detail_store: DetailedGlyphStore::new(),
total_advance: Au(0),
total_spaces: 0,
has_detailed_glyphs: false,
is_whitespace: is_whitespace,
is_rtl: is_rtl,
}
}
#[inline]
pub fn len(&self) -> ByteIndex {
ByteIndex(self.entry_buffer.len() as isize)
}
#[inline]
pub fn is_whitespace(&self) -> bool {
self.is_whitespace
}
pub fn finalize_changes(&mut self) {
self.detail_store.ensure_sorted();
self.cache_total_advance_and_spaces()
}
#[inline(never)]
fn cache_total_advance_and_spaces(&mut self) {
let mut total_advance = Au(0);
let mut total_spaces = 0;
for glyph in self.iter_glyphs_for_byte_range(&Range::new(ByteIndex(0), self.len())) {
total_advance = total_advance + glyph.advance();
if glyph.char_is_space() {
total_spaces += 1;
}
}
self.total_advance = total_advance;
self.total_spaces = total_spaces;
}
/// Adds a single glyph.
pub fn add_glyph_for_byte_index(&mut self,
i: ByteIndex,
character: char,
data: &GlyphData) {
let glyph_is_compressible = is_simple_glyph_id(data.id) &&
is_simple_advance(data.advance) &&
data.offset == Point2D::zero() &&
data.cluster_start; // others are stored in detail buffer
debug_assert!(data.ligature_start); // can't compress ligature continuation glyphs.
debug_assert!(i < self.len());
let mut entry = if glyph_is_compressible {
GlyphEntry::simple(data.id, data.advance)
} else {
let glyph = &[DetailedGlyph::new(data.id, data.advance, data.offset)];
self.has_detailed_glyphs = true;
self.detail_store.add_detailed_glyphs_for_entry(i, glyph);
GlyphEntry::complex(data.cluster_start, data.ligature_start, 1)
};
if character =='' {
entry.set_char_is_space()
}
self.entry_buffer[i.to_usize()] = entry;
}
pub fn add_glyphs_for_byte_index(&mut self, i: ByteIndex, data_for_glyphs: &[GlyphData]) {
assert!(i < self.len());
assert!(data_for_glyphs.len() > 0);
let glyph_count = data_for_glyphs.len();
let first_glyph_data = data_for_glyphs[0];
let glyphs_vec: Vec<DetailedGlyph> = (0..glyph_count).map(|i| {
DetailedGlyph::new(data_for_glyphs[i].id,
data_for_glyphs[i].advance,
data_for_glyphs[i].offset)
}).collect();
self.has_detailed_glyphs = true;
self.detail_store.add_detailed_glyphs_for_entry(i, &glyphs_vec);
let entry = GlyphEntry::complex(first_glyph_data.cluster_start,
first_glyph_data.ligature_start,
glyph_count);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
self.entry_buffer[i.to_usize()] = entry;
}
#[inline]
pub fn iter_glyphs_for_byte_range(&'a self, range: &Range<ByteIndex>) -> GlyphIterator<'a> {
if range.begin() >= self.len() {
panic!("iter_glyphs_for_range: range.begin beyond length!");
}
if range.end() > self.len() {
panic!("iter_glyphs_for_range: range.end beyond length!");
}
GlyphIterator {
store: self,
byte_index: if self.is_rtl { range.end() } else { range.begin() - ByteIndex(1) },
byte_range: *range,
glyph_range: None,
}
}
#[inline]
pub fn advance_for_byte_range(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
if range.begin() == ByteIndex(0) && range.end() == self.len() {
self.total_advance + extra_word_spacing * self.total_spaces
} else if!self.has_detailed_glyphs {
self.advance_for_byte_range_simple_glyphs(range, extra_word_spacing)
} else {
self.advance_for_byte_range_slow_path(range, extra_word_spacing)
}
}
#[inline]
pub fn advance_for_byte_range_slow_path(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
self.iter_glyphs_for_byte_range(range)
.fold(Au(0), |advance, glyph| {
if glyph.char_is_space() {
advance + glyph.advance() + extra_word_spacing
} else {
advance + glyph.advance()
}
})
}
#[inline]
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
fn advance_for_byte_range_simple_glyphs(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
let advance_mask = u32x4::splat(GLYPH_ADVANCE_MASK);
let space_flag_mask = u32x4::splat(FLAG_CHAR_IS_SPACE);
let mut simd_advance = u32x4::splat(0);
let mut simd_spaces = u32x4::splat(0);
let begin = range.begin().to_usize();
let len = range.length().to_usize();
let num_simd_iterations = len / 4;
let leftover_entries = range.end().to_usize() - (len - num_simd_iterations * 4);
let buf = self.transmute_entry_buffer_to_u32_buffer();
for i in 0..num_simd_iterations {
let v = u32x4::load(buf, begin + i * 4);
let advance = (v & advance_mask) >> GLYPH_ADVANCE_SHIFT;
let spaces = (v & space_flag_mask) >> FLAG_CHAR_IS_SPACE_SHIFT;
simd_advance = simd_advance + advance;
simd_spaces = simd_spaces + spaces;
}
let advance =
(simd_advance.extract(0) +
simd_advance.extract(1) +
simd_advance.extract(2) +
simd_advance.extract(3)) as i32;
let spaces =
(simd_spaces.extract(0) +
simd_spaces.extract(1) +
simd_spaces.extract(2) +
simd_spaces.extract(3)) as i32;
let mut leftover_advance = Au(0);
let mut leftover_spaces = 0;
for i in leftover_entries..range.end().to_usize() {
leftover_advance = leftover_advance + self.entry_buffer[i].advance();
if self.entry_buffer[i].char_is_space() {
leftover_spaces += 1;
}
}
Au(advance) + leftover_advance + extra_word_spacing * (spaces + leftover_spaces)
}
/// When SIMD isn't available (non-x86_x64/aarch64), fallback to the slow path.
#[inline]
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
fn advance_for_byte_range_simple_glyphs(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
|
self.advance_for_byte_range_slow_path(range, extra_word_spacing)
}
|
random_line_split
|
|
glyph.rs
|
_SPACE_SHIFT: u32 = 30;
const FLAG_IS_SIMPLE_GLYPH: u32 = 0x80000000;
// glyph advance; in Au's.
const GLYPH_ADVANCE_MASK: u32 = 0x3FFF0000;
const GLYPH_ADVANCE_SHIFT: u32 = 16;
const GLYPH_ID_MASK: u32 = 0x0000FFFF;
// Non-simple glyphs (more than one glyph per char; missing glyph,
// newline, tab, large advance, or nonzero x/y offsets) may have one
// or more detailed glyphs associated with them. They are stored in a
// side array so that there is a 1:1 mapping of GlyphEntry to
// unicode char.
// The number of detailed glyphs for this char.
const GLYPH_COUNT_MASK: u32 = 0x0000FFFF;
fn is_simple_glyph_id(id: GlyphId) -> bool {
((id as u32) & GLYPH_ID_MASK) == id
}
fn is_simple_advance(advance: Au) -> bool {
advance >= Au(0) && {
let unsigned_au = advance.0 as u32;
(unsigned_au & (GLYPH_ADVANCE_MASK >> GLYPH_ADVANCE_SHIFT)) == unsigned_au
}
}
type DetailedGlyphCount = u16;
// Getters and setters for GlyphEntry. Setter methods are functional,
// because GlyphEntry is immutable and only a u32 in size.
impl GlyphEntry {
#[inline(always)]
fn advance(&self) -> Au {
Au(((self.value & GLYPH_ADVANCE_MASK) >> GLYPH_ADVANCE_SHIFT) as i32)
}
#[inline]
fn id(&self) -> GlyphId {
self.value & GLYPH_ID_MASK
}
/// True if original char was normal (U+0020) space. Other chars may
/// map to space glyph, but this does not account for them.
fn char_is_space(&self) -> bool {
self.has_flag(FLAG_CHAR_IS_SPACE)
}
#[inline(always)]
fn set_char_is_space(&mut self) {
self.value |= FLAG_CHAR_IS_SPACE;
}
fn glyph_count(&self) -> u16 {
assert!(!self.is_simple());
(self.value & GLYPH_COUNT_MASK) as u16
}
#[inline(always)]
fn is_simple(&self) -> bool {
self.has_flag(FLAG_IS_SIMPLE_GLYPH)
}
#[inline(always)]
fn
|
(&self, flag: u32) -> bool {
(self.value & flag)!= 0
}
}
// Stores data for a detailed glyph, in the case that several glyphs
// correspond to one character, or the glyph's data couldn't be packed.
#[derive(Clone, Debug, Copy, Deserialize, Serialize)]
struct DetailedGlyph {
id: GlyphId,
// glyph's advance, in the text's direction (LTR or RTL)
advance: Au,
// glyph's offset from the font's em-box (from top-left)
offset: Point2D<Au>,
}
impl DetailedGlyph {
fn new(id: GlyphId, advance: Au, offset: Point2D<Au>) -> DetailedGlyph {
DetailedGlyph {
id: id,
advance: advance,
offset: offset,
}
}
}
#[derive(PartialEq, Clone, Eq, Debug, Copy, Deserialize, Serialize)]
struct DetailedGlyphRecord {
// source string offset/GlyphEntry offset in the TextRun
entry_offset: ByteIndex,
// offset into the detailed glyphs buffer
detail_offset: usize,
}
impl PartialOrd for DetailedGlyphRecord {
fn partial_cmp(&self, other: &DetailedGlyphRecord) -> Option<Ordering> {
self.entry_offset.partial_cmp(&other.entry_offset)
}
}
impl Ord for DetailedGlyphRecord {
fn cmp(&self, other: &DetailedGlyphRecord) -> Ordering {
self.entry_offset.cmp(&other.entry_offset)
}
}
// Manages the lookup table for detailed glyphs. Sorting is deferred
// until a lookup is actually performed; this matches the expected
// usage pattern of setting/appending all the detailed glyphs, and
// then querying without setting.
#[derive(Clone, Deserialize, Serialize)]
struct DetailedGlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_buffer: Vec<DetailedGlyph>,
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_lookup: Vec<DetailedGlyphRecord>,
lookup_is_sorted: bool,
}
impl<'a> DetailedGlyphStore {
    /// Creates an empty store. Lookups are valid only after `ensure_sorted`
    /// has run following the last mutation.
    fn new() -> DetailedGlyphStore {
        DetailedGlyphStore {
            detail_buffer: vec!(), // TODO: default size?
            detail_lookup: vec!(),
            lookup_is_sorted: false,
        }
    }

    /// Appends `glyphs` as the detailed glyphs belonging to the entry at
    /// `entry_offset`, recording where they begin in `detail_buffer`, and
    /// marks the lookup table as unsorted.
    fn add_detailed_glyphs_for_entry(&mut self, entry_offset: ByteIndex, glyphs: &[DetailedGlyph]) {
        let entry = DetailedGlyphRecord {
            entry_offset: entry_offset,
            detail_offset: self.detail_buffer.len(),
        };
        debug!("Adding entry[off={:?}] for detailed glyphs: {:?}", entry_offset, glyphs);
        // NOTE(review): duplicates are not checked here; callers must not add
        // the same entry_offset twice, or the binary searches below become
        // ambiguous.
        self.detail_lookup.push(entry);
        self.detail_buffer.extend_from_slice(glyphs);
        self.lookup_is_sorted = false;
    }

    /// Returns the `count` detailed glyphs stored for `entry_offset`.
    ///
    /// Requires the lookup table to be sorted (see `ensure_sorted`).
    fn detailed_glyphs_for_entry(&'a self, entry_offset: ByteIndex, count: u16)
                                 -> &'a [DetailedGlyph] {
        debug!("Requesting detailed glyphs[n={}] for entry[off={:?}]", count, entry_offset);
        // FIXME: Is this right? --pcwalton
        // TODO: should fix this somewhere else
        if count == 0 {
            return &self.detail_buffer[0..0];
        }
        assert!((count as usize) <= self.detail_buffer.len());
        assert!(self.lookup_is_sorted);
        // `Ord for DetailedGlyphRecord` compares entry_offset only, so any
        // detail_offset works as a search key.
        let key = DetailedGlyphRecord {
            entry_offset: entry_offset,
            detail_offset: 0, // unused
        };
        let i = self.detail_lookup.binary_search(&key)
            .expect("Invalid index not found in detailed glyph lookup table!");
        // `i` is the record's position in `detail_lookup`, which is unrelated
        // to where its glyphs live in `detail_buffer`; go through the stored
        // `detail_offset`. (The previous code indexed the buffer with `i`
        // directly, which is wrong whenever an entry holds more than one
        // glyph or entries were inserted out of order.)
        let base = self.detail_lookup[i].detail_offset;
        assert!(base + (count as usize) <= self.detail_buffer.len());
        // return a slice into the buffer
        &self.detail_buffer[base .. base + count as usize]
    }

    /// Returns the single detailed glyph at position `detail_offset` within
    /// the detail run of the entry at `entry_offset`.
    fn detailed_glyph_with_index(&'a self,
                                 entry_offset: ByteIndex,
                                 detail_offset: u16)
                                 -> &'a DetailedGlyph {
        assert!((detail_offset as usize) <= self.detail_buffer.len());
        assert!(self.lookup_is_sorted);
        let key = DetailedGlyphRecord {
            entry_offset: entry_offset,
            detail_offset: 0, // unused
        };
        let i = self.detail_lookup.binary_search(&key)
            .expect("Invalid index not found in detailed glyph lookup table!");
        // As above: index the glyph buffer through the record's stored
        // offset, not through the lookup-table position.
        let base = self.detail_lookup[i].detail_offset;
        assert!(base + (detail_offset as usize) < self.detail_buffer.len());
        &self.detail_buffer[base + (detail_offset as usize)]
    }

    /// Sorts the lookup table by entry offset (no-op if already sorted).
    fn ensure_sorted(&mut self) {
        if self.lookup_is_sorted {
            return;
        }
        // `sort_by` requires a comparator implementing a total order; the
        // previous closure never returned `Ordering::Equal`, violating that
        // contract. Records are ordered by `entry_offset` via the type's own
        // `Ord` impl, so a plain sort is both correct and simpler than the
        // old swap-out/sort/swap-in dance.
        self.detail_lookup.sort();
        self.lookup_is_sorted = true;
    }
}
// This struct is used by GlyphStore clients to provide new glyph data.
// It should be allocated on the stack and passed by reference to GlyphStore.
#[derive(Copy, Clone)]
pub struct GlyphData {
    // Font glyph id for this glyph.
    id: GlyphId,
    // Advance in the text direction.
    advance: Au,
    // Offset from the em-box origin; zero for most glyphs.
    offset: Point2D<Au>,
    // True if this glyph begins a cluster (affects packability).
    cluster_start: bool,
    // True if this glyph begins a ligature group.
    ligature_start: bool,
}
impl GlyphData {
    /// Creates a new entry for one glyph.
    ///
    /// `offset` defaults to the zero point when `None`.
    pub fn new(id: GlyphId,
               advance: Au,
               offset: Option<Point2D<Au>>,
               cluster_start: bool,
               ligature_start: bool)
               -> GlyphData {
        GlyphData {
            id: id,
            advance: advance,
            // `unwrap_or_else` defers constructing the default point until it
            // is actually needed (or_fun_call idiom); behavior is unchanged.
            offset: offset.unwrap_or_else(Point2D::zero),
            cluster_start: cluster_start,
            ligature_start: ligature_start,
        }
    }
}
// This enum is a proxy that's provided to GlyphStore clients when iterating
// through glyphs (either for a particular TextRun offset, or all glyphs).
// Rather than eagerly assembling and copying glyph data, it only retrieves
// values as they are needed from the GlyphStore, using provided offsets.
#[derive(Copy, Clone)]
pub enum GlyphInfo<'a> {
    /// A packed single-glyph entry, addressed by its byte index.
    Simple(&'a GlyphStore, ByteIndex),
    /// A detailed glyph: byte index plus the glyph's position within that
    /// entry's detail run.
    Detail(&'a GlyphStore, ByteIndex, u16),
}
impl<'a> GlyphInfo<'a> {
    /// Glyph id for this entry, read from the packed entry or the detail
    /// store as appropriate.
    pub fn id(self) -> GlyphId {
        match self {
            GlyphInfo::Simple(store, entry_i) =>
                store.entry_buffer[entry_i.to_usize()].id(),
            GlyphInfo::Detail(store, entry_i, detail_j) =>
                store.detail_store.detailed_glyph_with_index(entry_i, detail_j).id,
        }
    }

    #[inline(always)]
    // FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
    pub fn advance(self) -> Au {
        match self {
            GlyphInfo::Simple(store, entry_i) =>
                store.entry_buffer[entry_i.to_usize()].advance(),
            GlyphInfo::Detail(store, entry_i, detail_j) =>
                store.detail_store.detailed_glyph_with_index(entry_i, detail_j).advance,
        }
    }

    /// Offset from the em-box origin; packed (simple) glyphs carry no
    /// offset, so this is `None` for them.
    #[inline]
    pub fn offset(self) -> Option<Point2D<Au>> {
        match self {
            GlyphInfo::Simple(..) => None,
            GlyphInfo::Detail(store, entry_i, detail_j) =>
                Some(store.detail_store.detailed_glyph_with_index(entry_i, detail_j).offset),
        }
    }

    /// True if the source character behind this entry was a U+0020 space.
    pub fn char_is_space(self) -> bool {
        // Both variants carry the store and byte index in the same positions,
        // so a single or-pattern arm covers them.
        match self {
            GlyphInfo::Simple(store, entry_i) |
            GlyphInfo::Detail(store, entry_i, _) => store.char_is_space(entry_i),
        }
    }
}
/// Stores the glyph data belonging to a text run.
///
/// Simple glyphs are stored inline in the `entry_buffer`, detailed glyphs are
/// stored as pointers into the `detail_store`.
///
/// ~~~ignore
/// +- GlyphStore --------------------------------+
/// | +---+---+---+---+---+---+---+ |
/// | entry_buffer: | | s | | s | | s | s | | d = detailed
/// | +-|-+---+-|-+---+-|-+---+---+ | s = simple
/// | | | | |
/// | | +---+-------+ |
/// | | | |
/// | +-V-+-V-+ |
/// | detail_store: | d | d | |
/// | +---+---+ |
/// +---------------------------------------------+
/// ~~~
#[derive(Clone, Deserialize, Serialize)]
pub struct GlyphStore {
    // TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
    // optimization.
    /// A buffer of glyphs within the text run, in the order in which they
    /// appear in the input text.
    /// Any changes will also need to be reflected in
    /// transmute_entry_buffer_to_u32_buffer().
    entry_buffer: Vec<GlyphEntry>,
    /// A store of the detailed glyph data. Detailed glyphs contained in the
    /// `entry_buffer` point to locations in this data structure.
    detail_store: DetailedGlyphStore,
    /// A cache of the advance of the entire glyph store.
    total_advance: Au,
    /// A cache of the number of spaces in the entire glyph store.
    total_spaces: i32,
    /// Used to check if fast path should be used in glyph iteration.
    has_detailed_glyphs: bool,
    /// True if the run was flagged as all-whitespace at construction.
    is_whitespace: bool,
    /// True if the run is right-to-left; reverses glyph iteration order.
    is_rtl: bool,
}
// Expands to the `ByteIndex` newtype over `isize`, plus the arithmetic and
// range plumbing generated by the `int_range_index!` macro.
int_range_index! {
    #[derive(Deserialize, Serialize, RustcEncodable)]
    #[doc = "An index that refers to a byte offset in a text run. This could \
             point to the middle of a glyph."]
    #[derive(HeapSizeOf)]
    struct ByteIndex(isize)
}
impl<'a> GlyphStore {
    /// Initializes the glyph store, but doesn't actually shape anything.
    ///
    /// Use the `add_*` methods to store glyph data.
    ///
    /// `length` is the number of entries (the buffer is addressed by
    /// `ByteIndex`); it must be nonzero.
    pub fn new(length: usize, is_whitespace: bool, is_rtl: bool) -> GlyphStore {
        assert!(length > 0);
        GlyphStore {
            // Every slot starts as the placeholder entry until shaping fills it.
            entry_buffer: vec![GlyphEntry::initial(); length],
            detail_store: DetailedGlyphStore::new(),
            total_advance: Au(0),
            total_spaces: 0,
            has_detailed_glyphs: false,
            is_whitespace: is_whitespace,
            is_rtl: is_rtl,
        }
    }
    #[inline]
    /// Number of entries in the store, as a `ByteIndex`.
    pub fn len(&self) -> ByteIndex {
        ByteIndex(self.entry_buffer.len() as isize)
    }
    #[inline]
    /// True if the run was flagged as whitespace at construction.
    pub fn is_whitespace(&self) -> bool {
        self.is_whitespace
    }
    /// Must be called after the last `add_*` call: sorts the detail lookup
    /// table and refreshes the cached advance/space totals.
    pub fn finalize_changes(&mut self) {
        self.detail_store.ensure_sorted();
        self.cache_total_advance_and_spaces()
    }
    #[inline(never)]
    /// Recomputes `total_advance` and `total_spaces` by walking every glyph.
    /// Marked `inline(never)` — presumably to keep this bulky loop out of
    /// callers; confirm before changing.
    fn cache_total_advance_and_spaces(&mut self) {
        let mut total_advance = Au(0);
        let mut total_spaces = 0;
        for glyph in self.iter_glyphs_for_byte_range(&Range::new(ByteIndex(0), self.len())) {
            total_advance = total_advance + glyph.advance();
            if glyph.char_is_space() {
                total_spaces += 1;
            }
        }
        self.total_advance = total_advance;
        self.total_spaces = total_spaces;
    }
/// Adds a single glyph.
pub fn add_glyph_for_byte_index(&mut self,
i: ByteIndex,
character: char,
data: &GlyphData) {
let glyph_is_compressible = is_simple_glyph_id(data.id) &&
is_simple_advance(data.advance) &&
data.offset == Point2D::zero() &&
data.cluster_start; // others are stored in detail buffer
debug_assert!(data.ligature_start); // can't compress ligature continuation glyphs.
debug_assert!(i < self.len());
let mut entry = if glyph_is_compressible {
GlyphEntry::simple(data.id, data.advance)
} else {
let glyph = &[DetailedGlyph::new(data.id, data.advance, data.offset)];
self.has_detailed_glyphs = true;
self.detail_store.add_detailed_glyphs_for_entry(i, glyph);
GlyphEntry::complex(data.cluster_start, data.ligature_start, 1)
};
if character =='' {
entry.set_char_is_space()
}
self.entry_buffer[i.to_usize()] = entry;
}
pub fn add_glyphs_for_byte_index(&mut self, i: ByteIndex, data_for_glyphs: &[GlyphData]) {
assert!(i < self.len());
assert!(data_for_glyphs.len() > 0);
let glyph_count = data_for_glyphs.len();
let first_glyph_data = data_for_glyphs[0];
let glyphs_vec: Vec<DetailedGlyph> = (0..glyph_count).map(|i| {
DetailedGlyph::new(data_for_glyphs[i].id,
data_for_glyphs[i].advance,
data_for_glyphs[i].offset)
}).collect();
self.has_detailed_glyphs = true;
self.detail_store.add_detailed_glyphs_for_entry(i, &glyphs_vec);
let entry = GlyphEntry::complex(first_glyph_data.cluster_start,
first_glyph_data.ligature_start,
glyph_count);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
self.entry_buffer[i.to_usize()] = entry;
}
    #[inline]
    /// Returns an iterator over the glyphs covering `range`.
    ///
    /// Panics if `range` starts at or past the end of the store, or extends
    /// beyond it.
    pub fn iter_glyphs_for_byte_range(&'a self, range: &Range<ByteIndex>) -> GlyphIterator<'a> {
        if range.begin() >= self.len() {
            panic!("iter_glyphs_for_range: range.begin beyond length!");
        }
        if range.end() > self.len() {
            panic!("iter_glyphs_for_range: range.end beyond length!");
        }
        GlyphIterator {
            store: self,
            // The cursor starts one position outside the range (past the end
            // for RTL, before the start for LTR) — presumably the iterator
            // steps before reading. NOTE(review): for an LTR range starting
            // at 0 this computes ByteIndex(-1); confirm GlyphIterator expects
            // that sentinel.
            byte_index: if self.is_rtl { range.end() } else { range.begin() - ByteIndex(1) },
            byte_range: *range,
            glyph_range: None,
        }
    }
    #[inline]
    /// Total advance of the glyphs covering `range`, widening each space
    /// glyph by `extra_word_spacing`.
    pub fn advance_for_byte_range(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
        if range.begin() == ByteIndex(0) && range.end() == self.len() {
            // Whole-run query: answer from the totals cached by
            // finalize_changes() instead of re-walking the glyphs.
            self.total_advance + extra_word_spacing * self.total_spaces
        } else if !self.has_detailed_glyphs {
            // All entries are packed, so the SIMD-friendly path applies.
            self.advance_for_byte_range_simple_glyphs(range, extra_word_spacing)
        } else {
            self.advance_for_byte_range_slow_path(range, extra_word_spacing)
        }
    }
#[inline]
pub fn advance_for_byte_range_slow_path(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
self.iter_glyphs_for_byte_range(range)
.fold(Au(0), |advance, glyph| {
if glyph.char_is_space() {
advance + glyph.advance() + extra_word_spacing
} else {
advance + glyph.advance()
}
})
}
#[inline]
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
fn advance_for_byte_range_simple_glyphs(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
let advance_mask = u32x4::splat(GLYPH_ADVANCE_MASK);
let space_flag_mask = u32x4::splat(FLAG_CHAR_IS_SPACE);
let mut simd_advance = u32x4::splat(0);
let mut simd_spaces = u32x4::splat(0);
let begin = range.begin().to_usize();
let len = range.length().to_usize();
let num_simd_iterations = len / 4;
let leftover_entries = range.end().to_usize() - (len - num_simd_iterations * 4);
let buf = self.transmute_entry_buffer_to_u32_buffer();
for i in 0..num_simd_iterations {
let v = u32x4::load(buf, begin + i * 4);
let advance = (v & advance_mask) >> GLYPH_ADVANCE_SHIFT;
let spaces = (v & space_flag_mask) >> FLAG_CHAR_IS_SPACE_SHIFT;
simd_advance = simd_advance + advance;
simd_spaces = simd_spaces + spaces;
}
let advance =
(simd_advance.extract(0) +
simd_advance.extract(1) +
simd_advance.extract(2) +
simd_advance.extract(3)) as i32;
let spaces =
(simd_spaces.extract(0) +
simd_spaces.extract(1) +
simd_spaces.extract(2) +
simd_spaces.extract(3)) as i32;
let mut leftover_advance = Au(0);
let mut leftover_spaces = 0;
for i in leftover_entries..range.end().to_usize() {
leftover_advance = leftover_advance + self.entry_buffer[i].advance();
if self.entry_buffer[i].char_is_
|
has_flag
|
identifier_name
|
glyph.rs
|
_SPACE_SHIFT: u32 = 30;
// Set when the entry uses the packed single-glyph representation.
const FLAG_IS_SIMPLE_GLYPH: u32      = 0x80000000;
// glyph advance; in Au's.
const GLYPH_ADVANCE_MASK: u32        = 0x3FFF0000;
const GLYPH_ADVANCE_SHIFT: u32       = 16;
// Low 16 bits of a simple entry hold the glyph id.
const GLYPH_ID_MASK: u32             = 0x0000FFFF;
// Non-simple glyphs (more than one glyph per char; missing glyph,
// newline, tab, large advance, or nonzero x/y offsets) may have one
// or more detailed glyphs associated with them. They are stored in a
// side array so that there is a 1:1 mapping of GlyphEntry to
// unicode char.
// The number of detailed glyphs for this char.
const GLYPH_COUNT_MASK: u32          = 0x0000FFFF;
/// True if `id` fits in the 16-bit id field of a packed (simple) entry.
fn is_simple_glyph_id(id: GlyphId) -> bool {
    ((id as u32) & GLYPH_ID_MASK) == id
}
/// True if `advance` is non-negative and fits in the 14-bit advance field
/// of a packed (simple) entry.
fn is_simple_advance(advance: Au) -> bool {
    advance >= Au(0) && {
        let unsigned_au = advance.0 as u32;
        (unsigned_au & (GLYPH_ADVANCE_MASK >> GLYPH_ADVANCE_SHIFT)) == unsigned_au
    }
}
// Count of detailed glyphs attached to one complex entry.
type DetailedGlyphCount = u16;
// Getters and setters for GlyphEntry. Setter methods are functional,
// because GlyphEntry is immutable and only a u32 in size.
impl GlyphEntry {
    #[inline(always)]
    /// Advance of a simple (packed) entry, decoded from bits 16-29.
    fn advance(&self) -> Au {
        Au(((self.value & GLYPH_ADVANCE_MASK) >> GLYPH_ADVANCE_SHIFT) as i32)
    }
    #[inline]
    /// Glyph id of a simple (packed) entry (low 16 bits).
    fn id(&self) -> GlyphId {
        self.value & GLYPH_ID_MASK
    }
    /// True if original char was normal (U+0020) space. Other chars may
    /// map to space glyph, but this does not account for them.
    fn char_is_space(&self) -> bool {
        self.has_flag(FLAG_CHAR_IS_SPACE)
    }
    #[inline(always)]
    /// Flags the entry's source character as a space (in-place bit set).
    fn set_char_is_space(&mut self) {
        self.value |= FLAG_CHAR_IS_SPACE;
    }
    /// Number of detailed glyphs for a complex entry (low 16 bits).
    /// Asserts that the entry is not simple.
    fn glyph_count(&self) -> u16 {
        assert!(!self.is_simple());
        (self.value & GLYPH_COUNT_MASK) as u16
    }
    #[inline(always)]
    /// True when the entry uses the packed single-glyph representation.
    fn is_simple(&self) -> bool {
        self.has_flag(FLAG_IS_SIMPLE_GLYPH)
    }
    #[inline(always)]
    /// True if any bit of `flag` is set in the entry's packed value.
    fn has_flag(&self, flag: u32) -> bool {
        (self.value & flag) != 0
    }
}
// Stores data for a detailed glyph, in the case that several glyphs
// correspond to one character, or the glyph's data couldn't be packed.
#[derive(Clone, Debug, Copy, Deserialize, Serialize)]
struct DetailedGlyph {
    // Font glyph id.
    id: GlyphId,
    // glyph's advance, in the text's direction (LTR or RTL)
    advance: Au,
    // glyph's offset from the font's em-box (from top-left)
    offset: Point2D<Au>,
}
impl DetailedGlyph {
    /// Plain constructor; all fields are stored verbatim.
    fn new(id: GlyphId, advance: Au, offset: Point2D<Au>) -> DetailedGlyph {
        DetailedGlyph {
            id: id,
            advance: advance,
            offset: offset,
        }
    }
}
// NOTE: equality (derived) compares both fields, while the hand-written
// Ord/PartialOrd below order by `entry_offset` alone so a record with a
// dummy `detail_offset` can serve as a binary-search key.
#[derive(PartialEq, Clone, Eq, Debug, Copy, Deserialize, Serialize)]
struct DetailedGlyphRecord {
    // source string offset/GlyphEntry offset in the TextRun
    entry_offset: ByteIndex,
    // offset into the detailed glyphs buffer
    detail_offset: usize,
}
impl PartialOrd for DetailedGlyphRecord {
    /// Delegates to `Ord` (records are totally ordered by `entry_offset`).
    fn partial_cmp(&self, other: &DetailedGlyphRecord) -> Option<Ordering> {
        // Canonical delegation: deriving the partial order from `cmp`
        // guarantees PartialOrd and Ord can never drift apart.
        Some(self.cmp(other))
    }
}
impl Ord for DetailedGlyphRecord {
    // Orders by `entry_offset` alone; `detail_offset` is deliberately
    // ignored so a record with a dummy detail_offset works as a search key.
    // NOTE(review): this makes `cmp` return Equal for records that derived
    // `PartialEq` considers unequal — acceptable for the lookup-table use,
    // but do not rely on Ord/Eq consistency for this type.
    fn cmp(&self, other: &DetailedGlyphRecord) -> Ordering {
        self.entry_offset.cmp(&other.entry_offset)
    }
}
// Manages the lookup table for detailed glyphs. Sorting is deferred
// until a lookup is actually performed; this matches the expected
// usage pattern of setting/appending all the detailed glyphs, and
// then querying without setting.
#[derive(Clone, Deserialize, Serialize)]
struct DetailedGlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_buffer: Vec<DetailedGlyph>,
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_lookup: Vec<DetailedGlyphRecord>,
lookup_is_sorted: bool,
}
impl<'a> DetailedGlyphStore {
fn new() -> DetailedGlyphStore {
DetailedGlyphStore {
detail_buffer: vec!(), // TODO: default size?
detail_lookup: vec!(),
lookup_is_sorted: false,
}
}
fn add_detailed_glyphs_for_entry(&mut self, entry_offset: ByteIndex, glyphs: &[DetailedGlyph]) {
let entry = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: self.detail_buffer.len(),
};
debug!("Adding entry[off={:?}] for detailed glyphs: {:?}", entry_offset, glyphs);
/* TODO: don't actually assert this until asserts are compiled
in/out based on severity, debug/release, etc. This assertion
would wreck the complexity of the lookup.
See Rust Issue #3647, #2228, #3627 for related information.
do self.detail_lookup.borrow |arr| {
assert!arr.contains(entry)
}
*/
self.detail_lookup.push(entry);
self.detail_buffer.extend_from_slice(glyphs);
self.lookup_is_sorted = false;
}
fn detailed_glyphs_for_entry(&'a self, entry_offset: ByteIndex, count: u16)
-> &'a [DetailedGlyph] {
debug!("Requesting detailed glyphs[n={}] for entry[off={:?}]", count, entry_offset);
// FIXME: Is this right? --pcwalton
// TODO: should fix this somewhere else
if count == 0 {
return &self.detail_buffer[0..0];
}
assert!((count as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (count as usize) <= self.detail_buffer.len());
// return a slice into the buffer
&self.detail_buffer[i.. i + count as usize]
}
fn detailed_glyph_with_index(&'a self,
entry_offset: ByteIndex,
detail_offset: u16)
-> &'a DetailedGlyph {
assert!((detail_offset as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (detail_offset as usize) < self.detail_buffer.len());
&self.detail_buffer[i + (detail_offset as usize)]
}
fn ensure_sorted(&mut self) {
if self.lookup_is_sorted {
return;
}
// Sorting a unique vector is surprisingly hard. The following
// code is a good argument for using DVecs, but they require
// immutable locations thus don't play well with freezing.
// Thar be dragons here. You have been warned. (Tips accepted.)
let mut unsorted_records: Vec<DetailedGlyphRecord> = vec!();
mem::swap(&mut self.detail_lookup, &mut unsorted_records);
let mut mut_records: Vec<DetailedGlyphRecord> = unsorted_records;
mut_records.sort_by(|a, b| {
if a < b {
Ordering::Less
} else {
Ordering::Greater
}
});
let mut sorted_records = mut_records;
mem::swap(&mut self.detail_lookup, &mut sorted_records);
self.lookup_is_sorted = true;
}
}
// This struct is used by GlyphStore clients to provide new glyph data.
// It should be allocated on the stack and passed by reference to GlyphStore.
#[derive(Copy, Clone)]
pub struct GlyphData {
id: GlyphId,
advance: Au,
offset: Point2D<Au>,
cluster_start: bool,
ligature_start: bool,
}
impl GlyphData {
/// Creates a new entry for one glyph.
pub fn new(id: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
cluster_start: bool,
ligature_start: bool)
-> GlyphData {
GlyphData {
id: id,
advance: advance,
offset: offset.unwrap_or(Point2D::zero()),
cluster_start: cluster_start,
ligature_start: ligature_start,
}
}
}
// This enum is a proxy that's provided to GlyphStore clients when iterating
// through glyphs (either for a particular TextRun offset, or all glyphs).
// Rather than eagerly assembling and copying glyph data, it only retrieves
// values as they are needed from the GlyphStore, using provided offsets.
#[derive(Copy, Clone)]
pub enum GlyphInfo<'a> {
Simple(&'a GlyphStore, ByteIndex),
Detail(&'a GlyphStore, ByteIndex, u16),
}
impl<'a> GlyphInfo<'a> {
pub fn id(self) -> GlyphId {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].id(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.detailed_glyph_with_index(entry_i, detail_j).id
}
}
}
#[inline(always)]
// FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
pub fn advance(self) -> Au {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].advance(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.detailed_glyph_with_index(entry_i, detail_j).advance
}
}
}
#[inline]
pub fn offset(self) -> Option<Point2D<Au>> {
match self {
GlyphInfo::Simple(_, _) => None,
GlyphInfo::Detail(store, entry_i, detail_j) => {
Some(store.detail_store.detailed_glyph_with_index(entry_i, detail_j).offset)
}
}
}
pub fn char_is_space(self) -> bool {
let (store, entry_i) = match self {
GlyphInfo::Simple(store, entry_i) => (store, entry_i),
GlyphInfo::Detail(store, entry_i, _) => (store, entry_i),
};
store.char_is_space(entry_i)
}
}
/// Stores the glyph data belonging to a text run.
///
/// Simple glyphs are stored inline in the `entry_buffer`, detailed glyphs are
/// stored as pointers into the `detail_store`.
///
/// ~~~ignore
/// +- GlyphStore --------------------------------+
/// | +---+---+---+---+---+---+---+ |
/// | entry_buffer: | | s | | s | | s | s | | d = detailed
/// | +-|-+---+-|-+---+-|-+---+---+ | s = simple
/// | | | | |
/// | | +---+-------+ |
/// | | | |
/// | +-V-+-V-+ |
/// | detail_store: | d | d | |
/// | +---+---+ |
/// +---------------------------------------------+
/// ~~~
#[derive(Clone, Deserialize, Serialize)]
pub struct GlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
/// A buffer of glyphs within the text run, in the order in which they
/// appear in the input text.
/// Any changes will also need to be reflected in
/// transmute_entry_buffer_to_u32_buffer().
entry_buffer: Vec<GlyphEntry>,
/// A store of the detailed glyph data. Detailed glyphs contained in the
/// `entry_buffer` point to locations in this data structure.
detail_store: DetailedGlyphStore,
/// A cache of the advance of the entire glyph store.
total_advance: Au,
/// A cache of the number of spaces in the entire glyph store.
total_spaces: i32,
/// Used to check if fast path should be used in glyph iteration.
has_detailed_glyphs: bool,
is_whitespace: bool,
is_rtl: bool,
}
int_range_index! {
#[derive(Deserialize, Serialize, RustcEncodable)]
#[doc = "An index that refers to a byte offset in a text run. This could \
point to the middle of a glyph."]
#[derive(HeapSizeOf)]
struct ByteIndex(isize)
}
impl<'a> GlyphStore {
/// Initializes the glyph store, but doesn't actually shape anything.
///
/// Use the `add_*` methods to store glyph data.
pub fn new(length: usize, is_whitespace: bool, is_rtl: bool) -> GlyphStore {
assert!(length > 0);
GlyphStore {
entry_buffer: vec![GlyphEntry::initial(); length],
detail_store: DetailedGlyphStore::new(),
total_advance: Au(0),
total_spaces: 0,
has_detailed_glyphs: false,
is_whitespace: is_whitespace,
is_rtl: is_rtl,
}
}
#[inline]
pub fn len(&self) -> ByteIndex {
ByteIndex(self.entry_buffer.len() as isize)
}
#[inline]
pub fn is_whitespace(&self) -> bool {
self.is_whitespace
}
pub fn finalize_changes(&mut self) {
self.detail_store.ensure_sorted();
self.cache_total_advance_and_spaces()
}
#[inline(never)]
fn cache_total_advance_and_spaces(&mut self) {
let mut total_advance = Au(0);
let mut total_spaces = 0;
for glyph in self.iter_glyphs_for_byte_range(&Range::new(ByteIndex(0), self.len())) {
total_advance = total_advance + glyph.advance();
if glyph.char_is_space() {
total_spaces += 1;
}
}
self.total_advance = total_advance;
self.total_spaces = total_spaces;
}
/// Adds a single glyph.
pub fn add_glyph_for_byte_index(&mut self,
i: ByteIndex,
character: char,
data: &GlyphData)
|
}
self.entry_buffer[i.to_usize()] = entry;
}
pub fn add_glyphs_for_byte_index(&mut self, i: ByteIndex, data_for_glyphs: &[GlyphData]) {
assert!(i < self.len());
assert!(data_for_glyphs.len() > 0);
let glyph_count = data_for_glyphs.len();
let first_glyph_data = data_for_glyphs[0];
let glyphs_vec: Vec<DetailedGlyph> = (0..glyph_count).map(|i| {
DetailedGlyph::new(data_for_glyphs[i].id,
data_for_glyphs[i].advance,
data_for_glyphs[i].offset)
}).collect();
self.has_detailed_glyphs = true;
self.detail_store.add_detailed_glyphs_for_entry(i, &glyphs_vec);
let entry = GlyphEntry::complex(first_glyph_data.cluster_start,
first_glyph_data.ligature_start,
glyph_count);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
self.entry_buffer[i.to_usize()] = entry;
}
#[inline]
pub fn iter_glyphs_for_byte_range(&'a self, range: &Range<ByteIndex>) -> GlyphIterator<'a> {
if range.begin() >= self.len() {
panic!("iter_glyphs_for_range: range.begin beyond length!");
}
if range.end() > self.len() {
panic!("iter_glyphs_for_range: range.end beyond length!");
}
GlyphIterator {
store: self,
byte_index: if self.is_rtl { range.end() } else { range.begin() - ByteIndex(1) },
byte_range: *range,
glyph_range: None,
}
}
#[inline]
pub fn advance_for_byte_range(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
if range.begin() == ByteIndex(0) && range.end() == self.len() {
self.total_advance + extra_word_spacing * self.total_spaces
} else if!self.has_detailed_glyphs {
self.advance_for_byte_range_simple_glyphs(range, extra_word_spacing)
} else {
self.advance_for_byte_range_slow_path(range, extra_word_spacing)
}
}
#[inline]
pub fn advance_for_byte_range_slow_path(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
self.iter_glyphs_for_byte_range(range)
.fold(Au(0), |advance, glyph| {
if glyph.char_is_space() {
advance + glyph.advance() + extra_word_spacing
} else {
advance + glyph.advance()
}
})
}
#[inline]
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
fn advance_for_byte_range_simple_glyphs(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
let advance_mask = u32x4::splat(GLYPH_ADVANCE_MASK);
let space_flag_mask = u32x4::splat(FLAG_CHAR_IS_SPACE);
let mut simd_advance = u32x4::splat(0);
let mut simd_spaces = u32x4::splat(0);
let begin = range.begin().to_usize();
let len = range.length().to_usize();
let num_simd_iterations = len / 4;
let leftover_entries = range.end().to_usize() - (len - num_simd_iterations * 4);
let buf = self.transmute_entry_buffer_to_u32_buffer();
for i in 0..num_simd_iterations {
let v = u32x4::load(buf, begin + i * 4);
let advance = (v & advance_mask) >> GLYPH_ADVANCE_SHIFT;
let spaces = (v & space_flag_mask) >> FLAG_CHAR_IS_SPACE_SHIFT;
simd_advance = simd_advance + advance;
simd_spaces = simd_spaces + spaces;
}
let advance =
(simd_advance.extract(0) +
simd_advance.extract(1) +
simd_advance.extract(2) +
simd_advance.extract(3)) as i32;
let spaces =
(simd_spaces.extract(0) +
simd_spaces.extract(1) +
simd_spaces.extract(2) +
simd_spaces.extract(3)) as i32;
let mut leftover_advance = Au(0);
let mut leftover_spaces = 0;
for i in leftover_entries..range.end().to_usize() {
leftover_advance = leftover_advance + self.entry_buffer[i].advance();
if self.entry_buffer[i].char_is_
|
{
let glyph_is_compressible = is_simple_glyph_id(data.id) &&
is_simple_advance(data.advance) &&
data.offset == Point2D::zero() &&
data.cluster_start; // others are stored in detail buffer
debug_assert!(data.ligature_start); // can't compress ligature continuation glyphs.
debug_assert!(i < self.len());
let mut entry = if glyph_is_compressible {
GlyphEntry::simple(data.id, data.advance)
} else {
let glyph = &[DetailedGlyph::new(data.id, data.advance, data.offset)];
self.has_detailed_glyphs = true;
self.detail_store.add_detailed_glyphs_for_entry(i, glyph);
GlyphEntry::complex(data.cluster_start, data.ligature_start, 1)
};
if character == ' ' {
entry.set_char_is_space()
|
identifier_body
|
glyph.rs
|
_SPACE_SHIFT: u32 = 30;
const FLAG_IS_SIMPLE_GLYPH: u32 = 0x80000000;
// glyph advance; in Au's.
const GLYPH_ADVANCE_MASK: u32 = 0x3FFF0000;
const GLYPH_ADVANCE_SHIFT: u32 = 16;
const GLYPH_ID_MASK: u32 = 0x0000FFFF;
// Non-simple glyphs (more than one glyph per char; missing glyph,
// newline, tab, large advance, or nonzero x/y offsets) may have one
// or more detailed glyphs associated with them. They are stored in a
// side array so that there is a 1:1 mapping of GlyphEntry to
// unicode char.
// The number of detailed glyphs for this char.
const GLYPH_COUNT_MASK: u32 = 0x0000FFFF;
fn is_simple_glyph_id(id: GlyphId) -> bool {
((id as u32) & GLYPH_ID_MASK) == id
}
fn is_simple_advance(advance: Au) -> bool {
advance >= Au(0) && {
let unsigned_au = advance.0 as u32;
(unsigned_au & (GLYPH_ADVANCE_MASK >> GLYPH_ADVANCE_SHIFT)) == unsigned_au
}
}
type DetailedGlyphCount = u16;
// Getters and setters for GlyphEntry. Setter methods are functional,
// because GlyphEntry is immutable and only a u32 in size.
impl GlyphEntry {
#[inline(always)]
fn advance(&self) -> Au {
Au(((self.value & GLYPH_ADVANCE_MASK) >> GLYPH_ADVANCE_SHIFT) as i32)
}
#[inline]
fn id(&self) -> GlyphId {
self.value & GLYPH_ID_MASK
}
/// True if original char was normal (U+0020) space. Other chars may
/// map to space glyph, but this does not account for them.
fn char_is_space(&self) -> bool {
self.has_flag(FLAG_CHAR_IS_SPACE)
}
#[inline(always)]
fn set_char_is_space(&mut self) {
self.value |= FLAG_CHAR_IS_SPACE;
}
fn glyph_count(&self) -> u16 {
assert!(!self.is_simple());
(self.value & GLYPH_COUNT_MASK) as u16
}
#[inline(always)]
fn is_simple(&self) -> bool {
self.has_flag(FLAG_IS_SIMPLE_GLYPH)
}
#[inline(always)]
fn has_flag(&self, flag: u32) -> bool {
(self.value & flag)!= 0
}
}
// Stores data for a detailed glyph, in the case that several glyphs
// correspond to one character, or the glyph's data couldn't be packed.
#[derive(Clone, Debug, Copy, Deserialize, Serialize)]
struct DetailedGlyph {
id: GlyphId,
// glyph's advance, in the text's direction (LTR or RTL)
advance: Au,
// glyph's offset from the font's em-box (from top-left)
offset: Point2D<Au>,
}
impl DetailedGlyph {
fn new(id: GlyphId, advance: Au, offset: Point2D<Au>) -> DetailedGlyph {
DetailedGlyph {
id: id,
advance: advance,
offset: offset,
}
}
}
#[derive(PartialEq, Clone, Eq, Debug, Copy, Deserialize, Serialize)]
struct DetailedGlyphRecord {
// source string offset/GlyphEntry offset in the TextRun
entry_offset: ByteIndex,
// offset into the detailed glyphs buffer
detail_offset: usize,
}
impl PartialOrd for DetailedGlyphRecord {
fn partial_cmp(&self, other: &DetailedGlyphRecord) -> Option<Ordering> {
self.entry_offset.partial_cmp(&other.entry_offset)
}
}
impl Ord for DetailedGlyphRecord {
fn cmp(&self, other: &DetailedGlyphRecord) -> Ordering {
self.entry_offset.cmp(&other.entry_offset)
}
}
// Manages the lookup table for detailed glyphs. Sorting is deferred
// until a lookup is actually performed; this matches the expected
// usage pattern of setting/appending all the detailed glyphs, and
// then querying without setting.
#[derive(Clone, Deserialize, Serialize)]
struct DetailedGlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_buffer: Vec<DetailedGlyph>,
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_lookup: Vec<DetailedGlyphRecord>,
lookup_is_sorted: bool,
}
impl<'a> DetailedGlyphStore {
fn new() -> DetailedGlyphStore {
DetailedGlyphStore {
detail_buffer: vec!(), // TODO: default size?
detail_lookup: vec!(),
lookup_is_sorted: false,
}
}
fn add_detailed_glyphs_for_entry(&mut self, entry_offset: ByteIndex, glyphs: &[DetailedGlyph]) {
let entry = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: self.detail_buffer.len(),
};
debug!("Adding entry[off={:?}] for detailed glyphs: {:?}", entry_offset, glyphs);
/* TODO: don't actually assert this until asserts are compiled
in/out based on severity, debug/release, etc. This assertion
would wreck the complexity of the lookup.
See Rust Issue #3647, #2228, #3627 for related information.
do self.detail_lookup.borrow |arr| {
assert!arr.contains(entry)
}
*/
self.detail_lookup.push(entry);
self.detail_buffer.extend_from_slice(glyphs);
self.lookup_is_sorted = false;
}
fn detailed_glyphs_for_entry(&'a self, entry_offset: ByteIndex, count: u16)
-> &'a [DetailedGlyph] {
debug!("Requesting detailed glyphs[n={}] for entry[off={:?}]", count, entry_offset);
// FIXME: Is this right? --pcwalton
// TODO: should fix this somewhere else
if count == 0 {
return &self.detail_buffer[0..0];
}
assert!((count as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (count as usize) <= self.detail_buffer.len());
// return a slice into the buffer
&self.detail_buffer[i.. i + count as usize]
}
fn detailed_glyph_with_index(&'a self,
entry_offset: ByteIndex,
detail_offset: u16)
-> &'a DetailedGlyph {
assert!((detail_offset as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (detail_offset as usize) < self.detail_buffer.len());
&self.detail_buffer[i + (detail_offset as usize)]
}
fn ensure_sorted(&mut self) {
if self.lookup_is_sorted {
return;
}
// Sorting a unique vector is surprisingly hard. The following
// code is a good argument for using DVecs, but they require
// immutable locations thus don't play well with freezing.
// Thar be dragons here. You have been warned. (Tips accepted.)
let mut unsorted_records: Vec<DetailedGlyphRecord> = vec!();
mem::swap(&mut self.detail_lookup, &mut unsorted_records);
let mut mut_records: Vec<DetailedGlyphRecord> = unsorted_records;
mut_records.sort_by(|a, b| {
if a < b {
Ordering::Less
} else
|
});
let mut sorted_records = mut_records;
mem::swap(&mut self.detail_lookup, &mut sorted_records);
self.lookup_is_sorted = true;
}
}
// This struct is used by GlyphStore clients to provide new glyph data.
// It should be allocated on the stack and passed by reference to GlyphStore.
#[derive(Copy, Clone)]
pub struct GlyphData {
id: GlyphId,
advance: Au,
offset: Point2D<Au>,
cluster_start: bool,
ligature_start: bool,
}
impl GlyphData {
/// Creates a new entry for one glyph.
pub fn new(id: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
cluster_start: bool,
ligature_start: bool)
-> GlyphData {
GlyphData {
id: id,
advance: advance,
offset: offset.unwrap_or(Point2D::zero()),
cluster_start: cluster_start,
ligature_start: ligature_start,
}
}
}
// This enum is a proxy that's provided to GlyphStore clients when iterating
// through glyphs (either for a particular TextRun offset, or all glyphs).
// Rather than eagerly assembling and copying glyph data, it only retrieves
// values as they are needed from the GlyphStore, using provided offsets.
#[derive(Copy, Clone)]
pub enum GlyphInfo<'a> {
Simple(&'a GlyphStore, ByteIndex),
Detail(&'a GlyphStore, ByteIndex, u16),
}
impl<'a> GlyphInfo<'a> {
pub fn id(self) -> GlyphId {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].id(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.detailed_glyph_with_index(entry_i, detail_j).id
}
}
}
#[inline(always)]
// FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
pub fn advance(self) -> Au {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].advance(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.detailed_glyph_with_index(entry_i, detail_j).advance
}
}
}
#[inline]
pub fn offset(self) -> Option<Point2D<Au>> {
match self {
GlyphInfo::Simple(_, _) => None,
GlyphInfo::Detail(store, entry_i, detail_j) => {
Some(store.detail_store.detailed_glyph_with_index(entry_i, detail_j).offset)
}
}
}
pub fn char_is_space(self) -> bool {
let (store, entry_i) = match self {
GlyphInfo::Simple(store, entry_i) => (store, entry_i),
GlyphInfo::Detail(store, entry_i, _) => (store, entry_i),
};
store.char_is_space(entry_i)
}
}
/// Stores the glyph data belonging to a text run.
///
/// Simple glyphs are stored inline in the `entry_buffer`, detailed glyphs are
/// stored as pointers into the `detail_store`.
///
/// ~~~ignore
/// +- GlyphStore --------------------------------+
/// | +---+---+---+---+---+---+---+ |
/// | entry_buffer: | | s | | s | | s | s | | d = detailed
/// | +-|-+---+-|-+---+-|-+---+---+ | s = simple
/// | | | | |
/// | | +---+-------+ |
/// | | | |
/// | +-V-+-V-+ |
/// | detail_store: | d | d | |
/// | +---+---+ |
/// +---------------------------------------------+
/// ~~~
#[derive(Clone, Deserialize, Serialize)]
pub struct GlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
/// A buffer of glyphs within the text run, in the order in which they
/// appear in the input text.
/// Any changes will also need to be reflected in
/// transmute_entry_buffer_to_u32_buffer().
entry_buffer: Vec<GlyphEntry>,
/// A store of the detailed glyph data. Detailed glyphs contained in the
/// `entry_buffer` point to locations in this data structure.
detail_store: DetailedGlyphStore,
/// A cache of the advance of the entire glyph store.
total_advance: Au,
/// A cache of the number of spaces in the entire glyph store.
total_spaces: i32,
/// Used to check if fast path should be used in glyph iteration.
has_detailed_glyphs: bool,
is_whitespace: bool,
is_rtl: bool,
}
int_range_index! {
#[derive(Deserialize, Serialize, RustcEncodable)]
#[doc = "An index that refers to a byte offset in a text run. This could \
point to the middle of a glyph."]
#[derive(HeapSizeOf)]
struct ByteIndex(isize)
}
impl<'a> GlyphStore {
/// Initializes the glyph store, but doesn't actually shape anything.
///
/// Use the `add_*` methods to store glyph data.
pub fn new(length: usize, is_whitespace: bool, is_rtl: bool) -> GlyphStore {
assert!(length > 0);
GlyphStore {
entry_buffer: vec![GlyphEntry::initial(); length],
detail_store: DetailedGlyphStore::new(),
total_advance: Au(0),
total_spaces: 0,
has_detailed_glyphs: false,
is_whitespace: is_whitespace,
is_rtl: is_rtl,
}
}
#[inline]
pub fn len(&self) -> ByteIndex {
ByteIndex(self.entry_buffer.len() as isize)
}
#[inline]
pub fn is_whitespace(&self) -> bool {
self.is_whitespace
}
pub fn finalize_changes(&mut self) {
self.detail_store.ensure_sorted();
self.cache_total_advance_and_spaces()
}
#[inline(never)]
fn cache_total_advance_and_spaces(&mut self) {
let mut total_advance = Au(0);
let mut total_spaces = 0;
for glyph in self.iter_glyphs_for_byte_range(&Range::new(ByteIndex(0), self.len())) {
total_advance = total_advance + glyph.advance();
if glyph.char_is_space() {
total_spaces += 1;
}
}
self.total_advance = total_advance;
self.total_spaces = total_spaces;
}
/// Adds a single glyph.
pub fn add_glyph_for_byte_index(&mut self,
i: ByteIndex,
character: char,
data: &GlyphData) {
let glyph_is_compressible = is_simple_glyph_id(data.id) &&
is_simple_advance(data.advance) &&
data.offset == Point2D::zero() &&
data.cluster_start; // others are stored in detail buffer
debug_assert!(data.ligature_start); // can't compress ligature continuation glyphs.
debug_assert!(i < self.len());
let mut entry = if glyph_is_compressible {
GlyphEntry::simple(data.id, data.advance)
} else {
let glyph = &[DetailedGlyph::new(data.id, data.advance, data.offset)];
self.has_detailed_glyphs = true;
self.detail_store.add_detailed_glyphs_for_entry(i, glyph);
GlyphEntry::complex(data.cluster_start, data.ligature_start, 1)
};
if character =='' {
entry.set_char_is_space()
}
self.entry_buffer[i.to_usize()] = entry;
}
pub fn add_glyphs_for_byte_index(&mut self, i: ByteIndex, data_for_glyphs: &[GlyphData]) {
assert!(i < self.len());
assert!(data_for_glyphs.len() > 0);
let glyph_count = data_for_glyphs.len();
let first_glyph_data = data_for_glyphs[0];
let glyphs_vec: Vec<DetailedGlyph> = (0..glyph_count).map(|i| {
DetailedGlyph::new(data_for_glyphs[i].id,
data_for_glyphs[i].advance,
data_for_glyphs[i].offset)
}).collect();
self.has_detailed_glyphs = true;
self.detail_store.add_detailed_glyphs_for_entry(i, &glyphs_vec);
let entry = GlyphEntry::complex(first_glyph_data.cluster_start,
first_glyph_data.ligature_start,
glyph_count);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
self.entry_buffer[i.to_usize()] = entry;
}
#[inline]
pub fn iter_glyphs_for_byte_range(&'a self, range: &Range<ByteIndex>) -> GlyphIterator<'a> {
if range.begin() >= self.len() {
panic!("iter_glyphs_for_range: range.begin beyond length!");
}
if range.end() > self.len() {
panic!("iter_glyphs_for_range: range.end beyond length!");
}
GlyphIterator {
store: self,
byte_index: if self.is_rtl { range.end() } else { range.begin() - ByteIndex(1) },
byte_range: *range,
glyph_range: None,
}
}
#[inline]
pub fn advance_for_byte_range(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
if range.begin() == ByteIndex(0) && range.end() == self.len() {
self.total_advance + extra_word_spacing * self.total_spaces
} else if!self.has_detailed_glyphs {
self.advance_for_byte_range_simple_glyphs(range, extra_word_spacing)
} else {
self.advance_for_byte_range_slow_path(range, extra_word_spacing)
}
}
#[inline]
pub fn advance_for_byte_range_slow_path(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
self.iter_glyphs_for_byte_range(range)
.fold(Au(0), |advance, glyph| {
if glyph.char_is_space() {
advance + glyph.advance() + extra_word_spacing
} else {
advance + glyph.advance()
}
})
}
#[inline]
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
fn advance_for_byte_range_simple_glyphs(&self, range: &Range<ByteIndex>, extra_word_spacing: Au) -> Au {
let advance_mask = u32x4::splat(GLYPH_ADVANCE_MASK);
let space_flag_mask = u32x4::splat(FLAG_CHAR_IS_SPACE);
let mut simd_advance = u32x4::splat(0);
let mut simd_spaces = u32x4::splat(0);
let begin = range.begin().to_usize();
let len = range.length().to_usize();
let num_simd_iterations = len / 4;
let leftover_entries = range.end().to_usize() - (len - num_simd_iterations * 4);
let buf = self.transmute_entry_buffer_to_u32_buffer();
for i in 0..num_simd_iterations {
let v = u32x4::load(buf, begin + i * 4);
let advance = (v & advance_mask) >> GLYPH_ADVANCE_SHIFT;
let spaces = (v & space_flag_mask) >> FLAG_CHAR_IS_SPACE_SHIFT;
simd_advance = simd_advance + advance;
simd_spaces = simd_spaces + spaces;
}
let advance =
(simd_advance.extract(0) +
simd_advance.extract(1) +
simd_advance.extract(2) +
simd_advance.extract(3)) as i32;
let spaces =
(simd_spaces.extract(0) +
simd_spaces.extract(1) +
simd_spaces.extract(2) +
simd_spaces.extract(3)) as i32;
let mut leftover_advance = Au(0);
let mut leftover_spaces = 0;
for i in leftover_entries..range.end().to_usize() {
leftover_advance = leftover_advance + self.entry_buffer[i].advance();
if self.entry_buffer[i].char_is_
|
{
Ordering::Greater
}
|
conditional_block
|
recolor.rs
|
use {RendTri, Shape, Color};
/// `Recolor` represents a shape which has had its color set to a new one.
#[derive(Copy, Clone, Debug)]
pub struct Recolor<S> {
shape: S,
color: Color,
}
impl<S> Recolor<S> {
pub(crate) fn new(shape: S, color: Color) -> Self
|
}
impl<S> IntoIterator for Recolor<S>
where
S: Shape,
{
type Item = RendTri;
type IntoIter = RecolorIter<S::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
RecolorIter {
iter: self.shape.into_iter(),
color: self.color,
}
}
}
/// Iterator which is produced by `Recolor`
#[derive(Clone, Debug)]
pub struct RecolorIter<I> {
iter: I,
color: Color,
}
impl<I> Iterator for RecolorIter<I>
where
I: Iterator<Item = RendTri>,
{
type Item = RendTri;
fn next(&mut self) -> Option<Self::Item> {
self.iter
.next()
.map(|t| t.map_color(|_| self.color))
}
}
|
{
Recolor {
shape: shape,
color: color,
}
}
|
identifier_body
|
recolor.rs
|
use {RendTri, Shape, Color};
|
shape: S,
color: Color,
}
impl<S> Recolor<S> {
pub(crate) fn new(shape: S, color: Color) -> Self {
Recolor {
shape: shape,
color: color,
}
}
}
impl<S> IntoIterator for Recolor<S>
where
S: Shape,
{
type Item = RendTri;
type IntoIter = RecolorIter<S::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
RecolorIter {
iter: self.shape.into_iter(),
color: self.color,
}
}
}
/// Iterator which is produced by `Recolor`
#[derive(Clone, Debug)]
pub struct RecolorIter<I> {
iter: I,
color: Color,
}
impl<I> Iterator for RecolorIter<I>
where
I: Iterator<Item = RendTri>,
{
type Item = RendTri;
fn next(&mut self) -> Option<Self::Item> {
self.iter
.next()
.map(|t| t.map_color(|_| self.color))
}
}
|
/// `Recolor` represents a shape which has had its color set to a new one.
#[derive(Copy, Clone, Debug)]
pub struct Recolor<S> {
|
random_line_split
|
recolor.rs
|
use {RendTri, Shape, Color};
/// `Recolor` represents a shape which has had its color set to a new one.
#[derive(Copy, Clone, Debug)]
pub struct
|
<S> {
shape: S,
color: Color,
}
impl<S> Recolor<S> {
pub(crate) fn new(shape: S, color: Color) -> Self {
Recolor {
shape: shape,
color: color,
}
}
}
impl<S> IntoIterator for Recolor<S>
where
S: Shape,
{
type Item = RendTri;
type IntoIter = RecolorIter<S::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
RecolorIter {
iter: self.shape.into_iter(),
color: self.color,
}
}
}
/// Iterator which is produced by `Recolor`
#[derive(Clone, Debug)]
pub struct RecolorIter<I> {
iter: I,
color: Color,
}
impl<I> Iterator for RecolorIter<I>
where
I: Iterator<Item = RendTri>,
{
type Item = RendTri;
fn next(&mut self) -> Option<Self::Item> {
self.iter
.next()
.map(|t| t.map_color(|_| self.color))
}
}
|
Recolor
|
identifier_name
|
actor_system.rs
|
use threadpool::ThreadPool;
use actor::Role;
use actor_ref::ActorRef;
use std::collections::HashMap;
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
/// A central actor system which manages the actor references and actors
///
/// Spawns the actor system with a specified number of threads in the
/// central thread pool. We suggest you spin up the same number of cores
/// that you have in your system.
///
/// You can use the num_cpus crate to estimate the number of cores on your
/// system
///
///
///
pub struct ActorSystem<'sys, 'b:'sys> {
// We can alternatively store actors in hashes so that they can be
// accessed by name. Depending on how actors are referenced this
// could be a more efficient way of referencing actors
pub pool: ThreadPool,
pub actor_refs: Rc<RefCell<HashMap<String, ActorRef<'sys, 'b>>>>
// pub actors: Rc<RefCell<HashMap<String, <Box<Role + Send +'static>>>>>
}
impl <'sys, 'b>ActorSystem<'sys, 'b> {
pub fn
|
(thread_count: usize) -> ActorSystem<'sys, 'b> {
ActorSystem {
pool: ThreadPool::new(thread_count),
actor_refs: Rc::new(RefCell::new(HashMap::<String, ActorRef<'sys, 'b>>::new())),
}
}
pub fn spawn_actor(&'sys self, name: String, role: Box<Role + Sync + Send +'static>) -> ActorRef<'sys, 'b> {
let arc_role = Arc::new(role);
let actor_ref = ActorRef::new(&self, arc_role.clone());
// let actor_ref = ActorRef::new(&self.pool, arc_role.clone());
{
let mut actor_refs = self.actor_refs.borrow_mut();
actor_refs.insert(name.clone(), actor_ref.clone());
}
let actor_refs = self.actor_refs.borrow().get(&name.clone()).unwrap().clone();
return actor_refs;
}
}
|
new
|
identifier_name
|
actor_system.rs
|
use threadpool::ThreadPool;
use actor::Role;
use actor_ref::ActorRef;
use std::collections::HashMap;
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
/// A central actor system which manages the actor references and actors
///
/// Spawns the actor system with a specified number of threads in the
/// central thread pool. We suggest you spin up the same number of cores
/// that you have in your system.
///
/// You can use the num_cpus crate to estimate the number of cores on your
/// system
///
///
|
// accessed by name. Depending on how actors are referenced this
// could be a more efficient way of referencing actors
pub pool: ThreadPool,
pub actor_refs: Rc<RefCell<HashMap<String, ActorRef<'sys, 'b>>>>
// pub actors: Rc<RefCell<HashMap<String, <Box<Role + Send +'static>>>>>
}
impl <'sys, 'b>ActorSystem<'sys, 'b> {
pub fn new(thread_count: usize) -> ActorSystem<'sys, 'b> {
ActorSystem {
pool: ThreadPool::new(thread_count),
actor_refs: Rc::new(RefCell::new(HashMap::<String, ActorRef<'sys, 'b>>::new())),
}
}
pub fn spawn_actor(&'sys self, name: String, role: Box<Role + Sync + Send +'static>) -> ActorRef<'sys, 'b> {
let arc_role = Arc::new(role);
let actor_ref = ActorRef::new(&self, arc_role.clone());
// let actor_ref = ActorRef::new(&self.pool, arc_role.clone());
{
let mut actor_refs = self.actor_refs.borrow_mut();
actor_refs.insert(name.clone(), actor_ref.clone());
}
let actor_refs = self.actor_refs.borrow().get(&name.clone()).unwrap().clone();
return actor_refs;
}
}
|
///
pub struct ActorSystem<'sys, 'b: 'sys> {
// We can alternatively store actors in hashes so that they can be
|
random_line_split
|
type-parameter-defaults-referencing-Self-ppaux.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test a default that references `Self` which is then used in an
// object type. Issue #18956. In this case, the value is supplied by
// the user, but pretty-printing the type during the error message
// caused an ICE.
trait MyAdd<Rhs=Self> { fn add(&self, other: &Rhs) -> Self; }
impl MyAdd for i32 {
fn
|
(&self, other: &i32) -> i32 { *self + *other }
}
fn main() {
let x: i32 = 5;
let y = x as MyAdd<i32>;
//~^ ERROR as `MyAdd<i32>`
}
|
add
|
identifier_name
|
type-parameter-defaults-referencing-Self-ppaux.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test a default that references `Self` which is then used in an
// object type. Issue #18956. In this case, the value is supplied by
// the user, but pretty-printing the type during the error message
// caused an ICE.
trait MyAdd<Rhs=Self> { fn add(&self, other: &Rhs) -> Self; }
impl MyAdd for i32 {
fn add(&self, other: &i32) -> i32 { *self + *other }
}
fn main()
|
{
let x: i32 = 5;
let y = x as MyAdd<i32>;
//~^ ERROR as `MyAdd<i32>`
}
|
identifier_body
|
|
type-parameter-defaults-referencing-Self-ppaux.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test a default that references `Self` which is then used in an
// object type. Issue #18956. In this case, the value is supplied by
|
impl MyAdd for i32 {
fn add(&self, other: &i32) -> i32 { *self + *other }
}
fn main() {
let x: i32 = 5;
let y = x as MyAdd<i32>;
//~^ ERROR as `MyAdd<i32>`
}
|
// the user, but pretty-printing the type during the error message
// caused an ICE.
trait MyAdd<Rhs=Self> { fn add(&self, other: &Rhs) -> Self; }
|
random_line_split
|
renderer.rs
|
extern crate raster;
use std::collections::LinkedList;
use std::cell::RefCell;
use std::rc::Rc;
use geometric::Geometric2D;
pub struct Renderer<'a> {
vertices: LinkedList<&'a Rc<RefCell<Box<Geometric2D>>>>,
image: raster::Image
}
|
Renderer {
vertices: LinkedList::new(),
image: raster::Image::blank(height, width)
}
}
pub fn save(self) {
self.save_as("test_tmp.png".to_owned());
}
pub fn add(&mut self, geo: &'a Rc<RefCell<Box<Geometric2D>>>) {
self.vertices.push_front(geo);
}
pub fn save_as(&self, filename: String) {
raster::save(&self.image, &*filename);
}
pub fn draw_outline(&mut self) {
for v in &self.vertices {
v.borrow_mut().draw_outline(&mut self.image);
}
}
pub fn draw(&mut self) {
for v in &self.vertices {
v.borrow_mut().draw(&mut self.image);
}
}
}
|
impl<'a> Renderer<'a> {
//Construct a new Renderer
pub fn new(height: i32, width: i32) -> Renderer<'a> {
|
random_line_split
|
renderer.rs
|
extern crate raster;
use std::collections::LinkedList;
use std::cell::RefCell;
use std::rc::Rc;
use geometric::Geometric2D;
pub struct Renderer<'a> {
vertices: LinkedList<&'a Rc<RefCell<Box<Geometric2D>>>>,
image: raster::Image
}
impl<'a> Renderer<'a> {
//Construct a new Renderer
pub fn new(height: i32, width: i32) -> Renderer<'a> {
Renderer {
vertices: LinkedList::new(),
image: raster::Image::blank(height, width)
}
}
pub fn save(self) {
self.save_as("test_tmp.png".to_owned());
}
pub fn add(&mut self, geo: &'a Rc<RefCell<Box<Geometric2D>>>)
|
pub fn save_as(&self, filename: String) {
raster::save(&self.image, &*filename);
}
pub fn draw_outline(&mut self) {
for v in &self.vertices {
v.borrow_mut().draw_outline(&mut self.image);
}
}
pub fn draw(&mut self) {
for v in &self.vertices {
v.borrow_mut().draw(&mut self.image);
}
}
}
|
{
self.vertices.push_front(geo);
}
|
identifier_body
|
renderer.rs
|
extern crate raster;
use std::collections::LinkedList;
use std::cell::RefCell;
use std::rc::Rc;
use geometric::Geometric2D;
pub struct Renderer<'a> {
vertices: LinkedList<&'a Rc<RefCell<Box<Geometric2D>>>>,
image: raster::Image
}
impl<'a> Renderer<'a> {
//Construct a new Renderer
pub fn
|
(height: i32, width: i32) -> Renderer<'a> {
Renderer {
vertices: LinkedList::new(),
image: raster::Image::blank(height, width)
}
}
pub fn save(self) {
self.save_as("test_tmp.png".to_owned());
}
pub fn add(&mut self, geo: &'a Rc<RefCell<Box<Geometric2D>>>) {
self.vertices.push_front(geo);
}
pub fn save_as(&self, filename: String) {
raster::save(&self.image, &*filename);
}
pub fn draw_outline(&mut self) {
for v in &self.vertices {
v.borrow_mut().draw_outline(&mut self.image);
}
}
pub fn draw(&mut self) {
for v in &self.vertices {
v.borrow_mut().draw(&mut self.image);
}
}
}
|
new
|
identifier_name
|
rparser.rs
|
//! common functions for all parsers
pub use crate::probe::{L3Info, L4Info, ProbeL4, ProbeResult};
use crate::Variant;
/// Direction of current packet in current stream
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Direction {
/// Packet is sent from client to server
ToServer,
/// Packet is sent from server to client
ToClient,
}
/// Return value from protocol probe trying to identify a protocol.
#[derive(Debug, Eq, PartialEq)]
pub enum ParseResult {
/// No error
///
/// Note that this does not mean that the parser has successfully extracted data, only that
/// there is no error
Ok,
/// Parser will not analyzer more data of this protocol
///
/// For ex, this can be used to add stream to bypass list
Stop,
/// Content is not this protocol anymore (please do not send more data, and re-analyse current)
ProtocolChanged,
/// An error occurred (continue calling parser)
Error,
/// A fatal error occurred (never call this parser again)
Fatal,
}
/// Interface of all Rusticata parsers.
///
/// A object implementing the RParser trait is an instance of a parser,
/// including the state (and all associated variables).
pub trait RParser: Send + Sync {
// XXX static functions seem to cause problems with hashmaps
// fn probe(&[u8]) -> bool;
/// Configure parser
///
/// It is up to each parser to describe valid keys and expected types for values
fn configure(&mut self, _key: u32, _value: &Variant) {}
/// Parsing function
///
/// This function is called for every packet of a connection.
///
/// Arguments:
///
/// - `self`: the state (parser instance)
/// - a slice on the packet data
/// - the direction of this packet (0: to server, 1: to client)
///
/// Return value:
///
/// `R_STATUS_OK` or `R_STATUS_FAIL`, possibly or'ed with
/// `R_STATUS_EVENTS` if parsing events were raised.
fn parse(&mut self, data: &[u8], direction: u8) -> u32 {
let d = if direction == STREAM_TOSERVER {
Direction::ToServer
} else {
Direction::ToClient
};
match self.parse_l4(data, d) {
ParseResult::Ok => R_STATUS_OK,
_ => R_STATUS_FAIL,
}
}
/// Parsing function
///
/// This function is called for every packet of a connection.
fn parse_l4(&mut self, data: &[u8], direction: Direction) -> ParseResult;
/// Request data from key
fn get(&self, _key: &str) -> Option<Variant> {
None
}
/// Returns the available keys for the `get` function
fn keys(&self) -> ::std::slice::Iter<&str>
|
}
/// Interface of a parser builder
pub trait RBuilder: Send + Sync {
fn build(&self) -> Box<dyn RParser>;
fn get_l4_probe(&self) -> Option<ProbeL4> {
None
}
}
// status: return code, events
pub const R_STATUS_EVENTS: u32 = 0x0100;
pub const R_STATUS_OK: u32 = 0x0000;
pub const R_STATUS_FAIL: u32 = 0x0001;
pub const R_STATUS_EV_MASK: u32 = 0x0f00;
pub const R_STATUS_MASK: u32 = 0x00ff;
#[macro_export]
macro_rules! r_status_is_ok {
($status:expr) => {
($status & $crate::R_STATUS_MASK) == $crate::R_STATUS_MASK
};
}
#[macro_export]
macro_rules! r_status_has_events {
($status:expr) => {
($status & $crate::R_STATUS_EV_MASK) == $crate::R_STATUS_EVENTS
};
}
// Constants
pub const STREAM_TOSERVER: u8 = 0;
pub const STREAM_TOCLIENT: u8 = 1;
|
{
[].iter()
}
|
identifier_body
|
rparser.rs
|
//! common functions for all parsers
pub use crate::probe::{L3Info, L4Info, ProbeL4, ProbeResult};
use crate::Variant;
/// Direction of current packet in current stream
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Direction {
/// Packet is sent from client to server
ToServer,
/// Packet is sent from server to client
ToClient,
}
/// Return value from protocol probe trying to identify a protocol.
#[derive(Debug, Eq, PartialEq)]
pub enum ParseResult {
/// No error
///
/// Note that this does not mean that the parser has successfully extracted data, only that
/// there is no error
Ok,
/// Parser will not analyzer more data of this protocol
///
/// For ex, this can be used to add stream to bypass list
Stop,
/// Content is not this protocol anymore (please do not send more data, and re-analyse current)
ProtocolChanged,
/// An error occurred (continue calling parser)
Error,
/// A fatal error occurred (never call this parser again)
Fatal,
}
/// Interface of all Rusticata parsers.
///
/// A object implementing the RParser trait is an instance of a parser,
/// including the state (and all associated variables).
pub trait RParser: Send + Sync {
// XXX static functions seem to cause problems with hashmaps
// fn probe(&[u8]) -> bool;
/// Configure parser
///
/// It is up to each parser to describe valid keys and expected types for values
fn configure(&mut self, _key: u32, _value: &Variant) {}
/// Parsing function
///
/// This function is called for every packet of a connection.
///
/// Arguments:
///
/// - `self`: the state (parser instance)
/// - a slice on the packet data
/// - the direction of this packet (0: to server, 1: to client)
///
/// Return value:
///
/// `R_STATUS_OK` or `R_STATUS_FAIL`, possibly or'ed with
/// `R_STATUS_EVENTS` if parsing events were raised.
fn parse(&mut self, data: &[u8], direction: u8) -> u32 {
let d = if direction == STREAM_TOSERVER {
Direction::ToServer
} else {
Direction::ToClient
};
match self.parse_l4(data, d) {
ParseResult::Ok => R_STATUS_OK,
_ => R_STATUS_FAIL,
}
}
/// Parsing function
///
/// This function is called for every packet of a connection.
fn parse_l4(&mut self, data: &[u8], direction: Direction) -> ParseResult;
/// Request data from key
fn get(&self, _key: &str) -> Option<Variant> {
None
}
/// Returns the available keys for the `get` function
fn keys(&self) -> ::std::slice::Iter<&str> {
[].iter()
}
}
/// Interface of a parser builder
pub trait RBuilder: Send + Sync {
fn build(&self) -> Box<dyn RParser>;
fn get_l4_probe(&self) -> Option<ProbeL4> {
None
}
}
// status: return code, events
pub const R_STATUS_EVENTS: u32 = 0x0100;
pub const R_STATUS_OK: u32 = 0x0000;
pub const R_STATUS_FAIL: u32 = 0x0001;
pub const R_STATUS_EV_MASK: u32 = 0x0f00;
pub const R_STATUS_MASK: u32 = 0x00ff;
#[macro_export]
macro_rules! r_status_is_ok {
($status:expr) => {
($status & $crate::R_STATUS_MASK) == $crate::R_STATUS_MASK
};
}
#[macro_export]
macro_rules! r_status_has_events {
($status:expr) => {
|
// Constants
pub const STREAM_TOSERVER: u8 = 0;
pub const STREAM_TOCLIENT: u8 = 1;
|
($status & $crate::R_STATUS_EV_MASK) == $crate::R_STATUS_EVENTS
};
}
|
random_line_split
|
rparser.rs
|
//! common functions for all parsers
pub use crate::probe::{L3Info, L4Info, ProbeL4, ProbeResult};
use crate::Variant;
/// Direction of current packet in current stream
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum
|
{
/// Packet is sent from client to server
ToServer,
/// Packet is sent from server to client
ToClient,
}
/// Return value from protocol probe trying to identify a protocol.
#[derive(Debug, Eq, PartialEq)]
pub enum ParseResult {
/// No error
///
/// Note that this does not mean that the parser has successfully extracted data, only that
/// there is no error
Ok,
/// Parser will not analyzer more data of this protocol
///
/// For ex, this can be used to add stream to bypass list
Stop,
/// Content is not this protocol anymore (please do not send more data, and re-analyse current)
ProtocolChanged,
/// An error occurred (continue calling parser)
Error,
/// A fatal error occurred (never call this parser again)
Fatal,
}
/// Interface of all Rusticata parsers.
///
/// A object implementing the RParser trait is an instance of a parser,
/// including the state (and all associated variables).
pub trait RParser: Send + Sync {
// XXX static functions seem to cause problems with hashmaps
// fn probe(&[u8]) -> bool;
/// Configure parser
///
/// It is up to each parser to describe valid keys and expected types for values
fn configure(&mut self, _key: u32, _value: &Variant) {}
/// Parsing function
///
/// This function is called for every packet of a connection.
///
/// Arguments:
///
/// - `self`: the state (parser instance)
/// - a slice on the packet data
/// - the direction of this packet (0: to server, 1: to client)
///
/// Return value:
///
/// `R_STATUS_OK` or `R_STATUS_FAIL`, possibly or'ed with
/// `R_STATUS_EVENTS` if parsing events were raised.
fn parse(&mut self, data: &[u8], direction: u8) -> u32 {
let d = if direction == STREAM_TOSERVER {
Direction::ToServer
} else {
Direction::ToClient
};
match self.parse_l4(data, d) {
ParseResult::Ok => R_STATUS_OK,
_ => R_STATUS_FAIL,
}
}
/// Parsing function
///
/// This function is called for every packet of a connection.
fn parse_l4(&mut self, data: &[u8], direction: Direction) -> ParseResult;
/// Request data from key
fn get(&self, _key: &str) -> Option<Variant> {
None
}
/// Returns the available keys for the `get` function
fn keys(&self) -> ::std::slice::Iter<&str> {
[].iter()
}
}
/// Interface of a parser builder
pub trait RBuilder: Send + Sync {
fn build(&self) -> Box<dyn RParser>;
fn get_l4_probe(&self) -> Option<ProbeL4> {
None
}
}
// status: return code, events
pub const R_STATUS_EVENTS: u32 = 0x0100;
pub const R_STATUS_OK: u32 = 0x0000;
pub const R_STATUS_FAIL: u32 = 0x0001;
pub const R_STATUS_EV_MASK: u32 = 0x0f00;
pub const R_STATUS_MASK: u32 = 0x00ff;
#[macro_export]
macro_rules! r_status_is_ok {
($status:expr) => {
($status & $crate::R_STATUS_MASK) == $crate::R_STATUS_MASK
};
}
#[macro_export]
macro_rules! r_status_has_events {
($status:expr) => {
($status & $crate::R_STATUS_EV_MASK) == $crate::R_STATUS_EVENTS
};
}
// Constants
pub const STREAM_TOSERVER: u8 = 0;
pub const STREAM_TOCLIENT: u8 = 1;
|
Direction
|
identifier_name
|
default.rs
|
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use rustc_ast::ptr::P;
use rustc_ast::walk_list;
use rustc_ast::EnumDef;
use rustc_ast::VariantData;
use rustc_ast::{Expr, MetaItem};
use rustc_errors::Applicability;
use rustc_expand::base::{Annotatable, DummyResult, ExtCtxt};
use rustc_span::symbol::Ident;
use rustc_span::symbol::{kw, sym};
use rustc_span::Span;
use smallvec::SmallVec;
pub fn expand_deriving_default(
cx: &mut ExtCtxt<'_>,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut dyn FnMut(Annotatable),
) {
item.visit_with(&mut DetectNonVariantDefaultAttr { cx });
let inline = cx.meta_word(span, sym::inline);
let attrs = vec![cx.attribute(inline)];
let trait_def = TraitDef {
span,
attributes: Vec::new(),
path: Path::new(vec![kw::Default, sym::Default]),
additional_bounds: Vec::new(),
generics: Bounds::empty(),
is_unsafe: false,
supports_unions: false,
methods: vec![MethodDef {
name: kw::Default,
generics: Bounds::empty(),
explicit_self: None,
args: Vec::new(),
ret_ty: Self_,
attributes: attrs,
is_unsafe: false,
unify_fieldless_variants: false,
combine_substructure: combine_substructure(Box::new(|cx, trait_span, substr| {
match substr.fields {
StaticStruct(_, fields) => {
default_struct_substructure(cx, trait_span, substr, fields)
}
StaticEnum(enum_def, _) => {
if!cx.sess.features_untracked().derive_default_enum {
rustc_session::parse::feature_err(
cx.parse_sess(),
sym::derive_default_enum,
span,
"deriving `Default` on enums is experimental",
)
.emit();
}
default_enum_substructure(cx, trait_span, enum_def)
}
_ => cx.span_bug(trait_span, "method in `derive(Default)`"),
}
})),
}],
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, item, push)
}
fn default_struct_substructure(
cx: &mut ExtCtxt<'_>,
trait_span: Span,
substr: &Substructure<'_>,
summary: &StaticFields,
) -> P<Expr> {
// Note that `kw::Default` is "default" and `sym::Default` is "Default"!
let default_ident = cx.std_path(&[kw::Default, sym::Default, kw::Default]);
let default_call = |span| cx.expr_call_global(span, default_ident.clone(), Vec::new());
match summary {
Unnamed(ref fields, is_tuple) => {
if!is_tuple {
cx.expr_ident(trait_span, substr.type_ident)
} else {
let exprs = fields.iter().map(|sp| default_call(*sp)).collect();
cx.expr_call_ident(trait_span, substr.type_ident, exprs)
}
}
Named(ref fields) => {
let default_fields = fields
.iter()
.map(|&(ident, span)| cx.field_imm(span, ident, default_call(span)))
.collect();
cx.expr_struct_ident(trait_span, substr.type_ident, default_fields)
}
}
}
fn default_enum_substructure(
cx: &mut ExtCtxt<'_>,
trait_span: Span,
enum_def: &EnumDef,
) -> P<Expr> {
let default_variant = match extract_default_variant(cx, enum_def, trait_span) {
Ok(value) => value,
Err(()) => return DummyResult::raw_expr(trait_span, true),
};
// At this point, we know that there is exactly one variant with a `#[default]` attribute. The
// attribute hasn't yet been validated.
if let Err(()) = validate_default_attribute(cx, default_variant) {
return DummyResult::raw_expr(trait_span, true);
}
// We now know there is exactly one unit variant with exactly one `#[default]` attribute.
cx.expr_path(cx.path(
default_variant.span,
vec![Ident::new(kw::SelfUpper, default_variant.span), default_variant.ident],
))
}
fn extract_default_variant<'a>(
cx: &mut ExtCtxt<'_>,
enum_def: &'a EnumDef,
trait_span: Span,
) -> Result<&'a rustc_ast::Variant, ()> {
let default_variants: SmallVec<[_; 1]> = enum_def
.variants
.iter()
.filter(|variant| cx.sess.contains_name(&variant.attrs, kw::Default))
.collect();
let variant = match default_variants.as_slice() {
[variant] => variant,
[] => {
let possible_defaults = enum_def
.variants
.iter()
.filter(|variant| matches!(variant.data, VariantData::Unit(..)))
.filter(|variant|!cx.sess.contains_name(&variant.attrs, sym::non_exhaustive));
let mut diag = cx.struct_span_err(trait_span, "no default declared");
diag.help("make a unit variant default by placing `#[default]` above it");
for variant in possible_defaults {
// Suggest making each unit variant default.
diag.tool_only_span_suggestion(
variant.span,
&format!("make `{}` default", variant.ident),
format!("#[default] {}", variant.ident),
Applicability::MaybeIncorrect,
);
}
diag.emit();
return Err(());
}
[first, rest @..] => {
let mut diag = cx.struct_span_err(trait_span, "multiple declared defaults");
diag.span_label(first.span, "first default");
diag.span_labels(rest.iter().map(|variant| variant.span), "additional default");
diag.note("only one variant can be default");
for variant in &default_variants {
// Suggest making each variant already tagged default.
let suggestion = default_variants
.iter()
.filter_map(|v| {
if v.ident == variant.ident {
None
} else {
Some((cx.sess.find_by_name(&v.attrs, kw::Default)?.span, String::new()))
}
})
.collect();
diag.tool_only_multipart_suggestion(
&format!("make `{}` default", variant.ident),
suggestion,
Applicability::MaybeIncorrect,
);
}
diag.emit();
return Err(());
}
};
if!matches!(variant.data, VariantData::Unit(..)) {
cx.struct_span_err(
variant.ident.span,
"the `#[default]` attribute may only be used on unit enum variants",
)
.help("consider a manual implementation of `Default`")
.emit();
return Err(());
}
if let Some(non_exhaustive_attr) = cx.sess.find_by_name(&variant.attrs, sym::non_exhaustive) {
cx.struct_span_err(variant.ident.span, "default variant must be exhaustive")
.span_label(non_exhaustive_attr.span, "declared `#[non_exhaustive]` here")
.help("consider a manual implementation of `Default`")
.emit();
return Err(());
}
Ok(variant)
}
fn validate_default_attribute(
cx: &mut ExtCtxt<'_>,
default_variant: &rustc_ast::Variant,
) -> Result<(), ()> {
let attrs: SmallVec<[_; 1]> =
cx.sess.filter_by_name(&default_variant.attrs, kw::Default).collect();
let attr = match attrs.as_slice() {
[attr] => attr,
[] => cx.bug(
"this method must only be called with a variant that has a `#[default]` attribute",
),
[first, rest @..] => {
// FIXME(jhpratt) Do we want to perform this check? It doesn't exist
// for `#[inline]`, `#[non_exhaustive]`, and presumably others.
let suggestion_text =
if rest.len() == 1 { "try removing this" } else { "try removing these" };
cx.struct_span_err(default_variant.ident.span, "multiple `#[default]` attributes")
.note("only one `#[default]` attribute is needed")
.span_label(first.span, "`#[default]` used here")
.span_label(rest[0].span, "`#[default]` used again here")
.span_help(rest.iter().map(|attr| attr.span).collect::<Vec<_>>(), suggestion_text)
// This would otherwise display the empty replacement, hence the otherwise
// repetitive `.span_help` call above.
.tool_only_multipart_suggestion(
suggestion_text,
rest.iter().map(|attr| (attr.span, String::new())).collect(),
Applicability::MachineApplicable,
)
.emit();
return Err(());
}
};
if!attr.is_word() {
cx.struct_span_err(attr.span, "`#[default]` attribute does not accept a value")
.span_suggestion_hidden(
attr.span,
"try using `#[default]`",
"#[default]".into(),
Applicability::MaybeIncorrect,
)
|
}
struct DetectNonVariantDefaultAttr<'a, 'b> {
cx: &'a ExtCtxt<'b>,
}
impl<'a, 'b> rustc_ast::visit::Visitor<'a> for DetectNonVariantDefaultAttr<'a, 'b> {
fn visit_attribute(&mut self, attr: &'a rustc_ast::Attribute) {
if attr.has_name(kw::Default) {
self.cx
.struct_span_err(
attr.span,
"the `#[default]` attribute may only be used on unit enum variants",
)
.emit();
}
rustc_ast::visit::walk_attribute(self, attr);
}
fn visit_variant(&mut self, v: &'a rustc_ast::Variant) {
self.visit_ident(v.ident);
self.visit_vis(&v.vis);
self.visit_variant_data(&v.data);
walk_list!(self, visit_anon_const, &v.disr_expr);
for attr in &v.attrs {
rustc_ast::visit::walk_attribute(self, attr);
}
}
}
|
.emit();
return Err(());
}
Ok(())
|
random_line_split
|
default.rs
|
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use rustc_ast::ptr::P;
use rustc_ast::walk_list;
use rustc_ast::EnumDef;
use rustc_ast::VariantData;
use rustc_ast::{Expr, MetaItem};
use rustc_errors::Applicability;
use rustc_expand::base::{Annotatable, DummyResult, ExtCtxt};
use rustc_span::symbol::Ident;
use rustc_span::symbol::{kw, sym};
use rustc_span::Span;
use smallvec::SmallVec;
pub fn expand_deriving_default(
cx: &mut ExtCtxt<'_>,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut dyn FnMut(Annotatable),
) {
item.visit_with(&mut DetectNonVariantDefaultAttr { cx });
let inline = cx.meta_word(span, sym::inline);
let attrs = vec![cx.attribute(inline)];
let trait_def = TraitDef {
span,
attributes: Vec::new(),
path: Path::new(vec![kw::Default, sym::Default]),
additional_bounds: Vec::new(),
generics: Bounds::empty(),
is_unsafe: false,
supports_unions: false,
methods: vec![MethodDef {
name: kw::Default,
generics: Bounds::empty(),
explicit_self: None,
args: Vec::new(),
ret_ty: Self_,
attributes: attrs,
is_unsafe: false,
unify_fieldless_variants: false,
combine_substructure: combine_substructure(Box::new(|cx, trait_span, substr| {
match substr.fields {
StaticStruct(_, fields) => {
default_struct_substructure(cx, trait_span, substr, fields)
}
StaticEnum(enum_def, _) => {
if!cx.sess.features_untracked().derive_default_enum {
rustc_session::parse::feature_err(
cx.parse_sess(),
sym::derive_default_enum,
span,
"deriving `Default` on enums is experimental",
)
.emit();
}
default_enum_substructure(cx, trait_span, enum_def)
}
_ => cx.span_bug(trait_span, "method in `derive(Default)`"),
}
})),
}],
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, item, push)
}
fn default_struct_substructure(
cx: &mut ExtCtxt<'_>,
trait_span: Span,
substr: &Substructure<'_>,
summary: &StaticFields,
) -> P<Expr>
|
}
}
}
fn default_enum_substructure(
cx: &mut ExtCtxt<'_>,
trait_span: Span,
enum_def: &EnumDef,
) -> P<Expr> {
let default_variant = match extract_default_variant(cx, enum_def, trait_span) {
Ok(value) => value,
Err(()) => return DummyResult::raw_expr(trait_span, true),
};
// At this point, we know that there is exactly one variant with a `#[default]` attribute. The
// attribute hasn't yet been validated.
if let Err(()) = validate_default_attribute(cx, default_variant) {
return DummyResult::raw_expr(trait_span, true);
}
// We now know there is exactly one unit variant with exactly one `#[default]` attribute.
cx.expr_path(cx.path(
default_variant.span,
vec![Ident::new(kw::SelfUpper, default_variant.span), default_variant.ident],
))
}
fn extract_default_variant<'a>(
cx: &mut ExtCtxt<'_>,
enum_def: &'a EnumDef,
trait_span: Span,
) -> Result<&'a rustc_ast::Variant, ()> {
let default_variants: SmallVec<[_; 1]> = enum_def
.variants
.iter()
.filter(|variant| cx.sess.contains_name(&variant.attrs, kw::Default))
.collect();
let variant = match default_variants.as_slice() {
[variant] => variant,
[] => {
let possible_defaults = enum_def
.variants
.iter()
.filter(|variant| matches!(variant.data, VariantData::Unit(..)))
.filter(|variant|!cx.sess.contains_name(&variant.attrs, sym::non_exhaustive));
let mut diag = cx.struct_span_err(trait_span, "no default declared");
diag.help("make a unit variant default by placing `#[default]` above it");
for variant in possible_defaults {
// Suggest making each unit variant default.
diag.tool_only_span_suggestion(
variant.span,
&format!("make `{}` default", variant.ident),
format!("#[default] {}", variant.ident),
Applicability::MaybeIncorrect,
);
}
diag.emit();
return Err(());
}
[first, rest @..] => {
let mut diag = cx.struct_span_err(trait_span, "multiple declared defaults");
diag.span_label(first.span, "first default");
diag.span_labels(rest.iter().map(|variant| variant.span), "additional default");
diag.note("only one variant can be default");
for variant in &default_variants {
// Suggest making each variant already tagged default.
let suggestion = default_variants
.iter()
.filter_map(|v| {
if v.ident == variant.ident {
None
} else {
Some((cx.sess.find_by_name(&v.attrs, kw::Default)?.span, String::new()))
}
})
.collect();
diag.tool_only_multipart_suggestion(
&format!("make `{}` default", variant.ident),
suggestion,
Applicability::MaybeIncorrect,
);
}
diag.emit();
return Err(());
}
};
if!matches!(variant.data, VariantData::Unit(..)) {
cx.struct_span_err(
variant.ident.span,
"the `#[default]` attribute may only be used on unit enum variants",
)
.help("consider a manual implementation of `Default`")
.emit();
return Err(());
}
if let Some(non_exhaustive_attr) = cx.sess.find_by_name(&variant.attrs, sym::non_exhaustive) {
cx.struct_span_err(variant.ident.span, "default variant must be exhaustive")
.span_label(non_exhaustive_attr.span, "declared `#[non_exhaustive]` here")
.help("consider a manual implementation of `Default`")
.emit();
return Err(());
}
Ok(variant)
}
fn validate_default_attribute(
cx: &mut ExtCtxt<'_>,
default_variant: &rustc_ast::Variant,
) -> Result<(), ()> {
let attrs: SmallVec<[_; 1]> =
cx.sess.filter_by_name(&default_variant.attrs, kw::Default).collect();
let attr = match attrs.as_slice() {
[attr] => attr,
[] => cx.bug(
"this method must only be called with a variant that has a `#[default]` attribute",
),
[first, rest @..] => {
// FIXME(jhpratt) Do we want to perform this check? It doesn't exist
// for `#[inline]`, `#[non_exhaustive]`, and presumably others.
let suggestion_text =
if rest.len() == 1 { "try removing this" } else { "try removing these" };
cx.struct_span_err(default_variant.ident.span, "multiple `#[default]` attributes")
.note("only one `#[default]` attribute is needed")
.span_label(first.span, "`#[default]` used here")
.span_label(rest[0].span, "`#[default]` used again here")
.span_help(rest.iter().map(|attr| attr.span).collect::<Vec<_>>(), suggestion_text)
// This would otherwise display the empty replacement, hence the otherwise
// repetitive `.span_help` call above.
.tool_only_multipart_suggestion(
suggestion_text,
rest.iter().map(|attr| (attr.span, String::new())).collect(),
Applicability::MachineApplicable,
)
.emit();
return Err(());
}
};
if!attr.is_word() {
cx.struct_span_err(attr.span, "`#[default]` attribute does not accept a value")
.span_suggestion_hidden(
attr.span,
"try using `#[default]`",
"#[default]".into(),
Applicability::MaybeIncorrect,
)
.emit();
return Err(());
}
Ok(())
}
struct DetectNonVariantDefaultAttr<'a, 'b> {
cx: &'a ExtCtxt<'b>,
}
impl<'a, 'b> rustc_ast::visit::Visitor<'a> for DetectNonVariantDefaultAttr<'a, 'b> {
fn visit_attribute(&mut self, attr: &'a rustc_ast::Attribute) {
if attr.has_name(kw::Default) {
self.cx
.struct_span_err(
attr.span,
"the `#[default]` attribute may only be used on unit enum variants",
)
.emit();
}
rustc_ast::visit::walk_attribute(self, attr);
}
fn visit_variant(&mut self, v: &'a rustc_ast::Variant) {
self.visit_ident(v.ident);
self.visit_vis(&v.vis);
self.visit_variant_data(&v.data);
walk_list!(self, visit_anon_const, &v.disr_expr);
for attr in &v.attrs {
rustc_ast::visit::walk_attribute(self, attr);
}
}
}
|
{
// Note that `kw::Default` is "default" and `sym::Default` is "Default"!
let default_ident = cx.std_path(&[kw::Default, sym::Default, kw::Default]);
let default_call = |span| cx.expr_call_global(span, default_ident.clone(), Vec::new());
match summary {
Unnamed(ref fields, is_tuple) => {
if !is_tuple {
cx.expr_ident(trait_span, substr.type_ident)
} else {
let exprs = fields.iter().map(|sp| default_call(*sp)).collect();
cx.expr_call_ident(trait_span, substr.type_ident, exprs)
}
}
Named(ref fields) => {
let default_fields = fields
.iter()
.map(|&(ident, span)| cx.field_imm(span, ident, default_call(span)))
.collect();
cx.expr_struct_ident(trait_span, substr.type_ident, default_fields)
|
identifier_body
|
default.rs
|
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use rustc_ast::ptr::P;
use rustc_ast::walk_list;
use rustc_ast::EnumDef;
use rustc_ast::VariantData;
use rustc_ast::{Expr, MetaItem};
use rustc_errors::Applicability;
use rustc_expand::base::{Annotatable, DummyResult, ExtCtxt};
use rustc_span::symbol::Ident;
use rustc_span::symbol::{kw, sym};
use rustc_span::Span;
use smallvec::SmallVec;
pub fn expand_deriving_default(
cx: &mut ExtCtxt<'_>,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut dyn FnMut(Annotatable),
) {
item.visit_with(&mut DetectNonVariantDefaultAttr { cx });
let inline = cx.meta_word(span, sym::inline);
let attrs = vec![cx.attribute(inline)];
let trait_def = TraitDef {
span,
attributes: Vec::new(),
path: Path::new(vec![kw::Default, sym::Default]),
additional_bounds: Vec::new(),
generics: Bounds::empty(),
is_unsafe: false,
supports_unions: false,
methods: vec![MethodDef {
name: kw::Default,
generics: Bounds::empty(),
explicit_self: None,
args: Vec::new(),
ret_ty: Self_,
attributes: attrs,
is_unsafe: false,
unify_fieldless_variants: false,
combine_substructure: combine_substructure(Box::new(|cx, trait_span, substr| {
match substr.fields {
StaticStruct(_, fields) => {
default_struct_substructure(cx, trait_span, substr, fields)
}
StaticEnum(enum_def, _) => {
if!cx.sess.features_untracked().derive_default_enum {
rustc_session::parse::feature_err(
cx.parse_sess(),
sym::derive_default_enum,
span,
"deriving `Default` on enums is experimental",
)
.emit();
}
default_enum_substructure(cx, trait_span, enum_def)
}
_ => cx.span_bug(trait_span, "method in `derive(Default)`"),
}
})),
}],
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, item, push)
}
fn default_struct_substructure(
cx: &mut ExtCtxt<'_>,
trait_span: Span,
substr: &Substructure<'_>,
summary: &StaticFields,
) -> P<Expr> {
// Note that `kw::Default` is "default" and `sym::Default` is "Default"!
let default_ident = cx.std_path(&[kw::Default, sym::Default, kw::Default]);
let default_call = |span| cx.expr_call_global(span, default_ident.clone(), Vec::new());
match summary {
Unnamed(ref fields, is_tuple) => {
if!is_tuple {
cx.expr_ident(trait_span, substr.type_ident)
} else {
let exprs = fields.iter().map(|sp| default_call(*sp)).collect();
cx.expr_call_ident(trait_span, substr.type_ident, exprs)
}
}
Named(ref fields) => {
let default_fields = fields
.iter()
.map(|&(ident, span)| cx.field_imm(span, ident, default_call(span)))
.collect();
cx.expr_struct_ident(trait_span, substr.type_ident, default_fields)
}
}
}
fn default_enum_substructure(
cx: &mut ExtCtxt<'_>,
trait_span: Span,
enum_def: &EnumDef,
) -> P<Expr> {
let default_variant = match extract_default_variant(cx, enum_def, trait_span) {
Ok(value) => value,
Err(()) => return DummyResult::raw_expr(trait_span, true),
};
// At this point, we know that there is exactly one variant with a `#[default]` attribute. The
// attribute hasn't yet been validated.
if let Err(()) = validate_default_attribute(cx, default_variant) {
return DummyResult::raw_expr(trait_span, true);
}
// We now know there is exactly one unit variant with exactly one `#[default]` attribute.
cx.expr_path(cx.path(
default_variant.span,
vec![Ident::new(kw::SelfUpper, default_variant.span), default_variant.ident],
))
}
fn extract_default_variant<'a>(
cx: &mut ExtCtxt<'_>,
enum_def: &'a EnumDef,
trait_span: Span,
) -> Result<&'a rustc_ast::Variant, ()> {
let default_variants: SmallVec<[_; 1]> = enum_def
.variants
.iter()
.filter(|variant| cx.sess.contains_name(&variant.attrs, kw::Default))
.collect();
let variant = match default_variants.as_slice() {
[variant] => variant,
[] => {
let possible_defaults = enum_def
.variants
.iter()
.filter(|variant| matches!(variant.data, VariantData::Unit(..)))
.filter(|variant|!cx.sess.contains_name(&variant.attrs, sym::non_exhaustive));
let mut diag = cx.struct_span_err(trait_span, "no default declared");
diag.help("make a unit variant default by placing `#[default]` above it");
for variant in possible_defaults {
// Suggest making each unit variant default.
diag.tool_only_span_suggestion(
variant.span,
&format!("make `{}` default", variant.ident),
format!("#[default] {}", variant.ident),
Applicability::MaybeIncorrect,
);
}
diag.emit();
return Err(());
}
[first, rest @..] => {
let mut diag = cx.struct_span_err(trait_span, "multiple declared defaults");
diag.span_label(first.span, "first default");
diag.span_labels(rest.iter().map(|variant| variant.span), "additional default");
diag.note("only one variant can be default");
for variant in &default_variants {
// Suggest making each variant already tagged default.
let suggestion = default_variants
.iter()
.filter_map(|v| {
if v.ident == variant.ident {
None
} else {
Some((cx.sess.find_by_name(&v.attrs, kw::Default)?.span, String::new()))
}
})
.collect();
diag.tool_only_multipart_suggestion(
&format!("make `{}` default", variant.ident),
suggestion,
Applicability::MaybeIncorrect,
);
}
diag.emit();
return Err(());
}
};
if!matches!(variant.data, VariantData::Unit(..)) {
cx.struct_span_err(
variant.ident.span,
"the `#[default]` attribute may only be used on unit enum variants",
)
.help("consider a manual implementation of `Default`")
.emit();
return Err(());
}
if let Some(non_exhaustive_attr) = cx.sess.find_by_name(&variant.attrs, sym::non_exhaustive) {
cx.struct_span_err(variant.ident.span, "default variant must be exhaustive")
.span_label(non_exhaustive_attr.span, "declared `#[non_exhaustive]` here")
.help("consider a manual implementation of `Default`")
.emit();
return Err(());
}
Ok(variant)
}
fn
|
(
cx: &mut ExtCtxt<'_>,
default_variant: &rustc_ast::Variant,
) -> Result<(), ()> {
let attrs: SmallVec<[_; 1]> =
cx.sess.filter_by_name(&default_variant.attrs, kw::Default).collect();
let attr = match attrs.as_slice() {
[attr] => attr,
[] => cx.bug(
"this method must only be called with a variant that has a `#[default]` attribute",
),
[first, rest @..] => {
// FIXME(jhpratt) Do we want to perform this check? It doesn't exist
// for `#[inline]`, `#[non_exhaustive]`, and presumably others.
let suggestion_text =
if rest.len() == 1 { "try removing this" } else { "try removing these" };
cx.struct_span_err(default_variant.ident.span, "multiple `#[default]` attributes")
.note("only one `#[default]` attribute is needed")
.span_label(first.span, "`#[default]` used here")
.span_label(rest[0].span, "`#[default]` used again here")
.span_help(rest.iter().map(|attr| attr.span).collect::<Vec<_>>(), suggestion_text)
// This would otherwise display the empty replacement, hence the otherwise
// repetitive `.span_help` call above.
.tool_only_multipart_suggestion(
suggestion_text,
rest.iter().map(|attr| (attr.span, String::new())).collect(),
Applicability::MachineApplicable,
)
.emit();
return Err(());
}
};
if!attr.is_word() {
cx.struct_span_err(attr.span, "`#[default]` attribute does not accept a value")
.span_suggestion_hidden(
attr.span,
"try using `#[default]`",
"#[default]".into(),
Applicability::MaybeIncorrect,
)
.emit();
return Err(());
}
Ok(())
}
struct DetectNonVariantDefaultAttr<'a, 'b> {
cx: &'a ExtCtxt<'b>,
}
impl<'a, 'b> rustc_ast::visit::Visitor<'a> for DetectNonVariantDefaultAttr<'a, 'b> {
fn visit_attribute(&mut self, attr: &'a rustc_ast::Attribute) {
if attr.has_name(kw::Default) {
self.cx
.struct_span_err(
attr.span,
"the `#[default]` attribute may only be used on unit enum variants",
)
.emit();
}
rustc_ast::visit::walk_attribute(self, attr);
}
fn visit_variant(&mut self, v: &'a rustc_ast::Variant) {
self.visit_ident(v.ident);
self.visit_vis(&v.vis);
self.visit_variant_data(&v.data);
walk_list!(self, visit_anon_const, &v.disr_expr);
for attr in &v.attrs {
rustc_ast::visit::walk_attribute(self, attr);
}
}
}
|
validate_default_attribute
|
identifier_name
|
default.rs
|
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use rustc_ast::ptr::P;
use rustc_ast::walk_list;
use rustc_ast::EnumDef;
use rustc_ast::VariantData;
use rustc_ast::{Expr, MetaItem};
use rustc_errors::Applicability;
use rustc_expand::base::{Annotatable, DummyResult, ExtCtxt};
use rustc_span::symbol::Ident;
use rustc_span::symbol::{kw, sym};
use rustc_span::Span;
use smallvec::SmallVec;
pub fn expand_deriving_default(
cx: &mut ExtCtxt<'_>,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut dyn FnMut(Annotatable),
) {
item.visit_with(&mut DetectNonVariantDefaultAttr { cx });
let inline = cx.meta_word(span, sym::inline);
let attrs = vec![cx.attribute(inline)];
let trait_def = TraitDef {
span,
attributes: Vec::new(),
path: Path::new(vec![kw::Default, sym::Default]),
additional_bounds: Vec::new(),
generics: Bounds::empty(),
is_unsafe: false,
supports_unions: false,
methods: vec![MethodDef {
name: kw::Default,
generics: Bounds::empty(),
explicit_self: None,
args: Vec::new(),
ret_ty: Self_,
attributes: attrs,
is_unsafe: false,
unify_fieldless_variants: false,
combine_substructure: combine_substructure(Box::new(|cx, trait_span, substr| {
match substr.fields {
StaticStruct(_, fields) => {
default_struct_substructure(cx, trait_span, substr, fields)
}
StaticEnum(enum_def, _) => {
if!cx.sess.features_untracked().derive_default_enum {
rustc_session::parse::feature_err(
cx.parse_sess(),
sym::derive_default_enum,
span,
"deriving `Default` on enums is experimental",
)
.emit();
}
default_enum_substructure(cx, trait_span, enum_def)
}
_ => cx.span_bug(trait_span, "method in `derive(Default)`"),
}
})),
}],
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, item, push)
}
fn default_struct_substructure(
cx: &mut ExtCtxt<'_>,
trait_span: Span,
substr: &Substructure<'_>,
summary: &StaticFields,
) -> P<Expr> {
// Note that `kw::Default` is "default" and `sym::Default` is "Default"!
let default_ident = cx.std_path(&[kw::Default, sym::Default, kw::Default]);
let default_call = |span| cx.expr_call_global(span, default_ident.clone(), Vec::new());
match summary {
Unnamed(ref fields, is_tuple) =>
|
Named(ref fields) => {
let default_fields = fields
.iter()
.map(|&(ident, span)| cx.field_imm(span, ident, default_call(span)))
.collect();
cx.expr_struct_ident(trait_span, substr.type_ident, default_fields)
}
}
}
fn default_enum_substructure(
cx: &mut ExtCtxt<'_>,
trait_span: Span,
enum_def: &EnumDef,
) -> P<Expr> {
let default_variant = match extract_default_variant(cx, enum_def, trait_span) {
Ok(value) => value,
Err(()) => return DummyResult::raw_expr(trait_span, true),
};
// At this point, we know that there is exactly one variant with a `#[default]` attribute. The
// attribute hasn't yet been validated.
if let Err(()) = validate_default_attribute(cx, default_variant) {
return DummyResult::raw_expr(trait_span, true);
}
// We now know there is exactly one unit variant with exactly one `#[default]` attribute.
cx.expr_path(cx.path(
default_variant.span,
vec![Ident::new(kw::SelfUpper, default_variant.span), default_variant.ident],
))
}
fn extract_default_variant<'a>(
cx: &mut ExtCtxt<'_>,
enum_def: &'a EnumDef,
trait_span: Span,
) -> Result<&'a rustc_ast::Variant, ()> {
let default_variants: SmallVec<[_; 1]> = enum_def
.variants
.iter()
.filter(|variant| cx.sess.contains_name(&variant.attrs, kw::Default))
.collect();
let variant = match default_variants.as_slice() {
[variant] => variant,
[] => {
let possible_defaults = enum_def
.variants
.iter()
.filter(|variant| matches!(variant.data, VariantData::Unit(..)))
.filter(|variant|!cx.sess.contains_name(&variant.attrs, sym::non_exhaustive));
let mut diag = cx.struct_span_err(trait_span, "no default declared");
diag.help("make a unit variant default by placing `#[default]` above it");
for variant in possible_defaults {
// Suggest making each unit variant default.
diag.tool_only_span_suggestion(
variant.span,
&format!("make `{}` default", variant.ident),
format!("#[default] {}", variant.ident),
Applicability::MaybeIncorrect,
);
}
diag.emit();
return Err(());
}
[first, rest @..] => {
let mut diag = cx.struct_span_err(trait_span, "multiple declared defaults");
diag.span_label(first.span, "first default");
diag.span_labels(rest.iter().map(|variant| variant.span), "additional default");
diag.note("only one variant can be default");
for variant in &default_variants {
// Suggest making each variant already tagged default.
let suggestion = default_variants
.iter()
.filter_map(|v| {
if v.ident == variant.ident {
None
} else {
Some((cx.sess.find_by_name(&v.attrs, kw::Default)?.span, String::new()))
}
})
.collect();
diag.tool_only_multipart_suggestion(
&format!("make `{}` default", variant.ident),
suggestion,
Applicability::MaybeIncorrect,
);
}
diag.emit();
return Err(());
}
};
if!matches!(variant.data, VariantData::Unit(..)) {
cx.struct_span_err(
variant.ident.span,
"the `#[default]` attribute may only be used on unit enum variants",
)
.help("consider a manual implementation of `Default`")
.emit();
return Err(());
}
if let Some(non_exhaustive_attr) = cx.sess.find_by_name(&variant.attrs, sym::non_exhaustive) {
cx.struct_span_err(variant.ident.span, "default variant must be exhaustive")
.span_label(non_exhaustive_attr.span, "declared `#[non_exhaustive]` here")
.help("consider a manual implementation of `Default`")
.emit();
return Err(());
}
Ok(variant)
}
fn validate_default_attribute(
cx: &mut ExtCtxt<'_>,
default_variant: &rustc_ast::Variant,
) -> Result<(), ()> {
let attrs: SmallVec<[_; 1]> =
cx.sess.filter_by_name(&default_variant.attrs, kw::Default).collect();
let attr = match attrs.as_slice() {
[attr] => attr,
[] => cx.bug(
"this method must only be called with a variant that has a `#[default]` attribute",
),
[first, rest @..] => {
// FIXME(jhpratt) Do we want to perform this check? It doesn't exist
// for `#[inline]`, `#[non_exhaustive]`, and presumably others.
let suggestion_text =
if rest.len() == 1 { "try removing this" } else { "try removing these" };
cx.struct_span_err(default_variant.ident.span, "multiple `#[default]` attributes")
.note("only one `#[default]` attribute is needed")
.span_label(first.span, "`#[default]` used here")
.span_label(rest[0].span, "`#[default]` used again here")
.span_help(rest.iter().map(|attr| attr.span).collect::<Vec<_>>(), suggestion_text)
// This would otherwise display the empty replacement, hence the otherwise
// repetitive `.span_help` call above.
.tool_only_multipart_suggestion(
suggestion_text,
rest.iter().map(|attr| (attr.span, String::new())).collect(),
Applicability::MachineApplicable,
)
.emit();
return Err(());
}
};
if!attr.is_word() {
cx.struct_span_err(attr.span, "`#[default]` attribute does not accept a value")
.span_suggestion_hidden(
attr.span,
"try using `#[default]`",
"#[default]".into(),
Applicability::MaybeIncorrect,
)
.emit();
return Err(());
}
Ok(())
}
struct DetectNonVariantDefaultAttr<'a, 'b> {
cx: &'a ExtCtxt<'b>,
}
impl<'a, 'b> rustc_ast::visit::Visitor<'a> for DetectNonVariantDefaultAttr<'a, 'b> {
fn visit_attribute(&mut self, attr: &'a rustc_ast::Attribute) {
if attr.has_name(kw::Default) {
self.cx
.struct_span_err(
attr.span,
"the `#[default]` attribute may only be used on unit enum variants",
)
.emit();
}
rustc_ast::visit::walk_attribute(self, attr);
}
fn visit_variant(&mut self, v: &'a rustc_ast::Variant) {
self.visit_ident(v.ident);
self.visit_vis(&v.vis);
self.visit_variant_data(&v.data);
walk_list!(self, visit_anon_const, &v.disr_expr);
for attr in &v.attrs {
rustc_ast::visit::walk_attribute(self, attr);
}
}
}
|
{
if !is_tuple {
cx.expr_ident(trait_span, substr.type_ident)
} else {
let exprs = fields.iter().map(|sp| default_call(*sp)).collect();
cx.expr_call_ident(trait_span, substr.type_ident, exprs)
}
}
|
conditional_block
|
filesystem.rs
|
use std::path::Path;
use std::fs::File;
use std::io::prelude::*;
use std::env;
pub fn check_extension(path: &Path, valid_ext: &[&str]) -> bool {
let ext = &path.extension().expect("The file has no extension").to_str().expect("Extension is not valid utf8");
for vext in valid_ext.iter() {
if vext == ext {
return true;
}
}
false
}
pub fn read_file(path_str: &str) -> String
|
Err(msg) => panic!("Found non valid utf8 characters in {} : {}.", path.display(), msg)
}
},
Err(msg) => panic!("Error reading file {} : {}.", path.display(), msg)
}
}
|
{
let mut path = env::current_dir().unwrap();
path.push(Path::new(path_str));
if !path.exists() {
panic!("Error reading file, path {} doesn't exist.", path.display());
}
let mut f = match File::open(&path) {
Ok(f) => f,
Err(msg) => panic!("Error reading file {} : {}.", path.display(), msg)
};
// read bytes and return as str
let mut bytes = Vec::new();
match f.read_to_end(&mut bytes) {
Ok(_) => {
match String::from_utf8(bytes) {
Ok(s) => s,
|
identifier_body
|
filesystem.rs
|
use std::path::Path;
use std::fs::File;
use std::io::prelude::*;
use std::env;
pub fn check_extension(path: &Path, valid_ext: &[&str]) -> bool {
let ext = &path.extension().expect("The file has no extension").to_str().expect("Extension is not valid utf8");
for vext in valid_ext.iter() {
if vext == ext {
return true;
}
}
false
}
pub fn read_file(path_str: &str) -> String {
let mut path = env::current_dir().unwrap();
path.push(Path::new(path_str));
if!path.exists()
|
let mut f = match File::open(&path) {
Ok(f) => f,
Err(msg) => panic!("Error reading file {} : {}.", path.display(), msg)
};
// read bytes and return as str
let mut bytes = Vec::new();
match f.read_to_end(&mut bytes) {
Ok(_) => {
match String::from_utf8(bytes) {
Ok(s) => s,
Err(msg) => panic!("Found non valid utf8 characters in {} : {}.", path.display(), msg)
}
},
Err(msg) => panic!("Error reading file {} : {}.", path.display(), msg)
}
}
|
{
panic!("Error reading file, path {} doesn't exist.", path.display());
}
|
conditional_block
|
filesystem.rs
|
use std::path::Path;
use std::fs::File;
use std::io::prelude::*;
use std::env;
pub fn
|
(path: &Path, valid_ext: &[&str]) -> bool {
let ext = &path.extension().expect("The file has no extension").to_str().expect("Extension is not valid utf8");
for vext in valid_ext.iter() {
if vext == ext {
return true;
}
}
false
}
pub fn read_file(path_str: &str) -> String {
let mut path = env::current_dir().unwrap();
path.push(Path::new(path_str));
if!path.exists() {
panic!("Error reading file, path {} doesn't exist.", path.display());
}
let mut f = match File::open(&path) {
Ok(f) => f,
Err(msg) => panic!("Error reading file {} : {}.", path.display(), msg)
};
// read bytes and return as str
let mut bytes = Vec::new();
match f.read_to_end(&mut bytes) {
Ok(_) => {
match String::from_utf8(bytes) {
Ok(s) => s,
Err(msg) => panic!("Found non valid utf8 characters in {} : {}.", path.display(), msg)
}
},
Err(msg) => panic!("Error reading file {} : {}.", path.display(), msg)
}
}
|
check_extension
|
identifier_name
|
filesystem.rs
|
use std::path::Path;
use std::fs::File;
use std::io::prelude::*;
use std::env;
pub fn check_extension(path: &Path, valid_ext: &[&str]) -> bool {
let ext = &path.extension().expect("The file has no extension").to_str().expect("Extension is not valid utf8");
for vext in valid_ext.iter() {
if vext == ext {
return true;
}
}
false
}
pub fn read_file(path_str: &str) -> String {
let mut path = env::current_dir().unwrap();
path.push(Path::new(path_str));
if!path.exists() {
panic!("Error reading file, path {} doesn't exist.", path.display());
}
let mut f = match File::open(&path) {
Ok(f) => f,
Err(msg) => panic!("Error reading file {} : {}.", path.display(), msg)
};
// read bytes and return as str
let mut bytes = Vec::new();
match f.read_to_end(&mut bytes) {
Ok(_) => {
match String::from_utf8(bytes) {
Ok(s) => s,
Err(msg) => panic!("Found non valid utf8 characters in {} : {}.", path.display(), msg)
}
|
}
|
},
Err(msg) => panic!("Error reading file {} : {}.", path.display(), msg)
}
|
random_line_split
|
error.rs
|
// Copyright (c) 2016 Nikita Pekin and the smexybot contributors
// See the README.md file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hyper;
use serde_json;
use std::error::Error as StdError;
use std::fmt;
use std::io;
use std::result::Result as StdResult;
use url;
/// A convenient alias type for results for `smexybot`.
pub type Result<T> = StdResult<T, Error>;
/// Represents errors which occur while using Smexybot.
#[derive(Debug)]
pub enum Error {
/// A `hyper` crate error.
Hyper(hyper::Error),
/// An IO error was encountered.
Io(io::Error),
/// A `serde` crate error.
Serde(serde_json::Error),
/// Error while parsing a URL.
UrlParse(url::ParseError),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match *self {
Hyper(ref e) => e.fmt(f),
Io(ref e) => e.fmt(f),
Serde(ref e) => e.fmt(f),
UrlParse(ref e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn description(&self) -> &str {
use self::Error::*;
match *self {
Hyper(ref e) => e.description(),
Io(ref e) => e.description(),
Serde(ref e) => e.description(),
UrlParse(ref e) => e.description(),
}
}
fn
|
(&self) -> Option<&StdError> {
use self::Error::*;
match *self {
Hyper(ref e) => e.cause(),
Io(ref e) => e.cause(),
Serde(ref e) => e.cause(),
UrlParse(ref e) => e.cause(),
}
}
}
impl From<hyper::Error> for Error {
fn from(error: hyper::Error) -> Error {
Error::Hyper(error)
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::Io(error)
}
}
impl From<serde_json::Error> for Error {
fn from(error: serde_json::Error) -> Error {
Error::Serde(error)
}
}
impl From<url::ParseError> for Error {
fn from(error: url::ParseError) -> Error {
Error::UrlParse(error)
}
}
|
cause
|
identifier_name
|
error.rs
|
// Copyright (c) 2016 Nikita Pekin and the smexybot contributors
// See the README.md file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hyper;
use serde_json;
use std::error::Error as StdError;
use std::fmt;
use std::io;
use std::result::Result as StdResult;
use url;
/// A convenient alias type for results for `smexybot`.
pub type Result<T> = StdResult<T, Error>;
/// Represents errors which occur while using Smexybot.
#[derive(Debug)]
pub enum Error {
|
Io(io::Error),
/// A `serde` crate error.
Serde(serde_json::Error),
/// Error while parsing a URL.
UrlParse(url::ParseError),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match *self {
Hyper(ref e) => e.fmt(f),
Io(ref e) => e.fmt(f),
Serde(ref e) => e.fmt(f),
UrlParse(ref e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn description(&self) -> &str {
use self::Error::*;
match *self {
Hyper(ref e) => e.description(),
Io(ref e) => e.description(),
Serde(ref e) => e.description(),
UrlParse(ref e) => e.description(),
}
}
fn cause(&self) -> Option<&StdError> {
use self::Error::*;
match *self {
Hyper(ref e) => e.cause(),
Io(ref e) => e.cause(),
Serde(ref e) => e.cause(),
UrlParse(ref e) => e.cause(),
}
}
}
impl From<hyper::Error> for Error {
fn from(error: hyper::Error) -> Error {
Error::Hyper(error)
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::Io(error)
}
}
impl From<serde_json::Error> for Error {
fn from(error: serde_json::Error) -> Error {
Error::Serde(error)
}
}
impl From<url::ParseError> for Error {
fn from(error: url::ParseError) -> Error {
Error::UrlParse(error)
}
}
|
/// A `hyper` crate error.
Hyper(hyper::Error),
/// An IO error was encountered.
|
random_line_split
|
error.rs
|
// Copyright (c) 2016 Nikita Pekin and the smexybot contributors
// See the README.md file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hyper;
use serde_json;
use std::error::Error as StdError;
use std::fmt;
use std::io;
use std::result::Result as StdResult;
use url;
/// A convenient alias type for results for `smexybot`.
pub type Result<T> = StdResult<T, Error>;
/// Represents errors which occur while using Smexybot.
#[derive(Debug)]
pub enum Error {
/// A `hyper` crate error.
Hyper(hyper::Error),
/// An IO error was encountered.
Io(io::Error),
/// A `serde` crate error.
Serde(serde_json::Error),
/// Error while parsing a URL.
UrlParse(url::ParseError),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
|
}
impl StdError for Error {
fn description(&self) -> &str {
use self::Error::*;
match *self {
Hyper(ref e) => e.description(),
Io(ref e) => e.description(),
Serde(ref e) => e.description(),
UrlParse(ref e) => e.description(),
}
}
fn cause(&self) -> Option<&StdError> {
use self::Error::*;
match *self {
Hyper(ref e) => e.cause(),
Io(ref e) => e.cause(),
Serde(ref e) => e.cause(),
UrlParse(ref e) => e.cause(),
}
}
}
impl From<hyper::Error> for Error {
fn from(error: hyper::Error) -> Error {
Error::Hyper(error)
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::Io(error)
}
}
impl From<serde_json::Error> for Error {
fn from(error: serde_json::Error) -> Error {
Error::Serde(error)
}
}
impl From<url::ParseError> for Error {
fn from(error: url::ParseError) -> Error {
Error::UrlParse(error)
}
}
|
{
use self::Error::*;
match *self {
Hyper(ref e) => e.fmt(f),
Io(ref e) => e.fmt(f),
Serde(ref e) => e.fmt(f),
UrlParse(ref e) => e.fmt(f),
}
}
|
identifier_body
|
collision.rs
|
use hlt::entity::{Entity, Position};
pub fn
|
<E: Entity, F: Entity, G: Entity>(start: &E, end: &F, circle: &G, fudge: f64) -> bool {
let Position(start_x, start_y) = start.get_position();
let Position(end_x, end_y) = end.get_position();
let Position(circle_x, circle_y) = circle.get_position();
let dx = end_x - start_x;
let dy = end_y - start_y;
let a = dx.powi(2) + dy.powi(2);
let b = -2.0 * (start_x.powi(2) - start_x*end_x - start_x*circle_x + end_x*circle_x +
start_y.powi(2) - start_y*end_y - start_y*circle_y + end_y*circle_y);
if a == 0.0 {
// Start and end are the same point.
return start.calculate_distance_between(circle) <= circle.get_radius() + fudge;
}
let &t = [-b / (2.0 * a), 1.0].iter().min_by(|x, y| x.partial_cmp(y).unwrap()).unwrap();
if t < 0.0 {
return false;
}
let closest_x = start_x + dx * t;
let closest_y = start_y + dy * t;
let closest_distance = Position(closest_x, closest_y).calculate_distance_between(circle);
return closest_distance <= circle.get_radius() + fudge
}
|
intersect_segment_circle
|
identifier_name
|
collision.rs
|
use hlt::entity::{Entity, Position};
pub fn intersect_segment_circle<E: Entity, F: Entity, G: Entity>(start: &E, end: &F, circle: &G, fudge: f64) -> bool {
let Position(start_x, start_y) = start.get_position();
let Position(end_x, end_y) = end.get_position();
let Position(circle_x, circle_y) = circle.get_position();
let dx = end_x - start_x;
let dy = end_y - start_y;
let a = dx.powi(2) + dy.powi(2);
let b = -2.0 * (start_x.powi(2) - start_x*end_x - start_x*circle_x + end_x*circle_x +
start_y.powi(2) - start_y*end_y - start_y*circle_y + end_y*circle_y);
if a == 0.0 {
// Start and end are the same point.
return start.calculate_distance_between(circle) <= circle.get_radius() + fudge;
}
let &t = [-b / (2.0 * a), 1.0].iter().min_by(|x, y| x.partial_cmp(y).unwrap()).unwrap();
if t < 0.0
|
let closest_x = start_x + dx * t;
let closest_y = start_y + dy * t;
let closest_distance = Position(closest_x, closest_y).calculate_distance_between(circle);
return closest_distance <= circle.get_radius() + fudge
}
|
{
return false;
}
|
conditional_block
|
collision.rs
|
let Position(end_x, end_y) = end.get_position();
let Position(circle_x, circle_y) = circle.get_position();
let dx = end_x - start_x;
let dy = end_y - start_y;
let a = dx.powi(2) + dy.powi(2);
let b = -2.0 * (start_x.powi(2) - start_x*end_x - start_x*circle_x + end_x*circle_x +
start_y.powi(2) - start_y*end_y - start_y*circle_y + end_y*circle_y);
if a == 0.0 {
// Start and end are the same point.
return start.calculate_distance_between(circle) <= circle.get_radius() + fudge;
}
let &t = [-b / (2.0 * a), 1.0].iter().min_by(|x, y| x.partial_cmp(y).unwrap()).unwrap();
if t < 0.0 {
return false;
}
let closest_x = start_x + dx * t;
let closest_y = start_y + dy * t;
let closest_distance = Position(closest_x, closest_y).calculate_distance_between(circle);
return closest_distance <= circle.get_radius() + fudge
}
|
use hlt::entity::{Entity, Position};
pub fn intersect_segment_circle<E: Entity, F: Entity, G: Entity>(start: &E, end: &F, circle: &G, fudge: f64) -> bool {
let Position(start_x, start_y) = start.get_position();
|
random_line_split
|
|
collision.rs
|
use hlt::entity::{Entity, Position};
pub fn intersect_segment_circle<E: Entity, F: Entity, G: Entity>(start: &E, end: &F, circle: &G, fudge: f64) -> bool
|
let closest_x = start_x + dx * t;
let closest_y = start_y + dy * t;
let closest_distance = Position(closest_x, closest_y).calculate_distance_between(circle);
return closest_distance <= circle.get_radius() + fudge
}
|
{
let Position(start_x, start_y) = start.get_position();
let Position(end_x, end_y) = end.get_position();
let Position(circle_x, circle_y) = circle.get_position();
let dx = end_x - start_x;
let dy = end_y - start_y;
let a = dx.powi(2) + dy.powi(2);
let b = -2.0 * (start_x.powi(2) - start_x*end_x - start_x*circle_x + end_x*circle_x +
start_y.powi(2) - start_y*end_y - start_y*circle_y + end_y*circle_y);
if a == 0.0 {
// Start and end are the same point.
return start.calculate_distance_between(circle) <= circle.get_radius() + fudge;
}
let &t = [-b / (2.0 * a), 1.0].iter().min_by(|x, y| x.partial_cmp(y).unwrap()).unwrap();
if t < 0.0 {
return false;
}
|
identifier_body
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types that share their serialization implementations
//! for both specified and computed values.
use counter_style::{Symbols, parse_counter_style_name};
use cssparser::Parser;
use parser::{Parse, ParserContext};
use std::fmt;
use style_traits::{Comma, OneOrMoreSeparated, ParseError, StyleParseError, ToCss};
use super::CustomIdent;
pub mod background;
pub mod basic_shape;
pub mod border;
#[path = "box.rs"]
pub mod box_;
pub mod effects;
pub mod flex;
#[cfg(feature = "gecko")]
pub mod gecko;
pub mod grid;
pub mod image;
pub mod position;
pub mod rect;
pub mod svg;
pub mod text;
pub mod transform;
// https://drafts.csswg.org/css-counter-styles/#typedef-symbols-type
define_css_keyword_enum! { SymbolsType:
"cyclic" => Cyclic,
"numeric" => Numeric,
"alphabetic" => Alphabetic,
"symbolic" => Symbolic,
"fixed" => Fixed,
}
add_impls_for_keyword_enum!(SymbolsType);
#[cfg(feature = "gecko")]
impl SymbolsType {
/// Convert symbols type to their corresponding Gecko values.
pub fn to_gecko_keyword(self) -> u8 {
use gecko_bindings::structs;
match self {
SymbolsType::Cyclic => structs::NS_STYLE_COUNTER_SYSTEM_CYCLIC as u8,
SymbolsType::Numeric => structs::NS_STYLE_COUNTER_SYSTEM_NUMERIC as u8,
SymbolsType::Alphabetic => structs::NS_STYLE_COUNTER_SYSTEM_ALPHABETIC as u8,
SymbolsType::Symbolic => structs::NS_STYLE_COUNTER_SYSTEM_SYMBOLIC as u8,
SymbolsType::Fixed => structs::NS_STYLE_COUNTER_SYSTEM_FIXED as u8,
}
}
/// Convert Gecko value to symbol type.
pub fn from_gecko_keyword(gecko_value: u32) -> SymbolsType {
use gecko_bindings::structs;
match gecko_value {
structs::NS_STYLE_COUNTER_SYSTEM_CYCLIC => SymbolsType::Cyclic,
structs::NS_STYLE_COUNTER_SYSTEM_NUMERIC => SymbolsType::Numeric,
structs::NS_STYLE_COUNTER_SYSTEM_ALPHABETIC => SymbolsType::Alphabetic,
structs::NS_STYLE_COUNTER_SYSTEM_SYMBOLIC => SymbolsType::Symbolic,
structs::NS_STYLE_COUNTER_SYSTEM_FIXED => SymbolsType::Fixed,
x => panic!("Unexpected value for symbol type {}", x)
}
}
}
/// https://drafts.csswg.org/css-counter-styles/#typedef-counter-style
///
/// Since wherever <counter-style> is used, 'none' is a valid value as
/// well, we combine them into one type to make code simpler.
#[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss)]
pub enum CounterStyleOrNone {
/// `none`
None,
/// `<counter-style-name>`
Name(CustomIdent),
/// `symbols()`
#[css(function)]
Symbols(SymbolsType, Symbols),
}
impl CounterStyleOrNone {
/// disc value
pub fn disc() -> Self {
CounterStyleOrNone::Name(CustomIdent(atom!("disc")))
}
/// decimal value
pub fn decimal() -> Self {
CounterStyleOrNone::Name(CustomIdent(atom!("decimal")))
}
}
impl Parse for CounterStyleOrNone {
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if let Ok(name) = input.try(|i| parse_counter_style_name(i)) {
return Ok(CounterStyleOrNone::Name(name));
}
if input.try(|i| i.expect_ident_matching("none")).is_ok() {
return Ok(CounterStyleOrNone::None);
}
if input.try(|i| i.expect_function_matching("symbols")).is_ok() {
return input.parse_nested_block(|input| {
let symbols_type = input.try(|i| SymbolsType::parse(i))
.unwrap_or(SymbolsType::Symbolic);
let symbols = Symbols::parse(context, input)?;
// There must be at least two symbols for alphabetic or
// numeric system.
if (symbols_type == SymbolsType::Alphabetic ||
symbols_type == SymbolsType::Numeric) && symbols.0.len() < 2 {
return Err(StyleParseError::UnspecifiedError.into());
}
// Identifier is not allowed in symbols() function.
if symbols.0.iter().any(|sym|!sym.is_allowed_in_symbols()) {
return Err(StyleParseError::UnspecifiedError.into());
}
Ok(CounterStyleOrNone::Symbols(symbols_type, symbols))
});
}
Err(StyleParseError::UnspecifiedError.into())
}
}
/// A settings tag, defined by a four-character tag and a setting value
///
/// For font-feature-settings, this is a tag and an integer,
/// for font-variation-settings this is a tag and a float
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, Eq, PartialEq, ToComputedValue)]
pub struct FontSettingTag<T> {
/// A four-character tag, packed into a u32 (one byte per character)
pub tag: u32,
/// The value
pub value: T,
}
impl<T> OneOrMoreSeparated for FontSettingTag<T> {
type S = Comma;
}
impl<T: ToCss> ToCss for FontSettingTag<T> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
use byteorder::{BigEndian, ByteOrder};
use std::str;
let mut raw = [0u8; 4];
BigEndian::write_u32(&mut raw, self.tag);
str::from_utf8(&raw).unwrap_or_default().to_css(dest)?;
self.value.to_css(dest)
}
}
impl<T: Parse> Parse for FontSettingTag<T> {
/// https://www.w3.org/TR/css-fonts-3/#propdef-font-feature-settings
/// https://drafts.csswg.org/css-fonts-4/#low-level-font-variation-
/// settings-control-the-font-variation-settings-property
/// <string> [ on | off | <integer> ]
/// <string> <number>
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
use byteorder::{ReadBytesExt, BigEndian};
use std::io::Cursor;
let u_tag;
{
let tag = input.expect_string()?;
// allowed strings of length 4 containing chars: <U+20, U+7E>
if tag.len()!= 4 ||
tag.chars().any(|c| c <'' || c > '~')
{
return Err(StyleParseError::UnspecifiedError.into())
}
let mut raw = Cursor::new(tag.as_bytes());
u_tag = raw.read_u32::<BigEndian>().unwrap();
}
Ok(FontSettingTag { tag: u_tag, value: T::parse(context, input)? })
}
}
/// A font settings value for font-variation-settings or font-feature-settings
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss)]
pub enum FontSettings<T> {
/// No settings (default)
Normal,
/// Set of settings
Tag(Vec<FontSettingTag<T>>)
}
impl<T: Parse> Parse for FontSettings<T> {
/// https://www.w3.org/TR/css-fonts-3/#propdef-font-feature-settings
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if input.try(|i| i.expect_ident_matching("normal")).is_ok() {
return Ok(FontSettings::Normal);
}
Vec::parse(context, input).map(FontSettings::Tag)
}
}
/// An integer that can also parse "on" and "off",
/// for font-feature-settings
///
/// Do not use this type anywhere except within FontSettings
/// because it serializes with the preceding space
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, Eq, PartialEq, ToComputedValue)]
pub struct FontSettingTagInt(pub u32);
/// A number value to be used for font-variation-settings
///
/// Do not use this type anywhere except within FontSettings
/// because it serializes with the preceding space
#[cfg_attr(feature = "gecko", derive(Animate, ComputeSquaredDistance))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToComputedValue)]
pub struct FontSettingTagFloat(pub f32);
impl ToCss for FontSettingTagInt {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match self.0 {
1 => Ok(()),
0 => dest.write_str(" off"),
x => {
dest.write_char(' ')?;
x.to_css(dest)
}
}
}
}
impl Parse for FontSettingTagInt {
fn parse<'i, 't>(_context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if let Ok(value) = input.try(|input| input.expect_integer()) {
// handle integer, throw if it is negative
if value >= 0 {
Ok(FontSettingTagInt(value as u32))
} else {
Err(StyleParseError::UnspecifiedError.into())
}
} else if let Ok(_) = input.try(|input| input.expect_ident_matching("on")) {
// on is an alias for '1'
Ok(FontSettingTagInt(1))
} else if let Ok(_) = input.try(|input| input.expect_ident_matching("off")) {
// off is an alias for '0'
Ok(FontSettingTagInt(0))
} else {
// empty value is an alias for '1'
Ok(FontSettingTagInt(1))
}
}
}
impl Parse for FontSettingTagFloat {
fn parse<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>>
|
}
impl ToCss for FontSettingTagFloat {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
dest.write_str(" ")?;
self.0.to_css(dest)
}
}
/// A wrapper of Non-negative values.
#[cfg_attr(feature = "servo", derive(Deserialize, HeapSizeOf, Serialize))]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug)]
#[derive(PartialEq, PartialOrd, ToAnimatedZero, ToComputedValue, ToCss)]
pub struct NonNegative<T>(pub T);
/// A wrapper of greater-than-or-equal-to-one values.
#[cfg_attr(feature = "servo", derive(Deserialize, HeapSizeOf, Serialize))]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug)]
#[derive(PartialEq, PartialOrd, ToAnimatedZero, ToComputedValue, ToCss)]
pub struct GreaterThanOrEqualToOne<T>(pub T);
|
{
input.expect_number().map(FontSettingTagFloat).map_err(|e| e.into())
}
|
identifier_body
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types that share their serialization implementations
//! for both specified and computed values.
use counter_style::{Symbols, parse_counter_style_name};
use cssparser::Parser;
use parser::{Parse, ParserContext};
use std::fmt;
use style_traits::{Comma, OneOrMoreSeparated, ParseError, StyleParseError, ToCss};
use super::CustomIdent;
pub mod background;
pub mod basic_shape;
pub mod border;
#[path = "box.rs"]
pub mod box_;
pub mod effects;
pub mod flex;
#[cfg(feature = "gecko")]
pub mod gecko;
pub mod grid;
pub mod image;
pub mod position;
pub mod rect;
pub mod svg;
pub mod text;
pub mod transform;
// https://drafts.csswg.org/css-counter-styles/#typedef-symbols-type
define_css_keyword_enum! { SymbolsType:
"cyclic" => Cyclic,
"numeric" => Numeric,
"alphabetic" => Alphabetic,
"symbolic" => Symbolic,
"fixed" => Fixed,
}
add_impls_for_keyword_enum!(SymbolsType);
#[cfg(feature = "gecko")]
impl SymbolsType {
/// Convert symbols type to their corresponding Gecko values.
pub fn to_gecko_keyword(self) -> u8 {
use gecko_bindings::structs;
match self {
SymbolsType::Cyclic => structs::NS_STYLE_COUNTER_SYSTEM_CYCLIC as u8,
SymbolsType::Numeric => structs::NS_STYLE_COUNTER_SYSTEM_NUMERIC as u8,
SymbolsType::Alphabetic => structs::NS_STYLE_COUNTER_SYSTEM_ALPHABETIC as u8,
SymbolsType::Symbolic => structs::NS_STYLE_COUNTER_SYSTEM_SYMBOLIC as u8,
SymbolsType::Fixed => structs::NS_STYLE_COUNTER_SYSTEM_FIXED as u8,
}
}
/// Convert Gecko value to symbol type.
pub fn from_gecko_keyword(gecko_value: u32) -> SymbolsType {
use gecko_bindings::structs;
match gecko_value {
structs::NS_STYLE_COUNTER_SYSTEM_CYCLIC => SymbolsType::Cyclic,
structs::NS_STYLE_COUNTER_SYSTEM_NUMERIC => SymbolsType::Numeric,
structs::NS_STYLE_COUNTER_SYSTEM_ALPHABETIC => SymbolsType::Alphabetic,
structs::NS_STYLE_COUNTER_SYSTEM_SYMBOLIC => SymbolsType::Symbolic,
structs::NS_STYLE_COUNTER_SYSTEM_FIXED => SymbolsType::Fixed,
x => panic!("Unexpected value for symbol type {}", x)
}
}
}
/// https://drafts.csswg.org/css-counter-styles/#typedef-counter-style
///
/// Since wherever <counter-style> is used, 'none' is a valid value as
/// well, we combine them into one type to make code simpler.
#[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss)]
pub enum CounterStyleOrNone {
/// `none`
None,
/// `<counter-style-name>`
Name(CustomIdent),
/// `symbols()`
#[css(function)]
Symbols(SymbolsType, Symbols),
}
impl CounterStyleOrNone {
/// disc value
pub fn disc() -> Self {
CounterStyleOrNone::Name(CustomIdent(atom!("disc")))
}
/// decimal value
pub fn decimal() -> Self {
CounterStyleOrNone::Name(CustomIdent(atom!("decimal")))
}
}
impl Parse for CounterStyleOrNone {
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if let Ok(name) = input.try(|i| parse_counter_style_name(i)) {
return Ok(CounterStyleOrNone::Name(name));
}
if input.try(|i| i.expect_ident_matching("none")).is_ok() {
return Ok(CounterStyleOrNone::None);
}
if input.try(|i| i.expect_function_matching("symbols")).is_ok() {
return input.parse_nested_block(|input| {
let symbols_type = input.try(|i| SymbolsType::parse(i))
.unwrap_or(SymbolsType::Symbolic);
let symbols = Symbols::parse(context, input)?;
// There must be at least two symbols for alphabetic or
// numeric system.
if (symbols_type == SymbolsType::Alphabetic ||
symbols_type == SymbolsType::Numeric) && symbols.0.len() < 2 {
return Err(StyleParseError::UnspecifiedError.into());
}
// Identifier is not allowed in symbols() function.
if symbols.0.iter().any(|sym|!sym.is_allowed_in_symbols()) {
return Err(StyleParseError::UnspecifiedError.into());
}
Ok(CounterStyleOrNone::Symbols(symbols_type, symbols))
});
}
Err(StyleParseError::UnspecifiedError.into())
}
}
/// A settings tag, defined by a four-character tag and a setting value
///
/// For font-feature-settings, this is a tag and an integer,
/// for font-variation-settings this is a tag and a float
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, Eq, PartialEq, ToComputedValue)]
pub struct FontSettingTag<T> {
/// A four-character tag, packed into a u32 (one byte per character)
pub tag: u32,
/// The value
pub value: T,
}
impl<T> OneOrMoreSeparated for FontSettingTag<T> {
type S = Comma;
}
impl<T: ToCss> ToCss for FontSettingTag<T> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
use byteorder::{BigEndian, ByteOrder};
use std::str;
let mut raw = [0u8; 4];
BigEndian::write_u32(&mut raw, self.tag);
str::from_utf8(&raw).unwrap_or_default().to_css(dest)?;
self.value.to_css(dest)
}
}
impl<T: Parse> Parse for FontSettingTag<T> {
/// https://www.w3.org/TR/css-fonts-3/#propdef-font-feature-settings
/// https://drafts.csswg.org/css-fonts-4/#low-level-font-variation-
/// settings-control-the-font-variation-settings-property
/// <string> [ on | off | <integer> ]
/// <string> <number>
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
use byteorder::{ReadBytesExt, BigEndian};
use std::io::Cursor;
let u_tag;
{
let tag = input.expect_string()?;
// allowed strings of length 4 containing chars: <U+20, U+7E>
if tag.len()!= 4 ||
tag.chars().any(|c| c <'' || c > '~')
{
return Err(StyleParseError::UnspecifiedError.into())
}
let mut raw = Cursor::new(tag.as_bytes());
u_tag = raw.read_u32::<BigEndian>().unwrap();
}
Ok(FontSettingTag { tag: u_tag, value: T::parse(context, input)? })
}
}
/// A font settings value for font-variation-settings or font-feature-settings
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss)]
pub enum FontSettings<T> {
/// No settings (default)
Normal,
/// Set of settings
Tag(Vec<FontSettingTag<T>>)
}
impl<T: Parse> Parse for FontSettings<T> {
/// https://www.w3.org/TR/css-fonts-3/#propdef-font-feature-settings
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
|
if input.try(|i| i.expect_ident_matching("normal")).is_ok() {
return Ok(FontSettings::Normal);
}
Vec::parse(context, input).map(FontSettings::Tag)
}
}
/// An integer that can also parse "on" and "off",
/// for font-feature-settings
///
/// Do not use this type anywhere except within FontSettings
/// because it serializes with the preceding space
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, Eq, PartialEq, ToComputedValue)]
pub struct FontSettingTagInt(pub u32);
/// A number value to be used for font-variation-settings
///
/// Do not use this type anywhere except within FontSettings
/// because it serializes with the preceding space
#[cfg_attr(feature = "gecko", derive(Animate, ComputeSquaredDistance))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToComputedValue)]
pub struct FontSettingTagFloat(pub f32);
impl ToCss for FontSettingTagInt {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match self.0 {
1 => Ok(()),
0 => dest.write_str(" off"),
x => {
dest.write_char(' ')?;
x.to_css(dest)
}
}
}
}
impl Parse for FontSettingTagInt {
fn parse<'i, 't>(_context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if let Ok(value) = input.try(|input| input.expect_integer()) {
// handle integer, throw if it is negative
if value >= 0 {
Ok(FontSettingTagInt(value as u32))
} else {
Err(StyleParseError::UnspecifiedError.into())
}
} else if let Ok(_) = input.try(|input| input.expect_ident_matching("on")) {
// on is an alias for '1'
Ok(FontSettingTagInt(1))
} else if let Ok(_) = input.try(|input| input.expect_ident_matching("off")) {
// off is an alias for '0'
Ok(FontSettingTagInt(0))
} else {
// empty value is an alias for '1'
Ok(FontSettingTagInt(1))
}
}
}
impl Parse for FontSettingTagFloat {
fn parse<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
input.expect_number().map(FontSettingTagFloat).map_err(|e| e.into())
}
}
impl ToCss for FontSettingTagFloat {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
dest.write_str(" ")?;
self.0.to_css(dest)
}
}
/// A wrapper of Non-negative values.
#[cfg_attr(feature = "servo", derive(Deserialize, HeapSizeOf, Serialize))]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug)]
#[derive(PartialEq, PartialOrd, ToAnimatedZero, ToComputedValue, ToCss)]
pub struct NonNegative<T>(pub T);
/// A wrapper of greater-than-or-equal-to-one values.
#[cfg_attr(feature = "servo", derive(Deserialize, HeapSizeOf, Serialize))]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug)]
#[derive(PartialEq, PartialOrd, ToAnimatedZero, ToComputedValue, ToCss)]
pub struct GreaterThanOrEqualToOne<T>(pub T);
|
random_line_split
|
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types that share their serialization implementations
//! for both specified and computed values.
use counter_style::{Symbols, parse_counter_style_name};
use cssparser::Parser;
use parser::{Parse, ParserContext};
use std::fmt;
use style_traits::{Comma, OneOrMoreSeparated, ParseError, StyleParseError, ToCss};
use super::CustomIdent;
pub mod background;
pub mod basic_shape;
pub mod border;
#[path = "box.rs"]
pub mod box_;
pub mod effects;
pub mod flex;
#[cfg(feature = "gecko")]
pub mod gecko;
pub mod grid;
pub mod image;
pub mod position;
pub mod rect;
pub mod svg;
pub mod text;
pub mod transform;
// https://drafts.csswg.org/css-counter-styles/#typedef-symbols-type
define_css_keyword_enum! { SymbolsType:
"cyclic" => Cyclic,
"numeric" => Numeric,
"alphabetic" => Alphabetic,
"symbolic" => Symbolic,
"fixed" => Fixed,
}
add_impls_for_keyword_enum!(SymbolsType);
#[cfg(feature = "gecko")]
impl SymbolsType {
/// Convert symbols type to their corresponding Gecko values.
pub fn to_gecko_keyword(self) -> u8 {
use gecko_bindings::structs;
match self {
SymbolsType::Cyclic => structs::NS_STYLE_COUNTER_SYSTEM_CYCLIC as u8,
SymbolsType::Numeric => structs::NS_STYLE_COUNTER_SYSTEM_NUMERIC as u8,
SymbolsType::Alphabetic => structs::NS_STYLE_COUNTER_SYSTEM_ALPHABETIC as u8,
SymbolsType::Symbolic => structs::NS_STYLE_COUNTER_SYSTEM_SYMBOLIC as u8,
SymbolsType::Fixed => structs::NS_STYLE_COUNTER_SYSTEM_FIXED as u8,
}
}
/// Convert Gecko value to symbol type.
pub fn from_gecko_keyword(gecko_value: u32) -> SymbolsType {
use gecko_bindings::structs;
match gecko_value {
structs::NS_STYLE_COUNTER_SYSTEM_CYCLIC => SymbolsType::Cyclic,
structs::NS_STYLE_COUNTER_SYSTEM_NUMERIC => SymbolsType::Numeric,
structs::NS_STYLE_COUNTER_SYSTEM_ALPHABETIC => SymbolsType::Alphabetic,
structs::NS_STYLE_COUNTER_SYSTEM_SYMBOLIC => SymbolsType::Symbolic,
structs::NS_STYLE_COUNTER_SYSTEM_FIXED => SymbolsType::Fixed,
x => panic!("Unexpected value for symbol type {}", x)
}
}
}
/// https://drafts.csswg.org/css-counter-styles/#typedef-counter-style
///
/// Since wherever <counter-style> is used, 'none' is a valid value as
/// well, we combine them into one type to make code simpler.
#[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss)]
pub enum CounterStyleOrNone {
/// `none`
None,
/// `<counter-style-name>`
Name(CustomIdent),
/// `symbols()`
#[css(function)]
Symbols(SymbolsType, Symbols),
}
impl CounterStyleOrNone {
/// disc value
pub fn disc() -> Self {
CounterStyleOrNone::Name(CustomIdent(atom!("disc")))
}
/// decimal value
pub fn decimal() -> Self {
CounterStyleOrNone::Name(CustomIdent(atom!("decimal")))
}
}
impl Parse for CounterStyleOrNone {
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if let Ok(name) = input.try(|i| parse_counter_style_name(i)) {
return Ok(CounterStyleOrNone::Name(name));
}
if input.try(|i| i.expect_ident_matching("none")).is_ok() {
return Ok(CounterStyleOrNone::None);
}
if input.try(|i| i.expect_function_matching("symbols")).is_ok() {
return input.parse_nested_block(|input| {
let symbols_type = input.try(|i| SymbolsType::parse(i))
.unwrap_or(SymbolsType::Symbolic);
let symbols = Symbols::parse(context, input)?;
// There must be at least two symbols for alphabetic or
// numeric system.
if (symbols_type == SymbolsType::Alphabetic ||
symbols_type == SymbolsType::Numeric) && symbols.0.len() < 2 {
return Err(StyleParseError::UnspecifiedError.into());
}
// Identifier is not allowed in symbols() function.
if symbols.0.iter().any(|sym|!sym.is_allowed_in_symbols()) {
return Err(StyleParseError::UnspecifiedError.into());
}
Ok(CounterStyleOrNone::Symbols(symbols_type, symbols))
});
}
Err(StyleParseError::UnspecifiedError.into())
}
}
/// A settings tag, defined by a four-character tag and a setting value
///
/// For font-feature-settings, this is a tag and an integer,
/// for font-variation-settings this is a tag and a float
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, Eq, PartialEq, ToComputedValue)]
pub struct FontSettingTag<T> {
/// A four-character tag, packed into a u32 (one byte per character)
pub tag: u32,
/// The value
pub value: T,
}
impl<T> OneOrMoreSeparated for FontSettingTag<T> {
type S = Comma;
}
impl<T: ToCss> ToCss for FontSettingTag<T> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
use byteorder::{BigEndian, ByteOrder};
use std::str;
let mut raw = [0u8; 4];
BigEndian::write_u32(&mut raw, self.tag);
str::from_utf8(&raw).unwrap_or_default().to_css(dest)?;
self.value.to_css(dest)
}
}
impl<T: Parse> Parse for FontSettingTag<T> {
/// https://www.w3.org/TR/css-fonts-3/#propdef-font-feature-settings
/// https://drafts.csswg.org/css-fonts-4/#low-level-font-variation-
/// settings-control-the-font-variation-settings-property
/// <string> [ on | off | <integer> ]
/// <string> <number>
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
use byteorder::{ReadBytesExt, BigEndian};
use std::io::Cursor;
let u_tag;
{
let tag = input.expect_string()?;
// allowed strings of length 4 containing chars: <U+20, U+7E>
if tag.len()!= 4 ||
tag.chars().any(|c| c <'' || c > '~')
{
return Err(StyleParseError::UnspecifiedError.into())
}
let mut raw = Cursor::new(tag.as_bytes());
u_tag = raw.read_u32::<BigEndian>().unwrap();
}
Ok(FontSettingTag { tag: u_tag, value: T::parse(context, input)? })
}
}
/// A font settings value for font-variation-settings or font-feature-settings
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss)]
pub enum FontSettings<T> {
/// No settings (default)
Normal,
/// Set of settings
Tag(Vec<FontSettingTag<T>>)
}
impl<T: Parse> Parse for FontSettings<T> {
/// https://www.w3.org/TR/css-fonts-3/#propdef-font-feature-settings
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if input.try(|i| i.expect_ident_matching("normal")).is_ok() {
return Ok(FontSettings::Normal);
}
Vec::parse(context, input).map(FontSettings::Tag)
}
}
/// An integer that can also parse "on" and "off",
/// for font-feature-settings
///
/// Do not use this type anywhere except within FontSettings
/// because it serializes with the preceding space
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, Eq, PartialEq, ToComputedValue)]
pub struct FontSettingTagInt(pub u32);
/// A number value to be used for font-variation-settings
///
/// Do not use this type anywhere except within FontSettings
/// because it serializes with the preceding space
#[cfg_attr(feature = "gecko", derive(Animate, ComputeSquaredDistance))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToComputedValue)]
pub struct FontSettingTagFloat(pub f32);
impl ToCss for FontSettingTagInt {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match self.0 {
1 => Ok(()),
0 => dest.write_str(" off"),
x => {
dest.write_char(' ')?;
x.to_css(dest)
}
}
}
}
impl Parse for FontSettingTagInt {
fn parse<'i, 't>(_context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if let Ok(value) = input.try(|input| input.expect_integer()) {
// handle integer, throw if it is negative
if value >= 0 {
Ok(FontSettingTagInt(value as u32))
} else {
Err(StyleParseError::UnspecifiedError.into())
}
} else if let Ok(_) = input.try(|input| input.expect_ident_matching("on")) {
// on is an alias for '1'
Ok(FontSettingTagInt(1))
} else if let Ok(_) = input.try(|input| input.expect_ident_matching("off")) {
// off is an alias for '0'
Ok(FontSettingTagInt(0))
} else {
// empty value is an alias for '1'
Ok(FontSettingTagInt(1))
}
}
}
impl Parse for FontSettingTagFloat {
fn
|
<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
input.expect_number().map(FontSettingTagFloat).map_err(|e| e.into())
}
}
impl ToCss for FontSettingTagFloat {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
dest.write_str(" ")?;
self.0.to_css(dest)
}
}
/// A wrapper of Non-negative values.
#[cfg_attr(feature = "servo", derive(Deserialize, HeapSizeOf, Serialize))]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug)]
#[derive(PartialEq, PartialOrd, ToAnimatedZero, ToComputedValue, ToCss)]
pub struct NonNegative<T>(pub T);
/// A wrapper of greater-than-or-equal-to-one values.
#[cfg_attr(feature = "servo", derive(Deserialize, HeapSizeOf, Serialize))]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug)]
#[derive(PartialEq, PartialOrd, ToAnimatedZero, ToComputedValue, ToCss)]
pub struct GreaterThanOrEqualToOne<T>(pub T);
|
parse
|
identifier_name
|
notestoreid.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use libimagstore::storeid::StoreId;
pub trait NoteStoreId {
fn is_note_id(&self) -> bool;
}
impl NoteStoreId for StoreId {
fn is_note_id(&self) -> bool
|
}
|
{
self.is_in_collection(&["notes"])
}
|
identifier_body
|
notestoreid.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use libimagstore::storeid::StoreId;
pub trait NoteStoreId {
fn is_note_id(&self) -> bool;
}
impl NoteStoreId for StoreId {
fn
|
(&self) -> bool {
self.is_in_collection(&["notes"])
}
}
|
is_note_id
|
identifier_name
|
notestoreid.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use libimagstore::storeid::StoreId;
|
impl NoteStoreId for StoreId {
fn is_note_id(&self) -> bool {
self.is_in_collection(&["notes"])
}
}
|
pub trait NoteStoreId {
fn is_note_id(&self) -> bool;
}
|
random_line_split
|
text.rs
|
// #[cfg(all(test, has_display))]
// use crate::*;
// use crate::tests;
// use std::env;
// use std::path;
/* TODO; the font API has changed and I don't want to deal with it now
#[test]
fn test_calculated_text_width() {
let ctx = &mut make_context();
let font = graphics::Font::default();
let text = "Hello There";
let expected_width = font.width(text);
let rendered_width = graphics::Text::new((text, font, 24)).unwrap().width();
println!("Text: {:?}, expected: {}, rendered: {}", text, expected_width, rendered_width);
assert_eq!(expected_width as usize, rendered_width as usize);
}
#[test]
fn test_monospace_text_is_actually_monospace() {
let ctx = &mut make_context();
let font = graphics::Font::new(ctx, "/DejaVuSansMono.ttf");
let text1 = "Hello 1";
let text2 = "Hello 2";
let text3 = "Hello 3";
let text4 = "Hello 4";
|
let width3 = font.width(text3);
let width4 = font.width(text4);
assert_eq!(width1, width2);
assert_eq!(width2, width3);
assert_eq!(width3, width4);
}
*/
|
let width1 = font.width(text1);
let width2 = font.width(text2);
|
random_line_split
|
gaussian_mutation.rs
|
/// Performs gaussian mutation on solution parameters
///
/// Performs gaussian mutation on real-valued solution parameters,
/// currently hard-coded for 30 problem variables for the ZDT1
/// synthetic test function.
extern crate rand;
use rand::{random, thread_rng, Rng};
// bounds hard-coded for ZDT1 i.e. (0,1)
pub fn gaussian_mutation(mut parameters: [f32; 30], mutation_rate: f32) -> [f32; 30]
|
{
let std = 1_f32 - 0_f32 / 10_f32;
for parameter in &mut parameters[..] {
if random::<f32>() <= mutation_rate {
let mutation = thread_rng().gen_range(-1.0f32, 1.0f32) * std;
*parameter = *parameter + mutation;
// Enforce bounds
*parameter = f32::max(*parameter, 0_f32);
*parameter = f32::min(*parameter, 1_f32);
}
}
return parameters;
}
|
identifier_body
|
|
gaussian_mutation.rs
|
/// Performs gaussian mutation on solution parameters
///
/// Performs gaussian mutation on real-valued solution parameters,
/// currently hard-coded for 30 problem variables for the ZDT1
/// synthetic test function.
extern crate rand;
use rand::{random, thread_rng, Rng};
// bounds hard-coded for ZDT1 i.e. (0,1)
pub fn
|
(mut parameters: [f32; 30], mutation_rate: f32) -> [f32; 30] {
let std = 1_f32 - 0_f32 / 10_f32;
for parameter in &mut parameters[..] {
if random::<f32>() <= mutation_rate {
let mutation = thread_rng().gen_range(-1.0f32, 1.0f32) * std;
*parameter = *parameter + mutation;
// Enforce bounds
*parameter = f32::max(*parameter, 0_f32);
*parameter = f32::min(*parameter, 1_f32);
}
}
return parameters;
}
|
gaussian_mutation
|
identifier_name
|
gaussian_mutation.rs
|
/// Performs gaussian mutation on solution parameters
///
/// Performs gaussian mutation on real-valued solution parameters,
/// currently hard-coded for 30 problem variables for the ZDT1
/// synthetic test function.
extern crate rand;
use rand::{random, thread_rng, Rng};
// bounds hard-coded for ZDT1 i.e. (0,1)
pub fn gaussian_mutation(mut parameters: [f32; 30], mutation_rate: f32) -> [f32; 30] {
let std = 1_f32 - 0_f32 / 10_f32;
for parameter in &mut parameters[..] {
if random::<f32>() <= mutation_rate {
let mutation = thread_rng().gen_range(-1.0f32, 1.0f32) * std;
*parameter = *parameter + mutation;
// Enforce bounds
*parameter = f32::max(*parameter, 0_f32);
|
return parameters;
}
|
*parameter = f32::min(*parameter, 1_f32);
}
}
|
random_line_split
|
gaussian_mutation.rs
|
/// Performs gaussian mutation on solution parameters
///
/// Performs gaussian mutation on real-valued solution parameters,
/// currently hard-coded for 30 problem variables for the ZDT1
/// synthetic test function.
extern crate rand;
use rand::{random, thread_rng, Rng};
// bounds hard-coded for ZDT1 i.e. (0,1)
pub fn gaussian_mutation(mut parameters: [f32; 30], mutation_rate: f32) -> [f32; 30] {
let std = 1_f32 - 0_f32 / 10_f32;
for parameter in &mut parameters[..] {
if random::<f32>() <= mutation_rate
|
}
return parameters;
}
|
{
let mutation = thread_rng().gen_range(-1.0f32, 1.0f32) * std;
*parameter = *parameter + mutation;
// Enforce bounds
*parameter = f32::max(*parameter, 0_f32);
*parameter = f32::min(*parameter, 1_f32);
}
|
conditional_block
|
privacy-ns1.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check we do the correct privacy checks when we import a name and there is an
// item with that name in both the value and type namespaces.
#![allow(dead_code)]
#![allow(unused_imports)]
// public type, private value
pub mod foo1 {
pub trait Bar {
}
pub struct Baz;
fn Bar() { }
}
fn test_glob1() {
use foo1::*;
Bar(); //~ ERROR unresolved name `Bar`
}
// private type, public value
pub mod foo2 {
trait Bar {
}
pub struct
|
;
pub fn Bar() { }
}
fn test_glob2() {
use foo2::*;
let _x: Box<Bar>; //~ ERROR use of undeclared type name `Bar`
}
// neither public
pub mod foo3 {
trait Bar {
}
pub struct Baz;
fn Bar() { }
}
fn test_glob3() {
use foo3::*;
Bar(); //~ ERROR unresolved name `Bar`
let _x: Box<Bar>; //~ ERROR use of undeclared type name `Bar`
}
fn main() {
}
|
Baz
|
identifier_name
|
privacy-ns1.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check we do the correct privacy checks when we import a name and there is an
// item with that name in both the value and type namespaces.
#![allow(dead_code)]
#![allow(unused_imports)]
// public type, private value
pub mod foo1 {
pub trait Bar {
}
pub struct Baz;
fn Bar() { }
}
fn test_glob1() {
use foo1::*;
Bar(); //~ ERROR unresolved name `Bar`
}
// private type, public value
pub mod foo2 {
trait Bar {
}
pub struct Baz;
pub fn Bar()
|
}
fn test_glob2() {
use foo2::*;
let _x: Box<Bar>; //~ ERROR use of undeclared type name `Bar`
}
// neither public
pub mod foo3 {
trait Bar {
}
pub struct Baz;
fn Bar() { }
}
fn test_glob3() {
use foo3::*;
Bar(); //~ ERROR unresolved name `Bar`
let _x: Box<Bar>; //~ ERROR use of undeclared type name `Bar`
}
fn main() {
}
|
{ }
|
identifier_body
|
privacy-ns1.rs
|
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check we do the correct privacy checks when we import a name and there is an
// item with that name in both the value and type namespaces.
#![allow(dead_code)]
#![allow(unused_imports)]
// public type, private value
pub mod foo1 {
pub trait Bar {
}
pub struct Baz;
fn Bar() { }
}
fn test_glob1() {
use foo1::*;
Bar(); //~ ERROR unresolved name `Bar`
}
// private type, public value
pub mod foo2 {
trait Bar {
}
pub struct Baz;
pub fn Bar() { }
}
fn test_glob2() {
use foo2::*;
let _x: Box<Bar>; //~ ERROR use of undeclared type name `Bar`
}
// neither public
pub mod foo3 {
trait Bar {
}
pub struct Baz;
fn Bar() { }
}
fn test_glob3() {
use foo3::*;
Bar(); //~ ERROR unresolved name `Bar`
let _x: Box<Bar>; //~ ERROR use of undeclared type name `Bar`
}
fn main() {
}
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
random_line_split
|
|
text.rs
|
::with_capacity(run_info_list.len());
for run_info in run_info_list {
let mut options = options;
options.script = run_info.script;
if run_info.bidi_level.is_rtl() {
options.flags.insert(ShapingFlags::RTL_FLAG);
}
let mut font = fontgroup.fonts.get(run_info.font_index).unwrap().borrow_mut();
let (run, break_at_zero) = TextRun::new(&mut *font,
run_info.text,
&options,
run_info.bidi_level,
linebreaker);
result.push((ScannedTextRun {
run: Arc::new(run),
insertion_point: run_info.insertion_point,
}, break_at_zero))
}
result
};
// Make new fragments with the runs and adjusted text indices.
debug!("TextRunScanner: pushing {} fragment(s)", self.clump.len());
let mut mappings = mappings.into_iter().peekable();
let mut prev_fragments_to_meld = Vec::new();
for (logical_offset, old_fragment) in
mem::replace(&mut self.clump, LinkedList::new()).into_iter().enumerate() {
let mut is_first_mapping_of_this_old_fragment = true;
loop {
match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => {}
Some(_) | None => {
if is_first_mapping_of_this_old_fragment {
// There were no mappings for this unscanned fragment. Transfer its
// flags to the previous/next sibling elements instead.
if let Some(ref mut last_fragment) = out_fragments.last_mut() {
last_fragment.meld_with_next_inline_fragment(&old_fragment);
}
prev_fragments_to_meld.push(old_fragment);
}
break;
}
};
let mapping = mappings.next().unwrap();
let (scanned_run, break_at_zero) = runs[mapping.text_run_index].clone();
let mut byte_range = Range::new(ByteIndex(mapping.byte_range.begin() as isize),
ByteIndex(mapping.byte_range.length() as isize));
let mut flags = ScannedTextFlags::empty();
if!break_at_zero && mapping.byte_range.begin() == 0 {
// If this is the first segment of the text run,
// and the text run doesn't break at zero, suppress line breaks
flags.insert(ScannedTextFlags::SUPPRESS_LINE_BREAK_BEFORE)
}
let text_size = old_fragment.border_box.size;
let requires_line_break_afterward_if_wrapping_on_newlines =
scanned_run.run.text[mapping.byte_range.begin()..mapping.byte_range.end()]
.ends_with('\n');
if requires_line_break_afterward_if_wrapping_on_newlines {
byte_range.extend_by(ByteIndex(-1)); // Trim the '\n'
flags.insert(ScannedTextFlags::REQUIRES_LINE_BREAK_AFTERWARD_IF_WRAPPING_ON_NEWLINES);
}
if mapping.selected {
flags.insert(ScannedTextFlags::SELECTED);
}
let insertion_point = if mapping.contains_insertion_point(scanned_run.insertion_point) {
scanned_run.insertion_point
} else {
None
};
let mut new_text_fragment_info = Box::new(ScannedTextFragmentInfo::new(
scanned_run.run,
byte_range,
text_size,
insertion_point,
flags
));
let new_metrics = new_text_fragment_info.run.metrics_for_range(&byte_range);
let writing_mode = old_fragment.style.writing_mode;
let bounding_box_size = bounding_box_for_run_metrics(&new_metrics, writing_mode);
new_text_fragment_info.content_size = bounding_box_size;
let mut new_fragment = old_fragment.transform(
bounding_box_size,
SpecificFragmentInfo::ScannedText(new_text_fragment_info));
let is_last_mapping_of_this_old_fragment = match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => false,
_ => true
};
if let Some(ref mut context) = new_fragment.inline_context {
for node in &mut context.nodes {
if!is_last_mapping_of_this_old_fragment {
node.flags.remove(InlineFragmentNodeFlags::LAST_FRAGMENT_OF_ELEMENT);
}
if!is_first_mapping_of_this_old_fragment {
node.flags.remove(InlineFragmentNodeFlags::FIRST_FRAGMENT_OF_ELEMENT);
}
}
}
for prev_fragment in prev_fragments_to_meld.drain(..) {
new_fragment.meld_with_prev_inline_fragment(&prev_fragment);
}
is_first_mapping_of_this_old_fragment = false;
out_fragments.push(new_fragment)
}
}
last_whitespace
}
}
#[inline]
fn bounding_box_for_run_metrics(metrics: &RunMetrics, writing_mode: WritingMode)
-> LogicalSize<Au> {
// TODO: When the text-orientation property is supported, the block and inline directions may
// be swapped for horizontal glyphs in vertical lines.
LogicalSize::new(
writing_mode,
metrics.bounding_box.size.width,
metrics.bounding_box.size.height)
}
/// Returns the metrics of the font represented by the given `style_structs::Font`, respectively.
///
/// `#[inline]` because often the caller only needs a few fields from the font metrics.
#[inline]
pub fn font_metrics_for_style(font_context: &mut FontContext, font_style: ::ServoArc<style_structs::Font>)
-> FontMetrics {
let fontgroup = font_context.layout_font_group_for_style(font_style);
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let font = fontgroup.fonts[0].borrow();
font.metrics.clone()
}
/// Returns the line block-size needed by the given computed style and font size.
pub fn line_height_from_style(style: &ComputedValues, metrics: &FontMetrics) -> Au {
let font_size = style.get_font().font_size.size();
match style.get_inheritedtext().line_height {
LineHeight::Normal => Au::from(metrics.line_gap),
LineHeight::Number(l) => font_size.scale_by(l.0),
LineHeight::Length(l) => Au::from(l)
}
}
fn split_first_fragment_at_newline_if_necessary(fragments: &mut LinkedList<Fragment>) {
if fragments.is_empty() {
return
}
let new_fragment = {
let first_fragment = fragments.front_mut().unwrap();
let string_before;
let selection_before;
{
if!first_fragment.white_space().preserve_newlines() {
return;
}
let unscanned_text_fragment_info = match first_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
unscanned_text_fragment_info
}
_ => return,
};
let position = match unscanned_text_fragment_info.text.find('\n') {
Some(position) if position < unscanned_text_fragment_info.text.len() - 1 => {
position
}
Some(_) | None => return,
};
string_before =
unscanned_text_fragment_info.text[..(position + 1)].to_owned();
unscanned_text_fragment_info.text =
unscanned_text_fragment_info.text[(position + 1)..].to_owned().into_boxed_str();
let offset = ByteIndex(string_before.len() as isize);
match unscanned_text_fragment_info.selection {
Some(ref mut selection) if selection.begin() >= offset => {
// Selection is entirely in the second fragment.
selection_before = None;
selection.shift_by(-offset);
}
Some(ref mut selection) if selection.end() > offset => {
// Selection is split across two fragments.
selection_before = Some(Range::new(selection.begin(), offset));
*selection = Range::new(ByteIndex(0), selection.end() - offset);
}
_ => {
// Selection is entirely in the first fragment.
selection_before = unscanned_text_fragment_info.selection;
unscanned_text_fragment_info.selection = None;
}
};
}
first_fragment.transform(
first_fragment.border_box.size,
SpecificFragmentInfo::UnscannedText(Box::new(
UnscannedTextFragmentInfo::new(string_before.into_boxed_str(), selection_before)
))
)
};
fragments.push_front(new_fragment);
}
/// Information about a text run that we're about to create. This is used in `scan_for_runs`.
struct RunInfo {
/// The text that will go in this text run.
text: String,
/// The insertion point in this text run, if applicable.
insertion_point: Option<ByteIndex>,
/// The index of the applicable font in the font group.
font_index: usize,
/// The bidirection embedding level of this text run.
bidi_level: bidi::Level,
/// The Unicode script property of this text run.
script: Script,
}
impl RunInfo {
fn new() -> RunInfo {
RunInfo {
text: String::new(),
insertion_point: None,
font_index: 0,
bidi_level: bidi::Level::ltr(),
script: Script::Common,
}
}
/// Finish processing this RunInfo and add it to the "done" list.
///
/// * `insertion_point`: The position of the insertion point, in characters relative to the start
/// of this text run.
fn flush(mut self,
list: &mut Vec<RunInfo>,
insertion_point: &mut Option<ByteIndex>) {
if let Some(idx) = *insertion_point {
let char_len = ByteIndex(self.text.len() as isize);
if idx <= char_len {
// The insertion point is in this text run.
self.insertion_point = insertion_point.take()
} else {
// Continue looking for the insertion point in the next text run.
*insertion_point = Some(idx - char_len)
}
}
list.push(self);
}
}
/// A mapping from a portion of an unscanned text fragment to the text run we're going to create
/// for it.
#[derive(Clone, Copy, Debug)]
struct RunMapping {
/// The range of byte indices within the text fragment.
byte_range: Range<usize>,
/// The index of the unscanned text fragment that this mapping corresponds to.
old_fragment_index: usize,
/// The index of the text run we're going to create.
text_run_index: usize,
/// Is the text in this fragment selected?
selected: bool,
}
impl RunMapping {
/// Given the current set of text runs, creates a run mapping for the next fragment.
/// `run_info_list` describes the set of runs we've seen already.
fn new(run_info_list: &[RunInfo], fragment_index: usize)
-> RunMapping {
RunMapping {
byte_range: Range::new(0, 0),
old_fragment_index: fragment_index,
text_run_index: run_info_list.len(),
selected: false,
}
}
/// Flushes this run mapping to the list. `run_info` describes the text run that we're
/// currently working on. `text` refers to the text of this fragment.
fn flush(mut self,
mappings: &mut Vec<RunMapping>,
run_info: &mut RunInfo,
text: &str,
compression: CompressionMode,
text_transform: TextTransform,
last_whitespace: &mut bool,
start_position: &mut usize,
end_position: usize) {
let was_empty = *start_position == end_position;
let old_byte_length = run_info.text.len();
*last_whitespace = util::transform_text(&text[(*start_position)..end_position],
compression,
*last_whitespace,
&mut run_info.text);
// Account for `text-transform`. (Confusingly, this is not handled in "text
// transformation" above, but we follow Gecko in the naming.)
let is_first_run = *start_position == 0;
apply_style_transform_if_necessary(&mut run_info.text, old_byte_length, text_transform,
*last_whitespace, is_first_run);
*start_position = end_position;
let new_byte_length = run_info.text.len();
let is_empty = new_byte_length == old_byte_length;
// Don't save mappings that contain only discarded characters.
// (But keep ones that contained no characters to begin with, since they might have been
// generated by an empty flow to draw its borders/padding/insertion point.)
if is_empty &&!was_empty {
return;
}
self.byte_range = Range::new(old_byte_length, new_byte_length - old_byte_length);
mappings.push(self)
}
/// Is the insertion point for this text run within this mapping?
///
/// NOTE: We treat the range as inclusive at both ends, since the insertion point can lie
/// before the first character *or* after the last character, and should be drawn even if the
/// text is empty.
fn contains_insertion_point(&self, insertion_point: Option<ByteIndex>) -> bool {
match insertion_point.map(ByteIndex::to_usize) {
None => false,
Some(idx) => self.byte_range.begin() <= idx && idx <= self.byte_range.end()
}
}
}
/// Accounts for `text-transform`.
///
/// FIXME(#4311, pcwalton): Title-case mapping can change length of the string;
/// case mapping should be language-specific; `full-width`;
/// use graphemes instead of characters.
fn apply_style_transform_if_necessary(string: &mut String,
first_character_position: usize,
text_transform: TextTransform,
last_whitespace: bool,
is_first_run: bool) {
match text_transform {
TextTransform::None => {}
TextTransform::Uppercase => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
for ch in original.chars().flat_map(|ch| ch.to_uppercase()) {
string.push(ch);
}
}
TextTransform::Lowercase => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
for ch in original.chars().flat_map(|ch| ch.to_lowercase()) {
string.push(ch);
}
}
TextTransform::Capitalize => {
let original = string[first_character_position..].to_owned();
string.truncate(first_character_position);
let mut capitalize_next_letter = is_first_run || last_whitespace;
for character in original.chars() {
// FIXME(#4311, pcwalton): Should be the CSS/Unicode notion of a *typographic
// letter unit*, not an *alphabetic* character:
//
// http://dev.w3.org/csswg/css-text/#typographic-letter-unit
if capitalize_next_letter && character.is_alphabetic() {
string.push(character.to_uppercase().next().unwrap());
capitalize_next_letter = false;
continue
}
string.push(character);
// FIXME(#4311, pcwalton): Try UAX29 instead of just whitespace.
if character.is_whitespace() {
capitalize_next_letter = true
}
}
}
}
}
#[derive(Clone)]
struct ScannedTextRun {
run: Arc<TextRun>,
insertion_point: Option<ByteIndex>,
}
/// Can a character with script `b` continue a text run with script `a`?
fn is_compatible(a: Script, b: Script) -> bool {
|
a == b || !is_specific(a) || !is_specific(b)
}
/
|
identifier_body
|
|
text.rs
|
InlineFragments {
fragments: new_fragments,
}
}
/// A "clump" is a range of inline flow leaves that can be merged together into a single
/// fragment. Adjacent text with the same style can be merged, and nothing else can.
///
/// The flow keeps track of the fragments contained by all non-leaf DOM nodes. This is necessary
/// for correct painting order. Since we compress several leaf fragments here, the mapping must
/// be adjusted.
fn
|
(&mut self,
font_context: &mut FontContext,
out_fragments: &mut Vec<Fragment>,
paragraph_bytes_processed: &mut usize,
bidi_levels: Option<&[bidi::Level]>,
mut last_whitespace: bool,
linebreaker: &mut Option<LineBreakLeafIter>)
-> bool {
debug!("TextRunScanner: flushing {} fragments in range", self.clump.len());
debug_assert!(!self.clump.is_empty());
match self.clump.front().unwrap().specific {
SpecificFragmentInfo::UnscannedText(_) => {}
_ => {
debug_assert!(self.clump.len() == 1,
"WAT: can't coalesce non-text nodes in flush_clump_to_list()!");
out_fragments.push(self.clump.pop_front().unwrap());
return false
}
}
// Concatenate all of the transformed strings together, saving the new character indices.
let mut mappings: Vec<RunMapping> = Vec::new();
let runs = {
let fontgroup;
let compression;
let text_transform;
let letter_spacing;
let word_spacing;
let text_rendering;
let word_break;
{
let in_fragment = self.clump.front().unwrap();
let font_style = in_fragment.style().clone_font();
let inherited_text_style = in_fragment.style().get_inheritedtext();
fontgroup = font_context.layout_font_group_for_style(font_style);
compression = match in_fragment.white_space() {
WhiteSpace::Normal |
WhiteSpace::Nowrap => CompressionMode::CompressWhitespaceNewline,
WhiteSpace::Pre |
WhiteSpace::PreWrap => CompressionMode::CompressNone,
WhiteSpace::PreLine => CompressionMode::CompressWhitespace,
};
text_transform = inherited_text_style.text_transform;
letter_spacing = inherited_text_style.letter_spacing;
word_spacing = inherited_text_style.word_spacing.value()
.map(|lop| lop.to_hash_key())
.unwrap_or((Au(0), NotNaN::new(0.0).unwrap()));
text_rendering = inherited_text_style.text_rendering;
word_break = inherited_text_style.word_break;
}
// First, transform/compress text of all the nodes.
let (mut run_info_list, mut run_info) = (Vec::new(), RunInfo::new());
let mut insertion_point = None;
for (fragment_index, in_fragment) in self.clump.iter().enumerate() {
debug!(" flushing {:?}", in_fragment);
let mut mapping = RunMapping::new(&run_info_list[..], fragment_index);
let text;
let selection;
match in_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref text_fragment_info) => {
text = &text_fragment_info.text;
selection = text_fragment_info.selection;
}
_ => panic!("Expected an unscanned text fragment!"),
};
insertion_point = match selection {
Some(range) if range.is_empty() => {
// `range` is the range within the current fragment. To get the range
// within the text run, offset it by the length of the preceding fragments.
Some(range.begin() + ByteIndex(run_info.text.len() as isize))
}
_ => None
};
let (mut start_position, mut end_position) = (0, 0);
for (byte_index, character) in text.char_indices() {
// Search for the first font in this font group that contains a glyph for this
// character.
let font_index = fontgroup.fonts.iter().position(|font| {
font.borrow().glyph_index(character).is_some()
}).unwrap_or(0);
// The following code panics one way or another if this condition isn't met.
assert!(fontgroup.fonts.len() > 0);
let bidi_level = match bidi_levels {
Some(levels) => levels[*paragraph_bytes_processed],
None => bidi::Level::ltr(),
};
// Break the run if the new character has a different explicit script than the
// previous characters.
//
// TODO: Special handling of paired punctuation characters.
// http://www.unicode.org/reports/tr24/#Common
let script = get_script(character);
let compatible_script = is_compatible(script, run_info.script);
if compatible_script &&!is_specific(run_info.script) && is_specific(script) {
run_info.script = script;
}
let selected = match selection {
Some(range) => range.contains(ByteIndex(byte_index as isize)),
None => false
};
// Now, if necessary, flush the mapping we were building up.
let flush_run = run_info.font_index!= font_index ||
run_info.bidi_level!= bidi_level ||
!compatible_script;
let new_mapping_needed = flush_run || mapping.selected!= selected;
if new_mapping_needed {
// We ignore empty mappings at the very start of a fragment.
// The run info values are uninitialized at this point so
// flushing an empty mapping is pointless.
if end_position > 0 {
mapping.flush(&mut mappings,
&mut run_info,
&**text,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
if run_info.text.len() > 0 {
if flush_run {
run_info.flush(&mut run_info_list, &mut insertion_point);
run_info = RunInfo::new();
}
mapping = RunMapping::new(&run_info_list[..],
fragment_index);
}
run_info.font_index = font_index;
run_info.bidi_level = bidi_level;
run_info.script = script;
mapping.selected = selected;
}
// Consume this character.
end_position += character.len_utf8();
*paragraph_bytes_processed += character.len_utf8();
}
// Flush the last mapping we created for this fragment to the list.
mapping.flush(&mut mappings,
&mut run_info,
&**text,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
// Push the final run info.
run_info.flush(&mut run_info_list, &mut insertion_point);
// Per CSS 2.1 § 16.4, "when the resultant space between two characters is not the same
// as the default space, user agents should not use ligatures." This ensures that, for
// example, `finally` with a wide `letter-spacing` renders as `f i n a l l y` and not
// `fi n a l l y`.
let mut flags = ShapingFlags::empty();
if let Some(v) = letter_spacing.value() {
if v.px()!= 0. {
flags.insert(ShapingFlags::IGNORE_LIGATURES_SHAPING_FLAG);
}
}
if text_rendering == TextRendering::Optimizespeed {
flags.insert(ShapingFlags::IGNORE_LIGATURES_SHAPING_FLAG);
flags.insert(ShapingFlags::DISABLE_KERNING_SHAPING_FLAG)
}
if word_break == WordBreak::KeepAll {
flags.insert(ShapingFlags::KEEP_ALL_FLAG);
}
let options = ShapingOptions {
letter_spacing: letter_spacing.value().cloned().map(Au::from),
word_spacing: word_spacing,
script: Script::Common,
flags: flags,
};
let mut result = Vec::with_capacity(run_info_list.len());
for run_info in run_info_list {
let mut options = options;
options.script = run_info.script;
if run_info.bidi_level.is_rtl() {
options.flags.insert(ShapingFlags::RTL_FLAG);
}
let mut font = fontgroup.fonts.get(run_info.font_index).unwrap().borrow_mut();
let (run, break_at_zero) = TextRun::new(&mut *font,
run_info.text,
&options,
run_info.bidi_level,
linebreaker);
result.push((ScannedTextRun {
run: Arc::new(run),
insertion_point: run_info.insertion_point,
}, break_at_zero))
}
result
};
// Make new fragments with the runs and adjusted text indices.
debug!("TextRunScanner: pushing {} fragment(s)", self.clump.len());
let mut mappings = mappings.into_iter().peekable();
let mut prev_fragments_to_meld = Vec::new();
for (logical_offset, old_fragment) in
mem::replace(&mut self.clump, LinkedList::new()).into_iter().enumerate() {
let mut is_first_mapping_of_this_old_fragment = true;
loop {
match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => {}
Some(_) | None => {
if is_first_mapping_of_this_old_fragment {
// There were no mappings for this unscanned fragment. Transfer its
// flags to the previous/next sibling elements instead.
if let Some(ref mut last_fragment) = out_fragments.last_mut() {
last_fragment.meld_with_next_inline_fragment(&old_fragment);
}
prev_fragments_to_meld.push(old_fragment);
}
break;
}
};
let mapping = mappings.next().unwrap();
let (scanned_run, break_at_zero) = runs[mapping.text_run_index].clone();
let mut byte_range = Range::new(ByteIndex(mapping.byte_range.begin() as isize),
ByteIndex(mapping.byte_range.length() as isize));
let mut flags = ScannedTextFlags::empty();
if!break_at_zero && mapping.byte_range.begin() == 0 {
// If this is the first segment of the text run,
// and the text run doesn't break at zero, suppress line breaks
flags.insert(ScannedTextFlags::SUPPRESS_LINE_BREAK_BEFORE)
}
let text_size = old_fragment.border_box.size;
let requires_line_break_afterward_if_wrapping_on_newlines =
scanned_run.run.text[mapping.byte_range.begin()..mapping.byte_range.end()]
.ends_with('\n');
if requires_line_break_afterward_if_wrapping_on_newlines {
byte_range.extend_by(ByteIndex(-1)); // Trim the '\n'
flags.insert(ScannedTextFlags::REQUIRES_LINE_BREAK_AFTERWARD_IF_WRAPPING_ON_NEWLINES);
}
if mapping.selected {
flags.insert(ScannedTextFlags::SELECTED);
}
let insertion_point = if mapping.contains_insertion_point(scanned_run.insertion_point) {
scanned_run.insertion_point
} else {
None
};
let mut new_text_fragment_info = Box::new(ScannedTextFragmentInfo::new(
scanned_run.run,
byte_range,
text_size,
insertion_point,
flags
));
let new_metrics = new_text_fragment_info.run.metrics_for_range(&byte_range);
let writing_mode = old_fragment.style.writing_mode;
let bounding_box_size = bounding_box_for_run_metrics(&new_metrics, writing_mode);
new_text_fragment_info.content_size = bounding_box_size;
let mut new_fragment = old_fragment.transform(
bounding_box_size,
SpecificFragmentInfo::ScannedText(new_text_fragment_info));
let is_last_mapping_of_this_old_fragment = match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => false,
_ => true
};
if let Some(ref mut context) = new_fragment.inline_context {
for node in &mut context.nodes {
if!is_last_mapping_of_this_old_fragment {
node.flags.remove(InlineFragmentNodeFlags::LAST_FRAGMENT_OF_ELEMENT);
}
if!is_first_mapping_of_this_old_fragment {
node.flags.remove(InlineFragmentNodeFlags::FIRST_FRAGMENT_OF_ELEMENT);
|
flush_clump_to_list
|
identifier_name
|
text.rs
|
};
// Now, if necessary, flush the mapping we were building up.
let flush_run = run_info.font_index!= font_index ||
run_info.bidi_level!= bidi_level ||
!compatible_script;
let new_mapping_needed = flush_run || mapping.selected!= selected;
if new_mapping_needed {
// We ignore empty mappings at the very start of a fragment.
// The run info values are uninitialized at this point so
// flushing an empty mapping is pointless.
if end_position > 0 {
mapping.flush(&mut mappings,
&mut run_info,
&**text,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
if run_info.text.len() > 0 {
if flush_run {
run_info.flush(&mut run_info_list, &mut insertion_point);
run_info = RunInfo::new();
}
mapping = RunMapping::new(&run_info_list[..],
fragment_index);
}
run_info.font_index = font_index;
run_info.bidi_level = bidi_level;
run_info.script = script;
mapping.selected = selected;
}
// Consume this character.
end_position += character.len_utf8();
*paragraph_bytes_processed += character.len_utf8();
}
// Flush the last mapping we created for this fragment to the list.
mapping.flush(&mut mappings,
&mut run_info,
&**text,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
// Push the final run info.
run_info.flush(&mut run_info_list, &mut insertion_point);
// Per CSS 2.1 § 16.4, "when the resultant space between two characters is not the same
// as the default space, user agents should not use ligatures." This ensures that, for
// example, `finally` with a wide `letter-spacing` renders as `f i n a l l y` and not
// `fi n a l l y`.
let mut flags = ShapingFlags::empty();
if let Some(v) = letter_spacing.value() {
if v.px()!= 0. {
flags.insert(ShapingFlags::IGNORE_LIGATURES_SHAPING_FLAG);
}
}
if text_rendering == TextRendering::Optimizespeed {
flags.insert(ShapingFlags::IGNORE_LIGATURES_SHAPING_FLAG);
flags.insert(ShapingFlags::DISABLE_KERNING_SHAPING_FLAG)
}
if word_break == WordBreak::KeepAll {
flags.insert(ShapingFlags::KEEP_ALL_FLAG);
}
let options = ShapingOptions {
letter_spacing: letter_spacing.value().cloned().map(Au::from),
word_spacing: word_spacing,
script: Script::Common,
flags: flags,
};
let mut result = Vec::with_capacity(run_info_list.len());
for run_info in run_info_list {
let mut options = options;
options.script = run_info.script;
if run_info.bidi_level.is_rtl() {
options.flags.insert(ShapingFlags::RTL_FLAG);
}
let mut font = fontgroup.fonts.get(run_info.font_index).unwrap().borrow_mut();
let (run, break_at_zero) = TextRun::new(&mut *font,
run_info.text,
&options,
run_info.bidi_level,
linebreaker);
result.push((ScannedTextRun {
run: Arc::new(run),
insertion_point: run_info.insertion_point,
}, break_at_zero))
}
result
};
// Make new fragments with the runs and adjusted text indices.
debug!("TextRunScanner: pushing {} fragment(s)", self.clump.len());
let mut mappings = mappings.into_iter().peekable();
let mut prev_fragments_to_meld = Vec::new();
for (logical_offset, old_fragment) in
mem::replace(&mut self.clump, LinkedList::new()).into_iter().enumerate() {
let mut is_first_mapping_of_this_old_fragment = true;
loop {
match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => {}
Some(_) | None => {
if is_first_mapping_of_this_old_fragment {
// There were no mappings for this unscanned fragment. Transfer its
// flags to the previous/next sibling elements instead.
if let Some(ref mut last_fragment) = out_fragments.last_mut() {
last_fragment.meld_with_next_inline_fragment(&old_fragment);
}
prev_fragments_to_meld.push(old_fragment);
}
break;
}
};
let mapping = mappings.next().unwrap();
let (scanned_run, break_at_zero) = runs[mapping.text_run_index].clone();
let mut byte_range = Range::new(ByteIndex(mapping.byte_range.begin() as isize),
ByteIndex(mapping.byte_range.length() as isize));
let mut flags = ScannedTextFlags::empty();
if!break_at_zero && mapping.byte_range.begin() == 0 {
// If this is the first segment of the text run,
// and the text run doesn't break at zero, suppress line breaks
flags.insert(ScannedTextFlags::SUPPRESS_LINE_BREAK_BEFORE)
}
let text_size = old_fragment.border_box.size;
let requires_line_break_afterward_if_wrapping_on_newlines =
scanned_run.run.text[mapping.byte_range.begin()..mapping.byte_range.end()]
.ends_with('\n');
if requires_line_break_afterward_if_wrapping_on_newlines {
byte_range.extend_by(ByteIndex(-1)); // Trim the '\n'
flags.insert(ScannedTextFlags::REQUIRES_LINE_BREAK_AFTERWARD_IF_WRAPPING_ON_NEWLINES);
}
if mapping.selected {
flags.insert(ScannedTextFlags::SELECTED);
}
let insertion_point = if mapping.contains_insertion_point(scanned_run.insertion_point) {
scanned_run.insertion_point
} else {
None
};
let mut new_text_fragment_info = Box::new(ScannedTextFragmentInfo::new(
scanned_run.run,
byte_range,
text_size,
insertion_point,
flags
));
let new_metrics = new_text_fragment_info.run.metrics_for_range(&byte_range);
let writing_mode = old_fragment.style.writing_mode;
let bounding_box_size = bounding_box_for_run_metrics(&new_metrics, writing_mode);
new_text_fragment_info.content_size = bounding_box_size;
let mut new_fragment = old_fragment.transform(
bounding_box_size,
SpecificFragmentInfo::ScannedText(new_text_fragment_info));
let is_last_mapping_of_this_old_fragment = match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => false,
_ => true
};
if let Some(ref mut context) = new_fragment.inline_context {
for node in &mut context.nodes {
if!is_last_mapping_of_this_old_fragment {
node.flags.remove(InlineFragmentNodeFlags::LAST_FRAGMENT_OF_ELEMENT);
}
if!is_first_mapping_of_this_old_fragment {
node.flags.remove(InlineFragmentNodeFlags::FIRST_FRAGMENT_OF_ELEMENT);
}
}
}
for prev_fragment in prev_fragments_to_meld.drain(..) {
new_fragment.meld_with_prev_inline_fragment(&prev_fragment);
}
is_first_mapping_of_this_old_fragment = false;
out_fragments.push(new_fragment)
}
}
last_whitespace
}
}
#[inline]
fn bounding_box_for_run_metrics(metrics: &RunMetrics, writing_mode: WritingMode)
-> LogicalSize<Au> {
// TODO: When the text-orientation property is supported, the block and inline directions may
// be swapped for horizontal glyphs in vertical lines.
LogicalSize::new(
writing_mode,
metrics.bounding_box.size.width,
metrics.bounding_box.size.height)
}
/// Returns the metrics of the font represented by the given `style_structs::Font`, respectively.
///
/// `#[inline]` because often the caller only needs a few fields from the font metrics.
#[inline]
pub fn font_metrics_for_style(font_context: &mut FontContext, font_style: ::ServoArc<style_structs::Font>)
-> FontMetrics {
let fontgroup = font_context.layout_font_group_for_style(font_style);
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let font = fontgroup.fonts[0].borrow();
font.metrics.clone()
}
/// Returns the line block-size needed by the given computed style and font size.
pub fn line_height_from_style(style: &ComputedValues, metrics: &FontMetrics) -> Au {
let font_size = style.get_font().font_size.size();
match style.get_inheritedtext().line_height {
LineHeight::Normal => Au::from(metrics.line_gap),
LineHeight::Number(l) => font_size.scale_by(l.0),
LineHeight::Length(l) => Au::from(l)
}
}
fn split_first_fragment_at_newline_if_necessary(fragments: &mut LinkedList<Fragment>) {
if fragments.is_empty() {
return
}
let new_fragment = {
let first_fragment = fragments.front_mut().unwrap();
let string_before;
let selection_before;
{
if!first_fragment.white_space().preserve_newlines() {
return;
}
let unscanned_text_fragment_info = match first_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
unscanned_text_fragment_info
}
_ => return,
};
let position = match unscanned_text_fragment_info.text.find('\n') {
Some(position) if position < unscanned_text_fragment_info.text.len() - 1 => {
position
}
Some(_) | None => return,
};
string_before =
unscanned_text_fragment_info.text[..(position + 1)].to_owned();
unscanned_text_fragment_info.text =
unscanned_text_fragment_info.text[(position + 1)..].to_owned().into_boxed_str();
let offset = ByteIndex(string_before.len() as isize);
match unscanned_text_fragment_info.selection {
Some(ref mut selection) if selection.begin() >= offset => {
// Selection is entirely in the second fragment.
selection_before = None;
selection.shift_by(-offset);
}
Some(ref mut selection) if selection.end() > offset => {
// Selection is split across two fragments.
selection_before = Some(Range::new(selection.begin(), offset));
*selection = Range::new(ByteIndex(0), selection.end() - offset);
}
_ => {
// Selection is entirely in the first fragment.
selection_before = unscanned_text_fragment_info.selection;
unscanned_text_fragment_info.selection = None;
}
};
}
first_fragment.transform(
first_fragment.border_box.size,
SpecificFragmentInfo::UnscannedText(Box::new(
UnscannedTextFragmentInfo::new(string_before.into_boxed_str(), selection_before)
))
)
};
fragments.push_front(new_fragment);
}
/// Information about a text run that we're about to create. This is used in `scan_for_runs`.
struct RunInfo {
/// The text that will go in this text run.
text: String,
/// The insertion point in this text run, if applicable.
insertion_point: Option<ByteIndex>,
/// The index of the applicable font in the font group.
font_index: usize,
/// The bidirection embedding level of this text run.
bidi_level: bidi::Level,
/// The Unicode script property of this text run.
script: Script,
}
impl RunInfo {
fn new() -> RunInfo {
RunInfo {
text: String::new(),
insertion_point: None,
font_index: 0,
bidi_level: bidi::Level::ltr(),
script: Script::Common,
}
}
/// Finish processing this RunInfo and add it to the "done" list.
///
/// * `insertion_point`: The position of the insertion point, in characters relative to the start
/// of this text run.
fn flush(mut self,
list: &mut Vec<RunInfo>,
insertion_point: &mut Option<ByteIndex>) {
if let Some(idx) = *insertion_point {
let char_len = ByteIndex(self.text.len() as isize);
if idx <= char_len {
// The insertion point is in this text run.
self.insertion_point = insertion_point.take()
} else {
// Continue looking for the insertion point in the next text run.
*insertion_point = Some(idx - char_len)
}
}
list.push(self);
}
}
/// A mapping from a portion of an unscanned text fragment to the text run we're going to create
/// for it.
#[derive(Clone, Copy, Debug)]
struct RunMapping {
/// The range of byte indices within the text fragment.
byte_range: Range<usize>,
/// The index of the unscanned text fragment that this mapping corresponds to.
old_fragment_index: usize,
/// The index of the text run we're going to create.
text_run_index: usize,
/// Is the text in this fragment selected?
selected: bool,
}
impl RunMapping {
/// Given the current set of text runs, creates a run mapping for the next fragment.
/// `run_info_list` describes the set of runs we've seen already.
fn new(run_info_list: &[RunInfo], fragment_index: usize)
-> RunMapping {
RunMapping {
byte_range: Range::new(0, 0),
|
old_fragment_index: fragment_index,
|
random_line_split
|
|
input.rs
|
#![allow(unused_variables)]
/// Motion directions
pub enum Direction {
North,
NorthEast,
East,
SouthEast,
South,
SouthWest,
West,
NorthWest,
}
/// Commands
///
/// They can be generated by a user input, or as the consequences of
/// some other actions.
pub enum Command {
// List of generic game actions
/// Quit game
Quit,
/// Save game and quit
Save,
// Toggle modes
// TBD
//
// List of Normal mode player action
/// Move towards Directions
MoveTowards(Direction),
/// Go to (x,y,z) coordinates
MoveTo { x: i64, y: i64, z: i64 },
// List of Explore mode player action
/// Move Cursor towards Directions
MoveCursorTowards(Direction),
/// Move cursor to (x,y,z) coordinates
CurstorTo { x: i64, y: i64, z: i64 },
}
/// Mode of input similar to Vim input mode.
///
/// In each mode the key will have a different meaning
#[derive(Debug)]
pub enum InputMode {
/// Normal game mode
Normal,
/// Move cursor freely around to identify things
Explore,
/// Menu
Menu,
/// Yes or no question
YesOrNo,
/// Help files
Help,
}
pub struct InputHandler {
mode: InputMode,
}
impl InputHandler {
pub fn new() -> InputHandler {
InputHandler { mode: InputMode::Normal }
}
pub fn change_mode(&mut self, new_mode: InputMode) {
self.mode = new_mode;
}
pub fn process_key(&self, key: char) {
match self.mode {
InputMode::Normal => self.process_normal(key),
InputMode::Explore => self.process_explore(key),
InputMode::Menu => self.process_menu(key),
InputMode::YesOrNo => self.process_yesorno(key),
InputMode::Help => self.process_help(key),
}
}
fn process_normal(&self, key: char) {
match key {
'h' => info!("west"),
'j' => info!("south"),
'k' => info!("north"),
'l' => info!("left"),
'y' => info!("left"),
'u' => info!("left"),
'b' => info!("left"),
'n' => info!("left"),
_ => warn!("unknown command in {:?} mode : {}", self.mode, key),
}
}
fn process_explore(&self, key: char) {
unimplemented!();
}
fn process_menu(&self, key: char) {
unimplemented!();
}
fn process_yesorno(&self, key: char) {
unimplemented!();
}
fn
|
(&self, key: char) {
unimplemented!();
}
}
|
process_help
|
identifier_name
|
input.rs
|
#![allow(unused_variables)]
/// Motion directions
pub enum Direction {
North,
NorthEast,
East,
SouthEast,
South,
SouthWest,
West,
NorthWest,
}
/// Commands
///
/// They can be generated by a user input, or as the consequences of
/// some other actions.
pub enum Command {
// List of generic game actions
/// Quit game
Quit,
/// Save game and quit
Save,
// Toggle modes
// TBD
//
// List of Normal mode player action
/// Move towards Directions
MoveTowards(Direction),
/// Go to (x,y,z) coordinates
MoveTo { x: i64, y: i64, z: i64 },
// List of Explore mode player action
/// Move Cursor towards Directions
MoveCursorTowards(Direction),
/// Move cursor to (x,y,z) coordinates
CurstorTo { x: i64, y: i64, z: i64 },
}
/// Mode of input similar to Vim input mode.
///
/// In each mode the key will have a different meaning
#[derive(Debug)]
pub enum InputMode {
/// Normal game mode
Normal,
/// Move cursor freely around to identify things
Explore,
/// Menu
Menu,
/// Yes or no question
YesOrNo,
/// Help files
Help,
}
pub struct InputHandler {
mode: InputMode,
}
impl InputHandler {
pub fn new() -> InputHandler {
InputHandler { mode: InputMode::Normal }
}
pub fn change_mode(&mut self, new_mode: InputMode) {
|
self.mode = new_mode;
}
pub fn process_key(&self, key: char) {
match self.mode {
InputMode::Normal => self.process_normal(key),
InputMode::Explore => self.process_explore(key),
InputMode::Menu => self.process_menu(key),
InputMode::YesOrNo => self.process_yesorno(key),
InputMode::Help => self.process_help(key),
}
}
fn process_normal(&self, key: char) {
match key {
'h' => info!("west"),
'j' => info!("south"),
'k' => info!("north"),
'l' => info!("left"),
'y' => info!("left"),
'u' => info!("left"),
'b' => info!("left"),
'n' => info!("left"),
_ => warn!("unknown command in {:?} mode : {}", self.mode, key),
}
}
fn process_explore(&self, key: char) {
unimplemented!();
}
fn process_menu(&self, key: char) {
unimplemented!();
}
fn process_yesorno(&self, key: char) {
unimplemented!();
}
fn process_help(&self, key: char) {
unimplemented!();
}
}
|
random_line_split
|
|
input.rs
|
#![allow(unused_variables)]
/// Motion directions
pub enum Direction {
North,
NorthEast,
East,
SouthEast,
South,
SouthWest,
West,
NorthWest,
}
/// Commands
///
/// They can be generated by a user input, or as the consequences of
/// some other actions.
pub enum Command {
// List of generic game actions
/// Quit game
Quit,
/// Save game and quit
Save,
// Toggle modes
// TBD
//
// List of Normal mode player action
/// Move towards Directions
MoveTowards(Direction),
/// Go to (x,y,z) coordinates
MoveTo { x: i64, y: i64, z: i64 },
// List of Explore mode player action
/// Move Cursor towards Directions
MoveCursorTowards(Direction),
/// Move cursor to (x,y,z) coordinates
CurstorTo { x: i64, y: i64, z: i64 },
}
/// Mode of input similar to Vim input mode.
///
/// In each mode the key will have a different meaning
#[derive(Debug)]
pub enum InputMode {
/// Normal game mode
Normal,
/// Move cursor freely around to identify things
Explore,
/// Menu
Menu,
/// Yes or no question
YesOrNo,
/// Help files
Help,
}
pub struct InputHandler {
mode: InputMode,
}
impl InputHandler {
pub fn new() -> InputHandler {
InputHandler { mode: InputMode::Normal }
}
pub fn change_mode(&mut self, new_mode: InputMode) {
self.mode = new_mode;
}
pub fn process_key(&self, key: char) {
match self.mode {
InputMode::Normal => self.process_normal(key),
InputMode::Explore => self.process_explore(key),
InputMode::Menu => self.process_menu(key),
InputMode::YesOrNo => self.process_yesorno(key),
InputMode::Help => self.process_help(key),
}
}
fn process_normal(&self, key: char) {
match key {
'h' => info!("west"),
'j' => info!("south"),
'k' => info!("north"),
'l' => info!("left"),
'y' => info!("left"),
'u' => info!("left"),
'b' => info!("left"),
'n' => info!("left"),
_ => warn!("unknown command in {:?} mode : {}", self.mode, key),
}
}
fn process_explore(&self, key: char)
|
fn process_menu(&self, key: char) {
unimplemented!();
}
fn process_yesorno(&self, key: char) {
unimplemented!();
}
fn process_help(&self, key: char) {
unimplemented!();
}
}
|
{
unimplemented!();
}
|
identifier_body
|
mailbox.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::fsm::{Fsm, FsmScheduler, FsmState};
use crossbeam::channel::{SendError, TrySendError};
use std::borrow::Cow;
use std::sync::Arc;
use tikv_util::mpsc;
/// A basic mailbox.
///
/// Every mailbox should have one and only one owner, who will receive all
/// messages sent to this mailbox.
///
/// When a message is sent to a mailbox, its owner will be checked whether it's
/// idle. An idle owner will be scheduled via `FsmScheduler` immediately, which
/// will drive the fsm to poll for messages.
pub struct BasicMailbox<Owner: Fsm> {
sender: mpsc::LooseBoundedSender<Owner::Message>,
state: Arc<FsmState<Owner>>,
}
impl<Owner: Fsm> BasicMailbox<Owner> {
#[inline]
pub fn new(
sender: mpsc::LooseBoundedSender<Owner::Message>,
fsm: Box<Owner>,
) -> BasicMailbox<Owner> {
BasicMailbox {
sender,
state: Arc::new(FsmState::new(fsm)),
}
}
pub(crate) fn is_connected(&self) -> bool {
self.sender.is_sender_connected()
}
pub(crate) fn release(&self, fsm: Box<Owner>) {
self.state.release(fsm)
}
pub(crate) fn take_fsm(&self) -> Option<Box<Owner>> {
self.state.take_fsm()
}
#[inline]
pub fn len(&self) -> usize {
self.sender.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.sender.is_empty()
}
/// Force sending a message despite the capacity limit on channel.
#[inline]
pub fn force_send<S: FsmScheduler<Fsm = Owner>>(
&self,
msg: Owner::Message,
scheduler: &S,
) -> Result<(), SendError<Owner::Message>> {
self.sender.force_send(msg)?;
self.state.notify(scheduler, Cow::Borrowed(self));
Ok(())
}
/// Try to send a message to the mailbox.
///
/// If there are too many pending messages, function may fail.
#[inline]
pub fn try_send<S: FsmScheduler<Fsm = Owner>>(
&self,
msg: Owner::Message,
scheduler: &S,
) -> Result<(), TrySendError<Owner::Message>> {
self.sender.try_send(msg)?;
self.state.notify(scheduler, Cow::Borrowed(self));
Ok(())
}
/// Close the mailbox explicitly.
#[inline]
pub(crate) fn close(&self) {
self.sender.close_sender();
self.state.clear();
}
}
impl<Owner: Fsm> Clone for BasicMailbox<Owner> {
#[inline]
fn
|
(&self) -> BasicMailbox<Owner> {
BasicMailbox {
sender: self.sender.clone(),
state: self.state.clone(),
}
}
}
/// A more high level mailbox.
pub struct Mailbox<Owner, Scheduler>
where
Owner: Fsm,
Scheduler: FsmScheduler<Fsm = Owner>,
{
mailbox: BasicMailbox<Owner>,
scheduler: Scheduler,
}
impl<Owner, Scheduler> Mailbox<Owner, Scheduler>
where
Owner: Fsm,
Scheduler: FsmScheduler<Fsm = Owner>,
{
pub fn new(mailbox: BasicMailbox<Owner>, scheduler: Scheduler) -> Mailbox<Owner, Scheduler> {
Mailbox { mailbox, scheduler }
}
/// Force sending a message despite channel capacity limit.
#[inline]
pub fn force_send(&self, msg: Owner::Message) -> Result<(), SendError<Owner::Message>> {
self.mailbox.force_send(msg, &self.scheduler)
}
/// Try to send a message.
#[inline]
pub fn try_send(&self, msg: Owner::Message) -> Result<(), TrySendError<Owner::Message>> {
self.mailbox.try_send(msg, &self.scheduler)
}
}
|
clone
|
identifier_name
|
mailbox.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::fsm::{Fsm, FsmScheduler, FsmState};
use crossbeam::channel::{SendError, TrySendError};
use std::borrow::Cow;
use std::sync::Arc;
use tikv_util::mpsc;
/// A basic mailbox.
///
/// Every mailbox should have one and only one owner, who will receive all
/// messages sent to this mailbox.
///
/// When a message is sent to a mailbox, its owner will be checked whether it's
/// idle. An idle owner will be scheduled via `FsmScheduler` immediately, which
/// will drive the fsm to poll for messages.
pub struct BasicMailbox<Owner: Fsm> {
sender: mpsc::LooseBoundedSender<Owner::Message>,
state: Arc<FsmState<Owner>>,
}
impl<Owner: Fsm> BasicMailbox<Owner> {
#[inline]
pub fn new(
sender: mpsc::LooseBoundedSender<Owner::Message>,
fsm: Box<Owner>,
) -> BasicMailbox<Owner> {
BasicMailbox {
sender,
state: Arc::new(FsmState::new(fsm)),
}
}
pub(crate) fn is_connected(&self) -> bool
|
pub(crate) fn release(&self, fsm: Box<Owner>) {
self.state.release(fsm)
}
pub(crate) fn take_fsm(&self) -> Option<Box<Owner>> {
self.state.take_fsm()
}
#[inline]
pub fn len(&self) -> usize {
self.sender.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.sender.is_empty()
}
/// Force sending a message despite the capacity limit on channel.
#[inline]
pub fn force_send<S: FsmScheduler<Fsm = Owner>>(
&self,
msg: Owner::Message,
scheduler: &S,
) -> Result<(), SendError<Owner::Message>> {
self.sender.force_send(msg)?;
self.state.notify(scheduler, Cow::Borrowed(self));
Ok(())
}
/// Try to send a message to the mailbox.
///
/// If there are too many pending messages, function may fail.
#[inline]
pub fn try_send<S: FsmScheduler<Fsm = Owner>>(
&self,
msg: Owner::Message,
scheduler: &S,
) -> Result<(), TrySendError<Owner::Message>> {
self.sender.try_send(msg)?;
self.state.notify(scheduler, Cow::Borrowed(self));
Ok(())
}
/// Close the mailbox explicitly.
#[inline]
pub(crate) fn close(&self) {
self.sender.close_sender();
self.state.clear();
}
}
impl<Owner: Fsm> Clone for BasicMailbox<Owner> {
#[inline]
fn clone(&self) -> BasicMailbox<Owner> {
BasicMailbox {
sender: self.sender.clone(),
state: self.state.clone(),
}
}
}
/// A more high level mailbox.
pub struct Mailbox<Owner, Scheduler>
where
Owner: Fsm,
Scheduler: FsmScheduler<Fsm = Owner>,
{
mailbox: BasicMailbox<Owner>,
scheduler: Scheduler,
}
impl<Owner, Scheduler> Mailbox<Owner, Scheduler>
where
Owner: Fsm,
Scheduler: FsmScheduler<Fsm = Owner>,
{
pub fn new(mailbox: BasicMailbox<Owner>, scheduler: Scheduler) -> Mailbox<Owner, Scheduler> {
Mailbox { mailbox, scheduler }
}
/// Force sending a message despite channel capacity limit.
#[inline]
pub fn force_send(&self, msg: Owner::Message) -> Result<(), SendError<Owner::Message>> {
self.mailbox.force_send(msg, &self.scheduler)
}
/// Try to send a message.
#[inline]
pub fn try_send(&self, msg: Owner::Message) -> Result<(), TrySendError<Owner::Message>> {
self.mailbox.try_send(msg, &self.scheduler)
}
}
|
{
self.sender.is_sender_connected()
}
|
identifier_body
|
mailbox.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::fsm::{Fsm, FsmScheduler, FsmState};
use crossbeam::channel::{SendError, TrySendError};
use std::borrow::Cow;
use std::sync::Arc;
use tikv_util::mpsc;
/// A basic mailbox.
///
/// Every mailbox should have one and only one owner, who will receive all
/// messages sent to this mailbox.
///
/// When a message is sent to a mailbox, its owner will be checked whether it's
/// idle. An idle owner will be scheduled via `FsmScheduler` immediately, which
/// will drive the fsm to poll for messages.
pub struct BasicMailbox<Owner: Fsm> {
sender: mpsc::LooseBoundedSender<Owner::Message>,
state: Arc<FsmState<Owner>>,
}
impl<Owner: Fsm> BasicMailbox<Owner> {
#[inline]
pub fn new(
sender: mpsc::LooseBoundedSender<Owner::Message>,
fsm: Box<Owner>,
) -> BasicMailbox<Owner> {
BasicMailbox {
sender,
state: Arc::new(FsmState::new(fsm)),
}
}
pub(crate) fn is_connected(&self) -> bool {
self.sender.is_sender_connected()
}
pub(crate) fn release(&self, fsm: Box<Owner>) {
self.state.release(fsm)
}
pub(crate) fn take_fsm(&self) -> Option<Box<Owner>> {
self.state.take_fsm()
}
#[inline]
pub fn len(&self) -> usize {
self.sender.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.sender.is_empty()
}
/// Force sending a message despite the capacity limit on channel.
#[inline]
pub fn force_send<S: FsmScheduler<Fsm = Owner>>(
&self,
msg: Owner::Message,
scheduler: &S,
) -> Result<(), SendError<Owner::Message>> {
self.sender.force_send(msg)?;
self.state.notify(scheduler, Cow::Borrowed(self));
Ok(())
}
/// Try to send a message to the mailbox.
///
/// If there are too many pending messages, function may fail.
#[inline]
pub fn try_send<S: FsmScheduler<Fsm = Owner>>(
&self,
msg: Owner::Message,
scheduler: &S,
) -> Result<(), TrySendError<Owner::Message>> {
self.sender.try_send(msg)?;
self.state.notify(scheduler, Cow::Borrowed(self));
Ok(())
}
/// Close the mailbox explicitly.
#[inline]
pub(crate) fn close(&self) {
self.sender.close_sender();
self.state.clear();
}
}
impl<Owner: Fsm> Clone for BasicMailbox<Owner> {
#[inline]
fn clone(&self) -> BasicMailbox<Owner> {
BasicMailbox {
sender: self.sender.clone(),
state: self.state.clone(),
}
}
}
/// A more high level mailbox.
pub struct Mailbox<Owner, Scheduler>
where
Owner: Fsm,
Scheduler: FsmScheduler<Fsm = Owner>,
{
mailbox: BasicMailbox<Owner>,
scheduler: Scheduler,
}
impl<Owner, Scheduler> Mailbox<Owner, Scheduler>
where
Owner: Fsm,
Scheduler: FsmScheduler<Fsm = Owner>,
{
|
#[inline]
pub fn force_send(&self, msg: Owner::Message) -> Result<(), SendError<Owner::Message>> {
self.mailbox.force_send(msg, &self.scheduler)
}
/// Try to send a message.
#[inline]
pub fn try_send(&self, msg: Owner::Message) -> Result<(), TrySendError<Owner::Message>> {
self.mailbox.try_send(msg, &self.scheduler)
}
}
|
pub fn new(mailbox: BasicMailbox<Owner>, scheduler: Scheduler) -> Mailbox<Owner, Scheduler> {
Mailbox { mailbox, scheduler }
}
/// Force sending a message despite channel capacity limit.
|
random_line_split
|
gdal_ds.rs
|
//
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::gdal_fields::*;
use gdal::spatial_ref::{CoordTransform, SpatialRef};
use gdal::vector::Geometry;
use gdal::Dataset;
use std::collections::BTreeMap;
use std::path::Path;
use t_rex_core::core::config::DatasourceCfg;
use t_rex_core::core::feature::Feature;
use t_rex_core::core::layer::Layer;
use t_rex_core::core::Config;
use t_rex_core::datasource::DatasourceType;
use tile_grid::Extent;
use tile_grid::Grid;
#[derive(Clone)]
pub struct GdalDatasource {
pub path: String,
// We don't store the Dataset, because we need mut access for getting layers
/// SpatialRef WKT for layers which need CoordTransform
geom_transform: BTreeMap<String, String>,
}
impl GdalDatasource {
pub fn new(path: &str) -> GdalDatasource {
GdalDatasource {
path: path.to_string(),
geom_transform: BTreeMap::new(),
}
}
}
impl DatasourceType for GdalDatasource {
/// New instance with connected pool
fn connected(&self) -> GdalDatasource {
GdalDatasource {
path: self.path.clone(),
geom_transform: BTreeMap::new(),
}
}
fn detect_layers(&self, _detect_geometry_types: bool) -> Vec<Layer> {
let mut layers: Vec<Layer> = Vec::new();
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
for gdal_layer in dataset.layers() {
let name = gdal_layer.name();
// Create a layer for each geometry field
for (n, field) in gdal_layer.defn().geom_fields().enumerate() {
let mut layer = Layer::new(&name);
layer.table_name = if n == 0 {
Some(name.clone())
} else {
Some(format!("{}_{}", &name, n))
};
layer.geometry_field = Some(field.name());
layer.geometry_type = geom_type_name(field.field_type());
let srs = field.spatial_ref().unwrap();
if let Ok(epsg) = srs.auth_code() {
layer.srid = Some(epsg)
}
layers.push(layer)
}
}
layers
}
/// Return column field names and Rust compatible type conversion - without geometry column
fn detect_data_columns(&self, _layer: &Layer, _sql: Option<&String>) -> Vec<(String, String)> {
Vec::new() //TODO
}
/// Projected extent
fn reproject_extent(
&self,
extent: &Extent,
dest_srid: i32,
src_srid: Option<i32>,
) -> Option<Extent> {
let ext_srid = src_srid.unwrap_or(4326);
transform_extent(extent, ext_srid, dest_srid).ok()
}
fn
|
(&self, layer: &Layer, grid_srid: i32) -> Option<Extent> {
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
let layer_name = layer.table_name.as_ref().unwrap();
let ogr_layer = dataset.layer_by_name(layer_name).unwrap();
let extent = match ogr_layer.get_extent() {
Err(e) => {
warn!("Layer '{}': Unable to get extent: {:?}", layer.name, e);
None
}
Ok(extent) => Some(Extent {
minx: extent.MinX,
miny: extent.MinY,
maxx: extent.MaxX,
maxy: extent.MaxY,
}),
};
let grid_sref = match sref(grid_srid as u32) {
Err(e) => {
error!("Unable to get grid spatial reference: {:?}", e);
return None;
}
Ok(sref) => sref,
};
let layer_sref = geom_spatialref(&ogr_layer, layer.geometry_field.as_ref());
let src_sref = match layer_sref {
Some(ref sref) if!layer.no_transform => sref,
_ => &grid_sref,
};
let wgs84_sref = match sref(4326) {
Err(e) => {
warn!("Unable to get EPSG:4326 spatial reference: {:?}", e);
return None;
}
Ok(sref) => sref,
};
match extent {
Some(extent) => match transform_extent_sref(&extent, src_sref, &wgs84_sref) {
Ok(extent) => Some(extent),
Err(e) => {
error!("Unable to transform {:?}: {:?}", extent, e);
None
}
},
None => None,
}
}
fn prepare_queries(&mut self, _tileset: &str, layer: &Layer, grid_srid: i32) {
if!Path::new(&self.path).exists() {
warn!(
"Layer '{}': Can't open dataset '{}'",
layer.name, &self.path
);
// We continue, because GDAL also supports HTTP adresses
}
let dataset = Dataset::open(Path::new(&self.path));
if let Err(ref err) = dataset {
error!("Layer '{}': Error opening dataset: '{}'", layer.name, err);
return;
}
let dataset = dataset.unwrap();
if layer.table_name.is_none() {
error!("Layer '{}': table_name missing", layer.name);
return;
}
let layer_name = layer.table_name.as_ref().unwrap();
let ogr_layer = dataset.layer_by_name(layer_name);
if ogr_layer.is_err() {
error!(
"Layer '{}': Can't find dataset layer '{}'",
layer.name, layer_name
);
return;
}
let ogr_layer = ogr_layer.unwrap();
let grid_sref = match sref(grid_srid as u32) {
Err(e) => {
error!("Unable to get grid spatial reference: {:?}", e);
return;
}
Ok(sref) => sref,
};
if!layer.no_transform {
let layer_sref = geom_spatialref(&ogr_layer, layer.geometry_field.as_ref());
if let Some(ref sref) = layer_sref {
info!(
"Layer '{}': Reprojecting geometry to SRID {}",
layer.name, grid_srid
);
if CoordTransform::new(sref, &grid_sref).is_err() {
error!(
"Layer '{}': Couldn't setup CoordTransform for reprojecting geometry to SRID {}",
layer.name, grid_srid
);
} else {
// We don't store prepared CoordTransform because CoordTransform is
// not Sync and cannot be shared between threads safely
self.geom_transform
.insert(layer.name.clone(), sref.to_wkt().unwrap());
}
} else {
warn!("Layer '{}': Couldn't detect spatialref", layer.name);
}
}
if layer.simplify {
if layer.geometry_type!= Some("POINT".to_string()) {
warn!(
"Layer '{}': Simplification not supported for GDAL layers",
layer.name
);
}
}
if layer.buffer_size.is_some() {
if layer.geometry_type!= Some("POINT".to_string()) {
warn!(
"Layer '{}': Clipping with buffer_size not supported for GDAL layers",
layer.name
);
}
}
}
fn retrieve_features<F>(
&self,
_tileset: &str,
layer: &Layer,
extent: &Extent,
zoom: u8,
grid: &Grid,
mut read: F,
) -> u64
where
F: FnMut(&dyn Feature),
{
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
let layer_name = layer.table_name.as_ref().unwrap();
debug!("retrieve_features layer: {}", layer_name);
let mut ogr_layer = dataset.layer_by_name(layer_name).unwrap();
let mut bbox_extent = if let Some(pixels) = layer.buffer_size {
let pixel_width = grid.pixel_width(zoom);
let buf = f64::from(pixels) * pixel_width;
Extent {
minx: extent.minx - buf,
miny: extent.miny - buf,
maxx: extent.maxx + buf,
maxy: extent.maxy + buf,
}
} else {
extent.clone()
};
// CoordTransform for features
let mut transformation = None;
if let Some(ref wkt) = self.geom_transform.get(&layer.name) {
let grid_sref = sref(grid.srid as u32).unwrap();
let layer_sref = SpatialRef::from_wkt(wkt).unwrap();
// Spatial filter must be in layer SRS
let bbox_tr = CoordTransform::new(&grid_sref, &layer_sref).unwrap();
match transform_extent_tr(&bbox_extent, &bbox_tr) {
Ok(extent) => bbox_extent = extent,
Err(e) => {
error!("Unable to transform {:?}: {:?}", bbox_extent, e);
return 0;
}
}
transformation = CoordTransform::new(&layer_sref, &grid_sref).ok();
}
let bbox = Geometry::bbox(
bbox_extent.minx,
bbox_extent.miny,
bbox_extent.maxx,
bbox_extent.maxy,
)
.unwrap();
ogr_layer.set_spatial_filter(&bbox);
let ogr_layer_for_defn = dataset.layer_by_name(layer_name).unwrap();
let fields_defn = ogr_layer_for_defn.defn().fields().collect::<Vec<_>>();
let mut cnt = 0;
let query_limit = layer.query_limit.unwrap_or(0);
for feature in ogr_layer.features() {
let feat = VectorFeature {
layer: layer,
fields_defn: &fields_defn,
grid_srid: grid.srid,
transform: transformation.as_ref(),
feature: &feature,
};
read(&feat);
cnt += 1;
if cnt == query_limit as u64 {
info!(
"Features of layer {} limited to {} (tile query_limit reached, zoom level {})",
layer.name, cnt, zoom
);
break;
}
}
cnt
}
}
/// Projected extent
fn transform_extent(
extent: &Extent,
src_srid: i32,
dest_srid: i32,
) -> Result<Extent, gdal::errors::GdalError> {
let sref_in = sref(src_srid as u32)?;
let sref_out = sref(dest_srid as u32)?;
transform_extent_sref(extent, &sref_in, &sref_out)
}
const WKT_WSG84_LON_LAT: &str = r#"GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Lon",EAST],AXIS["Lat",NORTH],AUTHORITY["EPSG","4326"]]"#;
fn sref(srid: u32) -> Result<SpatialRef, gdal::errors::GdalError> {
if srid == 4326 {
// Return WGS84 in traditional GIS axis order
// See https://github.com/OSGeo/gdal/blob/master/gdal/doc/source/development/rfc/rfc73_proj6_wkt2_srsbarn.rst
SpatialRef::from_wkt(WKT_WSG84_LON_LAT)
} else {
SpatialRef::from_epsg(srid)
}
}
/// Projected extent
fn transform_extent_sref(
extent: &Extent,
src_sref: &SpatialRef,
dest_sref: &SpatialRef,
) -> Result<Extent, gdal::errors::GdalError> {
let transform = CoordTransform::new(src_sref, dest_sref)?;
transform_extent_tr(extent, &transform)
}
/// Projected extent
fn transform_extent_tr(
extent: &Extent,
transformation: &CoordTransform,
) -> Result<Extent, gdal::errors::GdalError> {
let xs = &mut [extent.minx, extent.maxx];
let ys = &mut [extent.miny, extent.maxy];
transformation.transform_coords(xs, ys, &mut [0.0, 0.0])?;
Ok(Extent {
minx: *xs.get(0).unwrap(),
miny: *ys.get(0).unwrap(),
maxx: *xs.get(1).unwrap(),
maxy: *ys.get(1).unwrap(),
})
}
impl<'a> Config<'a, DatasourceCfg> for GdalDatasource {
fn from_config(ds_cfg: &DatasourceCfg) -> Result<Self, String> {
Ok(GdalDatasource::new(ds_cfg.path.as_ref().unwrap()))
}
fn gen_config() -> String {
let toml = r#"
[[datasource]]
name = "ds"
# Dataset specification (http://gdal.org/ogr_formats.html)
path = "<filename-or-connection-spec>"
"#;
toml.to_string()
}
fn gen_runtime_config(&self) -> String {
format!(
r#"
[[datasource]]
path = "{}"
"#,
self.path
)
}
}
|
layer_extent
|
identifier_name
|
gdal_ds.rs
|
//
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::gdal_fields::*;
use gdal::spatial_ref::{CoordTransform, SpatialRef};
use gdal::vector::Geometry;
use gdal::Dataset;
use std::collections::BTreeMap;
use std::path::Path;
use t_rex_core::core::config::DatasourceCfg;
use t_rex_core::core::feature::Feature;
use t_rex_core::core::layer::Layer;
use t_rex_core::core::Config;
use t_rex_core::datasource::DatasourceType;
use tile_grid::Extent;
use tile_grid::Grid;
#[derive(Clone)]
pub struct GdalDatasource {
pub path: String,
// We don't store the Dataset, because we need mut access for getting layers
/// SpatialRef WKT for layers which need CoordTransform
geom_transform: BTreeMap<String, String>,
}
impl GdalDatasource {
pub fn new(path: &str) -> GdalDatasource {
GdalDatasource {
path: path.to_string(),
geom_transform: BTreeMap::new(),
}
}
}
impl DatasourceType for GdalDatasource {
/// New instance with connected pool
fn connected(&self) -> GdalDatasource {
GdalDatasource {
path: self.path.clone(),
geom_transform: BTreeMap::new(),
}
}
fn detect_layers(&self, _detect_geometry_types: bool) -> Vec<Layer> {
let mut layers: Vec<Layer> = Vec::new();
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
for gdal_layer in dataset.layers() {
let name = gdal_layer.name();
// Create a layer for each geometry field
for (n, field) in gdal_layer.defn().geom_fields().enumerate() {
let mut layer = Layer::new(&name);
layer.table_name = if n == 0 {
Some(name.clone())
} else {
Some(format!("{}_{}", &name, n))
};
layer.geometry_field = Some(field.name());
layer.geometry_type = geom_type_name(field.field_type());
let srs = field.spatial_ref().unwrap();
if let Ok(epsg) = srs.auth_code() {
layer.srid = Some(epsg)
}
layers.push(layer)
}
}
layers
}
/// Return column field names and Rust compatible type conversion - without geometry column
fn detect_data_columns(&self, _layer: &Layer, _sql: Option<&String>) -> Vec<(String, String)> {
Vec::new() //TODO
}
/// Projected extent
fn reproject_extent(
&self,
extent: &Extent,
dest_srid: i32,
src_srid: Option<i32>,
) -> Option<Extent> {
let ext_srid = src_srid.unwrap_or(4326);
transform_extent(extent, ext_srid, dest_srid).ok()
}
fn layer_extent(&self, layer: &Layer, grid_srid: i32) -> Option<Extent> {
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
let layer_name = layer.table_name.as_ref().unwrap();
let ogr_layer = dataset.layer_by_name(layer_name).unwrap();
let extent = match ogr_layer.get_extent() {
Err(e) => {
warn!("Layer '{}': Unable to get extent: {:?}", layer.name, e);
None
}
Ok(extent) => Some(Extent {
minx: extent.MinX,
miny: extent.MinY,
maxx: extent.MaxX,
maxy: extent.MaxY,
}),
};
let grid_sref = match sref(grid_srid as u32) {
Err(e) => {
error!("Unable to get grid spatial reference: {:?}", e);
return None;
}
Ok(sref) => sref,
};
let layer_sref = geom_spatialref(&ogr_layer, layer.geometry_field.as_ref());
let src_sref = match layer_sref {
Some(ref sref) if!layer.no_transform => sref,
_ => &grid_sref,
};
let wgs84_sref = match sref(4326) {
Err(e) => {
warn!("Unable to get EPSG:4326 spatial reference: {:?}", e);
return None;
}
Ok(sref) => sref,
};
match extent {
Some(extent) => match transform_extent_sref(&extent, src_sref, &wgs84_sref) {
Ok(extent) => Some(extent),
Err(e) => {
error!("Unable to transform {:?}: {:?}", extent, e);
None
}
},
None => None,
}
}
fn prepare_queries(&mut self, _tileset: &str, layer: &Layer, grid_srid: i32) {
if!Path::new(&self.path).exists() {
warn!(
"Layer '{}': Can't open dataset '{}'",
layer.name, &self.path
);
// We continue, because GDAL also supports HTTP adresses
}
let dataset = Dataset::open(Path::new(&self.path));
if let Err(ref err) = dataset {
error!("Layer '{}': Error opening dataset: '{}'", layer.name, err);
return;
}
let dataset = dataset.unwrap();
if layer.table_name.is_none() {
error!("Layer '{}': table_name missing", layer.name);
return;
}
let layer_name = layer.table_name.as_ref().unwrap();
let ogr_layer = dataset.layer_by_name(layer_name);
if ogr_layer.is_err() {
error!(
"Layer '{}': Can't find dataset layer '{}'",
layer.name, layer_name
);
return;
}
let ogr_layer = ogr_layer.unwrap();
let grid_sref = match sref(grid_srid as u32) {
Err(e) => {
error!("Unable to get grid spatial reference: {:?}", e);
return;
}
Ok(sref) => sref,
};
if!layer.no_transform {
let layer_sref = geom_spatialref(&ogr_layer, layer.geometry_field.as_ref());
if let Some(ref sref) = layer_sref {
info!(
"Layer '{}': Reprojecting geometry to SRID {}",
layer.name, grid_srid
);
if CoordTransform::new(sref, &grid_sref).is_err() {
error!(
"Layer '{}': Couldn't setup CoordTransform for reprojecting geometry to SRID {}",
layer.name, grid_srid
);
} else {
// We don't store prepared CoordTransform because CoordTransform is
// not Sync and cannot be shared between threads safely
self.geom_transform
.insert(layer.name.clone(), sref.to_wkt().unwrap());
}
} else {
warn!("Layer '{}': Couldn't detect spatialref", layer.name);
}
}
if layer.simplify {
if layer.geometry_type!= Some("POINT".to_string()) {
warn!(
"Layer '{}': Simplification not supported for GDAL layers",
layer.name
);
}
}
if layer.buffer_size.is_some() {
if layer.geometry_type!= Some("POINT".to_string()) {
warn!(
"Layer '{}': Clipping with buffer_size not supported for GDAL layers",
layer.name
);
}
}
}
fn retrieve_features<F>(
&self,
_tileset: &str,
layer: &Layer,
extent: &Extent,
zoom: u8,
grid: &Grid,
mut read: F,
) -> u64
where
F: FnMut(&dyn Feature),
{
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
let layer_name = layer.table_name.as_ref().unwrap();
debug!("retrieve_features layer: {}", layer_name);
let mut ogr_layer = dataset.layer_by_name(layer_name).unwrap();
let mut bbox_extent = if let Some(pixels) = layer.buffer_size {
let pixel_width = grid.pixel_width(zoom);
let buf = f64::from(pixels) * pixel_width;
Extent {
minx: extent.minx - buf,
miny: extent.miny - buf,
maxx: extent.maxx + buf,
maxy: extent.maxy + buf,
}
} else {
extent.clone()
};
// CoordTransform for features
let mut transformation = None;
if let Some(ref wkt) = self.geom_transform.get(&layer.name) {
let grid_sref = sref(grid.srid as u32).unwrap();
let layer_sref = SpatialRef::from_wkt(wkt).unwrap();
// Spatial filter must be in layer SRS
let bbox_tr = CoordTransform::new(&grid_sref, &layer_sref).unwrap();
match transform_extent_tr(&bbox_extent, &bbox_tr) {
Ok(extent) => bbox_extent = extent,
Err(e) => {
error!("Unable to transform {:?}: {:?}", bbox_extent, e);
return 0;
}
}
transformation = CoordTransform::new(&layer_sref, &grid_sref).ok();
}
let bbox = Geometry::bbox(
bbox_extent.minx,
bbox_extent.miny,
bbox_extent.maxx,
bbox_extent.maxy,
)
.unwrap();
ogr_layer.set_spatial_filter(&bbox);
let ogr_layer_for_defn = dataset.layer_by_name(layer_name).unwrap();
let fields_defn = ogr_layer_for_defn.defn().fields().collect::<Vec<_>>();
let mut cnt = 0;
let query_limit = layer.query_limit.unwrap_or(0);
for feature in ogr_layer.features() {
let feat = VectorFeature {
layer: layer,
fields_defn: &fields_defn,
grid_srid: grid.srid,
transform: transformation.as_ref(),
feature: &feature,
};
read(&feat);
cnt += 1;
if cnt == query_limit as u64 {
info!(
"Features of layer {} limited to {} (tile query_limit reached, zoom level {})",
layer.name, cnt, zoom
);
break;
}
}
cnt
}
}
/// Projected extent
fn transform_extent(
extent: &Extent,
src_srid: i32,
dest_srid: i32,
) -> Result<Extent, gdal::errors::GdalError> {
let sref_in = sref(src_srid as u32)?;
let sref_out = sref(dest_srid as u32)?;
transform_extent_sref(extent, &sref_in, &sref_out)
}
const WKT_WSG84_LON_LAT: &str = r#"GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Lon",EAST],AXIS["Lat",NORTH],AUTHORITY["EPSG","4326"]]"#;
fn sref(srid: u32) -> Result<SpatialRef, gdal::errors::GdalError> {
if srid == 4326 {
// Return WGS84 in traditional GIS axis order
// See https://github.com/OSGeo/gdal/blob/master/gdal/doc/source/development/rfc/rfc73_proj6_wkt2_srsbarn.rst
SpatialRef::from_wkt(WKT_WSG84_LON_LAT)
} else {
SpatialRef::from_epsg(srid)
}
}
/// Projected extent
fn transform_extent_sref(
extent: &Extent,
src_sref: &SpatialRef,
dest_sref: &SpatialRef,
) -> Result<Extent, gdal::errors::GdalError> {
let transform = CoordTransform::new(src_sref, dest_sref)?;
transform_extent_tr(extent, &transform)
}
/// Projected extent
fn transform_extent_tr(
extent: &Extent,
transformation: &CoordTransform,
) -> Result<Extent, gdal::errors::GdalError>
|
impl<'a> Config<'a, DatasourceCfg> for GdalDatasource {
fn from_config(ds_cfg: &DatasourceCfg) -> Result<Self, String> {
Ok(GdalDatasource::new(ds_cfg.path.as_ref().unwrap()))
}
fn gen_config() -> String {
let toml = r#"
[[datasource]]
name = "ds"
# Dataset specification (http://gdal.org/ogr_formats.html)
path = "<filename-or-connection-spec>"
"#;
toml.to_string()
}
fn gen_runtime_config(&self) -> String {
format!(
r#"
[[datasource]]
path = "{}"
"#,
self.path
)
}
}
|
{
let xs = &mut [extent.minx, extent.maxx];
let ys = &mut [extent.miny, extent.maxy];
transformation.transform_coords(xs, ys, &mut [0.0, 0.0])?;
Ok(Extent {
minx: *xs.get(0).unwrap(),
miny: *ys.get(0).unwrap(),
maxx: *xs.get(1).unwrap(),
maxy: *ys.get(1).unwrap(),
})
}
|
identifier_body
|
gdal_ds.rs
|
//
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::gdal_fields::*;
use gdal::spatial_ref::{CoordTransform, SpatialRef};
use gdal::vector::Geometry;
use gdal::Dataset;
use std::collections::BTreeMap;
use std::path::Path;
use t_rex_core::core::config::DatasourceCfg;
use t_rex_core::core::feature::Feature;
use t_rex_core::core::layer::Layer;
use t_rex_core::core::Config;
use t_rex_core::datasource::DatasourceType;
use tile_grid::Extent;
use tile_grid::Grid;
#[derive(Clone)]
pub struct GdalDatasource {
pub path: String,
// We don't store the Dataset, because we need mut access for getting layers
/// SpatialRef WKT for layers which need CoordTransform
geom_transform: BTreeMap<String, String>,
}
impl GdalDatasource {
pub fn new(path: &str) -> GdalDatasource {
GdalDatasource {
path: path.to_string(),
geom_transform: BTreeMap::new(),
}
}
}
impl DatasourceType for GdalDatasource {
/// New instance with connected pool
fn connected(&self) -> GdalDatasource {
GdalDatasource {
path: self.path.clone(),
geom_transform: BTreeMap::new(),
}
}
fn detect_layers(&self, _detect_geometry_types: bool) -> Vec<Layer> {
let mut layers: Vec<Layer> = Vec::new();
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
for gdal_layer in dataset.layers() {
let name = gdal_layer.name();
// Create a layer for each geometry field
for (n, field) in gdal_layer.defn().geom_fields().enumerate() {
let mut layer = Layer::new(&name);
layer.table_name = if n == 0 {
Some(name.clone())
} else {
Some(format!("{}_{}", &name, n))
};
layer.geometry_field = Some(field.name());
layer.geometry_type = geom_type_name(field.field_type());
let srs = field.spatial_ref().unwrap();
if let Ok(epsg) = srs.auth_code() {
layer.srid = Some(epsg)
}
layers.push(layer)
}
}
layers
}
/// Return column field names and Rust compatible type conversion - without geometry column
fn detect_data_columns(&self, _layer: &Layer, _sql: Option<&String>) -> Vec<(String, String)> {
Vec::new() //TODO
}
/// Projected extent
fn reproject_extent(
&self,
extent: &Extent,
dest_srid: i32,
src_srid: Option<i32>,
) -> Option<Extent> {
let ext_srid = src_srid.unwrap_or(4326);
transform_extent(extent, ext_srid, dest_srid).ok()
}
fn layer_extent(&self, layer: &Layer, grid_srid: i32) -> Option<Extent> {
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
let layer_name = layer.table_name.as_ref().unwrap();
let ogr_layer = dataset.layer_by_name(layer_name).unwrap();
let extent = match ogr_layer.get_extent() {
Err(e) => {
warn!("Layer '{}': Unable to get extent: {:?}", layer.name, e);
None
}
Ok(extent) => Some(Extent {
minx: extent.MinX,
miny: extent.MinY,
maxx: extent.MaxX,
maxy: extent.MaxY,
}),
};
let grid_sref = match sref(grid_srid as u32) {
Err(e) => {
error!("Unable to get grid spatial reference: {:?}", e);
return None;
}
Ok(sref) => sref,
};
let layer_sref = geom_spatialref(&ogr_layer, layer.geometry_field.as_ref());
let src_sref = match layer_sref {
Some(ref sref) if!layer.no_transform => sref,
_ => &grid_sref,
};
let wgs84_sref = match sref(4326) {
Err(e) => {
warn!("Unable to get EPSG:4326 spatial reference: {:?}", e);
return None;
}
Ok(sref) => sref,
};
match extent {
Some(extent) => match transform_extent_sref(&extent, src_sref, &wgs84_sref) {
Ok(extent) => Some(extent),
Err(e) => {
error!("Unable to transform {:?}: {:?}", extent, e);
None
}
},
None => None,
}
}
fn prepare_queries(&mut self, _tileset: &str, layer: &Layer, grid_srid: i32) {
if!Path::new(&self.path).exists() {
warn!(
"Layer '{}': Can't open dataset '{}'",
layer.name, &self.path
);
// We continue, because GDAL also supports HTTP adresses
}
let dataset = Dataset::open(Path::new(&self.path));
if let Err(ref err) = dataset {
error!("Layer '{}': Error opening dataset: '{}'", layer.name, err);
return;
}
let dataset = dataset.unwrap();
if layer.table_name.is_none() {
error!("Layer '{}': table_name missing", layer.name);
return;
}
let layer_name = layer.table_name.as_ref().unwrap();
let ogr_layer = dataset.layer_by_name(layer_name);
if ogr_layer.is_err() {
error!(
"Layer '{}': Can't find dataset layer '{}'",
layer.name, layer_name
);
return;
}
let ogr_layer = ogr_layer.unwrap();
let grid_sref = match sref(grid_srid as u32) {
Err(e) => {
error!("Unable to get grid spatial reference: {:?}", e);
return;
}
Ok(sref) => sref,
};
if!layer.no_transform {
let layer_sref = geom_spatialref(&ogr_layer, layer.geometry_field.as_ref());
if let Some(ref sref) = layer_sref {
info!(
"Layer '{}': Reprojecting geometry to SRID {}",
layer.name, grid_srid
);
if CoordTransform::new(sref, &grid_sref).is_err() {
error!(
"Layer '{}': Couldn't setup CoordTransform for reprojecting geometry to SRID {}",
layer.name, grid_srid
);
} else {
// We don't store prepared CoordTransform because CoordTransform is
// not Sync and cannot be shared between threads safely
self.geom_transform
.insert(layer.name.clone(), sref.to_wkt().unwrap());
|
}
if layer.simplify {
if layer.geometry_type!= Some("POINT".to_string()) {
warn!(
"Layer '{}': Simplification not supported for GDAL layers",
layer.name
);
}
}
if layer.buffer_size.is_some() {
if layer.geometry_type!= Some("POINT".to_string()) {
warn!(
"Layer '{}': Clipping with buffer_size not supported for GDAL layers",
layer.name
);
}
}
}
fn retrieve_features<F>(
&self,
_tileset: &str,
layer: &Layer,
extent: &Extent,
zoom: u8,
grid: &Grid,
mut read: F,
) -> u64
where
F: FnMut(&dyn Feature),
{
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
let layer_name = layer.table_name.as_ref().unwrap();
debug!("retrieve_features layer: {}", layer_name);
let mut ogr_layer = dataset.layer_by_name(layer_name).unwrap();
let mut bbox_extent = if let Some(pixels) = layer.buffer_size {
let pixel_width = grid.pixel_width(zoom);
let buf = f64::from(pixels) * pixel_width;
Extent {
minx: extent.minx - buf,
miny: extent.miny - buf,
maxx: extent.maxx + buf,
maxy: extent.maxy + buf,
}
} else {
extent.clone()
};
// CoordTransform for features
let mut transformation = None;
if let Some(ref wkt) = self.geom_transform.get(&layer.name) {
let grid_sref = sref(grid.srid as u32).unwrap();
let layer_sref = SpatialRef::from_wkt(wkt).unwrap();
// Spatial filter must be in layer SRS
let bbox_tr = CoordTransform::new(&grid_sref, &layer_sref).unwrap();
match transform_extent_tr(&bbox_extent, &bbox_tr) {
Ok(extent) => bbox_extent = extent,
Err(e) => {
error!("Unable to transform {:?}: {:?}", bbox_extent, e);
return 0;
}
}
transformation = CoordTransform::new(&layer_sref, &grid_sref).ok();
}
let bbox = Geometry::bbox(
bbox_extent.minx,
bbox_extent.miny,
bbox_extent.maxx,
bbox_extent.maxy,
)
.unwrap();
ogr_layer.set_spatial_filter(&bbox);
let ogr_layer_for_defn = dataset.layer_by_name(layer_name).unwrap();
let fields_defn = ogr_layer_for_defn.defn().fields().collect::<Vec<_>>();
let mut cnt = 0;
let query_limit = layer.query_limit.unwrap_or(0);
for feature in ogr_layer.features() {
let feat = VectorFeature {
layer: layer,
fields_defn: &fields_defn,
grid_srid: grid.srid,
transform: transformation.as_ref(),
feature: &feature,
};
read(&feat);
cnt += 1;
if cnt == query_limit as u64 {
info!(
"Features of layer {} limited to {} (tile query_limit reached, zoom level {})",
layer.name, cnt, zoom
);
break;
}
}
cnt
}
}
/// Projected extent
fn transform_extent(
extent: &Extent,
src_srid: i32,
dest_srid: i32,
) -> Result<Extent, gdal::errors::GdalError> {
let sref_in = sref(src_srid as u32)?;
let sref_out = sref(dest_srid as u32)?;
transform_extent_sref(extent, &sref_in, &sref_out)
}
const WKT_WSG84_LON_LAT: &str = r#"GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Lon",EAST],AXIS["Lat",NORTH],AUTHORITY["EPSG","4326"]]"#;
fn sref(srid: u32) -> Result<SpatialRef, gdal::errors::GdalError> {
if srid == 4326 {
// Return WGS84 in traditional GIS axis order
// See https://github.com/OSGeo/gdal/blob/master/gdal/doc/source/development/rfc/rfc73_proj6_wkt2_srsbarn.rst
SpatialRef::from_wkt(WKT_WSG84_LON_LAT)
} else {
SpatialRef::from_epsg(srid)
}
}
/// Projected extent
fn transform_extent_sref(
extent: &Extent,
src_sref: &SpatialRef,
dest_sref: &SpatialRef,
) -> Result<Extent, gdal::errors::GdalError> {
let transform = CoordTransform::new(src_sref, dest_sref)?;
transform_extent_tr(extent, &transform)
}
/// Projected extent
fn transform_extent_tr(
extent: &Extent,
transformation: &CoordTransform,
) -> Result<Extent, gdal::errors::GdalError> {
let xs = &mut [extent.minx, extent.maxx];
let ys = &mut [extent.miny, extent.maxy];
transformation.transform_coords(xs, ys, &mut [0.0, 0.0])?;
Ok(Extent {
minx: *xs.get(0).unwrap(),
miny: *ys.get(0).unwrap(),
maxx: *xs.get(1).unwrap(),
maxy: *ys.get(1).unwrap(),
})
}
impl<'a> Config<'a, DatasourceCfg> for GdalDatasource {
fn from_config(ds_cfg: &DatasourceCfg) -> Result<Self, String> {
Ok(GdalDatasource::new(ds_cfg.path.as_ref().unwrap()))
}
fn gen_config() -> String {
let toml = r#"
[[datasource]]
name = "ds"
# Dataset specification (http://gdal.org/ogr_formats.html)
path = "<filename-or-connection-spec>"
"#;
toml.to_string()
}
fn gen_runtime_config(&self) -> String {
format!(
r#"
[[datasource]]
path = "{}"
"#,
self.path
)
}
}
|
}
} else {
warn!("Layer '{}': Couldn't detect spatialref", layer.name);
}
|
random_line_split
|
gdal_ds.rs
|
//
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::gdal_fields::*;
use gdal::spatial_ref::{CoordTransform, SpatialRef};
use gdal::vector::Geometry;
use gdal::Dataset;
use std::collections::BTreeMap;
use std::path::Path;
use t_rex_core::core::config::DatasourceCfg;
use t_rex_core::core::feature::Feature;
use t_rex_core::core::layer::Layer;
use t_rex_core::core::Config;
use t_rex_core::datasource::DatasourceType;
use tile_grid::Extent;
use tile_grid::Grid;
#[derive(Clone)]
/// Vector datasource backed by a GDAL/OGR dataset.
pub struct GdalDatasource {
// Filesystem path or GDAL connection string of the dataset.
pub path: String,
// We don't store the Dataset, because we need mut access for getting layers
/// SpatialRef WKT for layers which need CoordTransform
// Keyed by layer name; populated by `prepare_queries`.
geom_transform: BTreeMap<String, String>,
}
impl GdalDatasource {
    /// Create a datasource for the given dataset path or connection string.
    /// The reprojection cache starts empty and is filled by `prepare_queries`.
    pub fn new(path: &str) -> GdalDatasource {
        let geom_transform = BTreeMap::new();
        GdalDatasource {
            path: path.to_owned(),
            geom_transform,
        }
    }
}
impl DatasourceType for GdalDatasource {
/// New instance with connected pool
// For GDAL there is no real connection pool: a fresh instance with an empty
// transform cache is returned; datasets are (re-)opened per request.
fn connected(&self) -> GdalDatasource {
GdalDatasource {
path: self.path.clone(),
geom_transform: BTreeMap::new(),
}
}
/// Auto-detect the vector layers of the dataset: one `Layer` per geometry
/// field, where additional geometry fields get a `_<n>` table-name suffix.
fn detect_layers(&self, _detect_geometry_types: bool) -> Vec<Layer> {
    let dataset = Dataset::open(Path::new(&self.path)).unwrap();
    let mut detected: Vec<Layer> = Vec::new();
    for ogr_layer in dataset.layers() {
        let layer_name = ogr_layer.name();
        // Emit one t-rex layer for every geometry column of the OGR layer.
        for (idx, geom_field) in ogr_layer.defn().geom_fields().enumerate() {
            let mut layer = Layer::new(&layer_name);
            layer.table_name = Some(match idx {
                0 => layer_name.clone(),
                n => format!("{}_{}", &layer_name, n),
            });
            layer.geometry_field = Some(geom_field.name());
            layer.geometry_type = geom_type_name(geom_field.field_type());
            let srs = geom_field.spatial_ref().unwrap();
            if let Ok(epsg) = srs.auth_code() {
                layer.srid = Some(epsg);
            }
            detected.push(layer);
        }
    }
    detected
}
/// Return column field names and Rust compatible type conversion - without geometry column
// Not implemented for the GDAL backend yet; callers get an empty column list.
fn detect_data_columns(&self, _layer: &Layer, _sql: Option<&String>) -> Vec<(String, String)> {
Vec::new() //TODO
}
/// Reproject `extent` into `dest_srid`. The source SRID defaults to
/// EPSG:4326 when not given; `None` is returned if the transform fails.
fn reproject_extent(
    &self,
    extent: &Extent,
    dest_srid: i32,
    src_srid: Option<i32>,
) -> Option<Extent> {
    transform_extent(extent, src_srid.unwrap_or(4326), dest_srid).ok()
}
/// Extent of `layer` in WGS84 lon/lat (EPSG:4326), derived from the OGR
/// layer extent and the layer's spatial reference.
/// Returns `None` when the extent or a needed spatial reference is unavailable.
// NOTE(review): `grid_srid` is used only as the fallback *source* SRS when
// the layer SRS is undetectable or `no_transform` is set — confirm intended.
fn layer_extent(&self, layer: &Layer, grid_srid: i32) -> Option<Extent> {
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
let layer_name = layer.table_name.as_ref().unwrap();
let ogr_layer = dataset.layer_by_name(layer_name).unwrap();
// Raw extent in the layer's own CRS (None if OGR cannot compute one).
let extent = match ogr_layer.get_extent() {
Err(e) => {
warn!("Layer '{}': Unable to get extent: {:?}", layer.name, e);
None
}
Ok(extent) => Some(Extent {
minx: extent.MinX,
miny: extent.MinY,
maxx: extent.MaxX,
maxy: extent.MaxY,
}),
};
let grid_sref = match sref(grid_srid as u32) {
Err(e) => {
error!("Unable to get grid spatial reference: {:?}", e);
return None;
}
Ok(sref) => sref,
};
// Source SRS: the layer's own SRS, unless reprojection is disabled.
let layer_sref = geom_spatialref(&ogr_layer, layer.geometry_field.as_ref());
let src_sref = match layer_sref {
Some(ref sref) if!layer.no_transform => sref,
_ => &grid_sref,
};
let wgs84_sref = match sref(4326) {
Err(e) => {
warn!("Unable to get EPSG:4326 spatial reference: {:?}", e);
return None;
}
Ok(sref) => sref,
};
// Reproject to WGS84; log (not propagate) any transform failure.
match extent {
Some(extent) => match transform_extent_sref(&extent, src_sref, &wgs84_sref) {
Ok(extent) => Some(extent),
Err(e) => {
error!("Unable to transform {:?}: {:?}", extent, e);
None
}
},
None => None,
}
}
fn prepare_queries(&mut self, _tileset: &str, layer: &Layer, grid_srid: i32) {
if!Path::new(&self.path).exists() {
warn!(
"Layer '{}': Can't open dataset '{}'",
layer.name, &self.path
);
// We continue, because GDAL also supports HTTP adresses
}
let dataset = Dataset::open(Path::new(&self.path));
if let Err(ref err) = dataset {
error!("Layer '{}': Error opening dataset: '{}'", layer.name, err);
return;
}
let dataset = dataset.unwrap();
if layer.table_name.is_none() {
error!("Layer '{}': table_name missing", layer.name);
return;
}
let layer_name = layer.table_name.as_ref().unwrap();
let ogr_layer = dataset.layer_by_name(layer_name);
if ogr_layer.is_err() {
error!(
"Layer '{}': Can't find dataset layer '{}'",
layer.name, layer_name
);
return;
}
let ogr_layer = ogr_layer.unwrap();
let grid_sref = match sref(grid_srid as u32) {
Err(e) => {
error!("Unable to get grid spatial reference: {:?}", e);
return;
}
Ok(sref) => sref,
};
if!layer.no_transform {
let layer_sref = geom_spatialref(&ogr_layer, layer.geometry_field.as_ref());
if let Some(ref sref) = layer_sref {
info!(
"Layer '{}': Reprojecting geometry to SRID {}",
layer.name, grid_srid
);
if CoordTransform::new(sref, &grid_sref).is_err() {
error!(
"Layer '{}': Couldn't setup CoordTransform for reprojecting geometry to SRID {}",
layer.name, grid_srid
);
} else {
// We don't store prepared CoordTransform because CoordTransform is
// not Sync and cannot be shared between threads safely
self.geom_transform
.insert(layer.name.clone(), sref.to_wkt().unwrap());
}
} else
|
}
if layer.simplify {
if layer.geometry_type!= Some("POINT".to_string()) {
warn!(
"Layer '{}': Simplification not supported for GDAL layers",
layer.name
);
}
}
if layer.buffer_size.is_some() {
if layer.geometry_type!= Some("POINT".to_string()) {
warn!(
"Layer '{}': Clipping with buffer_size not supported for GDAL layers",
layer.name
);
}
}
}
/// Read all features of `layer` intersecting `extent` at `zoom` and hand
/// each one to the `read` callback; returns the number of features read.
/// Geometries are reprojected to the grid SRS when a transform was cached
/// by `prepare_queries`; the spatial filter is expressed in the layer SRS.
fn retrieve_features<F>(
&self,
_tileset: &str,
layer: &Layer,
extent: &Extent,
zoom: u8,
grid: &Grid,
mut read: F,
) -> u64
where
F: FnMut(&dyn Feature),
{
let dataset = Dataset::open(Path::new(&self.path)).unwrap();
let layer_name = layer.table_name.as_ref().unwrap();
debug!("retrieve_features layer: {}", layer_name);
let mut ogr_layer = dataset.layer_by_name(layer_name).unwrap();
// Grow the query bbox by `buffer_size` pixels (converted to grid units).
let mut bbox_extent = if let Some(pixels) = layer.buffer_size {
let pixel_width = grid.pixel_width(zoom);
let buf = f64::from(pixels) * pixel_width;
Extent {
minx: extent.minx - buf,
miny: extent.miny - buf,
maxx: extent.maxx + buf,
maxy: extent.maxy + buf,
}
} else {
extent.clone()
};
// CoordTransform for features
let mut transformation = None;
if let Some(ref wkt) = self.geom_transform.get(&layer.name) {
let grid_sref = sref(grid.srid as u32).unwrap();
let layer_sref = SpatialRef::from_wkt(wkt).unwrap();
// Spatial filter must be in layer SRS
let bbox_tr = CoordTransform::new(&grid_sref, &layer_sref).unwrap();
match transform_extent_tr(&bbox_extent, &bbox_tr) {
Ok(extent) => bbox_extent = extent,
Err(e) => {
error!("Unable to transform {:?}: {:?}", bbox_extent, e);
return 0;
}
}
// Inverse direction (layer SRS -> grid SRS), applied per feature.
transformation = CoordTransform::new(&layer_sref, &grid_sref).ok();
}
let bbox = Geometry::bbox(
bbox_extent.minx,
bbox_extent.miny,
bbox_extent.maxx,
bbox_extent.maxy,
)
.unwrap();
ogr_layer.set_spatial_filter(&bbox);
// Second layer handle only for the field definitions, so the feature
// iteration below keeps exclusive use of `ogr_layer`.
let ogr_layer_for_defn = dataset.layer_by_name(layer_name).unwrap();
let fields_defn = ogr_layer_for_defn.defn().fields().collect::<Vec<_>>();
let mut cnt = 0;
// query_limit 0 means "unlimited": cnt is incremented before the check,
// so it can never equal 0.
let query_limit = layer.query_limit.unwrap_or(0);
for feature in ogr_layer.features() {
let feat = VectorFeature {
layer: layer,
fields_defn: &fields_defn,
grid_srid: grid.srid,
transform: transformation.as_ref(),
feature: &feature,
};
read(&feat);
cnt += 1;
if cnt == query_limit as u64 {
info!(
"Features of layer {} limited to {} (tile query_limit reached, zoom level {})",
layer.name, cnt, zoom
);
break;
}
}
cnt
}
}
/// Reproject `extent` between two EPSG codes.
fn transform_extent(
    extent: &Extent,
    src_srid: i32,
    dest_srid: i32,
) -> Result<Extent, gdal::errors::GdalError> {
    let src = sref(src_srid as u32)?;
    let dest = sref(dest_srid as u32)?;
    transform_extent_sref(extent, &src, &dest)
}
// WKT for WGS84 with explicit lon/lat (easting/northing) axis order.
const WKT_WSG84_LON_LAT: &str = r#"GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Lon",EAST],AXIS["Lat",NORTH],AUTHORITY["EPSG","4326"]]"#;
/// Build a GDAL `SpatialRef` for an EPSG code.
/// EPSG:4326 is special-cased to traditional lon/lat axis order via the WKT
/// constant above, since GDAL 3 / PROJ 6 would otherwise use lat/lon order.
fn sref(srid: u32) -> Result<SpatialRef, gdal::errors::GdalError> {
if srid == 4326 {
// Return WGS84 in traditional GIS axis order
// See https://github.com/OSGeo/gdal/blob/master/gdal/doc/source/development/rfc/rfc73_proj6_wkt2_srsbarn.rst
SpatialRef::from_wkt(WKT_WSG84_LON_LAT)
} else {
SpatialRef::from_epsg(srid)
}
}
/// Reproject `extent` from `src_sref` into `dest_sref` via a one-shot
/// `CoordTransform`.
fn transform_extent_sref(
    extent: &Extent,
    src_sref: &SpatialRef,
    dest_sref: &SpatialRef,
) -> Result<Extent, gdal::errors::GdalError> {
    transform_extent_tr(extent, &CoordTransform::new(src_sref, dest_sref)?)
}
/// Reproject `extent` with a prepared `transformation`.
///
/// All four corners of the bounding box are transformed (the original code
/// used only the two min/max corners, which produces wrong — possibly
/// inverted — extents for axis-swapping or rotating transformations).
/// The result is the axis-aligned bounding box of the projected corners.
fn transform_extent_tr(
    extent: &Extent,
    transformation: &CoordTransform,
) -> Result<Extent, gdal::errors::GdalError> {
    let xs = &mut [extent.minx, extent.maxx, extent.minx, extent.maxx];
    let ys = &mut [extent.miny, extent.maxy, extent.maxy, extent.miny];
    // Third slice: z coordinates (unused here, required by the API).
    transformation.transform_coords(xs, ys, &mut [0.0; 4])?;
    Ok(Extent {
        minx: xs.iter().cloned().fold(f64::INFINITY, f64::min),
        miny: ys.iter().cloned().fold(f64::INFINITY, f64::min),
        maxx: xs.iter().cloned().fold(f64::NEG_INFINITY, f64::max),
        maxy: ys.iter().cloned().fold(f64::NEG_INFINITY, f64::max),
    })
}
impl<'a> Config<'a, DatasourceCfg> for GdalDatasource {
    /// Builds a datasource from its TOML config entry (`path` is required;
    /// a missing value panics, as configuration is validated upstream).
    fn from_config(ds_cfg: &DatasourceCfg) -> Result<Self, String> {
        let path = ds_cfg.path.as_ref().unwrap();
        Ok(GdalDatasource::new(path))
    }
    /// Template TOML snippet for a new GDAL datasource.
    fn gen_config() -> String {
        r#"
[[datasource]]
name = "ds"
# Dataset specification (http://gdal.org/ogr_formats.html)
path = "<filename-or-connection-spec>"
"#
        .to_string()
    }
    /// TOML snippet reflecting this instance's runtime configuration.
    fn gen_runtime_config(&self) -> String {
        format!(
            r#"
[[datasource]]
path = "{}"
"#,
            self.path
        )
    }
}
|
{
warn!("Layer '{}': Couldn't detect spatialref", layer.name);
}
|
conditional_block
|
enum-headings.rs
|
#![crate_name = "foo"]
// @has foo/enum.Token.html
/// A token!
/// # First
/// Some following text...
|
// @has - '//h4[@id="variant-first"]' "Variant-First"
Declaration {
/// A version!
/// # Variant-Field-First
/// Some following text...
// @has - '//h5[@id="variant-field-first"]' "Variant-Field-First"
version: String,
},
/// A Zoople!
/// # Variant-First
Zoople(
// @has - '//h5[@id="variant-tuple-field-first"]' "Variant-Tuple-Field-First"
/// Zoople's first variant!
/// # Variant-Tuple-Field-First
/// Some following text...
usize,
),
/// Unfinished business!
/// # Non-Exhaustive-First
/// Some following text...
// @has - '//h4[@id="non-exhaustive-first"]' "Non-Exhaustive-First"
#[non_exhaustive]
Unfinished {
/// This is x.
/// # X-First
/// Some following text...
// @has - '//h5[@id="x-first"]' "X-First"
x: usize,
},
}
|
// @has - '//h2[@id="first"]' "First"
pub enum Token {
/// A declaration!
/// # Variant-First
/// Some following text...
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.