file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
solver060.rs
|
// COPYRIGHT (C) 2017 barreiro. All Rights Reserved.
// Rust solvers for Project Euler problems
use std::collections::HashMap;
use euler::algorithm::long::{concatenation, pow_10, square};
use euler::algorithm::prime::{generator_wheel, miller_rabin, prime_sieve};
use euler::Solver;
// The primes 3, 7, 109, and 673, are quite remarkable. By taking any two primes and concatenating them in any order the result will always be prime.
// For example, taking 7 and 109, both 7109 and 1097 are prime. The sum of these four primes, 792, represents the lowest sum for a set of four primes with this property.
//
// Find the lowest sum for a set of five primes for which any two primes concatenate to produce another prime.
pub struct Solver060 {
pub n: isize
}
impl Default for Solver060 {
fn default() -> Self {
Solver060 { n: 5 }
}
}
impl Solver for Solver060 {
fn
|
(&self) -> isize {
let (mut set, primes) = (vec![], generator_wheel().take_while(|&p| p < pow_10(self.n - 1)).collect::<Vec<_>>());
add_prime_to_set(&mut set, self.n as _, &primes, &mut HashMap::new());
set.iter().sum()
}
}
fn add_prime_to_set<'a>(set: &mut Vec<isize>, size: usize, primes: &'a [isize], cache: &mut HashMap<isize, Vec<&'a isize>>) -> bool {
let last_prime = *primes.last().unwrap();
let is_prime = |c| if c < last_prime {
primes.binary_search(&c).is_ok()
} else if c < square(last_prime) {
prime_sieve(c, primes)
} else {
miller_rabin(c)
};
let concatenation_list = |p| primes.iter().filter(|&&prime| prime > p && is_prime(concatenation(p, prime)) && is_prime(concatenation(prime, p))).collect::<Vec<_>>();
// Memoization of the prime concatenations for a 25% speedup, despite increasing code complexity significantly
set.last().iter().for_each(|&&p| { cache.entry(p).or_insert_with(|| concatenation_list(p)); });
// Closure that takes an element of the set and does the intersection with the concatenations of other elements.
// The outcome is the primes that form concatenations with all elements of the set. From there, try to increase the size of the set by recursion.
let candidates = |p| cache.get(p).unwrap().iter().filter(|&c| set.iter().all(|&s| s == *p || cache.get(&s).unwrap().binary_search(c).is_ok())).map(|&&s| s).collect();
set.last().map_or(primes.to_vec(), candidates).iter().any(|&c| {
set.push(c);
if set.len() >= size || add_prime_to_set(set, size, primes, cache) {
true
} else {
set.pop();
false
}
})
}
|
solve
|
identifier_name
|
solver060.rs
|
// COPYRIGHT (C) 2017 barreiro. All Rights Reserved.
// Rust solvers for Project Euler problems
use std::collections::HashMap;
use euler::algorithm::long::{concatenation, pow_10, square};
use euler::algorithm::prime::{generator_wheel, miller_rabin, prime_sieve};
use euler::Solver;
// The primes 3, 7, 109, and 673, are quite remarkable. By taking any two primes and concatenating them in any order the result will always be prime.
// For example, taking 7 and 109, both 7109 and 1097 are prime. The sum of these four primes, 792, represents the lowest sum for a set of four primes with this property.
//
// Find the lowest sum for a set of five primes for which any two primes concatenate to produce another prime.
pub struct Solver060 {
pub n: isize
}
impl Default for Solver060 {
fn default() -> Self {
Solver060 { n: 5 }
}
}
impl Solver for Solver060 {
fn solve(&self) -> isize {
let (mut set, primes) = (vec![], generator_wheel().take_while(|&p| p < pow_10(self.n - 1)).collect::<Vec<_>>());
add_prime_to_set(&mut set, self.n as _, &primes, &mut HashMap::new());
set.iter().sum()
}
}
fn add_prime_to_set<'a>(set: &mut Vec<isize>, size: usize, primes: &'a [isize], cache: &mut HashMap<isize, Vec<&'a isize>>) -> bool {
let last_prime = *primes.last().unwrap();
let is_prime = |c| if c < last_prime {
primes.binary_search(&c).is_ok()
} else if c < square(last_prime) {
prime_sieve(c, primes)
} else {
miller_rabin(c)
};
let concatenation_list = |p| primes.iter().filter(|&&prime| prime > p && is_prime(concatenation(p, prime)) && is_prime(concatenation(prime, p))).collect::<Vec<_>>();
|
// Closure that takes an element of the set and does the intersection with the concatenations of other elements.
// The outcome is the primes that form concatenations with all elements of the set. From there, try to increase the size of the set by recursion.
let candidates = |p| cache.get(p).unwrap().iter().filter(|&c| set.iter().all(|&s| s == *p || cache.get(&s).unwrap().binary_search(c).is_ok())).map(|&&s| s).collect();
set.last().map_or(primes.to_vec(), candidates).iter().any(|&c| {
set.push(c);
if set.len() >= size || add_prime_to_set(set, size, primes, cache) {
true
} else {
set.pop();
false
}
})
}
|
// Memoization of the prime concatenations for a 25% speedup, despite increasing code complexity significantly
set.last().iter().for_each(|&&p| { cache.entry(p).or_insert_with(|| concatenation_list(p)); });
|
random_line_split
|
solver060.rs
|
// COPYRIGHT (C) 2017 barreiro. All Rights Reserved.
// Rust solvers for Project Euler problems
use std::collections::HashMap;
use euler::algorithm::long::{concatenation, pow_10, square};
use euler::algorithm::prime::{generator_wheel, miller_rabin, prime_sieve};
use euler::Solver;
// The primes 3, 7, 109, and 673, are quite remarkable. By taking any two primes and concatenating them in any order the result will always be prime.
// For example, taking 7 and 109, both 7109 and 1097 are prime. The sum of these four primes, 792, represents the lowest sum for a set of four primes with this property.
//
// Find the lowest sum for a set of five primes for which any two primes concatenate to produce another prime.
pub struct Solver060 {
pub n: isize
}
impl Default for Solver060 {
fn default() -> Self {
Solver060 { n: 5 }
}
}
impl Solver for Solver060 {
fn solve(&self) -> isize {
let (mut set, primes) = (vec![], generator_wheel().take_while(|&p| p < pow_10(self.n - 1)).collect::<Vec<_>>());
add_prime_to_set(&mut set, self.n as _, &primes, &mut HashMap::new());
set.iter().sum()
}
}
fn add_prime_to_set<'a>(set: &mut Vec<isize>, size: usize, primes: &'a [isize], cache: &mut HashMap<isize, Vec<&'a isize>>) -> bool {
let last_prime = *primes.last().unwrap();
let is_prime = |c| if c < last_prime {
primes.binary_search(&c).is_ok()
} else if c < square(last_prime) {
prime_sieve(c, primes)
} else {
miller_rabin(c)
};
let concatenation_list = |p| primes.iter().filter(|&&prime| prime > p && is_prime(concatenation(p, prime)) && is_prime(concatenation(prime, p))).collect::<Vec<_>>();
// Memoization of the prime concatenations for a 25% speedup, despite increasing code complexity significantly
set.last().iter().for_each(|&&p| { cache.entry(p).or_insert_with(|| concatenation_list(p)); });
// Closure that takes an element of the set and does the intersection with the concatenations of other elements.
// The outcome is the primes that form concatenations with all elements of the set. From there, try to increase the size of the set by recursion.
let candidates = |p| cache.get(p).unwrap().iter().filter(|&c| set.iter().all(|&s| s == *p || cache.get(&s).unwrap().binary_search(c).is_ok())).map(|&&s| s).collect();
set.last().map_or(primes.to_vec(), candidates).iter().any(|&c| {
set.push(c);
if set.len() >= size || add_prime_to_set(set, size, primes, cache) {
true
} else
|
})
}
|
{
set.pop();
false
}
|
conditional_block
|
extractor.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with
// this file, You can obtain one at https://mozilla.org/MPL/2.0/.
use message::Message;
use error::{Error, Result};
use common::{find_string, escape_string};
use std::collections::HashMap;
use std::path::Path;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use regex::Regex;
use walkdir::WalkDir;
/// Struct that extracts all messages from source code and can print them
/// to a `.pot` file.
///
/// This file can then be used as a starting point to begin translation.
/// It should be relatively similar to `gettext` generated files.
///
/// # Example
///
/// ```
/// use crowbook_intl::Extractor;
/// let mut extractor = Extractor::new();
/// extractor.add_messages_from_dir("src/").unwrap();
/// println!("{}", extractor.generate_pot_file());
/// ```
///
/// # Note
///
/// This struct only add messages that are considered as needing localization,
/// that is, the first argument of calls so `lformat!` macro.
#[derive(Debug, Clone)]
pub struct Extractor {
messages: HashMap<String, Message>,
// Matches the format string (as used by `lformat!` and the actual escaped string
// given to potfile
orig_strings: HashMap<String, String>,
}
impl Extractor {
/// Create a new, empty extractor
pub fn
|
() -> Extractor {
Extractor {
messages: HashMap::new(),
orig_strings: HashMap::new(),
}
}
/// Returns a hashmap mapping the original strings (as used by `lformat!`)
/// to escaped strings. Only contains strings that are different and
/// must thus be handled.
pub fn original_strings<'a>(&'a self) -> &'a HashMap<String, String> {
&self.orig_strings
}
/// Add all the messages contained in a source file
pub fn add_messages_from_file<P: AsRef<Path>>(&mut self, file: P) -> Result<()> {
lazy_static! {
static ref REMOVE_COMMS: Regex = Regex::new(r#"//[^\n]*"#).unwrap();
static ref FIND_MSGS: Regex = Regex::new(r#"lformat!\("#).unwrap();
}
let filename = format!("{}", file.as_ref().display());
let mut f = try!(File::open(file)
.map_err(|e| Error::parse(format!("could not open file {}: {}",
&filename,
e))));
let mut content = String::new();
try!(f.read_to_string(&mut content)
.map_err(|e| Error::parse(format!("could not read file {}: {}",
&filename,
e))));
content = REMOVE_COMMS.replace_all(&content, "").into_owned();
for caps in FIND_MSGS.captures_iter(&content) {
let pos = caps.get(0).unwrap().end();
let line = 1 + &content[..pos].bytes().filter(|b| b == &b'\n').count();
let bytes = content[pos..].as_bytes();
let orig_msg: String = try!(find_string(bytes)
.map_err(|_| Error::parse(format!("{}:{}: could not parse as string",
&filename,
line))));
let msg = escape_string(orig_msg.as_str()).into_owned();
if msg!= orig_msg {
self.orig_strings.insert(orig_msg, msg.clone());
}
if self.messages.contains_key(msg.as_str()) {
self.messages.get_mut(&msg).unwrap().add_source(filename.as_str(), line);
} else {
let mut message = Message::new(msg.as_str());
message.add_source(filename.as_str(), line);
self.messages.insert(msg, message);
}
}
Ok(())
}
/// Add messages from all `.rs` files contained in a directory
/// (walks through subdirectories)
pub fn add_messages_from_dir<P: AsRef<Path>>(&mut self, dir: P) -> Result<()> {
let filtered = WalkDir::new(dir)
.into_iter()
.filter_map(|e| e.ok())
.map(|e| e.path()
.to_string_lossy()
.into_owned())
.filter(|s| s.ends_with(".rs"));
for filename in filtered {
try!(self.add_messages_from_file(&filename));
}
Ok(())
}
/// Generate a pot-like file from the strings extracted from all files (if any)
pub fn generate_pot_file(&self) -> String {
let mut output = String::from(POT_HEADER);
let mut values = self.messages
.values()
.collect::<Vec<_>>();
values.sort();
for value in values {
output.push_str(&format!("{}", value));
}
output
}
/// Write a pot-like file to specified location
pub fn write_pot_file(&mut self, file: &str) -> Result<()> {
let mut f = try!(File::create(file).map_err(|e| Error::new(format!("Could not create file {}: {}",
file, e))));
let content = self.generate_pot_file();
try!(f.write_all(content.as_bytes())
.map_err(|e| Error::new(format!("Could not write to file {}: {}",
file, e))));
Ok(())
}
}
const POT_HEADER: &'static str = r#"# SOME DESCRIPTIVE TITLE
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# LICENSE
# AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Content-Type: text/plain; charset=UTF-8\n"
"#;
|
new
|
identifier_name
|
extractor.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with
// this file, You can obtain one at https://mozilla.org/MPL/2.0/.
use message::Message;
use error::{Error, Result};
use common::{find_string, escape_string};
use std::collections::HashMap;
use std::path::Path;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use regex::Regex;
use walkdir::WalkDir;
/// Struct that extracts all messages from source code and can print them
/// to a `.pot` file.
///
/// This file can then be used as a starting point to begin translation.
/// It should be relatively similar to `gettext` generated files.
///
/// # Example
///
/// ```
/// use crowbook_intl::Extractor;
/// let mut extractor = Extractor::new();
/// extractor.add_messages_from_dir("src/").unwrap();
/// println!("{}", extractor.generate_pot_file());
/// ```
///
/// # Note
///
/// This struct only add messages that are considered as needing localization,
/// that is, the first argument of calls so `lformat!` macro.
#[derive(Debug, Clone)]
pub struct Extractor {
messages: HashMap<String, Message>,
// Matches the format string (as used by `lformat!` and the actual escaped string
// given to potfile
orig_strings: HashMap<String, String>,
}
impl Extractor {
/// Create a new, empty extractor
pub fn new() -> Extractor {
Extractor {
messages: HashMap::new(),
orig_strings: HashMap::new(),
}
}
/// Returns a hashmap mapping the original strings (as used by `lformat!`)
/// to escaped strings. Only contains strings that are different and
/// must thus be handled.
pub fn original_strings<'a>(&'a self) -> &'a HashMap<String, String> {
&self.orig_strings
}
/// Add all the messages contained in a source file
pub fn add_messages_from_file<P: AsRef<Path>>(&mut self, file: P) -> Result<()> {
lazy_static! {
static ref REMOVE_COMMS: Regex = Regex::new(r#"//[^\n]*"#).unwrap();
static ref FIND_MSGS: Regex = Regex::new(r#"lformat!\("#).unwrap();
}
let filename = format!("{}", file.as_ref().display());
let mut f = try!(File::open(file)
.map_err(|e| Error::parse(format!("could not open file {}: {}",
&filename,
e))));
let mut content = String::new();
try!(f.read_to_string(&mut content)
.map_err(|e| Error::parse(format!("could not read file {}: {}",
&filename,
e))));
content = REMOVE_COMMS.replace_all(&content, "").into_owned();
for caps in FIND_MSGS.captures_iter(&content) {
let pos = caps.get(0).unwrap().end();
let line = 1 + &content[..pos].bytes().filter(|b| b == &b'\n').count();
let bytes = content[pos..].as_bytes();
let orig_msg: String = try!(find_string(bytes)
.map_err(|_| Error::parse(format!("{}:{}: could not parse as string",
&filename,
line))));
let msg = escape_string(orig_msg.as_str()).into_owned();
if msg!= orig_msg
|
if self.messages.contains_key(msg.as_str()) {
self.messages.get_mut(&msg).unwrap().add_source(filename.as_str(), line);
} else {
let mut message = Message::new(msg.as_str());
message.add_source(filename.as_str(), line);
self.messages.insert(msg, message);
}
}
Ok(())
}
/// Add messages from all `.rs` files contained in a directory
/// (walks through subdirectories)
pub fn add_messages_from_dir<P: AsRef<Path>>(&mut self, dir: P) -> Result<()> {
let filtered = WalkDir::new(dir)
.into_iter()
.filter_map(|e| e.ok())
.map(|e| e.path()
.to_string_lossy()
.into_owned())
.filter(|s| s.ends_with(".rs"));
for filename in filtered {
try!(self.add_messages_from_file(&filename));
}
Ok(())
}
/// Generate a pot-like file from the strings extracted from all files (if any)
pub fn generate_pot_file(&self) -> String {
let mut output = String::from(POT_HEADER);
let mut values = self.messages
.values()
.collect::<Vec<_>>();
values.sort();
for value in values {
output.push_str(&format!("{}", value));
}
output
}
/// Write a pot-like file to specified location
pub fn write_pot_file(&mut self, file: &str) -> Result<()> {
let mut f = try!(File::create(file).map_err(|e| Error::new(format!("Could not create file {}: {}",
file, e))));
let content = self.generate_pot_file();
try!(f.write_all(content.as_bytes())
.map_err(|e| Error::new(format!("Could not write to file {}: {}",
file, e))));
Ok(())
}
}
const POT_HEADER: &'static str = r#"# SOME DESCRIPTIVE TITLE
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# LICENSE
# AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Content-Type: text/plain; charset=UTF-8\n"
"#;
|
{
self.orig_strings.insert(orig_msg, msg.clone());
}
|
conditional_block
|
extractor.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with
// this file, You can obtain one at https://mozilla.org/MPL/2.0/.
use message::Message;
use error::{Error, Result};
use common::{find_string, escape_string};
use std::collections::HashMap;
use std::path::Path;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use regex::Regex;
use walkdir::WalkDir;
/// Struct that extracts all messages from source code and can print them
/// to a `.pot` file.
///
/// This file can then be used as a starting point to begin translation.
/// It should be relatively similar to `gettext` generated files.
///
/// # Example
///
/// ```
/// use crowbook_intl::Extractor;
/// let mut extractor = Extractor::new();
/// extractor.add_messages_from_dir("src/").unwrap();
/// println!("{}", extractor.generate_pot_file());
/// ```
///
/// # Note
///
/// This struct only add messages that are considered as needing localization,
/// that is, the first argument of calls so `lformat!` macro.
#[derive(Debug, Clone)]
pub struct Extractor {
messages: HashMap<String, Message>,
// Matches the format string (as used by `lformat!` and the actual escaped string
// given to potfile
orig_strings: HashMap<String, String>,
}
impl Extractor {
/// Create a new, empty extractor
pub fn new() -> Extractor {
Extractor {
messages: HashMap::new(),
orig_strings: HashMap::new(),
}
}
/// Returns a hashmap mapping the original strings (as used by `lformat!`)
/// to escaped strings. Only contains strings that are different and
/// must thus be handled.
pub fn original_strings<'a>(&'a self) -> &'a HashMap<String, String> {
&self.orig_strings
}
/// Add all the messages contained in a source file
pub fn add_messages_from_file<P: AsRef<Path>>(&mut self, file: P) -> Result<()> {
lazy_static! {
static ref REMOVE_COMMS: Regex = Regex::new(r#"//[^\n]*"#).unwrap();
static ref FIND_MSGS: Regex = Regex::new(r#"lformat!\("#).unwrap();
}
let filename = format!("{}", file.as_ref().display());
let mut f = try!(File::open(file)
.map_err(|e| Error::parse(format!("could not open file {}: {}",
&filename,
e))));
let mut content = String::new();
try!(f.read_to_string(&mut content)
.map_err(|e| Error::parse(format!("could not read file {}: {}",
&filename,
e))));
content = REMOVE_COMMS.replace_all(&content, "").into_owned();
for caps in FIND_MSGS.captures_iter(&content) {
let pos = caps.get(0).unwrap().end();
let line = 1 + &content[..pos].bytes().filter(|b| b == &b'\n').count();
let bytes = content[pos..].as_bytes();
let orig_msg: String = try!(find_string(bytes)
.map_err(|_| Error::parse(format!("{}:{}: could not parse as string",
&filename,
line))));
let msg = escape_string(orig_msg.as_str()).into_owned();
if msg!= orig_msg {
self.orig_strings.insert(orig_msg, msg.clone());
}
|
if self.messages.contains_key(msg.as_str()) {
self.messages.get_mut(&msg).unwrap().add_source(filename.as_str(), line);
} else {
let mut message = Message::new(msg.as_str());
message.add_source(filename.as_str(), line);
self.messages.insert(msg, message);
}
}
Ok(())
}
/// Add messages from all `.rs` files contained in a directory
/// (walks through subdirectories)
pub fn add_messages_from_dir<P: AsRef<Path>>(&mut self, dir: P) -> Result<()> {
let filtered = WalkDir::new(dir)
.into_iter()
.filter_map(|e| e.ok())
.map(|e| e.path()
.to_string_lossy()
.into_owned())
.filter(|s| s.ends_with(".rs"));
for filename in filtered {
try!(self.add_messages_from_file(&filename));
}
Ok(())
}
/// Generate a pot-like file from the strings extracted from all files (if any)
pub fn generate_pot_file(&self) -> String {
let mut output = String::from(POT_HEADER);
let mut values = self.messages
.values()
.collect::<Vec<_>>();
values.sort();
for value in values {
output.push_str(&format!("{}", value));
}
output
}
/// Write a pot-like file to specified location
pub fn write_pot_file(&mut self, file: &str) -> Result<()> {
let mut f = try!(File::create(file).map_err(|e| Error::new(format!("Could not create file {}: {}",
file, e))));
let content = self.generate_pot_file();
try!(f.write_all(content.as_bytes())
.map_err(|e| Error::new(format!("Could not write to file {}: {}",
file, e))));
Ok(())
}
}
const POT_HEADER: &'static str = r#"# SOME DESCRIPTIVE TITLE
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# LICENSE
# AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Content-Type: text/plain; charset=UTF-8\n"
"#;
|
random_line_split
|
|
vulkano_gralloc.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! vulkano_gralloc: Implements swapchain allocation and memory mapping
//! using Vulkano.
//!
//! External code found at https://github.com/vulkano-rs/vulkano.
#![cfg(feature = "vulkano")]
use std::collections::BTreeMap as Map;
use std::convert::TryInto;
use std::iter::Empty;
use std::sync::Arc;
use base::MappedRegion;
use crate::rutabaga_gralloc::gralloc::{Gralloc, ImageAllocationInfo, ImageMemoryRequirements};
use crate::rutabaga_utils::*;
use vulkano::device::physical::{MemoryType, PhysicalDevice, PhysicalDeviceType};
|
};
use vulkano::instance::{Instance, InstanceCreationError, InstanceExtensions, Version};
use vulkano::memory::{
DedicatedAlloc, DeviceMemoryAllocError, DeviceMemoryBuilder, DeviceMemoryMapping,
ExternalMemoryHandleType, MemoryRequirements,
};
use vulkano::memory::pool::AllocFromRequirementsFilter;
use vulkano::sync::Sharing;
/// A gralloc implementation capable of allocation `VkDeviceMemory`.
pub struct VulkanoGralloc {
devices: Map<PhysicalDeviceType, Arc<Device>>,
has_integrated_gpu: bool,
}
struct VulkanoMapping {
mapping: DeviceMemoryMapping,
size: usize,
}
impl VulkanoMapping {
pub fn new(mapping: DeviceMemoryMapping, size: usize) -> VulkanoMapping {
VulkanoMapping { mapping, size }
}
}
unsafe impl MappedRegion for VulkanoMapping {
/// Used for passing this region for hypervisor memory mappings. We trust crosvm to use this
/// safely.
fn as_ptr(&self) -> *mut u8 {
unsafe { self.mapping.as_ptr() }
}
/// Returns the size of the memory region in bytes.
fn size(&self) -> usize {
self.size
}
}
impl VulkanoGralloc {
/// Returns a new `VulkanGralloc' instance upon success.
pub fn init() -> RutabagaResult<Box<dyn Gralloc>> {
// Initialization copied from triangle.rs in Vulkano. Look there for a more detailed
// explanation of VK initialization.
let instance_extensions = InstanceExtensions {
khr_external_memory_capabilities: true,
khr_get_physical_device_properties2: true,
..InstanceExtensions::none()
};
let instance = Instance::new(None, Version::V1_1, &instance_extensions, None)?;
let mut devices: Map<PhysicalDeviceType, Arc<Device>> = Default::default();
let mut has_integrated_gpu = false;
for physical in PhysicalDevice::enumerate(&instance) {
let queue_family = physical
.queue_families()
.find(|&q| {
// We take the first queue family that supports graphics.
q.supports_graphics()
})
.ok_or(RutabagaError::SpecViolation(
"need graphics queue family to proceed",
))?;
let supported_extensions = physical.supported_extensions();
let desired_extensions = DeviceExtensions {
khr_dedicated_allocation: true,
khr_get_memory_requirements2: true,
khr_external_memory: true,
khr_external_memory_fd: true,
ext_external_memory_dma_buf: true,
..DeviceExtensions::none()
};
let intersection = supported_extensions.intersection(&desired_extensions);
if let Ok(device, mut _queues) = Device::new(
physical,
physical.supported_features(),
&intersection,
[(queue_family, 0.5)].iter().cloned(),
) {
let device_type = device.physical_device().properties().device_type;
if device_type == PhysicalDeviceType::IntegratedGpu {
has_integrated_gpu = true
}
// If we have two devices of the same type (two integrated GPUs), the old value is
// dropped. Vulkano is verbose enough such that a keener selection algorithm may
// be used, but the need for such complexity does not seem to exist now.
devices.insert(device_type, device);
};
}
if devices.is_empty() {
return Err(RutabagaError::SpecViolation(
"no matching VK devices available",
));
}
Ok(Box::new(VulkanoGralloc {
devices,
has_integrated_gpu,
}))
}
// This function is used safely in this module because gralloc does not:
//
// (1) bind images to any memory.
// (2) transition the layout of images.
// (3) transfer ownership of images between queues.
//
// In addition, we trust Vulkano to validate image parameters are within the Vulkan spec.
unsafe fn create_image(
&mut self,
info: ImageAllocationInfo,
) -> RutabagaResult<(sys::UnsafeImage, MemoryRequirements)> {
let device = if self.has_integrated_gpu {
self.devices
.get(&PhysicalDeviceType::IntegratedGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
} else {
self.devices
.get(&PhysicalDeviceType::DiscreteGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
};
let usage = match info.flags.uses_rendering() {
true => ImageUsage {
color_attachment: true,
..ImageUsage::none()
},
false => ImageUsage {
sampled: true,
..ImageUsage::none()
},
};
// Reasonable bounds on image width.
if info.width == 0 || info.width > 4096 {
return Err(RutabagaError::InvalidGrallocDimensions);
}
// Reasonable bounds on image height.
if info.height == 0 || info.height > 4096 {
return Err(RutabagaError::InvalidGrallocDimensions);
}
let vulkan_format = info.drm_format.vulkan_format()?;
let (unsafe_image, memory_requirements) = sys::UnsafeImage::new(
device.clone(),
usage,
vulkan_format,
ImageCreateFlags::none(),
ImageDimensions::Dim2d {
width: info.width,
height: info.height,
array_layers: 1,
},
SampleCount::Sample1,
1, /* mipmap count */
Sharing::Exclusive::<Empty<_>>,
true, /* linear images only currently */
false, /* not preinitialized */
)?;
Ok((unsafe_image, memory_requirements))
}
}
impl Gralloc for VulkanoGralloc {
fn supports_external_gpu_memory(&self) -> bool {
for device in self.devices.values() {
if!device.enabled_extensions().khr_external_memory {
return false;
}
}
true
}
fn supports_dmabuf(&self) -> bool {
for device in self.devices.values() {
if!device.enabled_extensions().ext_external_memory_dma_buf {
return false;
}
}
true
}
fn get_image_memory_requirements(
&mut self,
info: ImageAllocationInfo,
) -> RutabagaResult<ImageMemoryRequirements> {
let mut reqs: ImageMemoryRequirements = Default::default();
let (unsafe_image, memory_requirements) = unsafe { self.create_image(info)? };
let device = if self.has_integrated_gpu {
self.devices
.get(&PhysicalDeviceType::IntegratedGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
} else {
self.devices
.get(&PhysicalDeviceType::DiscreteGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
};
let planar_layout = info.drm_format.planar_layout()?;
// Safe because we created the image with the linear bit set and verified the format is
// not a depth or stencil format. We are also using the correct image aspect. Vulkano
// will panic if we are not.
for plane in 0..planar_layout.num_planes {
let aspect = info.drm_format.vulkan_image_aspect(plane)?;
let layout = unsafe { unsafe_image.multiplane_color_layout(aspect) };
reqs.strides[plane] = layout.row_pitch as u32;
reqs.offsets[plane] = layout.offset as u32;
}
let need_visible = info.flags.host_visible();
let want_cached = info.flags.host_cached();
let memory_type = {
let filter = |current_type: MemoryType| {
if need_visible &&!current_type.is_host_visible() {
return AllocFromRequirementsFilter::Forbidden;
}
if!need_visible && current_type.is_device_local() {
return AllocFromRequirementsFilter::Preferred;
}
if need_visible && want_cached && current_type.is_host_cached() {
return AllocFromRequirementsFilter::Preferred;
}
if need_visible
&&!want_cached
&& current_type.is_host_coherent()
&&!current_type.is_host_cached()
{
return AllocFromRequirementsFilter::Preferred;
}
AllocFromRequirementsFilter::Allowed
};
let first_loop = device
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Preferred));
let second_loop = device
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Allowed));
first_loop
.chain(second_loop)
.filter(|&(t, _)| (memory_requirements.memory_type_bits & (1 << t.id()))!= 0)
.find(|&(t, rq)| filter(t) == rq)
.ok_or(RutabagaError::SpecViolation(
"unable to find required memory type",
))?
.0
};
reqs.info = info;
reqs.size = memory_requirements.size as u64;
if memory_type.is_host_visible() {
if memory_type.is_host_cached() {
reqs.map_info = RUTABAGA_MAP_CACHE_CACHED;
} else if memory_type.is_host_coherent() {
reqs.map_info = RUTABAGA_MAP_CACHE_WC;
}
}
reqs.vulkan_info = Some(VulkanInfo {
memory_idx: memory_type.id() as u32,
physical_device_idx: device.physical_device().index() as u32,
});
Ok(reqs)
}
fn allocate_memory(&mut self, reqs: ImageMemoryRequirements) -> RutabagaResult<RutabagaHandle> {
let (unsafe_image, memory_requirements) = unsafe { self.create_image(reqs.info)? };
let vulkan_info = reqs.vulkan_info.ok_or(RutabagaError::InvalidVulkanInfo)?;
let device = if self.has_integrated_gpu {
self.devices
.get(&PhysicalDeviceType::IntegratedGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
} else {
self.devices
.get(&PhysicalDeviceType::DiscreteGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
};
let memory_type = device
.physical_device()
.memory_type_by_id(vulkan_info.memory_idx)
.ok_or(RutabagaError::InvalidVulkanInfo)?;
let (handle_type, rutabaga_type) =
match device.enabled_extensions().ext_external_memory_dma_buf {
true => (
ExternalMemoryHandleType {
dma_buf: true,
..ExternalMemoryHandleType::none()
},
RUTABAGA_MEM_HANDLE_TYPE_DMABUF,
),
false => (
ExternalMemoryHandleType {
opaque_fd: true,
..ExternalMemoryHandleType::none()
},
RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD,
),
};
let dedicated = match device.enabled_extensions().khr_dedicated_allocation {
true => {
if memory_requirements.prefer_dedicated {
DedicatedAlloc::Image(&unsafe_image)
} else {
DedicatedAlloc::None
}
}
false => DedicatedAlloc::None,
};
let device_memory = DeviceMemoryBuilder::new(device.clone(), memory_type.id(), reqs.size)
.dedicated_info(dedicated)
.export_info(handle_type)
.build()?;
let descriptor = device_memory.export_fd(handle_type)?.into();
Ok(RutabagaHandle {
os_handle: descriptor,
handle_type: rutabaga_type,
})
}
/// Implementations must map the memory associated with the `resource_id` upon success.
fn import_and_map(
&mut self,
handle: RutabagaHandle,
vulkan_info: VulkanInfo,
size: u64,
) -> RutabagaResult<Box<dyn MappedRegion>> {
let device = self
.devices
.values()
.find(|device| {
device.physical_device().index() as u32 == vulkan_info.physical_device_idx
})
.ok_or(RutabagaError::InvalidVulkanInfo)?;
let handle_type = match handle.handle_type {
RUTABAGA_MEM_HANDLE_TYPE_DMABUF => ExternalMemoryHandleType {
dma_buf: true,
..ExternalMemoryHandleType::none()
},
RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD => ExternalMemoryHandleType {
opaque_fd: true,
..ExternalMemoryHandleType::none()
},
_ => return Err(RutabagaError::InvalidRutabagaHandle),
};
let device_memory = DeviceMemoryBuilder::new(device.clone(), vulkan_info.memory_idx, size)
.import_info(handle.os_handle.into(), handle_type)
.build()?;
let mapping = DeviceMemoryMapping::new(device.clone(), device_memory.clone(), 0, size, 0)?;
Ok(Box::new(VulkanoMapping::new(mapping, size.try_into()?)))
}
}
// Vulkano should really define an universal type that wraps all these errors, say
// "VulkanoError(e)".
impl From<InstanceCreationError> for RutabagaError {
fn from(e: InstanceCreationError) -> RutabagaError {
RutabagaError::VkInstanceCreationError(e)
}
}
impl From<ImageCreationError> for RutabagaError {
fn from(e: ImageCreationError) -> RutabagaError {
RutabagaError::VkImageCreationError(e)
}
}
impl From<DeviceCreationError> for RutabagaError {
fn from(e: DeviceCreationError) -> RutabagaError {
RutabagaError::VkDeviceCreationError(e)
}
}
impl From<DeviceMemoryAllocError> for RutabagaError {
fn from(e: DeviceMemoryAllocError) -> RutabagaError {
RutabagaError::VkDeviceMemoryAllocError(e)
}
}
|
use vulkano::device::{Device, DeviceCreationError, DeviceExtensions};
use vulkano::image::{
sys, ImageCreateFlags, ImageCreationError, ImageDimensions, ImageUsage, SampleCount,
|
random_line_split
|
vulkano_gralloc.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! vulkano_gralloc: Implements swapchain allocation and memory mapping
//! using Vulkano.
//!
//! External code found at https://github.com/vulkano-rs/vulkano.
#![cfg(feature = "vulkano")]
use std::collections::BTreeMap as Map;
use std::convert::TryInto;
use std::iter::Empty;
use std::sync::Arc;
use base::MappedRegion;
use crate::rutabaga_gralloc::gralloc::{Gralloc, ImageAllocationInfo, ImageMemoryRequirements};
use crate::rutabaga_utils::*;
use vulkano::device::physical::{MemoryType, PhysicalDevice, PhysicalDeviceType};
use vulkano::device::{Device, DeviceCreationError, DeviceExtensions};
use vulkano::image::{
sys, ImageCreateFlags, ImageCreationError, ImageDimensions, ImageUsage, SampleCount,
};
use vulkano::instance::{Instance, InstanceCreationError, InstanceExtensions, Version};
use vulkano::memory::{
DedicatedAlloc, DeviceMemoryAllocError, DeviceMemoryBuilder, DeviceMemoryMapping,
ExternalMemoryHandleType, MemoryRequirements,
};
use vulkano::memory::pool::AllocFromRequirementsFilter;
use vulkano::sync::Sharing;
/// A gralloc implementation capable of allocation `VkDeviceMemory`.
pub struct VulkanoGralloc {
devices: Map<PhysicalDeviceType, Arc<Device>>,
has_integrated_gpu: bool,
}
struct VulkanoMapping {
mapping: DeviceMemoryMapping,
size: usize,
}
impl VulkanoMapping {
pub fn new(mapping: DeviceMemoryMapping, size: usize) -> VulkanoMapping {
VulkanoMapping { mapping, size }
}
}
unsafe impl MappedRegion for VulkanoMapping {
/// Used for passing this region for hypervisor memory mappings. We trust crosvm to use this
/// safely.
fn as_ptr(&self) -> *mut u8 {
unsafe { self.mapping.as_ptr() }
}
/// Returns the size of the memory region in bytes.
fn size(&self) -> usize {
self.size
}
}
impl VulkanoGralloc {
/// Returns a new `VulkanGralloc' instance upon success.
pub fn init() -> RutabagaResult<Box<dyn Gralloc>> {
// Initialization copied from triangle.rs in Vulkano. Look there for a more detailed
// explanation of VK initialization.
let instance_extensions = InstanceExtensions {
khr_external_memory_capabilities: true,
khr_get_physical_device_properties2: true,
..InstanceExtensions::none()
};
let instance = Instance::new(None, Version::V1_1, &instance_extensions, None)?;
let mut devices: Map<PhysicalDeviceType, Arc<Device>> = Default::default();
let mut has_integrated_gpu = false;
for physical in PhysicalDevice::enumerate(&instance) {
let queue_family = physical
.queue_families()
.find(|&q| {
// We take the first queue family that supports graphics.
q.supports_graphics()
})
.ok_or(RutabagaError::SpecViolation(
"need graphics queue family to proceed",
))?;
let supported_extensions = physical.supported_extensions();
let desired_extensions = DeviceExtensions {
khr_dedicated_allocation: true,
khr_get_memory_requirements2: true,
khr_external_memory: true,
khr_external_memory_fd: true,
ext_external_memory_dma_buf: true,
..DeviceExtensions::none()
};
let intersection = supported_extensions.intersection(&desired_extensions);
if let Ok(device, mut _queues) = Device::new(
physical,
physical.supported_features(),
&intersection,
[(queue_family, 0.5)].iter().cloned(),
) {
let device_type = device.physical_device().properties().device_type;
if device_type == PhysicalDeviceType::IntegratedGpu {
has_integrated_gpu = true
}
// If we have two devices of the same type (two integrated GPUs), the old value is
// dropped. Vulkano is verbose enough such that a keener selection algorithm may
// be used, but the need for such complexity does not seem to exist now.
devices.insert(device_type, device);
};
}
if devices.is_empty() {
return Err(RutabagaError::SpecViolation(
"no matching VK devices available",
));
}
Ok(Box::new(VulkanoGralloc {
devices,
has_integrated_gpu,
}))
}
// This function is used safely in this module because gralloc does not:
//
// (1) bind images to any memory.
// (2) transition the layout of images.
// (3) transfer ownership of images between queues.
//
// In addition, we trust Vulkano to validate image parameters are within the Vulkan spec.
unsafe fn create_image(
&mut self,
info: ImageAllocationInfo,
) -> RutabagaResult<(sys::UnsafeImage, MemoryRequirements)> {
let device = if self.has_integrated_gpu {
self.devices
.get(&PhysicalDeviceType::IntegratedGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
} else {
self.devices
.get(&PhysicalDeviceType::DiscreteGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
};
let usage = match info.flags.uses_rendering() {
true => ImageUsage {
color_attachment: true,
..ImageUsage::none()
},
false => ImageUsage {
sampled: true,
..ImageUsage::none()
},
};
// Reasonable bounds on image width.
if info.width == 0 || info.width > 4096 {
return Err(RutabagaError::InvalidGrallocDimensions);
}
// Reasonable bounds on image height.
if info.height == 0 || info.height > 4096 {
return Err(RutabagaError::InvalidGrallocDimensions);
}
let vulkan_format = info.drm_format.vulkan_format()?;
let (unsafe_image, memory_requirements) = sys::UnsafeImage::new(
device.clone(),
usage,
vulkan_format,
ImageCreateFlags::none(),
ImageDimensions::Dim2d {
width: info.width,
height: info.height,
array_layers: 1,
},
SampleCount::Sample1,
1, /* mipmap count */
Sharing::Exclusive::<Empty<_>>,
true, /* linear images only currently */
false, /* not preinitialized */
)?;
Ok((unsafe_image, memory_requirements))
}
}
impl Gralloc for VulkanoGralloc {
fn supports_external_gpu_memory(&self) -> bool {
for device in self.devices.values() {
if!device.enabled_extensions().khr_external_memory {
return false;
}
}
true
}
fn supports_dmabuf(&self) -> bool
|
fn get_image_memory_requirements(
&mut self,
info: ImageAllocationInfo,
) -> RutabagaResult<ImageMemoryRequirements> {
let mut reqs: ImageMemoryRequirements = Default::default();
let (unsafe_image, memory_requirements) = unsafe { self.create_image(info)? };
let device = if self.has_integrated_gpu {
self.devices
.get(&PhysicalDeviceType::IntegratedGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
} else {
self.devices
.get(&PhysicalDeviceType::DiscreteGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
};
let planar_layout = info.drm_format.planar_layout()?;
// Safe because we created the image with the linear bit set and verified the format is
// not a depth or stencil format. We are also using the correct image aspect. Vulkano
// will panic if we are not.
for plane in 0..planar_layout.num_planes {
let aspect = info.drm_format.vulkan_image_aspect(plane)?;
let layout = unsafe { unsafe_image.multiplane_color_layout(aspect) };
reqs.strides[plane] = layout.row_pitch as u32;
reqs.offsets[plane] = layout.offset as u32;
}
let need_visible = info.flags.host_visible();
let want_cached = info.flags.host_cached();
let memory_type = {
let filter = |current_type: MemoryType| {
if need_visible &&!current_type.is_host_visible() {
return AllocFromRequirementsFilter::Forbidden;
}
if!need_visible && current_type.is_device_local() {
return AllocFromRequirementsFilter::Preferred;
}
if need_visible && want_cached && current_type.is_host_cached() {
return AllocFromRequirementsFilter::Preferred;
}
if need_visible
&&!want_cached
&& current_type.is_host_coherent()
&&!current_type.is_host_cached()
{
return AllocFromRequirementsFilter::Preferred;
}
AllocFromRequirementsFilter::Allowed
};
let first_loop = device
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Preferred));
let second_loop = device
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Allowed));
first_loop
.chain(second_loop)
.filter(|&(t, _)| (memory_requirements.memory_type_bits & (1 << t.id()))!= 0)
.find(|&(t, rq)| filter(t) == rq)
.ok_or(RutabagaError::SpecViolation(
"unable to find required memory type",
))?
.0
};
reqs.info = info;
reqs.size = memory_requirements.size as u64;
if memory_type.is_host_visible() {
if memory_type.is_host_cached() {
reqs.map_info = RUTABAGA_MAP_CACHE_CACHED;
} else if memory_type.is_host_coherent() {
reqs.map_info = RUTABAGA_MAP_CACHE_WC;
}
}
reqs.vulkan_info = Some(VulkanInfo {
memory_idx: memory_type.id() as u32,
physical_device_idx: device.physical_device().index() as u32,
});
Ok(reqs)
}
fn allocate_memory(&mut self, reqs: ImageMemoryRequirements) -> RutabagaResult<RutabagaHandle> {
let (unsafe_image, memory_requirements) = unsafe { self.create_image(reqs.info)? };
let vulkan_info = reqs.vulkan_info.ok_or(RutabagaError::InvalidVulkanInfo)?;
let device = if self.has_integrated_gpu {
self.devices
.get(&PhysicalDeviceType::IntegratedGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
} else {
self.devices
.get(&PhysicalDeviceType::DiscreteGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
};
let memory_type = device
.physical_device()
.memory_type_by_id(vulkan_info.memory_idx)
.ok_or(RutabagaError::InvalidVulkanInfo)?;
let (handle_type, rutabaga_type) =
match device.enabled_extensions().ext_external_memory_dma_buf {
true => (
ExternalMemoryHandleType {
dma_buf: true,
..ExternalMemoryHandleType::none()
},
RUTABAGA_MEM_HANDLE_TYPE_DMABUF,
),
false => (
ExternalMemoryHandleType {
opaque_fd: true,
..ExternalMemoryHandleType::none()
},
RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD,
),
};
let dedicated = match device.enabled_extensions().khr_dedicated_allocation {
true => {
if memory_requirements.prefer_dedicated {
DedicatedAlloc::Image(&unsafe_image)
} else {
DedicatedAlloc::None
}
}
false => DedicatedAlloc::None,
};
let device_memory = DeviceMemoryBuilder::new(device.clone(), memory_type.id(), reqs.size)
.dedicated_info(dedicated)
.export_info(handle_type)
.build()?;
let descriptor = device_memory.export_fd(handle_type)?.into();
Ok(RutabagaHandle {
os_handle: descriptor,
handle_type: rutabaga_type,
})
}
/// Implementations must map the memory associated with the `resource_id` upon success.
fn import_and_map(
&mut self,
handle: RutabagaHandle,
vulkan_info: VulkanInfo,
size: u64,
) -> RutabagaResult<Box<dyn MappedRegion>> {
let device = self
.devices
.values()
.find(|device| {
device.physical_device().index() as u32 == vulkan_info.physical_device_idx
})
.ok_or(RutabagaError::InvalidVulkanInfo)?;
let handle_type = match handle.handle_type {
RUTABAGA_MEM_HANDLE_TYPE_DMABUF => ExternalMemoryHandleType {
dma_buf: true,
..ExternalMemoryHandleType::none()
},
RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD => ExternalMemoryHandleType {
opaque_fd: true,
..ExternalMemoryHandleType::none()
},
_ => return Err(RutabagaError::InvalidRutabagaHandle),
};
let device_memory = DeviceMemoryBuilder::new(device.clone(), vulkan_info.memory_idx, size)
.import_info(handle.os_handle.into(), handle_type)
.build()?;
let mapping = DeviceMemoryMapping::new(device.clone(), device_memory.clone(), 0, size, 0)?;
Ok(Box::new(VulkanoMapping::new(mapping, size.try_into()?)))
}
}
// Vulkano should really define an universal type that wraps all these errors, say
// "VulkanoError(e)".
impl From<InstanceCreationError> for RutabagaError {
fn from(e: InstanceCreationError) -> RutabagaError {
RutabagaError::VkInstanceCreationError(e)
}
}
impl From<ImageCreationError> for RutabagaError {
fn from(e: ImageCreationError) -> RutabagaError {
RutabagaError::VkImageCreationError(e)
}
}
impl From<DeviceCreationError> for RutabagaError {
fn from(e: DeviceCreationError) -> RutabagaError {
RutabagaError::VkDeviceCreationError(e)
}
}
impl From<DeviceMemoryAllocError> for RutabagaError {
fn from(e: DeviceMemoryAllocError) -> RutabagaError {
RutabagaError::VkDeviceMemoryAllocError(e)
}
}
|
{
for device in self.devices.values() {
if !device.enabled_extensions().ext_external_memory_dma_buf {
return false;
}
}
true
}
|
identifier_body
|
vulkano_gralloc.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! vulkano_gralloc: Implements swapchain allocation and memory mapping
//! using Vulkano.
//!
//! External code found at https://github.com/vulkano-rs/vulkano.
#![cfg(feature = "vulkano")]
use std::collections::BTreeMap as Map;
use std::convert::TryInto;
use std::iter::Empty;
use std::sync::Arc;
use base::MappedRegion;
use crate::rutabaga_gralloc::gralloc::{Gralloc, ImageAllocationInfo, ImageMemoryRequirements};
use crate::rutabaga_utils::*;
use vulkano::device::physical::{MemoryType, PhysicalDevice, PhysicalDeviceType};
use vulkano::device::{Device, DeviceCreationError, DeviceExtensions};
use vulkano::image::{
sys, ImageCreateFlags, ImageCreationError, ImageDimensions, ImageUsage, SampleCount,
};
use vulkano::instance::{Instance, InstanceCreationError, InstanceExtensions, Version};
use vulkano::memory::{
DedicatedAlloc, DeviceMemoryAllocError, DeviceMemoryBuilder, DeviceMemoryMapping,
ExternalMemoryHandleType, MemoryRequirements,
};
use vulkano::memory::pool::AllocFromRequirementsFilter;
use vulkano::sync::Sharing;
/// A gralloc implementation capable of allocation `VkDeviceMemory`.
pub struct VulkanoGralloc {
devices: Map<PhysicalDeviceType, Arc<Device>>,
has_integrated_gpu: bool,
}
struct VulkanoMapping {
mapping: DeviceMemoryMapping,
size: usize,
}
impl VulkanoMapping {
pub fn new(mapping: DeviceMemoryMapping, size: usize) -> VulkanoMapping {
VulkanoMapping { mapping, size }
}
}
unsafe impl MappedRegion for VulkanoMapping {
/// Used for passing this region for hypervisor memory mappings. We trust crosvm to use this
/// safely.
fn as_ptr(&self) -> *mut u8 {
unsafe { self.mapping.as_ptr() }
}
/// Returns the size of the memory region in bytes.
fn size(&self) -> usize {
self.size
}
}
impl VulkanoGralloc {
/// Returns a new `VulkanGralloc' instance upon success.
pub fn init() -> RutabagaResult<Box<dyn Gralloc>> {
// Initialization copied from triangle.rs in Vulkano. Look there for a more detailed
// explanation of VK initialization.
let instance_extensions = InstanceExtensions {
khr_external_memory_capabilities: true,
khr_get_physical_device_properties2: true,
..InstanceExtensions::none()
};
let instance = Instance::new(None, Version::V1_1, &instance_extensions, None)?;
let mut devices: Map<PhysicalDeviceType, Arc<Device>> = Default::default();
let mut has_integrated_gpu = false;
for physical in PhysicalDevice::enumerate(&instance) {
let queue_family = physical
.queue_families()
.find(|&q| {
// We take the first queue family that supports graphics.
q.supports_graphics()
})
.ok_or(RutabagaError::SpecViolation(
"need graphics queue family to proceed",
))?;
let supported_extensions = physical.supported_extensions();
let desired_extensions = DeviceExtensions {
khr_dedicated_allocation: true,
khr_get_memory_requirements2: true,
khr_external_memory: true,
khr_external_memory_fd: true,
ext_external_memory_dma_buf: true,
..DeviceExtensions::none()
};
let intersection = supported_extensions.intersection(&desired_extensions);
if let Ok(device, mut _queues) = Device::new(
physical,
physical.supported_features(),
&intersection,
[(queue_family, 0.5)].iter().cloned(),
) {
let device_type = device.physical_device().properties().device_type;
if device_type == PhysicalDeviceType::IntegratedGpu {
has_integrated_gpu = true
}
// If we have two devices of the same type (two integrated GPUs), the old value is
// dropped. Vulkano is verbose enough such that a keener selection algorithm may
// be used, but the need for such complexity does not seem to exist now.
devices.insert(device_type, device);
};
}
if devices.is_empty() {
return Err(RutabagaError::SpecViolation(
"no matching VK devices available",
));
}
Ok(Box::new(VulkanoGralloc {
devices,
has_integrated_gpu,
}))
}
// This function is used safely in this module because gralloc does not:
//
// (1) bind images to any memory.
// (2) transition the layout of images.
// (3) transfer ownership of images between queues.
//
// In addition, we trust Vulkano to validate image parameters are within the Vulkan spec.
unsafe fn create_image(
&mut self,
info: ImageAllocationInfo,
) -> RutabagaResult<(sys::UnsafeImage, MemoryRequirements)> {
let device = if self.has_integrated_gpu {
self.devices
.get(&PhysicalDeviceType::IntegratedGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
} else {
self.devices
.get(&PhysicalDeviceType::DiscreteGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
};
let usage = match info.flags.uses_rendering() {
true => ImageUsage {
color_attachment: true,
..ImageUsage::none()
},
false => ImageUsage {
sampled: true,
..ImageUsage::none()
},
};
// Reasonable bounds on image width.
if info.width == 0 || info.width > 4096 {
return Err(RutabagaError::InvalidGrallocDimensions);
}
// Reasonable bounds on image height.
if info.height == 0 || info.height > 4096 {
return Err(RutabagaError::InvalidGrallocDimensions);
}
let vulkan_format = info.drm_format.vulkan_format()?;
let (unsafe_image, memory_requirements) = sys::UnsafeImage::new(
device.clone(),
usage,
vulkan_format,
ImageCreateFlags::none(),
ImageDimensions::Dim2d {
width: info.width,
height: info.height,
array_layers: 1,
},
SampleCount::Sample1,
1, /* mipmap count */
Sharing::Exclusive::<Empty<_>>,
true, /* linear images only currently */
false, /* not preinitialized */
)?;
Ok((unsafe_image, memory_requirements))
}
}
impl Gralloc for VulkanoGralloc {
fn supports_external_gpu_memory(&self) -> bool {
for device in self.devices.values() {
if!device.enabled_extensions().khr_external_memory {
return false;
}
}
true
}
fn
|
(&self) -> bool {
for device in self.devices.values() {
if!device.enabled_extensions().ext_external_memory_dma_buf {
return false;
}
}
true
}
fn get_image_memory_requirements(
&mut self,
info: ImageAllocationInfo,
) -> RutabagaResult<ImageMemoryRequirements> {
let mut reqs: ImageMemoryRequirements = Default::default();
let (unsafe_image, memory_requirements) = unsafe { self.create_image(info)? };
let device = if self.has_integrated_gpu {
self.devices
.get(&PhysicalDeviceType::IntegratedGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
} else {
self.devices
.get(&PhysicalDeviceType::DiscreteGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
};
let planar_layout = info.drm_format.planar_layout()?;
// Safe because we created the image with the linear bit set and verified the format is
// not a depth or stencil format. We are also using the correct image aspect. Vulkano
// will panic if we are not.
for plane in 0..planar_layout.num_planes {
let aspect = info.drm_format.vulkan_image_aspect(plane)?;
let layout = unsafe { unsafe_image.multiplane_color_layout(aspect) };
reqs.strides[plane] = layout.row_pitch as u32;
reqs.offsets[plane] = layout.offset as u32;
}
let need_visible = info.flags.host_visible();
let want_cached = info.flags.host_cached();
let memory_type = {
let filter = |current_type: MemoryType| {
if need_visible &&!current_type.is_host_visible() {
return AllocFromRequirementsFilter::Forbidden;
}
if!need_visible && current_type.is_device_local() {
return AllocFromRequirementsFilter::Preferred;
}
if need_visible && want_cached && current_type.is_host_cached() {
return AllocFromRequirementsFilter::Preferred;
}
if need_visible
&&!want_cached
&& current_type.is_host_coherent()
&&!current_type.is_host_cached()
{
return AllocFromRequirementsFilter::Preferred;
}
AllocFromRequirementsFilter::Allowed
};
let first_loop = device
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Preferred));
let second_loop = device
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Allowed));
first_loop
.chain(second_loop)
.filter(|&(t, _)| (memory_requirements.memory_type_bits & (1 << t.id()))!= 0)
.find(|&(t, rq)| filter(t) == rq)
.ok_or(RutabagaError::SpecViolation(
"unable to find required memory type",
))?
.0
};
reqs.info = info;
reqs.size = memory_requirements.size as u64;
if memory_type.is_host_visible() {
if memory_type.is_host_cached() {
reqs.map_info = RUTABAGA_MAP_CACHE_CACHED;
} else if memory_type.is_host_coherent() {
reqs.map_info = RUTABAGA_MAP_CACHE_WC;
}
}
reqs.vulkan_info = Some(VulkanInfo {
memory_idx: memory_type.id() as u32,
physical_device_idx: device.physical_device().index() as u32,
});
Ok(reqs)
}
fn allocate_memory(&mut self, reqs: ImageMemoryRequirements) -> RutabagaResult<RutabagaHandle> {
let (unsafe_image, memory_requirements) = unsafe { self.create_image(reqs.info)? };
let vulkan_info = reqs.vulkan_info.ok_or(RutabagaError::InvalidVulkanInfo)?;
let device = if self.has_integrated_gpu {
self.devices
.get(&PhysicalDeviceType::IntegratedGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
} else {
self.devices
.get(&PhysicalDeviceType::DiscreteGpu)
.ok_or(RutabagaError::InvalidGrallocGpuType)?
};
let memory_type = device
.physical_device()
.memory_type_by_id(vulkan_info.memory_idx)
.ok_or(RutabagaError::InvalidVulkanInfo)?;
let (handle_type, rutabaga_type) =
match device.enabled_extensions().ext_external_memory_dma_buf {
true => (
ExternalMemoryHandleType {
dma_buf: true,
..ExternalMemoryHandleType::none()
},
RUTABAGA_MEM_HANDLE_TYPE_DMABUF,
),
false => (
ExternalMemoryHandleType {
opaque_fd: true,
..ExternalMemoryHandleType::none()
},
RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD,
),
};
let dedicated = match device.enabled_extensions().khr_dedicated_allocation {
true => {
if memory_requirements.prefer_dedicated {
DedicatedAlloc::Image(&unsafe_image)
} else {
DedicatedAlloc::None
}
}
false => DedicatedAlloc::None,
};
let device_memory = DeviceMemoryBuilder::new(device.clone(), memory_type.id(), reqs.size)
.dedicated_info(dedicated)
.export_info(handle_type)
.build()?;
let descriptor = device_memory.export_fd(handle_type)?.into();
Ok(RutabagaHandle {
os_handle: descriptor,
handle_type: rutabaga_type,
})
}
/// Implementations must map the memory associated with the `resource_id` upon success.
fn import_and_map(
&mut self,
handle: RutabagaHandle,
vulkan_info: VulkanInfo,
size: u64,
) -> RutabagaResult<Box<dyn MappedRegion>> {
let device = self
.devices
.values()
.find(|device| {
device.physical_device().index() as u32 == vulkan_info.physical_device_idx
})
.ok_or(RutabagaError::InvalidVulkanInfo)?;
let handle_type = match handle.handle_type {
RUTABAGA_MEM_HANDLE_TYPE_DMABUF => ExternalMemoryHandleType {
dma_buf: true,
..ExternalMemoryHandleType::none()
},
RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD => ExternalMemoryHandleType {
opaque_fd: true,
..ExternalMemoryHandleType::none()
},
_ => return Err(RutabagaError::InvalidRutabagaHandle),
};
let device_memory = DeviceMemoryBuilder::new(device.clone(), vulkan_info.memory_idx, size)
.import_info(handle.os_handle.into(), handle_type)
.build()?;
let mapping = DeviceMemoryMapping::new(device.clone(), device_memory.clone(), 0, size, 0)?;
Ok(Box::new(VulkanoMapping::new(mapping, size.try_into()?)))
}
}
// Vulkano should really define an universal type that wraps all these errors, say
// "VulkanoError(e)".
impl From<InstanceCreationError> for RutabagaError {
fn from(e: InstanceCreationError) -> RutabagaError {
RutabagaError::VkInstanceCreationError(e)
}
}
impl From<ImageCreationError> for RutabagaError {
fn from(e: ImageCreationError) -> RutabagaError {
RutabagaError::VkImageCreationError(e)
}
}
impl From<DeviceCreationError> for RutabagaError {
fn from(e: DeviceCreationError) -> RutabagaError {
RutabagaError::VkDeviceCreationError(e)
}
}
impl From<DeviceMemoryAllocError> for RutabagaError {
fn from(e: DeviceMemoryAllocError) -> RutabagaError {
RutabagaError::VkDeviceMemoryAllocError(e)
}
}
|
supports_dmabuf
|
identifier_name
|
opts.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Configuration options for a single run of the servo application. Created
//! from command line arguments.
use azure::azure_hl::{BackendType, CairoBackend, CoreGraphicsBackend};
use azure::azure_hl::{CoreGraphicsAcceleratedBackend, Direct2DBackend, SkiaBackend};
use extra::getopts;
use std::result;
use std::uint;
/// Global flags for Servo, currently set on the command line.
#[deriving(Clone)]
pub struct Opts {
/// The initial URLs to load.
urls: ~[~str],
/// The rendering backend to use (`-r`).
render_backend: BackendType,
/// How many threads to use for CPU rendering (`-t`).
///
/// FIXME(pcwalton): This is not currently used. All rendering is sequential.
n_render_threads: uint,
/// True to use CPU painting, false to use GPU painting via Skia-GL (`-c`). Note that
/// compositing is always done on the GPU.
cpu_painting: bool,
/// The maximum size of each tile in pixels (`-s`).
tile_size: uint,
/// `None` to disable the profiler or `Some` with an interval in seconds to enable it and cause
/// it to produce output on that interval (`-p`).
profiler_period: Option<f64>,
/// True to exit after the page load (`-x`).
exit_after_load: bool,
output_file: Option<~str>,
}
pub fn from_cmdline_args(args: &[~str]) -> Opts {
|
use extra::getopts;
let args = args.tail();
let opts = ~[
getopts::optflag("c"), // CPU rendering
getopts::optopt("o"), // output file
getopts::optopt("r"), // rendering backend
getopts::optopt("s"), // size of tiles
getopts::optopt("t"), // threads to render with
getopts::optflagopt("p"), // profiler flag and output interval
getopts::optflag("x"), // exit after load flag
];
let opt_match = match getopts::getopts(args, opts) {
result::Ok(m) => m,
result::Err(f) => fail!(f.to_err_msg()),
};
let urls = if opt_match.free.is_empty() {
fail!(~"servo asks that you provide 1 or more URLs")
} else {
opt_match.free.clone()
};
let render_backend = match opt_match.opt_str("r") {
Some(backend_str) => {
if backend_str == ~"direct2d" {
Direct2DBackend
} else if backend_str == ~"core-graphics" {
CoreGraphicsBackend
} else if backend_str == ~"core-graphics-accelerated" {
CoreGraphicsAcceleratedBackend
} else if backend_str == ~"cairo" {
CairoBackend
} else if backend_str == ~"skia" {
SkiaBackend
} else {
fail!(~"unknown backend type")
}
}
None => SkiaBackend
};
let tile_size: uint = match opt_match.opt_str("s") {
Some(tile_size_str) => FromStr::from_str(tile_size_str).unwrap(),
None => 512,
};
let n_render_threads: uint = match opt_match.opt_str("t") {
Some(n_render_threads_str) => FromStr::from_str(n_render_threads_str).unwrap(),
None => 1, // FIXME: Number of cores.
};
// if only flag is present, default to 5 second period
let profiler_period = do opt_match.opt_default("p", "5").map |period| {
FromStr::from_str(*period).unwrap()
};
let exit_after_load = opt_match.opt_present("x");
let output_file = opt_match.opt_str("o");
let cpu_painting = opt_match.opt_present("c");
Opts {
urls: urls,
render_backend: render_backend,
n_render_threads: n_render_threads,
cpu_painting: cpu_painting,
tile_size: tile_size,
profiler_period: profiler_period,
exit_after_load: exit_after_load,
output_file: output_file,
}
}
|
random_line_split
|
|
opts.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Configuration options for a single run of the servo application. Created
//! from command line arguments.
use azure::azure_hl::{BackendType, CairoBackend, CoreGraphicsBackend};
use azure::azure_hl::{CoreGraphicsAcceleratedBackend, Direct2DBackend, SkiaBackend};
use extra::getopts;
use std::result;
use std::uint;
/// Global flags for Servo, currently set on the command line.
#[deriving(Clone)]
pub struct Opts {
/// The initial URLs to load.
urls: ~[~str],
/// The rendering backend to use (`-r`).
render_backend: BackendType,
/// How many threads to use for CPU rendering (`-t`).
///
/// FIXME(pcwalton): This is not currently used. All rendering is sequential.
n_render_threads: uint,
/// True to use CPU painting, false to use GPU painting via Skia-GL (`-c`). Note that
/// compositing is always done on the GPU.
cpu_painting: bool,
/// The maximum size of each tile in pixels (`-s`).
tile_size: uint,
/// `None` to disable the profiler or `Some` with an interval in seconds to enable it and cause
/// it to produce output on that interval (`-p`).
profiler_period: Option<f64>,
/// True to exit after the page load (`-x`).
exit_after_load: bool,
output_file: Option<~str>,
}
pub fn
|
(args: &[~str]) -> Opts {
use extra::getopts;
let args = args.tail();
let opts = ~[
getopts::optflag("c"), // CPU rendering
getopts::optopt("o"), // output file
getopts::optopt("r"), // rendering backend
getopts::optopt("s"), // size of tiles
getopts::optopt("t"), // threads to render with
getopts::optflagopt("p"), // profiler flag and output interval
getopts::optflag("x"), // exit after load flag
];
let opt_match = match getopts::getopts(args, opts) {
result::Ok(m) => m,
result::Err(f) => fail!(f.to_err_msg()),
};
let urls = if opt_match.free.is_empty() {
fail!(~"servo asks that you provide 1 or more URLs")
} else {
opt_match.free.clone()
};
let render_backend = match opt_match.opt_str("r") {
Some(backend_str) => {
if backend_str == ~"direct2d" {
Direct2DBackend
} else if backend_str == ~"core-graphics" {
CoreGraphicsBackend
} else if backend_str == ~"core-graphics-accelerated" {
CoreGraphicsAcceleratedBackend
} else if backend_str == ~"cairo" {
CairoBackend
} else if backend_str == ~"skia" {
SkiaBackend
} else {
fail!(~"unknown backend type")
}
}
None => SkiaBackend
};
let tile_size: uint = match opt_match.opt_str("s") {
Some(tile_size_str) => FromStr::from_str(tile_size_str).unwrap(),
None => 512,
};
let n_render_threads: uint = match opt_match.opt_str("t") {
Some(n_render_threads_str) => FromStr::from_str(n_render_threads_str).unwrap(),
None => 1, // FIXME: Number of cores.
};
// if only flag is present, default to 5 second period
let profiler_period = do opt_match.opt_default("p", "5").map |period| {
FromStr::from_str(*period).unwrap()
};
let exit_after_load = opt_match.opt_present("x");
let output_file = opt_match.opt_str("o");
let cpu_painting = opt_match.opt_present("c");
Opts {
urls: urls,
render_backend: render_backend,
n_render_threads: n_render_threads,
cpu_painting: cpu_painting,
tile_size: tile_size,
profiler_period: profiler_period,
exit_after_load: exit_after_load,
output_file: output_file,
}
}
|
from_cmdline_args
|
identifier_name
|
opts.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Configuration options for a single run of the servo application. Created
//! from command line arguments.
use azure::azure_hl::{BackendType, CairoBackend, CoreGraphicsBackend};
use azure::azure_hl::{CoreGraphicsAcceleratedBackend, Direct2DBackend, SkiaBackend};
use extra::getopts;
use std::result;
use std::uint;
/// Global flags for Servo, currently set on the command line.
#[deriving(Clone)]
pub struct Opts {
/// The initial URLs to load.
urls: ~[~str],
/// The rendering backend to use (`-r`).
render_backend: BackendType,
/// How many threads to use for CPU rendering (`-t`).
///
/// FIXME(pcwalton): This is not currently used. All rendering is sequential.
n_render_threads: uint,
/// True to use CPU painting, false to use GPU painting via Skia-GL (`-c`). Note that
/// compositing is always done on the GPU.
cpu_painting: bool,
/// The maximum size of each tile in pixels (`-s`).
tile_size: uint,
/// `None` to disable the profiler or `Some` with an interval in seconds to enable it and cause
/// it to produce output on that interval (`-p`).
profiler_period: Option<f64>,
/// True to exit after the page load (`-x`).
exit_after_load: bool,
output_file: Option<~str>,
}
pub fn from_cmdline_args(args: &[~str]) -> Opts {
use extra::getopts;
let args = args.tail();
let opts = ~[
getopts::optflag("c"), // CPU rendering
getopts::optopt("o"), // output file
getopts::optopt("r"), // rendering backend
getopts::optopt("s"), // size of tiles
getopts::optopt("t"), // threads to render with
getopts::optflagopt("p"), // profiler flag and output interval
getopts::optflag("x"), // exit after load flag
];
let opt_match = match getopts::getopts(args, opts) {
result::Ok(m) => m,
result::Err(f) => fail!(f.to_err_msg()),
};
let urls = if opt_match.free.is_empty() {
fail!(~"servo asks that you provide 1 or more URLs")
} else {
opt_match.free.clone()
};
let render_backend = match opt_match.opt_str("r") {
Some(backend_str) => {
if backend_str == ~"direct2d" {
Direct2DBackend
} else if backend_str == ~"core-graphics" {
CoreGraphicsBackend
} else if backend_str == ~"core-graphics-accelerated" {
CoreGraphicsAcceleratedBackend
} else if backend_str == ~"cairo" {
CairoBackend
} else if backend_str == ~"skia" {
SkiaBackend
} else
|
}
None => SkiaBackend
};
let tile_size: uint = match opt_match.opt_str("s") {
Some(tile_size_str) => FromStr::from_str(tile_size_str).unwrap(),
None => 512,
};
let n_render_threads: uint = match opt_match.opt_str("t") {
Some(n_render_threads_str) => FromStr::from_str(n_render_threads_str).unwrap(),
None => 1, // FIXME: Number of cores.
};
// if only flag is present, default to 5 second period
let profiler_period = do opt_match.opt_default("p", "5").map |period| {
FromStr::from_str(*period).unwrap()
};
let exit_after_load = opt_match.opt_present("x");
let output_file = opt_match.opt_str("o");
let cpu_painting = opt_match.opt_present("c");
Opts {
urls: urls,
render_backend: render_backend,
n_render_threads: n_render_threads,
cpu_painting: cpu_painting,
tile_size: tile_size,
profiler_period: profiler_period,
exit_after_load: exit_after_load,
output_file: output_file,
}
}
|
{
fail!(~"unknown backend type")
}
|
conditional_block
|
opts.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Configuration options for a single run of the servo application. Created
//! from command line arguments.
use azure::azure_hl::{BackendType, CairoBackend, CoreGraphicsBackend};
use azure::azure_hl::{CoreGraphicsAcceleratedBackend, Direct2DBackend, SkiaBackend};
use extra::getopts;
use std::result;
use std::uint;
/// Global flags for Servo, currently set on the command line.
#[deriving(Clone)]
pub struct Opts {
/// The initial URLs to load.
urls: ~[~str],
/// The rendering backend to use (`-r`).
render_backend: BackendType,
/// How many threads to use for CPU rendering (`-t`).
///
/// FIXME(pcwalton): This is not currently used. All rendering is sequential.
n_render_threads: uint,
/// True to use CPU painting, false to use GPU painting via Skia-GL (`-c`). Note that
/// compositing is always done on the GPU.
cpu_painting: bool,
/// The maximum size of each tile in pixels (`-s`).
tile_size: uint,
/// `None` to disable the profiler or `Some` with an interval in seconds to enable it and cause
/// it to produce output on that interval (`-p`).
profiler_period: Option<f64>,
/// True to exit after the page load (`-x`).
exit_after_load: bool,
output_file: Option<~str>,
}
pub fn from_cmdline_args(args: &[~str]) -> Opts
|
let urls = if opt_match.free.is_empty() {
fail!(~"servo asks that you provide 1 or more URLs")
} else {
opt_match.free.clone()
};
let render_backend = match opt_match.opt_str("r") {
Some(backend_str) => {
if backend_str == ~"direct2d" {
Direct2DBackend
} else if backend_str == ~"core-graphics" {
CoreGraphicsBackend
} else if backend_str == ~"core-graphics-accelerated" {
CoreGraphicsAcceleratedBackend
} else if backend_str == ~"cairo" {
CairoBackend
} else if backend_str == ~"skia" {
SkiaBackend
} else {
fail!(~"unknown backend type")
}
}
None => SkiaBackend
};
let tile_size: uint = match opt_match.opt_str("s") {
Some(tile_size_str) => FromStr::from_str(tile_size_str).unwrap(),
None => 512,
};
let n_render_threads: uint = match opt_match.opt_str("t") {
Some(n_render_threads_str) => FromStr::from_str(n_render_threads_str).unwrap(),
None => 1, // FIXME: Number of cores.
};
// if only flag is present, default to 5 second period
let profiler_period = do opt_match.opt_default("p", "5").map |period| {
FromStr::from_str(*period).unwrap()
};
let exit_after_load = opt_match.opt_present("x");
let output_file = opt_match.opt_str("o");
let cpu_painting = opt_match.opt_present("c");
Opts {
urls: urls,
render_backend: render_backend,
n_render_threads: n_render_threads,
cpu_painting: cpu_painting,
tile_size: tile_size,
profiler_period: profiler_period,
exit_after_load: exit_after_load,
output_file: output_file,
}
}
|
{
use extra::getopts;
let args = args.tail();
let opts = ~[
getopts::optflag("c"), // CPU rendering
getopts::optopt("o"), // output file
getopts::optopt("r"), // rendering backend
getopts::optopt("s"), // size of tiles
getopts::optopt("t"), // threads to render with
getopts::optflagopt("p"), // profiler flag and output interval
getopts::optflag("x"), // exit after load flag
];
let opt_match = match getopts::getopts(args, opts) {
result::Ok(m) => m,
result::Err(f) => fail!(f.to_err_msg()),
};
|
identifier_body
|
partial_ord.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::OrderingOp::*;
use ast;
use ast::{MetaItem, Expr};
use codemap::Span;
use ext::base::{ExtCtxt, Annotatable};
use ext::build::AstBuilder;
use ext::deriving::generic::*;
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
use ptr::P;
pub fn expand_deriving_partial_ord(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: Annotatable,
push: &mut FnMut(Annotatable))
{
macro_rules! md {
($name:expr, $op:expr, $equal:expr) => { {
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
MethodDef {
name: $name,
generics: LifetimeBounds::empty(),
explicit_self: borrowed_explicit_self(),
args: vec!(borrowed_self()),
ret_ty: Literal(path_local!(bool)),
attributes: attrs,
is_unsafe: false,
combine_substructure: combine_substructure(Box::new(|cx, span, substr| {
cs_op($op, $equal, cx, span, substr)
}))
}
} }
}
let ordering_ty = Literal(path_std!(cx, core::cmp::Ordering));
let ret_ty = Literal(Path::new_(pathvec_std!(cx, core::option::Option),
None,
vec![Box::new(ordering_ty)],
true));
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
let partial_cmp_def = MethodDef {
name: "partial_cmp",
generics: LifetimeBounds::empty(),
explicit_self: borrowed_explicit_self(),
args: vec![borrowed_self()],
ret_ty: ret_ty,
attributes: attrs,
is_unsafe: false,
combine_substructure: combine_substructure(Box::new(|cx, span, substr| {
cs_partial_cmp(cx, span, substr)
}))
};
let trait_def = TraitDef {
span: span,
attributes: vec![],
path: path_std!(cx, core::cmp::PartialOrd),
additional_bounds: vec![],
generics: LifetimeBounds::empty(),
methods: vec![
partial_cmp_def,
md!("lt", true, false),
md!("le", true, true),
md!("gt", false, false),
md!("ge", false, true)
],
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, &item, push)
}
#[derive(Copy, Clone)]
pub enum OrderingOp {
PartialCmpOp, LtOp, LeOp, GtOp, GeOp,
}
pub fn
|
(cx: &mut ExtCtxt,
span: Span,
op: OrderingOp,
self_arg_tags: &[ast::Ident]) -> P<ast::Expr> {
let lft = cx.expr_ident(span, self_arg_tags[0]);
let rgt = cx.expr_addr_of(span, cx.expr_ident(span, self_arg_tags[1]));
let op_str = match op {
PartialCmpOp => "partial_cmp",
LtOp => "lt", LeOp => "le",
GtOp => "gt", GeOp => "ge",
};
cx.expr_method_call(span, lft, cx.ident_of(op_str), vec![rgt])
}
pub fn cs_partial_cmp(cx: &mut ExtCtxt, span: Span,
substr: &Substructure) -> P<Expr> {
let test_id = cx.ident_of("__test");
let ordering = cx.path_global(span,
vec!(cx.ident_of_std("core"),
cx.ident_of("cmp"),
cx.ident_of("Ordering"),
cx.ident_of("Equal")));
let ordering = cx.expr_path(ordering);
let equals_expr = cx.expr_some(span, ordering);
let partial_cmp_path = vec![
cx.ident_of_std("core"),
cx.ident_of("cmp"),
cx.ident_of("PartialOrd"),
cx.ident_of("partial_cmp"),
];
/*
Builds:
let __test = ::std::cmp::PartialOrd::partial_cmp(&self_field1, &other_field1);
if __test == ::std::option::Option::Some(::std::cmp::Ordering::Equal) {
let __test = ::std::cmp::PartialOrd::partial_cmp(&self_field2, &other_field2);
if __test == ::std::option::Option::Some(::std::cmp::Ordering::Equal) {
...
} else {
__test
}
} else {
__test
}
FIXME #6449: These `if`s could/should be `match`es.
*/
cs_fold(
// foldr nests the if-elses correctly, leaving the first field
// as the outermost one, and the last as the innermost.
false,
|cx, span, old, self_f, other_fs| {
// let __test = new;
// if __test == Some(::std::cmp::Ordering::Equal) {
// old
// } else {
// __test
// }
let new = {
let other_f = match (other_fs.len(), other_fs.get(0)) {
(1, Some(o_f)) => o_f,
_ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`"),
};
let args = vec![
cx.expr_addr_of(span, self_f),
cx.expr_addr_of(span, other_f.clone()),
];
cx.expr_call_global(span, partial_cmp_path.clone(), args)
};
let assign = cx.stmt_let(span, false, test_id, new);
let cond = cx.expr_binary(span, ast::BiEq,
cx.expr_ident(span, test_id),
equals_expr.clone());
let if_ = cx.expr_if(span,
cond,
old, Some(cx.expr_ident(span, test_id)));
cx.expr_block(cx.block(span, vec!(assign), Some(if_)))
},
equals_expr.clone(),
Box::new(|cx, span, (self_args, tag_tuple), _non_self_args| {
if self_args.len()!= 2 {
cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
} else {
some_ordering_collapsed(cx, span, PartialCmpOp, tag_tuple)
}
}),
cx, span, substr)
}
/// Strict inequality.
fn cs_op(less: bool, equal: bool, cx: &mut ExtCtxt,
span: Span, substr: &Substructure) -> P<Expr> {
let op = if less {ast::BiLt} else {ast::BiGt};
cs_fold(
false, // need foldr,
|cx, span, subexpr, self_f, other_fs| {
/*
build up a series of chain ||'s and &&'s from the inside
out (hence foldr) to get lexical ordering, i.e. for op ==
`ast::lt`
```
self.f1 < other.f1 || (!(other.f1 < self.f1) &&
(self.f2 < other.f2 || (!(other.f2 < self.f2) &&
(false)
))
)
```
The optimiser should remove the redundancy. We explicitly
get use the binops to avoid auto-deref dereferencing too many
layers of pointers, if the type includes pointers.
*/
let other_f = match (other_fs.len(), other_fs.get(0)) {
(1, Some(o_f)) => o_f,
_ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
};
let cmp = cx.expr_binary(span, op, self_f.clone(), other_f.clone());
let not_cmp = cx.expr_unary(span, ast::UnNot,
cx.expr_binary(span, op, other_f.clone(), self_f));
let and = cx.expr_binary(span, ast::BiAnd, not_cmp, subexpr);
cx.expr_binary(span, ast::BiOr, cmp, and)
},
cx.expr_bool(span, equal),
Box::new(|cx, span, (self_args, tag_tuple), _non_self_args| {
if self_args.len()!= 2 {
cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
} else {
let op = match (less, equal) {
(true, true) => LeOp, (true, false) => LtOp,
(false, true) => GeOp, (false, false) => GtOp,
};
some_ordering_collapsed(cx, span, op, tag_tuple)
}
}),
cx, span, substr)
}
|
some_ordering_collapsed
|
identifier_name
|
partial_ord.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::OrderingOp::*;
use ast;
use ast::{MetaItem, Expr};
use codemap::Span;
use ext::base::{ExtCtxt, Annotatable};
use ext::build::AstBuilder;
use ext::deriving::generic::*;
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
use ptr::P;
pub fn expand_deriving_partial_ord(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: Annotatable,
push: &mut FnMut(Annotatable))
|
let ordering_ty = Literal(path_std!(cx, core::cmp::Ordering));
let ret_ty = Literal(Path::new_(pathvec_std!(cx, core::option::Option),
None,
vec![Box::new(ordering_ty)],
true));
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
let partial_cmp_def = MethodDef {
name: "partial_cmp",
generics: LifetimeBounds::empty(),
explicit_self: borrowed_explicit_self(),
args: vec![borrowed_self()],
ret_ty: ret_ty,
attributes: attrs,
is_unsafe: false,
combine_substructure: combine_substructure(Box::new(|cx, span, substr| {
cs_partial_cmp(cx, span, substr)
}))
};
let trait_def = TraitDef {
span: span,
attributes: vec![],
path: path_std!(cx, core::cmp::PartialOrd),
additional_bounds: vec![],
generics: LifetimeBounds::empty(),
methods: vec![
partial_cmp_def,
md!("lt", true, false),
md!("le", true, true),
md!("gt", false, false),
md!("ge", false, true)
],
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, &item, push)
}
#[derive(Copy, Clone)]
pub enum OrderingOp {
PartialCmpOp, LtOp, LeOp, GtOp, GeOp,
}
pub fn some_ordering_collapsed(cx: &mut ExtCtxt,
span: Span,
op: OrderingOp,
self_arg_tags: &[ast::Ident]) -> P<ast::Expr> {
let lft = cx.expr_ident(span, self_arg_tags[0]);
let rgt = cx.expr_addr_of(span, cx.expr_ident(span, self_arg_tags[1]));
let op_str = match op {
PartialCmpOp => "partial_cmp",
LtOp => "lt", LeOp => "le",
GtOp => "gt", GeOp => "ge",
};
cx.expr_method_call(span, lft, cx.ident_of(op_str), vec![rgt])
}
pub fn cs_partial_cmp(cx: &mut ExtCtxt, span: Span,
substr: &Substructure) -> P<Expr> {
let test_id = cx.ident_of("__test");
let ordering = cx.path_global(span,
vec!(cx.ident_of_std("core"),
cx.ident_of("cmp"),
cx.ident_of("Ordering"),
cx.ident_of("Equal")));
let ordering = cx.expr_path(ordering);
let equals_expr = cx.expr_some(span, ordering);
let partial_cmp_path = vec![
cx.ident_of_std("core"),
cx.ident_of("cmp"),
cx.ident_of("PartialOrd"),
cx.ident_of("partial_cmp"),
];
/*
Builds:
let __test = ::std::cmp::PartialOrd::partial_cmp(&self_field1, &other_field1);
if __test == ::std::option::Option::Some(::std::cmp::Ordering::Equal) {
let __test = ::std::cmp::PartialOrd::partial_cmp(&self_field2, &other_field2);
if __test == ::std::option::Option::Some(::std::cmp::Ordering::Equal) {
...
} else {
__test
}
} else {
__test
}
FIXME #6449: These `if`s could/should be `match`es.
*/
cs_fold(
// foldr nests the if-elses correctly, leaving the first field
// as the outermost one, and the last as the innermost.
false,
|cx, span, old, self_f, other_fs| {
// let __test = new;
// if __test == Some(::std::cmp::Ordering::Equal) {
// old
// } else {
// __test
// }
let new = {
let other_f = match (other_fs.len(), other_fs.get(0)) {
(1, Some(o_f)) => o_f,
_ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`"),
};
let args = vec![
cx.expr_addr_of(span, self_f),
cx.expr_addr_of(span, other_f.clone()),
];
cx.expr_call_global(span, partial_cmp_path.clone(), args)
};
let assign = cx.stmt_let(span, false, test_id, new);
let cond = cx.expr_binary(span, ast::BiEq,
cx.expr_ident(span, test_id),
equals_expr.clone());
let if_ = cx.expr_if(span,
cond,
old, Some(cx.expr_ident(span, test_id)));
cx.expr_block(cx.block(span, vec!(assign), Some(if_)))
},
equals_expr.clone(),
Box::new(|cx, span, (self_args, tag_tuple), _non_self_args| {
if self_args.len()!= 2 {
cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
} else {
some_ordering_collapsed(cx, span, PartialCmpOp, tag_tuple)
}
}),
cx, span, substr)
}
/// Strict inequality.
fn cs_op(less: bool, equal: bool, cx: &mut ExtCtxt,
span: Span, substr: &Substructure) -> P<Expr> {
let op = if less {ast::BiLt} else {ast::BiGt};
cs_fold(
false, // need foldr,
|cx, span, subexpr, self_f, other_fs| {
/*
build up a series of chain ||'s and &&'s from the inside
out (hence foldr) to get lexical ordering, i.e. for op ==
`ast::lt`
```
self.f1 < other.f1 || (!(other.f1 < self.f1) &&
(self.f2 < other.f2 || (!(other.f2 < self.f2) &&
(false)
))
)
```
The optimiser should remove the redundancy. We explicitly
get use the binops to avoid auto-deref dereferencing too many
layers of pointers, if the type includes pointers.
*/
let other_f = match (other_fs.len(), other_fs.get(0)) {
(1, Some(o_f)) => o_f,
_ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
};
let cmp = cx.expr_binary(span, op, self_f.clone(), other_f.clone());
let not_cmp = cx.expr_unary(span, ast::UnNot,
cx.expr_binary(span, op, other_f.clone(), self_f));
let and = cx.expr_binary(span, ast::BiAnd, not_cmp, subexpr);
cx.expr_binary(span, ast::BiOr, cmp, and)
},
cx.expr_bool(span, equal),
Box::new(|cx, span, (self_args, tag_tuple), _non_self_args| {
if self_args.len()!= 2 {
cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
} else {
let op = match (less, equal) {
(true, true) => LeOp, (true, false) => LtOp,
(false, true) => GeOp, (false, false) => GtOp,
};
some_ordering_collapsed(cx, span, op, tag_tuple)
}
}),
cx, span, substr)
}
|
{
macro_rules! md {
($name:expr, $op:expr, $equal:expr) => { {
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
MethodDef {
name: $name,
generics: LifetimeBounds::empty(),
explicit_self: borrowed_explicit_self(),
args: vec!(borrowed_self()),
ret_ty: Literal(path_local!(bool)),
attributes: attrs,
is_unsafe: false,
combine_substructure: combine_substructure(Box::new(|cx, span, substr| {
cs_op($op, $equal, cx, span, substr)
}))
}
} }
}
|
identifier_body
|
partial_ord.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::OrderingOp::*;
use ast;
use ast::{MetaItem, Expr};
use codemap::Span;
use ext::base::{ExtCtxt, Annotatable};
use ext::build::AstBuilder;
use ext::deriving::generic::*;
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
use ptr::P;
pub fn expand_deriving_partial_ord(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: Annotatable,
push: &mut FnMut(Annotatable))
{
macro_rules! md {
($name:expr, $op:expr, $equal:expr) => { {
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
MethodDef {
name: $name,
generics: LifetimeBounds::empty(),
explicit_self: borrowed_explicit_self(),
args: vec!(borrowed_self()),
ret_ty: Literal(path_local!(bool)),
attributes: attrs,
is_unsafe: false,
combine_substructure: combine_substructure(Box::new(|cx, span, substr| {
cs_op($op, $equal, cx, span, substr)
}))
}
} }
}
let ordering_ty = Literal(path_std!(cx, core::cmp::Ordering));
let ret_ty = Literal(Path::new_(pathvec_std!(cx, core::option::Option),
None,
vec![Box::new(ordering_ty)],
true));
let inline = cx.meta_word(span, InternedString::new("inline"));
let attrs = vec!(cx.attribute(span, inline));
let partial_cmp_def = MethodDef {
name: "partial_cmp",
generics: LifetimeBounds::empty(),
explicit_self: borrowed_explicit_self(),
args: vec![borrowed_self()],
ret_ty: ret_ty,
attributes: attrs,
is_unsafe: false,
combine_substructure: combine_substructure(Box::new(|cx, span, substr| {
cs_partial_cmp(cx, span, substr)
}))
};
let trait_def = TraitDef {
span: span,
attributes: vec![],
path: path_std!(cx, core::cmp::PartialOrd),
additional_bounds: vec![],
generics: LifetimeBounds::empty(),
methods: vec![
partial_cmp_def,
md!("lt", true, false),
md!("le", true, true),
md!("gt", false, false),
md!("ge", false, true)
],
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, &item, push)
}
#[derive(Copy, Clone)]
pub enum OrderingOp {
PartialCmpOp, LtOp, LeOp, GtOp, GeOp,
}
pub fn some_ordering_collapsed(cx: &mut ExtCtxt,
span: Span,
op: OrderingOp,
self_arg_tags: &[ast::Ident]) -> P<ast::Expr> {
let lft = cx.expr_ident(span, self_arg_tags[0]);
let rgt = cx.expr_addr_of(span, cx.expr_ident(span, self_arg_tags[1]));
let op_str = match op {
PartialCmpOp => "partial_cmp",
LtOp => "lt", LeOp => "le",
GtOp => "gt", GeOp => "ge",
};
cx.expr_method_call(span, lft, cx.ident_of(op_str), vec![rgt])
}
pub fn cs_partial_cmp(cx: &mut ExtCtxt, span: Span,
substr: &Substructure) -> P<Expr> {
let test_id = cx.ident_of("__test");
let ordering = cx.path_global(span,
vec!(cx.ident_of_std("core"),
cx.ident_of("cmp"),
cx.ident_of("Ordering"),
cx.ident_of("Equal")));
let ordering = cx.expr_path(ordering);
let equals_expr = cx.expr_some(span, ordering);
let partial_cmp_path = vec![
cx.ident_of_std("core"),
cx.ident_of("cmp"),
cx.ident_of("PartialOrd"),
cx.ident_of("partial_cmp"),
];
/*
Builds:
let __test = ::std::cmp::PartialOrd::partial_cmp(&self_field1, &other_field1);
if __test == ::std::option::Option::Some(::std::cmp::Ordering::Equal) {
let __test = ::std::cmp::PartialOrd::partial_cmp(&self_field2, &other_field2);
if __test == ::std::option::Option::Some(::std::cmp::Ordering::Equal) {
...
} else {
__test
|
FIXME #6449: These `if`s could/should be `match`es.
*/
cs_fold(
// foldr nests the if-elses correctly, leaving the first field
// as the outermost one, and the last as the innermost.
false,
|cx, span, old, self_f, other_fs| {
// let __test = new;
// if __test == Some(::std::cmp::Ordering::Equal) {
// old
// } else {
// __test
// }
let new = {
let other_f = match (other_fs.len(), other_fs.get(0)) {
(1, Some(o_f)) => o_f,
_ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`"),
};
let args = vec![
cx.expr_addr_of(span, self_f),
cx.expr_addr_of(span, other_f.clone()),
];
cx.expr_call_global(span, partial_cmp_path.clone(), args)
};
let assign = cx.stmt_let(span, false, test_id, new);
let cond = cx.expr_binary(span, ast::BiEq,
cx.expr_ident(span, test_id),
equals_expr.clone());
let if_ = cx.expr_if(span,
cond,
old, Some(cx.expr_ident(span, test_id)));
cx.expr_block(cx.block(span, vec!(assign), Some(if_)))
},
equals_expr.clone(),
Box::new(|cx, span, (self_args, tag_tuple), _non_self_args| {
if self_args.len()!= 2 {
cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
} else {
some_ordering_collapsed(cx, span, PartialCmpOp, tag_tuple)
}
}),
cx, span, substr)
}
/// Strict inequality.
fn cs_op(less: bool, equal: bool, cx: &mut ExtCtxt,
span: Span, substr: &Substructure) -> P<Expr> {
let op = if less {ast::BiLt} else {ast::BiGt};
cs_fold(
false, // need foldr,
|cx, span, subexpr, self_f, other_fs| {
/*
build up a series of chain ||'s and &&'s from the inside
out (hence foldr) to get lexical ordering, i.e. for op ==
`ast::lt`
```
self.f1 < other.f1 || (!(other.f1 < self.f1) &&
(self.f2 < other.f2 || (!(other.f2 < self.f2) &&
(false)
))
)
```
The optimiser should remove the redundancy. We explicitly
get use the binops to avoid auto-deref dereferencing too many
layers of pointers, if the type includes pointers.
*/
let other_f = match (other_fs.len(), other_fs.get(0)) {
(1, Some(o_f)) => o_f,
_ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
};
let cmp = cx.expr_binary(span, op, self_f.clone(), other_f.clone());
let not_cmp = cx.expr_unary(span, ast::UnNot,
cx.expr_binary(span, op, other_f.clone(), self_f));
let and = cx.expr_binary(span, ast::BiAnd, not_cmp, subexpr);
cx.expr_binary(span, ast::BiOr, cmp, and)
},
cx.expr_bool(span, equal),
Box::new(|cx, span, (self_args, tag_tuple), _non_self_args| {
if self_args.len()!= 2 {
cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`")
} else {
let op = match (less, equal) {
(true, true) => LeOp, (true, false) => LtOp,
(false, true) => GeOp, (false, false) => GtOp,
};
some_ordering_collapsed(cx, span, op, tag_tuple)
}
}),
cx, span, substr)
}
|
}
} else {
__test
}
|
random_line_split
|
fold.rs
|
use std::mem;
use {Task, Future, Poll, IntoFuture};
use stream::Stream;
/// A future used to collect all the results of a stream into one generic type.
///
/// This future is returned by the `Stream::fold` method.
pub struct
|
<S, F, Fut, T> where Fut: IntoFuture {
stream: S,
f: F,
state: State<T, Fut::Future>,
}
enum State<T, Fut> {
/// Placeholder state when doing work
Empty,
/// Ready to process the next stream item; current accumulator is the `T`
Ready(T),
/// Working on a future the process the previous stream item
Processing(Fut),
}
pub fn new<S, F, Fut, T>(s: S, f: F, t: T) -> Fold<S, F, Fut, T>
where S: Stream,
F: FnMut(T, S::Item) -> Fut + Send +'static,
Fut: IntoFuture<Item = T>,
Fut::Error: Into<S::Error>,
T: Send +'static
{
Fold {
stream: s,
f: f,
state: State::Ready(t),
}
}
impl<S, F, Fut, T> Future for Fold<S, F, Fut, T>
where S: Stream,
F: FnMut(T, S::Item) -> Fut + Send +'static,
Fut: IntoFuture<Item = T>,
Fut::Error: Into<S::Error>,
T: Send +'static
{
type Item = T;
type Error = S::Error;
fn poll(&mut self, task: &mut Task) -> Poll<T, S::Error> {
loop {
match mem::replace(&mut self.state, State::Empty) {
State::Empty => panic!("cannot poll Fold twice"),
State::Ready(state) => {
match self.stream.poll(task) {
Poll::Ok(Some(e)) => {
let future = (self.f)(state, e);
self.state = State::Processing(future.into_future());
}
Poll::Ok(None) => return Poll::Ok(state),
Poll::Err(e) => return Poll::Err(e),
Poll::NotReady => {
self.state = State::Ready(state);
return Poll::NotReady
}
}
}
State::Processing(mut fut) => {
match fut.poll(task) {
Poll::Ok(state) => self.state = State::Ready(state),
Poll::Err(e) => return Poll::Err(e.into()),
Poll::NotReady => {
self.state = State::Processing(fut);
return Poll::NotReady;
}
}
}
}
}
}
fn schedule(&mut self, task: &mut Task) {
match self.state {
State::Empty => panic!("cannot `schedule` a completed Fold"),
State::Ready(_) => self.stream.schedule(task),
State::Processing(ref mut fut) => fut.schedule(task),
}
}
}
|
Fold
|
identifier_name
|
fold.rs
|
use std::mem;
use {Task, Future, Poll, IntoFuture};
use stream::Stream;
/// A future used to collect all the results of a stream into one generic type.
///
/// This future is returned by the `Stream::fold` method.
pub struct Fold<S, F, Fut, T> where Fut: IntoFuture {
stream: S,
f: F,
state: State<T, Fut::Future>,
}
enum State<T, Fut> {
/// Placeholder state when doing work
Empty,
/// Ready to process the next stream item; current accumulator is the `T`
Ready(T),
/// Working on a future the process the previous stream item
Processing(Fut),
}
pub fn new<S, F, Fut, T>(s: S, f: F, t: T) -> Fold<S, F, Fut, T>
where S: Stream,
F: FnMut(T, S::Item) -> Fut + Send +'static,
Fut: IntoFuture<Item = T>,
Fut::Error: Into<S::Error>,
T: Send +'static
{
Fold {
stream: s,
f: f,
state: State::Ready(t),
}
}
impl<S, F, Fut, T> Future for Fold<S, F, Fut, T>
where S: Stream,
F: FnMut(T, S::Item) -> Fut + Send +'static,
Fut: IntoFuture<Item = T>,
|
T: Send +'static
{
type Item = T;
type Error = S::Error;
fn poll(&mut self, task: &mut Task) -> Poll<T, S::Error> {
loop {
match mem::replace(&mut self.state, State::Empty) {
State::Empty => panic!("cannot poll Fold twice"),
State::Ready(state) => {
match self.stream.poll(task) {
Poll::Ok(Some(e)) => {
let future = (self.f)(state, e);
self.state = State::Processing(future.into_future());
}
Poll::Ok(None) => return Poll::Ok(state),
Poll::Err(e) => return Poll::Err(e),
Poll::NotReady => {
self.state = State::Ready(state);
return Poll::NotReady
}
}
}
State::Processing(mut fut) => {
match fut.poll(task) {
Poll::Ok(state) => self.state = State::Ready(state),
Poll::Err(e) => return Poll::Err(e.into()),
Poll::NotReady => {
self.state = State::Processing(fut);
return Poll::NotReady;
}
}
}
}
}
}
fn schedule(&mut self, task: &mut Task) {
match self.state {
State::Empty => panic!("cannot `schedule` a completed Fold"),
State::Ready(_) => self.stream.schedule(task),
State::Processing(ref mut fut) => fut.schedule(task),
}
}
}
|
Fut::Error: Into<S::Error>,
|
random_line_split
|
extern-pass-TwoU32s.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test a foreign function that accepts and returns a struct
// by value.
#[derive(PartialEq, Show)]
pub struct TwoU32s {
one: u32, two: u32
}
impl Copy for TwoU32s {}
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_extern_identity_TwoU32s(v: TwoU32s) -> TwoU32s;
}
pub fn main() {
unsafe {
let x = TwoU32s {one: 22, two: 23};
let y = rust_dbg_extern_identity_TwoU32s(x);
assert_eq!(x, y);
}
}
|
random_line_split
|
|
extern-pass-TwoU32s.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test a foreign function that accepts and returns a struct
// by value.
#[derive(PartialEq, Show)]
pub struct TwoU32s {
one: u32, two: u32
}
impl Copy for TwoU32s {}
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_extern_identity_TwoU32s(v: TwoU32s) -> TwoU32s;
}
pub fn
|
() {
unsafe {
let x = TwoU32s {one: 22, two: 23};
let y = rust_dbg_extern_identity_TwoU32s(x);
assert_eq!(x, y);
}
}
|
main
|
identifier_name
|
test_sockopt.rs
|
use rand::{thread_rng, Rng};
use nix::sys::socket::{socket, sockopt, getsockopt, setsockopt, AddressFamily, SockType, SockFlag, SockProtocol};
#[cfg(any(target_os = "android", target_os = "linux"))]
use crate::*;
// NB: FreeBSD supports LOCAL_PEERCRED for SOCK_SEQPACKET, but OSX does not.
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
))]
#[test]
pub fn test_local_peercred_seqpacket() {
use nix::{
unistd::{Gid, Uid},
sys::socket::socketpair
};
let (fd1, _fd2) = socketpair(AddressFamily::Unix, SockType::SeqPacket, None,
SockFlag::empty()).unwrap();
let xucred = getsockopt(fd1, sockopt::LocalPeerCred).unwrap();
assert_eq!(xucred.version(), 0);
assert_eq!(Uid::from_raw(xucred.uid()), Uid::current());
assert_eq!(Gid::from_raw(xucred.groups()[0]), Gid::current());
}
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "macos",
target_os = "ios"
))]
#[test]
pub fn test_local_peercred_stream() {
use nix::{
unistd::{Gid, Uid},
sys::socket::socketpair
};
let (fd1, _fd2) = socketpair(AddressFamily::Unix, SockType::Stream, None,
SockFlag::empty()).unwrap();
let xucred = getsockopt(fd1, sockopt::LocalPeerCred).unwrap();
assert_eq!(xucred.version(), 0);
assert_eq!(Uid::from_raw(xucred.uid()), Uid::current());
assert_eq!(Gid::from_raw(xucred.groups()[0]), Gid::current());
}
#[cfg(target_os = "linux")]
#[test]
fn is_so_mark_functional() {
use nix::sys::socket::sockopt;
require_capability!("is_so_mark_functional", CAP_NET_ADMIN);
let s = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), None).unwrap();
setsockopt(s, sockopt::Mark, &1337).unwrap();
let mark = getsockopt(s, sockopt::Mark).unwrap();
assert_eq!(mark, 1337);
}
#[test]
fn test_so_buf() {
let fd = socket(AddressFamily::Inet, SockType::Datagram, SockFlag::empty(), SockProtocol::Udp)
.unwrap();
let bufsize: usize = thread_rng().gen_range(4096..131_072);
setsockopt(fd, sockopt::SndBuf, &bufsize).unwrap();
let actual = getsockopt(fd, sockopt::SndBuf).unwrap();
assert!(actual >= bufsize);
setsockopt(fd, sockopt::RcvBuf, &bufsize).unwrap();
let actual = getsockopt(fd, sockopt::RcvBuf).unwrap();
assert!(actual >= bufsize);
}
#[test]
fn test_so_tcp_maxseg() {
use std::net::SocketAddr;
use std::str::FromStr;
use nix::sys::socket::{accept, bind, connect, listen, InetAddr, SockAddr};
use nix::unistd::{close, write};
let std_sa = SocketAddr::from_str("127.0.0.1:4001").unwrap();
let inet_addr = InetAddr::from_std(&std_sa);
let sock_addr = SockAddr::new_inet(inet_addr);
let rsock = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), SockProtocol::Tcp)
.unwrap();
bind(rsock, &sock_addr).unwrap();
listen(rsock, 10).unwrap();
let initial = getsockopt(rsock, sockopt::TcpMaxSeg).unwrap();
// Initial MSS is expected to be 536 (https://tools.ietf.org/html/rfc879#section-1) but some
// platforms keep it even lower. This might fail if you've tuned your initial MSS to be larger
// than 700
cfg_if! {
if #[cfg(any(target_os = "android", target_os = "linux"))] {
let segsize: u32 = 873;
assert!(initial < segsize);
setsockopt(rsock, sockopt::TcpMaxSeg, &segsize).unwrap();
} else {
assert!(initial < 700);
}
}
// Connect and check the MSS that was advertised
let ssock = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), SockProtocol::Tcp)
.unwrap();
connect(ssock, &sock_addr).unwrap();
let rsess = accept(rsock).unwrap();
write(rsess, b"hello").unwrap();
let actual = getsockopt(ssock, sockopt::TcpMaxSeg).unwrap();
// Actual max segment size takes header lengths into account, max IPv4 options (60 bytes) + max
// TCP options (40 bytes) are subtracted from the requested maximum as a lower boundary.
cfg_if! {
if #[cfg(any(target_os = "android", target_os = "linux"))] {
assert!((segsize - 100) <= actual);
assert!(actual <= segsize);
} else {
assert!(initial < actual);
assert!(536 < actual);
}
}
close(rsock).unwrap();
close(ssock).unwrap();
}
// The CI doesn't supported getsockopt and setsockopt on emulated processors.
// It's believed that a QEMU issue, the tests run ok on a fully emulated system.
// Current CI just run the binary with QEMU but the Kernel remains the same as the host.
// So the syscall doesn't work properly unless the kernel is also emulated.
#[test]
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
any(target_os = "freebsd", target_os = "linux")
))]
fn test_tcp_congestion() {
use std::ffi::OsString;
let fd = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), None).unwrap();
let val = getsockopt(fd, sockopt::TcpCongestion).unwrap();
setsockopt(fd, sockopt::TcpCongestion, &val).unwrap();
setsockopt(fd, sockopt::TcpCongestion, &OsString::from("tcp_congestion_does_not_exist")).unwrap_err();
assert_eq!(
getsockopt(fd, sockopt::TcpCongestion).unwrap(),
val
);
}
#[test]
#[cfg(any(target_os = "android", target_os = "linux"))]
fn test_bindtodevice()
|
#[test]
fn test_so_tcp_keepalive() {
let fd = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), SockProtocol::Tcp).unwrap();
setsockopt(fd, sockopt::KeepAlive, &true).unwrap();
assert!(getsockopt(fd, sockopt::KeepAlive).unwrap());
#[cfg(any(target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "linux",
target_os = "nacl"))] {
let x = getsockopt(fd, sockopt::TcpKeepIdle).unwrap();
setsockopt(fd, sockopt::TcpKeepIdle, &(x + 1)).unwrap();
assert_eq!(getsockopt(fd, sockopt::TcpKeepIdle).unwrap(), x + 1);
let x = getsockopt(fd, sockopt::TcpKeepCount).unwrap();
setsockopt(fd, sockopt::TcpKeepCount, &(x + 1)).unwrap();
assert_eq!(getsockopt(fd, sockopt::TcpKeepCount).unwrap(), x + 1);
let x = getsockopt(fd, sockopt::TcpKeepInterval).unwrap();
setsockopt(fd, sockopt::TcpKeepInterval, &(x + 1)).unwrap();
assert_eq!(getsockopt(fd, sockopt::TcpKeepInterval).unwrap(), x + 1);
}
}
#[test]
#[cfg(any(target_os = "android", target_os = "freebsd", target_os = "linux"))]
fn test_ttl_opts() {
let fd4 = socket(AddressFamily::Inet, SockType::Datagram, SockFlag::empty(), None).unwrap();
setsockopt(fd4, sockopt::Ipv4Ttl, &1)
.expect("setting ipv4ttl on an inet socket should succeed");
let fd6 = socket(AddressFamily::Inet6, SockType::Datagram, SockFlag::empty(), None).unwrap();
setsockopt(fd6, sockopt::Ipv6Ttl, &1)
.expect("setting ipv6ttl on an inet6 socket should succeed");
}
|
{
skip_if_not_root!("test_bindtodevice");
let fd = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), None).unwrap();
let val = getsockopt(fd, sockopt::BindToDevice).unwrap();
setsockopt(fd, sockopt::BindToDevice, &val).unwrap();
assert_eq!(
getsockopt(fd, sockopt::BindToDevice).unwrap(),
val
);
}
|
identifier_body
|
test_sockopt.rs
|
use rand::{thread_rng, Rng};
use nix::sys::socket::{socket, sockopt, getsockopt, setsockopt, AddressFamily, SockType, SockFlag, SockProtocol};
#[cfg(any(target_os = "android", target_os = "linux"))]
use crate::*;
// NB: FreeBSD supports LOCAL_PEERCRED for SOCK_SEQPACKET, but OSX does not.
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
))]
#[test]
pub fn test_local_peercred_seqpacket() {
use nix::{
unistd::{Gid, Uid},
sys::socket::socketpair
};
let (fd1, _fd2) = socketpair(AddressFamily::Unix, SockType::SeqPacket, None,
SockFlag::empty()).unwrap();
let xucred = getsockopt(fd1, sockopt::LocalPeerCred).unwrap();
assert_eq!(xucred.version(), 0);
assert_eq!(Uid::from_raw(xucred.uid()), Uid::current());
assert_eq!(Gid::from_raw(xucred.groups()[0]), Gid::current());
}
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "macos",
target_os = "ios"
))]
#[test]
pub fn test_local_peercred_stream() {
use nix::{
unistd::{Gid, Uid},
sys::socket::socketpair
};
let (fd1, _fd2) = socketpair(AddressFamily::Unix, SockType::Stream, None,
SockFlag::empty()).unwrap();
let xucred = getsockopt(fd1, sockopt::LocalPeerCred).unwrap();
assert_eq!(xucred.version(), 0);
assert_eq!(Uid::from_raw(xucred.uid()), Uid::current());
assert_eq!(Gid::from_raw(xucred.groups()[0]), Gid::current());
}
#[cfg(target_os = "linux")]
#[test]
fn is_so_mark_functional() {
use nix::sys::socket::sockopt;
require_capability!("is_so_mark_functional", CAP_NET_ADMIN);
let s = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), None).unwrap();
setsockopt(s, sockopt::Mark, &1337).unwrap();
let mark = getsockopt(s, sockopt::Mark).unwrap();
assert_eq!(mark, 1337);
}
#[test]
fn test_so_buf() {
let fd = socket(AddressFamily::Inet, SockType::Datagram, SockFlag::empty(), SockProtocol::Udp)
.unwrap();
let bufsize: usize = thread_rng().gen_range(4096..131_072);
setsockopt(fd, sockopt::SndBuf, &bufsize).unwrap();
let actual = getsockopt(fd, sockopt::SndBuf).unwrap();
assert!(actual >= bufsize);
setsockopt(fd, sockopt::RcvBuf, &bufsize).unwrap();
let actual = getsockopt(fd, sockopt::RcvBuf).unwrap();
assert!(actual >= bufsize);
}
#[test]
fn test_so_tcp_maxseg() {
use std::net::SocketAddr;
use std::str::FromStr;
use nix::sys::socket::{accept, bind, connect, listen, InetAddr, SockAddr};
use nix::unistd::{close, write};
let std_sa = SocketAddr::from_str("127.0.0.1:4001").unwrap();
let inet_addr = InetAddr::from_std(&std_sa);
let sock_addr = SockAddr::new_inet(inet_addr);
let rsock = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), SockProtocol::Tcp)
.unwrap();
bind(rsock, &sock_addr).unwrap();
listen(rsock, 10).unwrap();
let initial = getsockopt(rsock, sockopt::TcpMaxSeg).unwrap();
// Initial MSS is expected to be 536 (https://tools.ietf.org/html/rfc879#section-1) but some
// platforms keep it even lower. This might fail if you've tuned your initial MSS to be larger
// than 700
cfg_if! {
if #[cfg(any(target_os = "android", target_os = "linux"))] {
let segsize: u32 = 873;
assert!(initial < segsize);
setsockopt(rsock, sockopt::TcpMaxSeg, &segsize).unwrap();
} else {
assert!(initial < 700);
}
}
// Connect and check the MSS that was advertised
let ssock = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), SockProtocol::Tcp)
.unwrap();
connect(ssock, &sock_addr).unwrap();
let rsess = accept(rsock).unwrap();
write(rsess, b"hello").unwrap();
let actual = getsockopt(ssock, sockopt::TcpMaxSeg).unwrap();
// Actual max segment size takes header lengths into account, max IPv4 options (60 bytes) + max
// TCP options (40 bytes) are subtracted from the requested maximum as a lower boundary.
cfg_if! {
if #[cfg(any(target_os = "android", target_os = "linux"))] {
assert!((segsize - 100) <= actual);
assert!(actual <= segsize);
} else {
assert!(initial < actual);
assert!(536 < actual);
}
}
close(rsock).unwrap();
close(ssock).unwrap();
}
// The CI doesn't supported getsockopt and setsockopt on emulated processors.
// It's believed that a QEMU issue, the tests run ok on a fully emulated system.
// Current CI just run the binary with QEMU but the Kernel remains the same as the host.
// So the syscall doesn't work properly unless the kernel is also emulated.
#[test]
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
any(target_os = "freebsd", target_os = "linux")
))]
fn test_tcp_congestion() {
use std::ffi::OsString;
let fd = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), None).unwrap();
let val = getsockopt(fd, sockopt::TcpCongestion).unwrap();
setsockopt(fd, sockopt::TcpCongestion, &val).unwrap();
setsockopt(fd, sockopt::TcpCongestion, &OsString::from("tcp_congestion_does_not_exist")).unwrap_err();
assert_eq!(
getsockopt(fd, sockopt::TcpCongestion).unwrap(),
val
);
}
#[test]
#[cfg(any(target_os = "android", target_os = "linux"))]
fn test_bindtodevice() {
skip_if_not_root!("test_bindtodevice");
let fd = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), None).unwrap();
let val = getsockopt(fd, sockopt::BindToDevice).unwrap();
setsockopt(fd, sockopt::BindToDevice, &val).unwrap();
assert_eq!(
getsockopt(fd, sockopt::BindToDevice).unwrap(),
val
);
}
#[test]
fn test_so_tcp_keepalive() {
let fd = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), SockProtocol::Tcp).unwrap();
setsockopt(fd, sockopt::KeepAlive, &true).unwrap();
assert!(getsockopt(fd, sockopt::KeepAlive).unwrap());
#[cfg(any(target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "linux",
target_os = "nacl"))] {
let x = getsockopt(fd, sockopt::TcpKeepIdle).unwrap();
setsockopt(fd, sockopt::TcpKeepIdle, &(x + 1)).unwrap();
assert_eq!(getsockopt(fd, sockopt::TcpKeepIdle).unwrap(), x + 1);
let x = getsockopt(fd, sockopt::TcpKeepCount).unwrap();
setsockopt(fd, sockopt::TcpKeepCount, &(x + 1)).unwrap();
assert_eq!(getsockopt(fd, sockopt::TcpKeepCount).unwrap(), x + 1);
|
}
}
#[test]
#[cfg(any(target_os = "android", target_os = "freebsd", target_os = "linux"))]
fn test_ttl_opts() {
let fd4 = socket(AddressFamily::Inet, SockType::Datagram, SockFlag::empty(), None).unwrap();
setsockopt(fd4, sockopt::Ipv4Ttl, &1)
.expect("setting ipv4ttl on an inet socket should succeed");
let fd6 = socket(AddressFamily::Inet6, SockType::Datagram, SockFlag::empty(), None).unwrap();
setsockopt(fd6, sockopt::Ipv6Ttl, &1)
.expect("setting ipv6ttl on an inet6 socket should succeed");
}
|
let x = getsockopt(fd, sockopt::TcpKeepInterval).unwrap();
setsockopt(fd, sockopt::TcpKeepInterval, &(x + 1)).unwrap();
assert_eq!(getsockopt(fd, sockopt::TcpKeepInterval).unwrap(), x + 1);
|
random_line_split
|
test_sockopt.rs
|
use rand::{thread_rng, Rng};
use nix::sys::socket::{socket, sockopt, getsockopt, setsockopt, AddressFamily, SockType, SockFlag, SockProtocol};
#[cfg(any(target_os = "android", target_os = "linux"))]
use crate::*;
// NB: FreeBSD supports LOCAL_PEERCRED for SOCK_SEQPACKET, but OSX does not.
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
))]
#[test]
pub fn test_local_peercred_seqpacket() {
use nix::{
unistd::{Gid, Uid},
sys::socket::socketpair
};
let (fd1, _fd2) = socketpair(AddressFamily::Unix, SockType::SeqPacket, None,
SockFlag::empty()).unwrap();
let xucred = getsockopt(fd1, sockopt::LocalPeerCred).unwrap();
assert_eq!(xucred.version(), 0);
assert_eq!(Uid::from_raw(xucred.uid()), Uid::current());
assert_eq!(Gid::from_raw(xucred.groups()[0]), Gid::current());
}
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "macos",
target_os = "ios"
))]
#[test]
pub fn
|
() {
use nix::{
unistd::{Gid, Uid},
sys::socket::socketpair
};
let (fd1, _fd2) = socketpair(AddressFamily::Unix, SockType::Stream, None,
SockFlag::empty()).unwrap();
let xucred = getsockopt(fd1, sockopt::LocalPeerCred).unwrap();
assert_eq!(xucred.version(), 0);
assert_eq!(Uid::from_raw(xucred.uid()), Uid::current());
assert_eq!(Gid::from_raw(xucred.groups()[0]), Gid::current());
}
#[cfg(target_os = "linux")]
#[test]
fn is_so_mark_functional() {
use nix::sys::socket::sockopt;
require_capability!("is_so_mark_functional", CAP_NET_ADMIN);
let s = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), None).unwrap();
setsockopt(s, sockopt::Mark, &1337).unwrap();
let mark = getsockopt(s, sockopt::Mark).unwrap();
assert_eq!(mark, 1337);
}
#[test]
fn test_so_buf() {
let fd = socket(AddressFamily::Inet, SockType::Datagram, SockFlag::empty(), SockProtocol::Udp)
.unwrap();
let bufsize: usize = thread_rng().gen_range(4096..131_072);
setsockopt(fd, sockopt::SndBuf, &bufsize).unwrap();
let actual = getsockopt(fd, sockopt::SndBuf).unwrap();
assert!(actual >= bufsize);
setsockopt(fd, sockopt::RcvBuf, &bufsize).unwrap();
let actual = getsockopt(fd, sockopt::RcvBuf).unwrap();
assert!(actual >= bufsize);
}
#[test]
fn test_so_tcp_maxseg() {
use std::net::SocketAddr;
use std::str::FromStr;
use nix::sys::socket::{accept, bind, connect, listen, InetAddr, SockAddr};
use nix::unistd::{close, write};
let std_sa = SocketAddr::from_str("127.0.0.1:4001").unwrap();
let inet_addr = InetAddr::from_std(&std_sa);
let sock_addr = SockAddr::new_inet(inet_addr);
let rsock = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), SockProtocol::Tcp)
.unwrap();
bind(rsock, &sock_addr).unwrap();
listen(rsock, 10).unwrap();
let initial = getsockopt(rsock, sockopt::TcpMaxSeg).unwrap();
// Initial MSS is expected to be 536 (https://tools.ietf.org/html/rfc879#section-1) but some
// platforms keep it even lower. This might fail if you've tuned your initial MSS to be larger
// than 700
cfg_if! {
if #[cfg(any(target_os = "android", target_os = "linux"))] {
let segsize: u32 = 873;
assert!(initial < segsize);
setsockopt(rsock, sockopt::TcpMaxSeg, &segsize).unwrap();
} else {
assert!(initial < 700);
}
}
// Connect and check the MSS that was advertised
let ssock = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), SockProtocol::Tcp)
.unwrap();
connect(ssock, &sock_addr).unwrap();
let rsess = accept(rsock).unwrap();
write(rsess, b"hello").unwrap();
let actual = getsockopt(ssock, sockopt::TcpMaxSeg).unwrap();
// Actual max segment size takes header lengths into account, max IPv4 options (60 bytes) + max
// TCP options (40 bytes) are subtracted from the requested maximum as a lower boundary.
cfg_if! {
if #[cfg(any(target_os = "android", target_os = "linux"))] {
assert!((segsize - 100) <= actual);
assert!(actual <= segsize);
} else {
assert!(initial < actual);
assert!(536 < actual);
}
}
close(rsock).unwrap();
close(ssock).unwrap();
}
// The CI doesn't supported getsockopt and setsockopt on emulated processors.
// It's believed that a QEMU issue, the tests run ok on a fully emulated system.
// Current CI just run the binary with QEMU but the Kernel remains the same as the host.
// So the syscall doesn't work properly unless the kernel is also emulated.
#[test]
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
any(target_os = "freebsd", target_os = "linux")
))]
fn test_tcp_congestion() {
use std::ffi::OsString;
let fd = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), None).unwrap();
let val = getsockopt(fd, sockopt::TcpCongestion).unwrap();
setsockopt(fd, sockopt::TcpCongestion, &val).unwrap();
setsockopt(fd, sockopt::TcpCongestion, &OsString::from("tcp_congestion_does_not_exist")).unwrap_err();
assert_eq!(
getsockopt(fd, sockopt::TcpCongestion).unwrap(),
val
);
}
#[test]
#[cfg(any(target_os = "android", target_os = "linux"))]
fn test_bindtodevice() {
skip_if_not_root!("test_bindtodevice");
let fd = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), None).unwrap();
let val = getsockopt(fd, sockopt::BindToDevice).unwrap();
setsockopt(fd, sockopt::BindToDevice, &val).unwrap();
assert_eq!(
getsockopt(fd, sockopt::BindToDevice).unwrap(),
val
);
}
#[test]
fn test_so_tcp_keepalive() {
let fd = socket(AddressFamily::Inet, SockType::Stream, SockFlag::empty(), SockProtocol::Tcp).unwrap();
setsockopt(fd, sockopt::KeepAlive, &true).unwrap();
assert!(getsockopt(fd, sockopt::KeepAlive).unwrap());
#[cfg(any(target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "linux",
target_os = "nacl"))] {
let x = getsockopt(fd, sockopt::TcpKeepIdle).unwrap();
setsockopt(fd, sockopt::TcpKeepIdle, &(x + 1)).unwrap();
assert_eq!(getsockopt(fd, sockopt::TcpKeepIdle).unwrap(), x + 1);
let x = getsockopt(fd, sockopt::TcpKeepCount).unwrap();
setsockopt(fd, sockopt::TcpKeepCount, &(x + 1)).unwrap();
assert_eq!(getsockopt(fd, sockopt::TcpKeepCount).unwrap(), x + 1);
let x = getsockopt(fd, sockopt::TcpKeepInterval).unwrap();
setsockopt(fd, sockopt::TcpKeepInterval, &(x + 1)).unwrap();
assert_eq!(getsockopt(fd, sockopt::TcpKeepInterval).unwrap(), x + 1);
}
}
#[test]
#[cfg(any(target_os = "android", target_os = "freebsd", target_os = "linux"))]
fn test_ttl_opts() {
let fd4 = socket(AddressFamily::Inet, SockType::Datagram, SockFlag::empty(), None).unwrap();
setsockopt(fd4, sockopt::Ipv4Ttl, &1)
.expect("setting ipv4ttl on an inet socket should succeed");
let fd6 = socket(AddressFamily::Inet6, SockType::Datagram, SockFlag::empty(), None).unwrap();
setsockopt(fd6, sockopt::Ipv6Ttl, &1)
.expect("setting ipv6ttl on an inet6 socket should succeed");
}
|
test_local_peercred_stream
|
identifier_name
|
gui.rs
|
use edit::Editor;
use std::fmt::Write;
use gtk::{self, Widget, Window, Frame, EventBox, DrawingArea, WindowPosition};
use gtk::signal::Inhibit;
use gtk::traits::*;
use gdk::EventType;
use cairo::{Context, Antialias};
use cairo::enums::{FontSlant, FontWeight};
pub struct Gui {
#[allow(dead_code)]
win: &'static mut Window,
edit: Editor,
}
impl Gui {
pub fn new(win: &'static mut Window) -> Gui {
win.set_title("Propositional Logic Calculator");
win.set_border_width(10);
win.set_window_position(WindowPosition::Center);
win.set_double_buffered(true);
win.set_default_size(600, 500);
win.connect_delete_event(|_, _| {
gtk::main_quit();
Inhibit(true)
});
let da_frame = Frame::new(None).unwrap();
{
let eb = EventBox::new().unwrap();
win.connect_key_press_event(|_, e| {
if e._type == EventType::KeyPress {
let ih = ::get_gui().edit.handle_input(e);
if ih == Inhibit(true) {
::get_gui().dirty();
}
ih
} else {
Inhibit(false)
}
});
let da = DrawingArea::new().unwrap();
da.connect_draw(|w: Widget, c: Context| {
::get_gui().render(w, c);
Inhibit(false)
});
eb.add(&da);
da_frame.add(&eb);
}
win.add(&da_frame);
win.show_all();
Gui {
win: win,
edit: Editor::new(),
}
}
pub fn dirty(&self) {
self.win.queue_draw();
}
pub fn render(&self, w: Widget, c: Context) {
let (_alloc_w, _alloc_h) = (w.get_allocated_width(), w.get_allocated_height());
const FONT_SIZE: f64 = 17.0;
const SCALE: f64 = FONT_SIZE * 1.0; // 1.0 for Helvetica,
c.select_font_face("Times New Roman", FontSlant::Normal, FontWeight::Normal);
c.set_antialias(Antialias::Best);
c.set_font_size(FONT_SIZE);
c.new_path();
c.translate((SCALE * 0.1).floor(), (SCALE + 10.0).floor());
// Act like there are 10 lines in the proof for the sake of spacing.
let lines_len = if self.edit.lines().len() < 10 { 10 } else { self.edit.lines().len() };
let start_offset = ((SCALE / 2.5) + SCALE * 0.5 * ((lines_len as f64).log10().floor() + 1.0)).floor();
c.translate(start_offset, 0.0);
for l in self.edit.lines().iter() {
let mut undo_x = 0.0;
{ // Render the line number (Align the points all at the same x co-ordinate)
let s = format!("{}.", l.no + 1);
c.new_path();
c.text_path(&s);
let p = c.copy_path();
let ex = c.fill_extents();
c.new_path();
//print!("ex0: {}, ex1: {}, ex2: {}, ex3: {}", ex.0, ex.1, ex.2, ex.3);
let offset = -ex.2.floor();
//println!(", undo_x: {}", undo_x);
c.translate(offset, 0.0);
c.append_path(&p);
c.translate(-offset, 0.0);
undo_x = 0.0;
c.fill();
}
{ // Render the `step` part of the line
let s = l.step.to_gui_string(true);
c.new_path();
let trans_x = SCALE * 0.5;
undo_x += trans_x;
c.translate(trans_x, 0.0);
c.text_path(&s);
c.fill();
}
{ // Render the `method` part of the line
let s = l.method.to_gui_string(false);
c.new_path();
let trans_x = SCALE * 20.0;
undo_x += trans_x;
c.translate(trans_x, 0.0);
c.text_path(&s);
c.fill();
}
{ // Render the dependencies of the line
let mut s = String::with_capacity(32);
s.push('{');
let max = l.deps.len().wrapping_sub(1);
for (i, dep) in l.deps.iter().enumerate() {
let _ = write!(s, "{}", dep + 1);
if i!= max {
s.push(',');
s.push(' ');
}
}
s.push('}');
|
c.fill();
}
c.translate(-undo_x, SCALE + 10.0);
}
}
}
|
let trans_x = SCALE * 8.0;
undo_x += trans_x;
c.translate(trans_x, 0.0);
c.text_path(&s);
|
random_line_split
|
gui.rs
|
use edit::Editor;
use std::fmt::Write;
use gtk::{self, Widget, Window, Frame, EventBox, DrawingArea, WindowPosition};
use gtk::signal::Inhibit;
use gtk::traits::*;
use gdk::EventType;
use cairo::{Context, Antialias};
use cairo::enums::{FontSlant, FontWeight};
pub struct Gui {
#[allow(dead_code)]
win: &'static mut Window,
edit: Editor,
}
impl Gui {
pub fn new(win: &'static mut Window) -> Gui {
win.set_title("Propositional Logic Calculator");
win.set_border_width(10);
win.set_window_position(WindowPosition::Center);
win.set_double_buffered(true);
win.set_default_size(600, 500);
win.connect_delete_event(|_, _| {
gtk::main_quit();
Inhibit(true)
});
let da_frame = Frame::new(None).unwrap();
{
let eb = EventBox::new().unwrap();
win.connect_key_press_event(|_, e| {
if e._type == EventType::KeyPress {
let ih = ::get_gui().edit.handle_input(e);
if ih == Inhibit(true) {
::get_gui().dirty();
}
ih
} else {
Inhibit(false)
}
});
let da = DrawingArea::new().unwrap();
da.connect_draw(|w: Widget, c: Context| {
::get_gui().render(w, c);
Inhibit(false)
});
eb.add(&da);
da_frame.add(&eb);
}
win.add(&da_frame);
win.show_all();
Gui {
win: win,
edit: Editor::new(),
}
}
pub fn
|
(&self) {
self.win.queue_draw();
}
pub fn render(&self, w: Widget, c: Context) {
let (_alloc_w, _alloc_h) = (w.get_allocated_width(), w.get_allocated_height());
const FONT_SIZE: f64 = 17.0;
const SCALE: f64 = FONT_SIZE * 1.0; // 1.0 for Helvetica,
c.select_font_face("Times New Roman", FontSlant::Normal, FontWeight::Normal);
c.set_antialias(Antialias::Best);
c.set_font_size(FONT_SIZE);
c.new_path();
c.translate((SCALE * 0.1).floor(), (SCALE + 10.0).floor());
// Act like there are 10 lines in the proof for the sake of spacing.
let lines_len = if self.edit.lines().len() < 10 { 10 } else { self.edit.lines().len() };
let start_offset = ((SCALE / 2.5) + SCALE * 0.5 * ((lines_len as f64).log10().floor() + 1.0)).floor();
c.translate(start_offset, 0.0);
for l in self.edit.lines().iter() {
let mut undo_x = 0.0;
{ // Render the line number (Align the points all at the same x co-ordinate)
let s = format!("{}.", l.no + 1);
c.new_path();
c.text_path(&s);
let p = c.copy_path();
let ex = c.fill_extents();
c.new_path();
//print!("ex0: {}, ex1: {}, ex2: {}, ex3: {}", ex.0, ex.1, ex.2, ex.3);
let offset = -ex.2.floor();
//println!(", undo_x: {}", undo_x);
c.translate(offset, 0.0);
c.append_path(&p);
c.translate(-offset, 0.0);
undo_x = 0.0;
c.fill();
}
{ // Render the `step` part of the line
let s = l.step.to_gui_string(true);
c.new_path();
let trans_x = SCALE * 0.5;
undo_x += trans_x;
c.translate(trans_x, 0.0);
c.text_path(&s);
c.fill();
}
{ // Render the `method` part of the line
let s = l.method.to_gui_string(false);
c.new_path();
let trans_x = SCALE * 20.0;
undo_x += trans_x;
c.translate(trans_x, 0.0);
c.text_path(&s);
c.fill();
}
{ // Render the dependencies of the line
let mut s = String::with_capacity(32);
s.push('{');
let max = l.deps.len().wrapping_sub(1);
for (i, dep) in l.deps.iter().enumerate() {
let _ = write!(s, "{}", dep + 1);
if i!= max {
s.push(',');
s.push(' ');
}
}
s.push('}');
let trans_x = SCALE * 8.0;
undo_x += trans_x;
c.translate(trans_x, 0.0);
c.text_path(&s);
c.fill();
}
c.translate(-undo_x, SCALE + 10.0);
}
}
}
|
dirty
|
identifier_name
|
gui.rs
|
use edit::Editor;
use std::fmt::Write;
use gtk::{self, Widget, Window, Frame, EventBox, DrawingArea, WindowPosition};
use gtk::signal::Inhibit;
use gtk::traits::*;
use gdk::EventType;
use cairo::{Context, Antialias};
use cairo::enums::{FontSlant, FontWeight};
pub struct Gui {
#[allow(dead_code)]
win: &'static mut Window,
edit: Editor,
}
impl Gui {
pub fn new(win: &'static mut Window) -> Gui {
win.set_title("Propositional Logic Calculator");
win.set_border_width(10);
win.set_window_position(WindowPosition::Center);
win.set_double_buffered(true);
win.set_default_size(600, 500);
win.connect_delete_event(|_, _| {
gtk::main_quit();
Inhibit(true)
});
let da_frame = Frame::new(None).unwrap();
{
let eb = EventBox::new().unwrap();
win.connect_key_press_event(|_, e| {
if e._type == EventType::KeyPress {
let ih = ::get_gui().edit.handle_input(e);
if ih == Inhibit(true) {
::get_gui().dirty();
}
ih
} else {
Inhibit(false)
}
});
let da = DrawingArea::new().unwrap();
da.connect_draw(|w: Widget, c: Context| {
::get_gui().render(w, c);
Inhibit(false)
});
eb.add(&da);
da_frame.add(&eb);
}
win.add(&da_frame);
win.show_all();
Gui {
win: win,
edit: Editor::new(),
}
}
pub fn dirty(&self) {
self.win.queue_draw();
}
pub fn render(&self, w: Widget, c: Context) {
let (_alloc_w, _alloc_h) = (w.get_allocated_width(), w.get_allocated_height());
const FONT_SIZE: f64 = 17.0;
const SCALE: f64 = FONT_SIZE * 1.0; // 1.0 for Helvetica,
c.select_font_face("Times New Roman", FontSlant::Normal, FontWeight::Normal);
c.set_antialias(Antialias::Best);
c.set_font_size(FONT_SIZE);
c.new_path();
c.translate((SCALE * 0.1).floor(), (SCALE + 10.0).floor());
// Act like there are 10 lines in the proof for the sake of spacing.
let lines_len = if self.edit.lines().len() < 10 { 10 } else { self.edit.lines().len() };
let start_offset = ((SCALE / 2.5) + SCALE * 0.5 * ((lines_len as f64).log10().floor() + 1.0)).floor();
c.translate(start_offset, 0.0);
for l in self.edit.lines().iter() {
let mut undo_x = 0.0;
{ // Render the line number (Align the points all at the same x co-ordinate)
let s = format!("{}.", l.no + 1);
c.new_path();
c.text_path(&s);
let p = c.copy_path();
let ex = c.fill_extents();
c.new_path();
//print!("ex0: {}, ex1: {}, ex2: {}, ex3: {}", ex.0, ex.1, ex.2, ex.3);
let offset = -ex.2.floor();
//println!(", undo_x: {}", undo_x);
c.translate(offset, 0.0);
c.append_path(&p);
c.translate(-offset, 0.0);
undo_x = 0.0;
c.fill();
}
{ // Render the `step` part of the line
let s = l.step.to_gui_string(true);
c.new_path();
let trans_x = SCALE * 0.5;
undo_x += trans_x;
c.translate(trans_x, 0.0);
c.text_path(&s);
c.fill();
}
{ // Render the `method` part of the line
let s = l.method.to_gui_string(false);
c.new_path();
let trans_x = SCALE * 20.0;
undo_x += trans_x;
c.translate(trans_x, 0.0);
c.text_path(&s);
c.fill();
}
{ // Render the dependencies of the line
let mut s = String::with_capacity(32);
s.push('{');
let max = l.deps.len().wrapping_sub(1);
for (i, dep) in l.deps.iter().enumerate() {
let _ = write!(s, "{}", dep + 1);
if i!= max
|
}
s.push('}');
let trans_x = SCALE * 8.0;
undo_x += trans_x;
c.translate(trans_x, 0.0);
c.text_path(&s);
c.fill();
}
c.translate(-undo_x, SCALE + 10.0);
}
}
}
|
{
s.push(',');
s.push(' ');
}
|
conditional_block
|
main.rs
|
extern crate structopt;
#[macro_use]
extern crate structopt_derive;
use structopt::StructOpt;
use std::collections::BTreeSet;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct Component {
input: usize,
output: usize,
}
impl Component {
fn flip(&self) -> Component {
Component {
input: self.output,
output: self.input,
}
}
}
fn parse(input: &str) -> BTreeSet<Component> {
let mut components = BTreeSet::new();
for line in input.lines() {
let (i, o) = line.split_at(
line.find('/').expect(&format!("invalid line: {}", line)),
);
let new_comp = Component {
input: i.trim().parse().expect(&format!("invalid size: {}", i)),
output: o[1..]
.trim()
.parse()
.expect(&format!("invalid size: {}", o)),
};
assert!(
!components.contains(&new_comp)
&&!components.contains(&new_comp.flip()),
"Repeat component found! You'll need a better algorithm."
);
components.insert(new_comp);
}
components
}
fn strength<Iter>(iter: Iter) -> usize
where
Iter: IntoIterator<Item = Component>,
{
iter.into_iter().map(|c| c.input + c.output).sum()
}
fn
|
(start: usize, components: &BTreeSet<Component>) -> usize {
components
.iter()
.filter_map(|component| {
if component.input == start {
let mut new_set = components.clone();
new_set.remove(component);
Some(
max_strength(component.output, &new_set) + component.input
+ component.output,
)
} else if component.output == start {
let mut new_set = components.clone();
new_set.remove(component);
Some(
max_strength(component.input, &new_set) + component.input
+ component.output,
)
} else {
None
}
})
.max()
.unwrap_or_default()
}
fn part1(components: &BTreeSet<Component>) -> usize {
max_strength(0, components)
}
fn possibilities(
start: usize,
components: &BTreeSet<Component>,
) -> Vec<Vec<Component>> {
components
.iter()
.flat_map(|component| {
if component.input == start {
let mut new_set = components.clone();
new_set.remove(component);
let mut ps = possibilities(component.output, &new_set);
for mut p in &mut ps {
p.push(*component);
}
ps.push(vec![*component]);
ps
} else if component.output == start {
let mut new_set = components.clone();
new_set.remove(component);
let mut ps = possibilities(component.input, &new_set);
for mut p in &mut ps {
p.push(component.flip());
}
ps.push(vec![*component]);
ps
} else {
vec![]
}
})
.collect()
}
fn part2(components: &BTreeSet<Component>) -> usize {
possibilities(0, components)
.into_iter()
.max_by(|a, b| {
a.len()
.cmp(&b.len())
.then_with(|| strength(a.clone()).cmp(&strength(b.clone())))
})
.map(strength)
.unwrap_or_default()
}
fn main() {
let opt = Opt::from_args();
let mut contents = String::new();
if opt.input.to_str() == Some("-") {
std::io::stdin()
.read_to_string(&mut contents)
.expect("could not read stdin");
} else {
let mut file = File::open(&opt.input)
.expect(&format!("file {} not found", opt.input.display()));
file.read_to_string(&mut contents)
.expect(&format!("could not read file {}", opt.input.display()));
}
let components = parse(&contents);
println!("Part 1: {}", part1(&components));
println!("Part 2: {}", part2(&components));
}
#[derive(StructOpt, Debug)]
#[structopt(name = "day24", about = "Advent of code 2017 day 24")]
struct Opt {
#[structopt(help = "Input file", parse(from_os_str))]
input: PathBuf,
}
#[cfg(test)]
mod tests {
use super::*;
static INPUT: &str = concat!(
"0/2\n",
"2/2\n",
"2/3\n",
"3/4\n",
"3/5\n",
"0/1\n",
"10/1\n",
"9/10"
);
#[test]
fn part1_test() {
let components = parse(INPUT);
assert_eq!(part1(&components), 31);
}
#[test]
fn part2_test() {
let components = parse(INPUT);
assert_eq!(part2(&components), 19);
}
}
|
max_strength
|
identifier_name
|
main.rs
|
extern crate structopt;
#[macro_use]
extern crate structopt_derive;
use structopt::StructOpt;
use std::collections::BTreeSet;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct Component {
input: usize,
output: usize,
}
impl Component {
fn flip(&self) -> Component {
Component {
input: self.output,
output: self.input,
}
}
}
fn parse(input: &str) -> BTreeSet<Component> {
let mut components = BTreeSet::new();
for line in input.lines() {
let (i, o) = line.split_at(
line.find('/').expect(&format!("invalid line: {}", line)),
);
let new_comp = Component {
input: i.trim().parse().expect(&format!("invalid size: {}", i)),
output: o[1..]
.trim()
.parse()
.expect(&format!("invalid size: {}", o)),
};
assert!(
!components.contains(&new_comp)
&&!components.contains(&new_comp.flip()),
"Repeat component found! You'll need a better algorithm."
);
components.insert(new_comp);
}
components
}
fn strength<Iter>(iter: Iter) -> usize
where
Iter: IntoIterator<Item = Component>,
{
iter.into_iter().map(|c| c.input + c.output).sum()
}
fn max_strength(start: usize, components: &BTreeSet<Component>) -> usize {
components
.iter()
.filter_map(|component| {
if component.input == start {
let mut new_set = components.clone();
new_set.remove(component);
Some(
max_strength(component.output, &new_set) + component.input
+ component.output,
)
} else if component.output == start {
let mut new_set = components.clone();
new_set.remove(component);
Some(
max_strength(component.input, &new_set) + component.input
+ component.output,
)
} else {
None
}
})
.max()
.unwrap_or_default()
}
fn part1(components: &BTreeSet<Component>) -> usize {
max_strength(0, components)
}
fn possibilities(
start: usize,
components: &BTreeSet<Component>,
) -> Vec<Vec<Component>> {
components
.iter()
.flat_map(|component| {
if component.input == start {
let mut new_set = components.clone();
new_set.remove(component);
let mut ps = possibilities(component.output, &new_set);
for mut p in &mut ps {
p.push(*component);
}
ps.push(vec![*component]);
ps
} else if component.output == start
|
else {
vec![]
}
})
.collect()
}
fn part2(components: &BTreeSet<Component>) -> usize {
possibilities(0, components)
.into_iter()
.max_by(|a, b| {
a.len()
.cmp(&b.len())
.then_with(|| strength(a.clone()).cmp(&strength(b.clone())))
})
.map(strength)
.unwrap_or_default()
}
fn main() {
let opt = Opt::from_args();
let mut contents = String::new();
if opt.input.to_str() == Some("-") {
std::io::stdin()
.read_to_string(&mut contents)
.expect("could not read stdin");
} else {
let mut file = File::open(&opt.input)
.expect(&format!("file {} not found", opt.input.display()));
file.read_to_string(&mut contents)
.expect(&format!("could not read file {}", opt.input.display()));
}
let components = parse(&contents);
println!("Part 1: {}", part1(&components));
println!("Part 2: {}", part2(&components));
}
#[derive(StructOpt, Debug)]
#[structopt(name = "day24", about = "Advent of code 2017 day 24")]
struct Opt {
#[structopt(help = "Input file", parse(from_os_str))]
input: PathBuf,
}
#[cfg(test)]
mod tests {
use super::*;
static INPUT: &str = concat!(
"0/2\n",
"2/2\n",
"2/3\n",
"3/4\n",
"3/5\n",
"0/1\n",
"10/1\n",
"9/10"
);
#[test]
fn part1_test() {
let components = parse(INPUT);
assert_eq!(part1(&components), 31);
}
#[test]
fn part2_test() {
let components = parse(INPUT);
assert_eq!(part2(&components), 19);
}
}
|
{
let mut new_set = components.clone();
new_set.remove(component);
let mut ps = possibilities(component.input, &new_set);
for mut p in &mut ps {
p.push(component.flip());
}
ps.push(vec![*component]);
ps
}
|
conditional_block
|
main.rs
|
extern crate structopt;
#[macro_use]
extern crate structopt_derive;
use structopt::StructOpt;
use std::collections::BTreeSet;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct Component {
input: usize,
output: usize,
}
impl Component {
fn flip(&self) -> Component {
Component {
input: self.output,
output: self.input,
}
}
}
fn parse(input: &str) -> BTreeSet<Component> {
let mut components = BTreeSet::new();
for line in input.lines() {
let (i, o) = line.split_at(
line.find('/').expect(&format!("invalid line: {}", line)),
);
let new_comp = Component {
input: i.trim().parse().expect(&format!("invalid size: {}", i)),
output: o[1..]
.trim()
.parse()
.expect(&format!("invalid size: {}", o)),
};
assert!(
!components.contains(&new_comp)
&&!components.contains(&new_comp.flip()),
"Repeat component found! You'll need a better algorithm."
);
components.insert(new_comp);
}
components
}
fn strength<Iter>(iter: Iter) -> usize
where
Iter: IntoIterator<Item = Component>,
{
iter.into_iter().map(|c| c.input + c.output).sum()
}
fn max_strength(start: usize, components: &BTreeSet<Component>) -> usize {
components
.iter()
.filter_map(|component| {
if component.input == start {
let mut new_set = components.clone();
new_set.remove(component);
Some(
max_strength(component.output, &new_set) + component.input
+ component.output,
)
} else if component.output == start {
let mut new_set = components.clone();
new_set.remove(component);
Some(
max_strength(component.input, &new_set) + component.input
+ component.output,
)
} else {
None
}
})
.max()
.unwrap_or_default()
}
fn part1(components: &BTreeSet<Component>) -> usize
|
fn possibilities(
start: usize,
components: &BTreeSet<Component>,
) -> Vec<Vec<Component>> {
components
.iter()
.flat_map(|component| {
if component.input == start {
let mut new_set = components.clone();
new_set.remove(component);
let mut ps = possibilities(component.output, &new_set);
for mut p in &mut ps {
p.push(*component);
}
ps.push(vec![*component]);
ps
} else if component.output == start {
let mut new_set = components.clone();
new_set.remove(component);
let mut ps = possibilities(component.input, &new_set);
for mut p in &mut ps {
p.push(component.flip());
}
ps.push(vec![*component]);
ps
} else {
vec![]
}
})
.collect()
}
fn part2(components: &BTreeSet<Component>) -> usize {
possibilities(0, components)
.into_iter()
.max_by(|a, b| {
a.len()
.cmp(&b.len())
.then_with(|| strength(a.clone()).cmp(&strength(b.clone())))
})
.map(strength)
.unwrap_or_default()
}
fn main() {
let opt = Opt::from_args();
let mut contents = String::new();
if opt.input.to_str() == Some("-") {
std::io::stdin()
.read_to_string(&mut contents)
.expect("could not read stdin");
} else {
let mut file = File::open(&opt.input)
.expect(&format!("file {} not found", opt.input.display()));
file.read_to_string(&mut contents)
.expect(&format!("could not read file {}", opt.input.display()));
}
let components = parse(&contents);
println!("Part 1: {}", part1(&components));
println!("Part 2: {}", part2(&components));
}
#[derive(StructOpt, Debug)]
#[structopt(name = "day24", about = "Advent of code 2017 day 24")]
struct Opt {
#[structopt(help = "Input file", parse(from_os_str))]
input: PathBuf,
}
#[cfg(test)]
mod tests {
use super::*;
static INPUT: &str = concat!(
"0/2\n",
"2/2\n",
"2/3\n",
"3/4\n",
"3/5\n",
"0/1\n",
"10/1\n",
"9/10"
);
#[test]
fn part1_test() {
let components = parse(INPUT);
assert_eq!(part1(&components), 31);
}
#[test]
fn part2_test() {
let components = parse(INPUT);
assert_eq!(part2(&components), 19);
}
}
|
{
max_strength(0, components)
}
|
identifier_body
|
main.rs
|
extern crate structopt;
#[macro_use]
extern crate structopt_derive;
use structopt::StructOpt;
use std::collections::BTreeSet;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct Component {
input: usize,
output: usize,
}
impl Component {
fn flip(&self) -> Component {
Component {
input: self.output,
output: self.input,
}
}
}
fn parse(input: &str) -> BTreeSet<Component> {
let mut components = BTreeSet::new();
for line in input.lines() {
let (i, o) = line.split_at(
line.find('/').expect(&format!("invalid line: {}", line)),
);
let new_comp = Component {
input: i.trim().parse().expect(&format!("invalid size: {}", i)),
output: o[1..]
.trim()
.parse()
.expect(&format!("invalid size: {}", o)),
};
assert!(
!components.contains(&new_comp)
&&!components.contains(&new_comp.flip()),
"Repeat component found! You'll need a better algorithm."
);
components.insert(new_comp);
}
components
}
fn strength<Iter>(iter: Iter) -> usize
where
Iter: IntoIterator<Item = Component>,
{
|
fn max_strength(start: usize, components: &BTreeSet<Component>) -> usize {
components
.iter()
.filter_map(|component| {
if component.input == start {
let mut new_set = components.clone();
new_set.remove(component);
Some(
max_strength(component.output, &new_set) + component.input
+ component.output,
)
} else if component.output == start {
let mut new_set = components.clone();
new_set.remove(component);
Some(
max_strength(component.input, &new_set) + component.input
+ component.output,
)
} else {
None
}
})
.max()
.unwrap_or_default()
}
fn part1(components: &BTreeSet<Component>) -> usize {
max_strength(0, components)
}
fn possibilities(
start: usize,
components: &BTreeSet<Component>,
) -> Vec<Vec<Component>> {
components
.iter()
.flat_map(|component| {
if component.input == start {
let mut new_set = components.clone();
new_set.remove(component);
let mut ps = possibilities(component.output, &new_set);
for mut p in &mut ps {
p.push(*component);
}
ps.push(vec![*component]);
ps
} else if component.output == start {
let mut new_set = components.clone();
new_set.remove(component);
let mut ps = possibilities(component.input, &new_set);
for mut p in &mut ps {
p.push(component.flip());
}
ps.push(vec![*component]);
ps
} else {
vec![]
}
})
.collect()
}
fn part2(components: &BTreeSet<Component>) -> usize {
possibilities(0, components)
.into_iter()
.max_by(|a, b| {
a.len()
.cmp(&b.len())
.then_with(|| strength(a.clone()).cmp(&strength(b.clone())))
})
.map(strength)
.unwrap_or_default()
}
fn main() {
let opt = Opt::from_args();
let mut contents = String::new();
if opt.input.to_str() == Some("-") {
std::io::stdin()
.read_to_string(&mut contents)
.expect("could not read stdin");
} else {
let mut file = File::open(&opt.input)
.expect(&format!("file {} not found", opt.input.display()));
file.read_to_string(&mut contents)
.expect(&format!("could not read file {}", opt.input.display()));
}
let components = parse(&contents);
println!("Part 1: {}", part1(&components));
println!("Part 2: {}", part2(&components));
}
#[derive(StructOpt, Debug)]
#[structopt(name = "day24", about = "Advent of code 2017 day 24")]
struct Opt {
#[structopt(help = "Input file", parse(from_os_str))]
input: PathBuf,
}
#[cfg(test)]
mod tests {
use super::*;
static INPUT: &str = concat!(
"0/2\n",
"2/2\n",
"2/3\n",
"3/4\n",
"3/5\n",
"0/1\n",
"10/1\n",
"9/10"
);
#[test]
fn part1_test() {
let components = parse(INPUT);
assert_eq!(part1(&components), 31);
}
#[test]
fn part2_test() {
let components = parse(INPUT);
assert_eq!(part2(&components), 19);
}
}
|
iter.into_iter().map(|c| c.input + c.output).sum()
}
|
random_line_split
|
build.rs
|
// Copyright 2015 Brendan Zabarauskas and the gl-rs developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate gl_generator;
use gl_generator::*;
use std::env;
use std::fs::File;
use std::path::*;
fn
|
() {
let dest = env::var("OUT_DIR").unwrap();
let mut file = File::create(&Path::new(&dest).join("test_symbols.rs")).unwrap();
Registry::new(Api::Gl, (4, 5), Profile::Core, Fallbacks::All, ["GL_ARB_debug_output"])
.write_bindings(GlobalGenerator, &mut file)
.unwrap();
}
|
main
|
identifier_name
|
build.rs
|
// Copyright 2015 Brendan Zabarauskas and the gl-rs developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
|
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate gl_generator;
use gl_generator::*;
use std::env;
use std::fs::File;
use std::path::*;
fn main() {
let dest = env::var("OUT_DIR").unwrap();
let mut file = File::create(&Path::new(&dest).join("test_symbols.rs")).unwrap();
Registry::new(Api::Gl, (4, 5), Profile::Core, Fallbacks::All, ["GL_ARB_debug_output"])
.write_bindings(GlobalGenerator, &mut file)
.unwrap();
}
|
// Unless required by applicable law or agreed to in writing, software
|
random_line_split
|
build.rs
|
// Copyright 2015 Brendan Zabarauskas and the gl-rs developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate gl_generator;
use gl_generator::*;
use std::env;
use std::fs::File;
use std::path::*;
fn main()
|
{
let dest = env::var("OUT_DIR").unwrap();
let mut file = File::create(&Path::new(&dest).join("test_symbols.rs")).unwrap();
Registry::new(Api::Gl, (4, 5), Profile::Core, Fallbacks::All, ["GL_ARB_debug_output"])
.write_bindings(GlobalGenerator, &mut file)
.unwrap();
}
|
identifier_body
|
|
ioapic.rs
|
/* SPDX-License-Identifier: GPL-2.0-only */
// to do: add a channel such that a local apic can send messages to io apic.
use crate::err::Error;
use crate::hv::interrupt_vcpu;
use crate::{GuestThread, VCPU};
use crossbeam_channel::{Receiver, Sender};
#[allow(unused_imports)]
use log::*;
use std::sync::{Arc, Mutex, RwLock};
const IOAPIC_NUM_PINS: u32 = 24;
const IOAPIC_REG_MAX: u32 = 0x10 + 2 * IOAPIC_NUM_PINS - 1;
const IOAPIC_VERSION: u32 = 0x11;
pub struct IoApic {
id: u32,
reg: u32,
arbid: u32,
pub value: [u32; 2 * IOAPIC_NUM_PINS as usize],
}
impl IoApic {
pub fn new() -> Self {
IoApic {
id: 0,
reg: 0,
arbid: 0,
value: [0; 2 * IOAPIC_NUM_PINS as usize],
}
}
pub fn dispatch(
intr_senders: Arc<Mutex<Option<Vec<Sender<u8>>>>>, // one sender for one guest thread
irq_receiver: Receiver<u32>, // receiver for collecting IRQs from hardware
|
let ioapic = ioapic.read().unwrap();
let vcpu_ids = vcpu_ids.read().unwrap();
let entry = ioapic.value[2 * irq as usize] as u64
| ((ioapic.value[2 * irq as usize + 1] as u64) << 32);
let vector = (entry & 0xff) as u8;
let dest = entry >> 56;
let senders = intr_senders.lock().unwrap();
if let Some(ref senders) = *senders {
if entry & (1 << 11) == 0 {
// physical mode
let dest = (dest & 0b1111) as usize;
senders[dest].send(vector).unwrap();
interrupt_vcpu(&vcpu_ids[dest..(dest + 1)]).unwrap();
} else {
// logical destination mode
for i in 0..8 {
if dest & (1 << i)!= 0 {
senders[i].send(vector).unwrap();
interrupt_vcpu(&vcpu_ids[i..(i + 1)]).unwrap();
}
}
}
} else {
error!(
"io apic gets irq 0x{:x}, but has no ways to send it to guest threads",
irq
);
}
}
}
pub fn write(&mut self, offset: usize, value: u32) {
if offset == 0 {
self.reg = value;
} else {
match self.reg {
0 => self.id = value,
0x10..=IOAPIC_REG_MAX => {
self.value[self.reg as usize - 0x10] = value;
}
_ => error!(
"guest writes 0x{:x} to an invalid/read-only register 0x{:x}",
value, self.reg
),
}
}
}
pub fn read(&self, offset: usize) -> u32 {
if offset == 0 {
self.reg
} else {
match self.reg {
0 => self.id,
1 => (IOAPIC_NUM_PINS << 16) | IOAPIC_VERSION, // 0x170011,
2 => self.arbid,
0x10..=IOAPIC_REG_MAX => self.value[self.reg as usize - 0x10],
_ => {
error!(
"guest reads from an invalid register 0x{:x}. return 0x{:x}",
self.reg,
u32::MAX
);
u32::MAX
}
}
}
}
}
pub fn ioapic_access(
_vcpu: &VCPU,
gth: &mut GuestThread,
gpa: usize,
reg_val: &mut u64,
_size: u8,
store: bool,
) -> Result<(), Error> {
let offset = gpa & 0xfffff;
if offset!= 0 && offset!= 0x10 {
error!(
"Bad register offset: {:x} and has to be 0x0 or 0x10",
offset
);
return Ok(());
}
let ioapic = >h.vm.ioapic;
if store {
ioapic.write().unwrap().write(offset, *reg_val as u32);
} else {
*reg_val = ioapic.read().unwrap().read(offset) as u64;
}
Ok(())
}
|
ioapic: Arc<RwLock<IoApic>>,
vcpu_ids: Arc<RwLock<Vec<u32>>>, // the actual Hypervisor vcpu id of each guest thread
) {
for irq in irq_receiver.iter() {
|
random_line_split
|
ioapic.rs
|
/* SPDX-License-Identifier: GPL-2.0-only */
// to do: add a channel such that a local apic can send messages to io apic.
use crate::err::Error;
use crate::hv::interrupt_vcpu;
use crate::{GuestThread, VCPU};
use crossbeam_channel::{Receiver, Sender};
#[allow(unused_imports)]
use log::*;
use std::sync::{Arc, Mutex, RwLock};
const IOAPIC_NUM_PINS: u32 = 24;
const IOAPIC_REG_MAX: u32 = 0x10 + 2 * IOAPIC_NUM_PINS - 1;
const IOAPIC_VERSION: u32 = 0x11;
pub struct IoApic {
id: u32,
reg: u32,
arbid: u32,
pub value: [u32; 2 * IOAPIC_NUM_PINS as usize],
}
impl IoApic {
pub fn new() -> Self {
IoApic {
id: 0,
reg: 0,
arbid: 0,
value: [0; 2 * IOAPIC_NUM_PINS as usize],
}
}
pub fn dispatch(
intr_senders: Arc<Mutex<Option<Vec<Sender<u8>>>>>, // one sender for one guest thread
irq_receiver: Receiver<u32>, // receiver for collecting IRQs from hardware
ioapic: Arc<RwLock<IoApic>>,
vcpu_ids: Arc<RwLock<Vec<u32>>>, // the actual Hypervisor vcpu id of each guest thread
) {
for irq in irq_receiver.iter() {
let ioapic = ioapic.read().unwrap();
let vcpu_ids = vcpu_ids.read().unwrap();
let entry = ioapic.value[2 * irq as usize] as u64
| ((ioapic.value[2 * irq as usize + 1] as u64) << 32);
let vector = (entry & 0xff) as u8;
let dest = entry >> 56;
let senders = intr_senders.lock().unwrap();
if let Some(ref senders) = *senders {
if entry & (1 << 11) == 0 {
// physical mode
let dest = (dest & 0b1111) as usize;
senders[dest].send(vector).unwrap();
interrupt_vcpu(&vcpu_ids[dest..(dest + 1)]).unwrap();
} else {
// logical destination mode
for i in 0..8 {
if dest & (1 << i)!= 0 {
senders[i].send(vector).unwrap();
interrupt_vcpu(&vcpu_ids[i..(i + 1)]).unwrap();
}
}
}
} else {
error!(
"io apic gets irq 0x{:x}, but has no ways to send it to guest threads",
irq
);
}
}
}
pub fn write(&mut self, offset: usize, value: u32) {
if offset == 0 {
self.reg = value;
} else {
match self.reg {
0 => self.id = value,
0x10..=IOAPIC_REG_MAX => {
self.value[self.reg as usize - 0x10] = value;
}
_ => error!(
"guest writes 0x{:x} to an invalid/read-only register 0x{:x}",
value, self.reg
),
}
}
}
pub fn read(&self, offset: usize) -> u32
|
}
pub fn ioapic_access(
_vcpu: &VCPU,
gth: &mut GuestThread,
gpa: usize,
reg_val: &mut u64,
_size: u8,
store: bool,
) -> Result<(), Error> {
let offset = gpa & 0xfffff;
if offset!= 0 && offset!= 0x10 {
error!(
"Bad register offset: {:x} and has to be 0x0 or 0x10",
offset
);
return Ok(());
}
let ioapic = >h.vm.ioapic;
if store {
ioapic.write().unwrap().write(offset, *reg_val as u32);
} else {
*reg_val = ioapic.read().unwrap().read(offset) as u64;
}
Ok(())
}
|
{
if offset == 0 {
self.reg
} else {
match self.reg {
0 => self.id,
1 => (IOAPIC_NUM_PINS << 16) | IOAPIC_VERSION, // 0x170011,
2 => self.arbid,
0x10..=IOAPIC_REG_MAX => self.value[self.reg as usize - 0x10],
_ => {
error!(
"guest reads from an invalid register 0x{:x}. return 0x{:x}",
self.reg,
u32::MAX
);
u32::MAX
}
}
}
}
|
identifier_body
|
ioapic.rs
|
/* SPDX-License-Identifier: GPL-2.0-only */
// to do: add a channel such that a local apic can send messages to io apic.
use crate::err::Error;
use crate::hv::interrupt_vcpu;
use crate::{GuestThread, VCPU};
use crossbeam_channel::{Receiver, Sender};
#[allow(unused_imports)]
use log::*;
use std::sync::{Arc, Mutex, RwLock};
const IOAPIC_NUM_PINS: u32 = 24;
const IOAPIC_REG_MAX: u32 = 0x10 + 2 * IOAPIC_NUM_PINS - 1;
const IOAPIC_VERSION: u32 = 0x11;
pub struct IoApic {
id: u32,
reg: u32,
arbid: u32,
pub value: [u32; 2 * IOAPIC_NUM_PINS as usize],
}
impl IoApic {
pub fn new() -> Self {
IoApic {
id: 0,
reg: 0,
arbid: 0,
value: [0; 2 * IOAPIC_NUM_PINS as usize],
}
}
pub fn dispatch(
intr_senders: Arc<Mutex<Option<Vec<Sender<u8>>>>>, // one sender for one guest thread
irq_receiver: Receiver<u32>, // receiver for collecting IRQs from hardware
ioapic: Arc<RwLock<IoApic>>,
vcpu_ids: Arc<RwLock<Vec<u32>>>, // the actual Hypervisor vcpu id of each guest thread
) {
for irq in irq_receiver.iter() {
let ioapic = ioapic.read().unwrap();
let vcpu_ids = vcpu_ids.read().unwrap();
let entry = ioapic.value[2 * irq as usize] as u64
| ((ioapic.value[2 * irq as usize + 1] as u64) << 32);
let vector = (entry & 0xff) as u8;
let dest = entry >> 56;
let senders = intr_senders.lock().unwrap();
if let Some(ref senders) = *senders {
if entry & (1 << 11) == 0 {
// physical mode
let dest = (dest & 0b1111) as usize;
senders[dest].send(vector).unwrap();
interrupt_vcpu(&vcpu_ids[dest..(dest + 1)]).unwrap();
} else {
// logical destination mode
for i in 0..8 {
if dest & (1 << i)!= 0 {
senders[i].send(vector).unwrap();
interrupt_vcpu(&vcpu_ids[i..(i + 1)]).unwrap();
}
}
}
} else {
error!(
"io apic gets irq 0x{:x}, but has no ways to send it to guest threads",
irq
);
}
}
}
pub fn write(&mut self, offset: usize, value: u32) {
if offset == 0 {
self.reg = value;
} else {
match self.reg {
0 => self.id = value,
0x10..=IOAPIC_REG_MAX => {
self.value[self.reg as usize - 0x10] = value;
}
_ => error!(
"guest writes 0x{:x} to an invalid/read-only register 0x{:x}",
value, self.reg
),
}
}
}
pub fn
|
(&self, offset: usize) -> u32 {
if offset == 0 {
self.reg
} else {
match self.reg {
0 => self.id,
1 => (IOAPIC_NUM_PINS << 16) | IOAPIC_VERSION, // 0x170011,
2 => self.arbid,
0x10..=IOAPIC_REG_MAX => self.value[self.reg as usize - 0x10],
_ => {
error!(
"guest reads from an invalid register 0x{:x}. return 0x{:x}",
self.reg,
u32::MAX
);
u32::MAX
}
}
}
}
}
pub fn ioapic_access(
_vcpu: &VCPU,
gth: &mut GuestThread,
gpa: usize,
reg_val: &mut u64,
_size: u8,
store: bool,
) -> Result<(), Error> {
let offset = gpa & 0xfffff;
if offset!= 0 && offset!= 0x10 {
error!(
"Bad register offset: {:x} and has to be 0x0 or 0x10",
offset
);
return Ok(());
}
let ioapic = >h.vm.ioapic;
if store {
ioapic.write().unwrap().write(offset, *reg_val as u32);
} else {
*reg_val = ioapic.read().unwrap().read(offset) as u64;
}
Ok(())
}
|
read
|
identifier_name
|
ioapic.rs
|
/* SPDX-License-Identifier: GPL-2.0-only */
// to do: add a channel such that a local apic can send messages to io apic.
use crate::err::Error;
use crate::hv::interrupt_vcpu;
use crate::{GuestThread, VCPU};
use crossbeam_channel::{Receiver, Sender};
#[allow(unused_imports)]
use log::*;
use std::sync::{Arc, Mutex, RwLock};
const IOAPIC_NUM_PINS: u32 = 24;
const IOAPIC_REG_MAX: u32 = 0x10 + 2 * IOAPIC_NUM_PINS - 1;
const IOAPIC_VERSION: u32 = 0x11;
pub struct IoApic {
id: u32,
reg: u32,
arbid: u32,
pub value: [u32; 2 * IOAPIC_NUM_PINS as usize],
}
impl IoApic {
pub fn new() -> Self {
IoApic {
id: 0,
reg: 0,
arbid: 0,
value: [0; 2 * IOAPIC_NUM_PINS as usize],
}
}
pub fn dispatch(
intr_senders: Arc<Mutex<Option<Vec<Sender<u8>>>>>, // one sender for one guest thread
irq_receiver: Receiver<u32>, // receiver for collecting IRQs from hardware
ioapic: Arc<RwLock<IoApic>>,
vcpu_ids: Arc<RwLock<Vec<u32>>>, // the actual Hypervisor vcpu id of each guest thread
) {
for irq in irq_receiver.iter() {
let ioapic = ioapic.read().unwrap();
let vcpu_ids = vcpu_ids.read().unwrap();
let entry = ioapic.value[2 * irq as usize] as u64
| ((ioapic.value[2 * irq as usize + 1] as u64) << 32);
let vector = (entry & 0xff) as u8;
let dest = entry >> 56;
let senders = intr_senders.lock().unwrap();
if let Some(ref senders) = *senders {
if entry & (1 << 11) == 0 {
// physical mode
let dest = (dest & 0b1111) as usize;
senders[dest].send(vector).unwrap();
interrupt_vcpu(&vcpu_ids[dest..(dest + 1)]).unwrap();
} else {
// logical destination mode
for i in 0..8 {
if dest & (1 << i)!= 0 {
senders[i].send(vector).unwrap();
interrupt_vcpu(&vcpu_ids[i..(i + 1)]).unwrap();
}
}
}
} else {
error!(
"io apic gets irq 0x{:x}, but has no ways to send it to guest threads",
irq
);
}
}
}
pub fn write(&mut self, offset: usize, value: u32) {
if offset == 0 {
self.reg = value;
} else {
match self.reg {
0 => self.id = value,
0x10..=IOAPIC_REG_MAX => {
self.value[self.reg as usize - 0x10] = value;
}
_ => error!(
"guest writes 0x{:x} to an invalid/read-only register 0x{:x}",
value, self.reg
),
}
}
}
pub fn read(&self, offset: usize) -> u32 {
if offset == 0 {
self.reg
} else {
match self.reg {
0 => self.id,
1 => (IOAPIC_NUM_PINS << 16) | IOAPIC_VERSION, // 0x170011,
2 => self.arbid,
0x10..=IOAPIC_REG_MAX => self.value[self.reg as usize - 0x10],
_ => {
error!(
"guest reads from an invalid register 0x{:x}. return 0x{:x}",
self.reg,
u32::MAX
);
u32::MAX
}
}
}
}
}
pub fn ioapic_access(
_vcpu: &VCPU,
gth: &mut GuestThread,
gpa: usize,
reg_val: &mut u64,
_size: u8,
store: bool,
) -> Result<(), Error> {
let offset = gpa & 0xfffff;
if offset!= 0 && offset!= 0x10
|
let ioapic = >h.vm.ioapic;
if store {
ioapic.write().unwrap().write(offset, *reg_val as u32);
} else {
*reg_val = ioapic.read().unwrap().read(offset) as u64;
}
Ok(())
}
|
{
error!(
"Bad register offset: {:x} and has to be 0x0 or 0x10",
offset
);
return Ok(());
}
|
conditional_block
|
meminfo.rs
|
extern crate linux_stats;
use linux_stats::MemInfo;
const MEMINFO_1: MemInfo = MemInfo {
mem_total: 3521920,
mem_free: 1878240,
mem_available: 2275916,
bufers: 35428,
cached: 386132,
swap_cached: 0,
active: 134352,
inactive: 266336,
active_anon: 1094728,
inactive_anon: 17664,
active_file: 134352,
inactive_file: 266336,
unevictable: 3660,
mlocked: 3660,
swap_total: 0,
swap_free: 0,
dirty: 12,
writeback: 0,
anon_pages: 1095172,
mapped: 71384,
shmem: 18456,
slab: 50800,
s_reclaimable: 24684,
s_unreclaim: 26116,
kernel_stack: 5584,
page_tables: 6184,
nfs_unstable: 0,
bounce: 0,
writeback_tmp: 0,
commit_limit: 1760960,
committed_as: 2064016,
vmalloc_total: 34359738367,
vmalloc_used: 0,
vmalloc_chunk: 0,
hardware_corrupted: 0,
anon_huge_pages: 1013760,
cma_total: 0,
cma_free: 0,
huge_pages_total: 0,
huge_pages_free: 0,
huge_pages_rsvd: 0,
huge_pages_surp: 0,
hugepagesize: 2048,
direct_map_4k: 67520,
direct_map_2m: 3602432,
};
const MEMINFO_2: MemInfo = MemInfo {
mem_total: 32828552,
mem_free: 12195628,
mem_available: 13725248,
bufers: 185048,
cached: 1876616,
swap_cached: 0,
active: 806832,
inactive: 1015204,
active_anon: 1531372,
inactive_anon: 105576,
active_file: 806832,
inactive_file: 1015204,
unevictable: 132464,
mlocked: 0,
swap_total: 4194280,
swap_free: 4194280,
dirty: 224,
writeback: 0,
anon_pages: 1529596,
mapped: 16887024,
shmem: 0,
slab: 354316,
s_reclaimable: 155152,
s_unreclaim: 199164,
kernel_stack: 8912,
page_tables: 47852,
nfs_unstable: 0,
bounce: 0,
writeback_tmp: 0,
commit_limit: 20608556,
committed_as: 20066912,
vmalloc_total: 34359738367,
vmalloc_used: 0,
vmalloc_chunk: 0,
hardware_corrupted: 0,
anon_huge_pages: 0,
cma_total: 0,
cma_free: 0,
huge_pages_total: 0,
huge_pages_free: 0,
huge_pages_rsvd: 0,
huge_pages_surp: 0,
hugepagesize: 2048,
direct_map_4k: 215212,
direct_map_2m: 8062976,
};
const MEMINFO_1_RAW: &'static str = include_str!("./meminfo-1");
const MEMINFO_2_RAW: &'static str = include_str!("./meminfo-2");
#[test]
fn
|
() {
assert_eq!("".parse::<MemInfo>().unwrap(), Default::default());
}
#[test]
fn meminfo_1() {
assert_eq!(MEMINFO_1_RAW.parse::<MemInfo>().unwrap(), MEMINFO_1);
}
#[test]
fn meminfo_2() {
assert_eq!(MEMINFO_2_RAW.parse::<MemInfo>().unwrap(), MEMINFO_2);
}
|
meminfo_empty
|
identifier_name
|
meminfo.rs
|
extern crate linux_stats;
use linux_stats::MemInfo;
const MEMINFO_1: MemInfo = MemInfo {
mem_total: 3521920,
mem_free: 1878240,
mem_available: 2275916,
bufers: 35428,
cached: 386132,
swap_cached: 0,
active: 134352,
inactive: 266336,
active_anon: 1094728,
inactive_anon: 17664,
active_file: 134352,
inactive_file: 266336,
unevictable: 3660,
mlocked: 3660,
swap_total: 0,
swap_free: 0,
dirty: 12,
writeback: 0,
anon_pages: 1095172,
mapped: 71384,
shmem: 18456,
slab: 50800,
s_reclaimable: 24684,
s_unreclaim: 26116,
kernel_stack: 5584,
page_tables: 6184,
nfs_unstable: 0,
bounce: 0,
writeback_tmp: 0,
commit_limit: 1760960,
committed_as: 2064016,
vmalloc_total: 34359738367,
vmalloc_used: 0,
vmalloc_chunk: 0,
hardware_corrupted: 0,
anon_huge_pages: 1013760,
cma_total: 0,
cma_free: 0,
|
huge_pages_total: 0,
huge_pages_free: 0,
huge_pages_rsvd: 0,
huge_pages_surp: 0,
hugepagesize: 2048,
direct_map_4k: 67520,
direct_map_2m: 3602432,
};
const MEMINFO_2: MemInfo = MemInfo {
mem_total: 32828552,
mem_free: 12195628,
mem_available: 13725248,
bufers: 185048,
cached: 1876616,
swap_cached: 0,
active: 806832,
inactive: 1015204,
active_anon: 1531372,
inactive_anon: 105576,
active_file: 806832,
inactive_file: 1015204,
unevictable: 132464,
mlocked: 0,
swap_total: 4194280,
swap_free: 4194280,
dirty: 224,
writeback: 0,
anon_pages: 1529596,
mapped: 16887024,
shmem: 0,
slab: 354316,
s_reclaimable: 155152,
s_unreclaim: 199164,
kernel_stack: 8912,
page_tables: 47852,
nfs_unstable: 0,
bounce: 0,
writeback_tmp: 0,
commit_limit: 20608556,
committed_as: 20066912,
vmalloc_total: 34359738367,
vmalloc_used: 0,
vmalloc_chunk: 0,
hardware_corrupted: 0,
anon_huge_pages: 0,
cma_total: 0,
cma_free: 0,
huge_pages_total: 0,
huge_pages_free: 0,
huge_pages_rsvd: 0,
huge_pages_surp: 0,
hugepagesize: 2048,
direct_map_4k: 215212,
direct_map_2m: 8062976,
};
const MEMINFO_1_RAW: &'static str = include_str!("./meminfo-1");
const MEMINFO_2_RAW: &'static str = include_str!("./meminfo-2");
#[test]
fn meminfo_empty() {
assert_eq!("".parse::<MemInfo>().unwrap(), Default::default());
}
#[test]
fn meminfo_1() {
assert_eq!(MEMINFO_1_RAW.parse::<MemInfo>().unwrap(), MEMINFO_1);
}
#[test]
fn meminfo_2() {
assert_eq!(MEMINFO_2_RAW.parse::<MemInfo>().unwrap(), MEMINFO_2);
}
|
random_line_split
|
|
meminfo.rs
|
extern crate linux_stats;
use linux_stats::MemInfo;
const MEMINFO_1: MemInfo = MemInfo {
mem_total: 3521920,
mem_free: 1878240,
mem_available: 2275916,
bufers: 35428,
cached: 386132,
swap_cached: 0,
active: 134352,
inactive: 266336,
active_anon: 1094728,
inactive_anon: 17664,
active_file: 134352,
inactive_file: 266336,
unevictable: 3660,
mlocked: 3660,
swap_total: 0,
swap_free: 0,
dirty: 12,
writeback: 0,
anon_pages: 1095172,
mapped: 71384,
shmem: 18456,
slab: 50800,
s_reclaimable: 24684,
s_unreclaim: 26116,
kernel_stack: 5584,
page_tables: 6184,
nfs_unstable: 0,
bounce: 0,
writeback_tmp: 0,
commit_limit: 1760960,
committed_as: 2064016,
vmalloc_total: 34359738367,
vmalloc_used: 0,
vmalloc_chunk: 0,
hardware_corrupted: 0,
anon_huge_pages: 1013760,
cma_total: 0,
cma_free: 0,
huge_pages_total: 0,
huge_pages_free: 0,
huge_pages_rsvd: 0,
huge_pages_surp: 0,
hugepagesize: 2048,
direct_map_4k: 67520,
direct_map_2m: 3602432,
};
const MEMINFO_2: MemInfo = MemInfo {
mem_total: 32828552,
mem_free: 12195628,
mem_available: 13725248,
bufers: 185048,
cached: 1876616,
swap_cached: 0,
active: 806832,
inactive: 1015204,
active_anon: 1531372,
inactive_anon: 105576,
active_file: 806832,
inactive_file: 1015204,
unevictable: 132464,
mlocked: 0,
swap_total: 4194280,
swap_free: 4194280,
dirty: 224,
writeback: 0,
anon_pages: 1529596,
mapped: 16887024,
shmem: 0,
slab: 354316,
s_reclaimable: 155152,
s_unreclaim: 199164,
kernel_stack: 8912,
page_tables: 47852,
nfs_unstable: 0,
bounce: 0,
writeback_tmp: 0,
commit_limit: 20608556,
committed_as: 20066912,
vmalloc_total: 34359738367,
vmalloc_used: 0,
vmalloc_chunk: 0,
hardware_corrupted: 0,
anon_huge_pages: 0,
cma_total: 0,
cma_free: 0,
huge_pages_total: 0,
huge_pages_free: 0,
huge_pages_rsvd: 0,
huge_pages_surp: 0,
hugepagesize: 2048,
direct_map_4k: 215212,
direct_map_2m: 8062976,
};
const MEMINFO_1_RAW: &'static str = include_str!("./meminfo-1");
const MEMINFO_2_RAW: &'static str = include_str!("./meminfo-2");
#[test]
fn meminfo_empty()
|
#[test]
fn meminfo_1() {
assert_eq!(MEMINFO_1_RAW.parse::<MemInfo>().unwrap(), MEMINFO_1);
}
#[test]
fn meminfo_2() {
assert_eq!(MEMINFO_2_RAW.parse::<MemInfo>().unwrap(), MEMINFO_2);
}
|
{
assert_eq!("".parse::<MemInfo>().unwrap(), Default::default());
}
|
identifier_body
|
harfbuzz.rs
|
codepoint: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
}
impl ShapedGlyphData {
pub fn
|
(buffer: *mut hb_buffer_t) -> ShapedGlyphData {
unsafe {
let mut glyph_count = 0;
let glyph_infos = hb_buffer_get_glyph_infos(buffer, &mut glyph_count);
assert!(!glyph_infos.is_null());
let mut pos_count = 0;
let pos_infos = hb_buffer_get_glyph_positions(buffer, &mut pos_count);
assert!(!pos_infos.is_null());
assert_eq!(glyph_count, pos_count);
ShapedGlyphData {
count: glyph_count as usize,
glyph_infos: glyph_infos,
pos_infos: pos_infos,
}
}
}
#[inline(always)]
fn byte_offset_of_glyph(&self, i: usize) -> u32 {
assert!(i < self.count);
unsafe {
let glyph_info_i = self.glyph_infos.offset(i as isize);
(*glyph_info_i).cluster
}
}
pub fn len(&self) -> usize {
self.count
}
/// Returns shaped glyph data for one glyph, and updates the y-position of the pen.
pub fn entry_for_glyph(&self, i: usize, y_pos: &mut Au) -> ShapedGlyphEntry {
assert!(i < self.count);
unsafe {
let glyph_info_i = self.glyph_infos.offset(i as isize);
let pos_info_i = self.pos_infos.offset(i as isize);
let x_offset = Shaper::fixed_to_float((*pos_info_i).x_offset);
let y_offset = Shaper::fixed_to_float((*pos_info_i).y_offset);
let x_advance = Shaper::fixed_to_float((*pos_info_i).x_advance);
let y_advance = Shaper::fixed_to_float((*pos_info_i).y_advance);
let x_offset = Au::from_f64_px(x_offset);
let y_offset = Au::from_f64_px(y_offset);
let x_advance = Au::from_f64_px(x_advance);
let y_advance = Au::from_f64_px(y_advance);
let offset = if x_offset == Au(0) && y_offset == Au(0) && y_advance == Au(0) {
None
} else {
// adjust the pen..
if y_advance > Au(0) {
*y_pos = *y_pos - y_advance;
}
Some(Point2D::new(x_offset, *y_pos - y_offset))
};
ShapedGlyphEntry {
codepoint: (*glyph_info_i).codepoint as GlyphId,
advance: x_advance,
offset: offset,
}
}
}
}
#[derive(Debug)]
pub struct Shaper {
hb_face: *mut hb_face_t,
hb_font: *mut hb_font_t,
font: *const Font,
}
impl Drop for Shaper {
fn drop(&mut self) {
unsafe {
assert!(!self.hb_face.is_null());
hb_face_destroy(self.hb_face);
assert!(!self.hb_font.is_null());
hb_font_destroy(self.hb_font);
}
}
}
impl Shaper {
pub fn new(font: *const Font) -> Shaper {
unsafe {
let hb_face: *mut hb_face_t =
hb_face_create_for_tables(Some(font_table_func),
font as *const c_void as *mut c_void,
None);
let hb_font: *mut hb_font_t = hb_font_create(hb_face);
// Set points-per-em. if zero, performs no hinting in that direction.
let pt_size = (*font).actual_pt_size.to_f64_px();
hb_font_set_ppem(hb_font, pt_size as c_uint, pt_size as c_uint);
// Set scaling. Note that this takes 16.16 fixed point.
hb_font_set_scale(hb_font,
Shaper::float_to_fixed(pt_size) as c_int,
Shaper::float_to_fixed(pt_size) as c_int);
// configure static function callbacks.
hb_font_set_funcs(hb_font, HB_FONT_FUNCS.0, font as *mut Font as *mut c_void, None);
Shaper {
hb_face: hb_face,
hb_font: hb_font,
font: font,
}
}
}
fn float_to_fixed(f: f64) -> i32 {
float_to_fixed(16, f)
}
fn fixed_to_float(i: hb_position_t) -> f64 {
fixed_to_float(16, i)
}
}
impl ShaperMethods for Shaper {
/// Calculate the layout metrics associated with the given text when painted in a specific
/// font.
fn shape_text(&self, text: &str, options: &ShapingOptions, glyphs: &mut GlyphStore) {
unsafe {
let hb_buffer: *mut hb_buffer_t = hb_buffer_create();
hb_buffer_set_direction(hb_buffer, if options.flags.contains(ShapingFlags::RTL_FLAG) {
HB_DIRECTION_RTL
} else {
HB_DIRECTION_LTR
});
hb_buffer_set_script(hb_buffer, options.script.to_hb_script());
hb_buffer_add_utf8(hb_buffer,
text.as_ptr() as *const c_char,
text.len() as c_int,
0,
text.len() as c_int);
let mut features = Vec::new();
if options.flags.contains(ShapingFlags::IGNORE_LIGATURES_SHAPING_FLAG) {
features.push(hb_feature_t {
tag: LIGA,
value: 0,
start: 0,
end: hb_buffer_get_length(hb_buffer),
})
}
if options.flags.contains(ShapingFlags::DISABLE_KERNING_SHAPING_FLAG) {
features.push(hb_feature_t {
tag: KERN,
value: 0,
start: 0,
end: hb_buffer_get_length(hb_buffer),
})
}
hb_shape(self.hb_font, hb_buffer, features.as_mut_ptr(), features.len() as u32);
self.save_glyph_results(text, options, glyphs, hb_buffer);
hb_buffer_destroy(hb_buffer);
}
}
}
impl Shaper {
fn save_glyph_results(&self,
text: &str,
options: &ShapingOptions,
glyphs: &mut GlyphStore,
buffer: *mut hb_buffer_t) {
let glyph_data = ShapedGlyphData::new(buffer);
let glyph_count = glyph_data.len();
let byte_max = text.len();
debug!("Shaped text[byte count={}], got back {} glyph info records.",
byte_max,
glyph_count);
// make map of what chars have glyphs
let mut byte_to_glyph = vec![NO_GLYPH; byte_max];
debug!("(glyph idx) -> (text byte offset)");
for i in 0..glyph_data.len() {
let loc = glyph_data.byte_offset_of_glyph(i) as usize;
if loc < byte_max {
byte_to_glyph[loc] = i as i32;
} else {
debug!("ERROR: tried to set out of range byte_to_glyph: idx={}, glyph idx={}",
loc,
i);
}
debug!("{} -> {}", i, loc);
}
debug!("text: {:?}", text);
debug!("(char idx): char->(glyph index):");
for (i, ch) in text.char_indices() {
debug!("{}: {:?} --> {}", i, ch, byte_to_glyph[i]);
}
let mut glyph_span = 0..0;
let mut byte_range = 0..0;
let mut y_pos = Au(0);
// main loop over each glyph. each iteration usually processes 1 glyph and 1+ chars.
// in cases with complex glyph-character associations, 2+ glyphs and 1+ chars can be
// processed.
while glyph_span.start < glyph_count {
debug!("Processing glyph at idx={}", glyph_span.start);
glyph_span.end = glyph_span.start;
byte_range.end = glyph_data.byte_offset_of_glyph(glyph_span.start) as usize;
while byte_range.end < byte_max {
byte_range.end += 1;
// Extend the byte range to include any following byte without its own glyph.
while byte_range.end < byte_max && byte_to_glyph[byte_range.end] == NO_GLYPH {
byte_range.end += 1;
}
// Extend the glyph range to include all glyphs covered by bytes processed so far.
let mut max_glyph_idx = glyph_span.end;
for glyph_idx in &byte_to_glyph[byte_range.clone()] {
if *glyph_idx!= NO_GLYPH {
max_glyph_idx = cmp::max(*glyph_idx as usize + 1, max_glyph_idx);
}
}
if max_glyph_idx > glyph_span.end {
glyph_span.end = max_glyph_idx;
debug!("Extended glyph span to {:?}", glyph_span);
}
// if there's just one glyph, then we don't need further checks.
if glyph_span.len() == 1 { break; }
// if no glyphs were found yet, extend the char byte range more.
if glyph_span.len() == 0 { continue; }
// If byte_range now includes all the byte offsets found in glyph_span, then we
// have found a contiguous "cluster" and can stop extending it.
let mut all_glyphs_are_within_cluster: bool = true;
for j in glyph_span.clone() {
let loc = glyph_data.byte_offset_of_glyph(j) as usize;
if!(byte_range.start <= loc && loc < byte_range.end) {
all_glyphs_are_within_cluster = false;
break
}
}
if all_glyphs_are_within_cluster {
break
}
// Otherwise, the bytes we have seen so far correspond to a non-contiguous set of
// glyphs. Keep extending byte_range until we fill in all the holes in the glyph
// span or reach the end of the text.
}
assert!(byte_range.len() > 0);
assert!(glyph_span.len() > 0);
// Now byte_range is the ligature clump formed by the glyphs in glyph_span.
// We will save these glyphs to the glyph store at the index of the first byte.
let byte_idx = ByteIndex(byte_range.start as isize);
if glyph_span.len() == 1 {
// Fast path: 1-to-1 mapping of byte offset to single glyph.
//
// TODO(Issue #214): cluster ranges need to be computed before
// shaping, and then consulted here.
// for now, just pretend that every character is a cluster start.
// (i.e., pretend there are no combining character sequences).
// 1-to-1 mapping of character to glyph also treated as ligature start.
//
// NB: When we acquire the ability to handle ligatures that cross word boundaries,
// we'll need to do something special to handle `word-spacing` properly.
let character = text[byte_range.clone()].chars().next().unwrap();
if is_bidi_control(character) {
// Don't add any glyphs for bidi control chars
} else if character == '\t' {
// Treat tabs in pre-formatted text as a fixed number of spaces.
//
// TODO: Proper tab stops.
const TAB_COLS: i32 = 8;
let (space_glyph_id, space_advance) = glyph_space_advance(self.font);
let advance = Au::from_f64_px(space_advance) * TAB_COLS;
let data = GlyphData::new(space_glyph_id,
advance,
Default::default(),
true,
true);
glyphs.add_glyph_for_byte_index(byte_idx, character, &data);
} else {
let shape = glyph_data.entry_for_glyph(glyph_span.start, &mut y_pos);
let advance = self.advance_for_shaped_glyph(shape.advance, character, options);
let data = GlyphData::new(shape.codepoint,
advance,
shape.offset,
true,
true);
glyphs.add_glyph_for_byte_index(byte_idx, character, &data);
}
} else {
// collect all glyphs to be assigned to the first character.
let mut datas = vec!();
for glyph_i in glyph_span.clone() {
let shape = glyph_data.entry_for_glyph(glyph_i, &mut y_pos);
datas.push(GlyphData::new(shape.codepoint,
shape.advance,
shape.offset,
true, // treat as cluster start
glyph_i > glyph_span.start));
// all but first are ligature continuations
}
// now add the detailed glyph entry.
glyphs.add_glyphs_for_byte_index(byte_idx, &datas);
}
glyph_span.start = glyph_span.end;
byte_range.start = byte_range.end;
}
// this must be called after adding all glyph data; it sorts the
// lookup table for finding detailed glyphs by associated char index.
glyphs.finalize_changes();
}
fn advance_for_shaped_glyph(&self, mut advance: Au, character: char, options: &ShapingOptions)
-> Au {
if let Some(letter_spacing) = options.letter_spacing {
advance = advance + letter_spacing;
};
// CSS 2.1 § 16.4 states that "word spacing affects each space (U+0020) and non-breaking
// space (U+00A0) left in the text after the white space processing rules have been
// applied. The effect of the property on other word-separator characters is undefined."
// We elect to only space the two required code points.
if character =='' || character == '\u{a0}' {
// https://drafts.csswg.org/css-text-3/#word-spacing-property
let (length, percent) = options.word_spacing;
advance = (advance + length) + Au::new((advance.0 as f32 * percent.into_inner()) as i32);
}
advance
}
}
/// Callbacks from Harfbuzz when font map and glyph advance lookup needed.
struct FontFuncs(*mut hb_font_funcs_t);
unsafe impl Sync for FontFuncs {}
lazy_static! {
static ref HB_FONT_FUNCS: FontFuncs = unsafe {
let hb_funcs = hb_font_funcs_create();
hb_font_funcs_set_nominal_glyph_func(hb_funcs, Some(glyph_func), ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_advance_func(
hb_funcs, Some(glyph_h_advance_func), ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_kerning_func(
hb_funcs, Some(glyph_h_kerning_func), ptr::null_mut(), None);
FontFuncs(hb_funcs)
};
}
extern fn glyph_func(_: *mut hb_font_t,
font_data: *mut c_void,
unicode: hb_codepoint_t,
glyph: *mut hb_codepoint_t,
_: *mut c_void)
-> hb_bool_t {
let font: *const Font = font_data as *const Font;
assert!(!font.is_null());
unsafe {
match (*font).glyph_index(char::from_u32(unicode).unwrap()) {
Some(g) => {
|
new
|
identifier_name
|
harfbuzz.rs
|
y_offset = Au::from_f64_px(y_offset);
let x_advance = Au::from_f64_px(x_advance);
let y_advance = Au::from_f64_px(y_advance);
let offset = if x_offset == Au(0) && y_offset == Au(0) && y_advance == Au(0) {
None
} else {
// adjust the pen..
if y_advance > Au(0) {
*y_pos = *y_pos - y_advance;
}
Some(Point2D::new(x_offset, *y_pos - y_offset))
};
ShapedGlyphEntry {
codepoint: (*glyph_info_i).codepoint as GlyphId,
advance: x_advance,
offset: offset,
}
}
}
}
#[derive(Debug)]
pub struct Shaper {
hb_face: *mut hb_face_t,
hb_font: *mut hb_font_t,
font: *const Font,
}
impl Drop for Shaper {
fn drop(&mut self) {
unsafe {
assert!(!self.hb_face.is_null());
hb_face_destroy(self.hb_face);
assert!(!self.hb_font.is_null());
hb_font_destroy(self.hb_font);
}
}
}
impl Shaper {
pub fn new(font: *const Font) -> Shaper {
unsafe {
let hb_face: *mut hb_face_t =
hb_face_create_for_tables(Some(font_table_func),
font as *const c_void as *mut c_void,
None);
let hb_font: *mut hb_font_t = hb_font_create(hb_face);
// Set points-per-em. if zero, performs no hinting in that direction.
let pt_size = (*font).actual_pt_size.to_f64_px();
hb_font_set_ppem(hb_font, pt_size as c_uint, pt_size as c_uint);
// Set scaling. Note that this takes 16.16 fixed point.
hb_font_set_scale(hb_font,
Shaper::float_to_fixed(pt_size) as c_int,
Shaper::float_to_fixed(pt_size) as c_int);
// configure static function callbacks.
hb_font_set_funcs(hb_font, HB_FONT_FUNCS.0, font as *mut Font as *mut c_void, None);
Shaper {
hb_face: hb_face,
hb_font: hb_font,
font: font,
}
}
}
fn float_to_fixed(f: f64) -> i32 {
float_to_fixed(16, f)
}
fn fixed_to_float(i: hb_position_t) -> f64 {
fixed_to_float(16, i)
}
}
impl ShaperMethods for Shaper {
/// Calculate the layout metrics associated with the given text when painted in a specific
/// font.
fn shape_text(&self, text: &str, options: &ShapingOptions, glyphs: &mut GlyphStore) {
unsafe {
let hb_buffer: *mut hb_buffer_t = hb_buffer_create();
hb_buffer_set_direction(hb_buffer, if options.flags.contains(ShapingFlags::RTL_FLAG) {
HB_DIRECTION_RTL
} else {
HB_DIRECTION_LTR
});
hb_buffer_set_script(hb_buffer, options.script.to_hb_script());
hb_buffer_add_utf8(hb_buffer,
text.as_ptr() as *const c_char,
text.len() as c_int,
0,
text.len() as c_int);
let mut features = Vec::new();
if options.flags.contains(ShapingFlags::IGNORE_LIGATURES_SHAPING_FLAG) {
features.push(hb_feature_t {
tag: LIGA,
value: 0,
start: 0,
end: hb_buffer_get_length(hb_buffer),
})
}
if options.flags.contains(ShapingFlags::DISABLE_KERNING_SHAPING_FLAG) {
features.push(hb_feature_t {
tag: KERN,
value: 0,
start: 0,
end: hb_buffer_get_length(hb_buffer),
})
}
hb_shape(self.hb_font, hb_buffer, features.as_mut_ptr(), features.len() as u32);
self.save_glyph_results(text, options, glyphs, hb_buffer);
hb_buffer_destroy(hb_buffer);
}
}
}
impl Shaper {
fn save_glyph_results(&self,
text: &str,
options: &ShapingOptions,
glyphs: &mut GlyphStore,
buffer: *mut hb_buffer_t) {
let glyph_data = ShapedGlyphData::new(buffer);
let glyph_count = glyph_data.len();
let byte_max = text.len();
debug!("Shaped text[byte count={}], got back {} glyph info records.",
byte_max,
glyph_count);
// make map of what chars have glyphs
let mut byte_to_glyph = vec![NO_GLYPH; byte_max];
debug!("(glyph idx) -> (text byte offset)");
for i in 0..glyph_data.len() {
let loc = glyph_data.byte_offset_of_glyph(i) as usize;
if loc < byte_max {
byte_to_glyph[loc] = i as i32;
} else {
debug!("ERROR: tried to set out of range byte_to_glyph: idx={}, glyph idx={}",
loc,
i);
}
debug!("{} -> {}", i, loc);
}
debug!("text: {:?}", text);
debug!("(char idx): char->(glyph index):");
for (i, ch) in text.char_indices() {
debug!("{}: {:?} --> {}", i, ch, byte_to_glyph[i]);
}
let mut glyph_span = 0..0;
let mut byte_range = 0..0;
let mut y_pos = Au(0);
// main loop over each glyph. each iteration usually processes 1 glyph and 1+ chars.
// in cases with complex glyph-character associations, 2+ glyphs and 1+ chars can be
// processed.
while glyph_span.start < glyph_count {
debug!("Processing glyph at idx={}", glyph_span.start);
glyph_span.end = glyph_span.start;
byte_range.end = glyph_data.byte_offset_of_glyph(glyph_span.start) as usize;
while byte_range.end < byte_max {
byte_range.end += 1;
// Extend the byte range to include any following byte without its own glyph.
while byte_range.end < byte_max && byte_to_glyph[byte_range.end] == NO_GLYPH {
byte_range.end += 1;
}
// Extend the glyph range to include all glyphs covered by bytes processed so far.
let mut max_glyph_idx = glyph_span.end;
for glyph_idx in &byte_to_glyph[byte_range.clone()] {
if *glyph_idx!= NO_GLYPH {
max_glyph_idx = cmp::max(*glyph_idx as usize + 1, max_glyph_idx);
}
}
if max_glyph_idx > glyph_span.end {
glyph_span.end = max_glyph_idx;
debug!("Extended glyph span to {:?}", glyph_span);
}
// if there's just one glyph, then we don't need further checks.
if glyph_span.len() == 1 { break; }
// if no glyphs were found yet, extend the char byte range more.
if glyph_span.len() == 0 { continue; }
// If byte_range now includes all the byte offsets found in glyph_span, then we
// have found a contiguous "cluster" and can stop extending it.
let mut all_glyphs_are_within_cluster: bool = true;
for j in glyph_span.clone() {
let loc = glyph_data.byte_offset_of_glyph(j) as usize;
if!(byte_range.start <= loc && loc < byte_range.end) {
all_glyphs_are_within_cluster = false;
break
}
}
if all_glyphs_are_within_cluster {
break
}
// Otherwise, the bytes we have seen so far correspond to a non-contiguous set of
// glyphs. Keep extending byte_range until we fill in all the holes in the glyph
// span or reach the end of the text.
}
assert!(byte_range.len() > 0);
assert!(glyph_span.len() > 0);
// Now byte_range is the ligature clump formed by the glyphs in glyph_span.
// We will save these glyphs to the glyph store at the index of the first byte.
let byte_idx = ByteIndex(byte_range.start as isize);
if glyph_span.len() == 1 {
// Fast path: 1-to-1 mapping of byte offset to single glyph.
//
// TODO(Issue #214): cluster ranges need to be computed before
// shaping, and then consulted here.
// for now, just pretend that every character is a cluster start.
// (i.e., pretend there are no combining character sequences).
// 1-to-1 mapping of character to glyph also treated as ligature start.
//
// NB: When we acquire the ability to handle ligatures that cross word boundaries,
// we'll need to do something special to handle `word-spacing` properly.
let character = text[byte_range.clone()].chars().next().unwrap();
if is_bidi_control(character) {
// Don't add any glyphs for bidi control chars
} else if character == '\t' {
// Treat tabs in pre-formatted text as a fixed number of spaces.
//
// TODO: Proper tab stops.
const TAB_COLS: i32 = 8;
let (space_glyph_id, space_advance) = glyph_space_advance(self.font);
let advance = Au::from_f64_px(space_advance) * TAB_COLS;
let data = GlyphData::new(space_glyph_id,
advance,
Default::default(),
true,
true);
glyphs.add_glyph_for_byte_index(byte_idx, character, &data);
} else {
let shape = glyph_data.entry_for_glyph(glyph_span.start, &mut y_pos);
let advance = self.advance_for_shaped_glyph(shape.advance, character, options);
let data = GlyphData::new(shape.codepoint,
advance,
shape.offset,
true,
true);
glyphs.add_glyph_for_byte_index(byte_idx, character, &data);
}
} else {
// collect all glyphs to be assigned to the first character.
let mut datas = vec!();
for glyph_i in glyph_span.clone() {
let shape = glyph_data.entry_for_glyph(glyph_i, &mut y_pos);
datas.push(GlyphData::new(shape.codepoint,
shape.advance,
shape.offset,
true, // treat as cluster start
glyph_i > glyph_span.start));
// all but first are ligature continuations
}
// now add the detailed glyph entry.
glyphs.add_glyphs_for_byte_index(byte_idx, &datas);
}
glyph_span.start = glyph_span.end;
byte_range.start = byte_range.end;
}
// this must be called after adding all glyph data; it sorts the
// lookup table for finding detailed glyphs by associated char index.
glyphs.finalize_changes();
}
fn advance_for_shaped_glyph(&self, mut advance: Au, character: char, options: &ShapingOptions)
-> Au {
if let Some(letter_spacing) = options.letter_spacing {
advance = advance + letter_spacing;
};
// CSS 2.1 § 16.4 states that "word spacing affects each space (U+0020) and non-breaking
// space (U+00A0) left in the text after the white space processing rules have been
// applied. The effect of the property on other word-separator characters is undefined."
// We elect to only space the two required code points.
if character =='' || character == '\u{a0}' {
// https://drafts.csswg.org/css-text-3/#word-spacing-property
let (length, percent) = options.word_spacing;
advance = (advance + length) + Au::new((advance.0 as f32 * percent.into_inner()) as i32);
}
advance
}
}
/// Callbacks from Harfbuzz when font map and glyph advance lookup needed.
struct FontFuncs(*mut hb_font_funcs_t);
unsafe impl Sync for FontFuncs {}
lazy_static! {
static ref HB_FONT_FUNCS: FontFuncs = unsafe {
let hb_funcs = hb_font_funcs_create();
hb_font_funcs_set_nominal_glyph_func(hb_funcs, Some(glyph_func), ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_advance_func(
hb_funcs, Some(glyph_h_advance_func), ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_kerning_func(
hb_funcs, Some(glyph_h_kerning_func), ptr::null_mut(), None);
FontFuncs(hb_funcs)
};
}
extern fn glyph_func(_: *mut hb_font_t,
font_data: *mut c_void,
unicode: hb_codepoint_t,
glyph: *mut hb_codepoint_t,
_: *mut c_void)
-> hb_bool_t {
let font: *const Font = font_data as *const Font;
assert!(!font.is_null());
unsafe {
match (*font).glyph_index(char::from_u32(unicode).unwrap()) {
Some(g) => {
*glyph = g as hb_codepoint_t;
true as hb_bool_t
}
None => false as hb_bool_t
}
}
}
extern fn glyph_h_advance_func(_: *mut hb_font_t,
font_data: *mut c_void,
glyph: hb_codepoint_t,
_: *mut c_void)
-> hb_position_t {
let font: *mut Font = font_data as *mut Font;
assert!(!font.is_null());
unsafe {
let advance = (*font).glyph_h_advance(glyph as GlyphId);
Shaper::float_to_fixed(advance)
}
}
fn glyph_space_advance(font: *const Font) -> (hb_codepoint_t, f64) {
let space_unicode ='';
let space_glyph: hb_codepoint_t;
match unsafe { (*font).glyph_index(space_unicode) } {
Some(g) => {
space_glyph = g as hb_codepoint_t;
}
None => panic!("No space info")
}
let space_advance = unsafe { (*font).glyph_h_advance(space_glyph as GlyphId) };
(space_glyph, space_advance)
}
extern fn glyph_h_kerning_func(_: *mut hb_font_t,
font_data: *mut c_void,
first_glyph: hb_codepoint_t,
second_glyph: hb_codepoint_t,
_: *mut c_void)
-> hb_position_t {
let font: *mut Font = font_data as *mut Font;
assert!(!font.is_null());
unsafe {
let advance = (*font).glyph_h_kerning(first_glyph as GlyphId, second_glyph as GlyphId);
|
Shaper::float_to_fixed(advance)
}
}
|
random_line_split
|
|
harfbuzz.rs
|
codepoint: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
}
impl ShapedGlyphData {
pub fn new(buffer: *mut hb_buffer_t) -> ShapedGlyphData {
unsafe {
let mut glyph_count = 0;
let glyph_infos = hb_buffer_get_glyph_infos(buffer, &mut glyph_count);
assert!(!glyph_infos.is_null());
let mut pos_count = 0;
let pos_infos = hb_buffer_get_glyph_positions(buffer, &mut pos_count);
assert!(!pos_infos.is_null());
assert_eq!(glyph_count, pos_count);
ShapedGlyphData {
count: glyph_count as usize,
glyph_infos: glyph_infos,
pos_infos: pos_infos,
}
}
}
#[inline(always)]
fn byte_offset_of_glyph(&self, i: usize) -> u32 {
assert!(i < self.count);
unsafe {
let glyph_info_i = self.glyph_infos.offset(i as isize);
(*glyph_info_i).cluster
}
}
pub fn len(&self) -> usize {
self.count
}
/// Returns shaped glyph data for one glyph, and updates the y-position of the pen.
pub fn entry_for_glyph(&self, i: usize, y_pos: &mut Au) -> ShapedGlyphEntry {
assert!(i < self.count);
unsafe {
let glyph_info_i = self.glyph_infos.offset(i as isize);
let pos_info_i = self.pos_infos.offset(i as isize);
let x_offset = Shaper::fixed_to_float((*pos_info_i).x_offset);
let y_offset = Shaper::fixed_to_float((*pos_info_i).y_offset);
let x_advance = Shaper::fixed_to_float((*pos_info_i).x_advance);
let y_advance = Shaper::fixed_to_float((*pos_info_i).y_advance);
let x_offset = Au::from_f64_px(x_offset);
let y_offset = Au::from_f64_px(y_offset);
let x_advance = Au::from_f64_px(x_advance);
let y_advance = Au::from_f64_px(y_advance);
let offset = if x_offset == Au(0) && y_offset == Au(0) && y_advance == Au(0) {
None
} else {
// adjust the pen..
if y_advance > Au(0) {
*y_pos = *y_pos - y_advance;
}
Some(Point2D::new(x_offset, *y_pos - y_offset))
};
ShapedGlyphEntry {
codepoint: (*glyph_info_i).codepoint as GlyphId,
advance: x_advance,
offset: offset,
}
}
}
}
#[derive(Debug)]
pub struct Shaper {
hb_face: *mut hb_face_t,
hb_font: *mut hb_font_t,
font: *const Font,
}
impl Drop for Shaper {
fn drop(&mut self) {
unsafe {
assert!(!self.hb_face.is_null());
hb_face_destroy(self.hb_face);
assert!(!self.hb_font.is_null());
hb_font_destroy(self.hb_font);
}
}
}
impl Shaper {
pub fn new(font: *const Font) -> Shaper {
unsafe {
let hb_face: *mut hb_face_t =
hb_face_create_for_tables(Some(font_table_func),
font as *const c_void as *mut c_void,
None);
let hb_font: *mut hb_font_t = hb_font_create(hb_face);
// Set points-per-em. if zero, performs no hinting in that direction.
let pt_size = (*font).actual_pt_size.to_f64_px();
hb_font_set_ppem(hb_font, pt_size as c_uint, pt_size as c_uint);
// Set scaling. Note that this takes 16.16 fixed point.
hb_font_set_scale(hb_font,
Shaper::float_to_fixed(pt_size) as c_int,
Shaper::float_to_fixed(pt_size) as c_int);
// configure static function callbacks.
hb_font_set_funcs(hb_font, HB_FONT_FUNCS.0, font as *mut Font as *mut c_void, None);
Shaper {
hb_face: hb_face,
hb_font: hb_font,
font: font,
}
}
}
fn float_to_fixed(f: f64) -> i32 {
float_to_fixed(16, f)
}
fn fixed_to_float(i: hb_position_t) -> f64 {
fixed_to_float(16, i)
}
}
impl ShaperMethods for Shaper {
/// Calculate the layout metrics associated with the given text when painted in a specific
/// font.
fn shape_text(&self, text: &str, options: &ShapingOptions, glyphs: &mut GlyphStore) {
unsafe {
let hb_buffer: *mut hb_buffer_t = hb_buffer_create();
hb_buffer_set_direction(hb_buffer, if options.flags.contains(ShapingFlags::RTL_FLAG) {
HB_DIRECTION_RTL
} else {
HB_DIRECTION_LTR
});
hb_buffer_set_script(hb_buffer, options.script.to_hb_script());
hb_buffer_add_utf8(hb_buffer,
text.as_ptr() as *const c_char,
text.len() as c_int,
0,
text.len() as c_int);
let mut features = Vec::new();
if options.flags.contains(ShapingFlags::IGNORE_LIGATURES_SHAPING_FLAG) {
features.push(hb_feature_t {
tag: LIGA,
value: 0,
start: 0,
end: hb_buffer_get_length(hb_buffer),
})
}
if options.flags.contains(ShapingFlags::DISABLE_KERNING_SHAPING_FLAG) {
features.push(hb_feature_t {
tag: KERN,
value: 0,
start: 0,
end: hb_buffer_get_length(hb_buffer),
})
}
hb_shape(self.hb_font, hb_buffer, features.as_mut_ptr(), features.len() as u32);
self.save_glyph_results(text, options, glyphs, hb_buffer);
hb_buffer_destroy(hb_buffer);
}
}
}
impl Shaper {
fn save_glyph_results(&self,
text: &str,
options: &ShapingOptions,
glyphs: &mut GlyphStore,
buffer: *mut hb_buffer_t)
|
i);
}
debug!("{} -> {}", i, loc);
}
debug!("text: {:?}", text);
debug!("(char idx): char->(glyph index):");
for (i, ch) in text.char_indices() {
debug!("{}: {:?} --> {}", i, ch, byte_to_glyph[i]);
}
let mut glyph_span = 0..0;
let mut byte_range = 0..0;
let mut y_pos = Au(0);
// main loop over each glyph. each iteration usually processes 1 glyph and 1+ chars.
// in cases with complex glyph-character associations, 2+ glyphs and 1+ chars can be
// processed.
while glyph_span.start < glyph_count {
debug!("Processing glyph at idx={}", glyph_span.start);
glyph_span.end = glyph_span.start;
byte_range.end = glyph_data.byte_offset_of_glyph(glyph_span.start) as usize;
while byte_range.end < byte_max {
byte_range.end += 1;
// Extend the byte range to include any following byte without its own glyph.
while byte_range.end < byte_max && byte_to_glyph[byte_range.end] == NO_GLYPH {
byte_range.end += 1;
}
// Extend the glyph range to include all glyphs covered by bytes processed so far.
let mut max_glyph_idx = glyph_span.end;
for glyph_idx in &byte_to_glyph[byte_range.clone()] {
if *glyph_idx!= NO_GLYPH {
max_glyph_idx = cmp::max(*glyph_idx as usize + 1, max_glyph_idx);
}
}
if max_glyph_idx > glyph_span.end {
glyph_span.end = max_glyph_idx;
debug!("Extended glyph span to {:?}", glyph_span);
}
// if there's just one glyph, then we don't need further checks.
if glyph_span.len() == 1 { break; }
// if no glyphs were found yet, extend the char byte range more.
if glyph_span.len() == 0 { continue; }
// If byte_range now includes all the byte offsets found in glyph_span, then we
// have found a contiguous "cluster" and can stop extending it.
let mut all_glyphs_are_within_cluster: bool = true;
for j in glyph_span.clone() {
let loc = glyph_data.byte_offset_of_glyph(j) as usize;
if!(byte_range.start <= loc && loc < byte_range.end) {
all_glyphs_are_within_cluster = false;
break
}
}
if all_glyphs_are_within_cluster {
break
}
// Otherwise, the bytes we have seen so far correspond to a non-contiguous set of
// glyphs. Keep extending byte_range until we fill in all the holes in the glyph
// span or reach the end of the text.
}
assert!(byte_range.len() > 0);
assert!(glyph_span.len() > 0);
// Now byte_range is the ligature clump formed by the glyphs in glyph_span.
// We will save these glyphs to the glyph store at the index of the first byte.
let byte_idx = ByteIndex(byte_range.start as isize);
if glyph_span.len() == 1 {
// Fast path: 1-to-1 mapping of byte offset to single glyph.
//
// TODO(Issue #214): cluster ranges need to be computed before
// shaping, and then consulted here.
// for now, just pretend that every character is a cluster start.
// (i.e., pretend there are no combining character sequences).
// 1-to-1 mapping of character to glyph also treated as ligature start.
//
// NB: When we acquire the ability to handle ligatures that cross word boundaries,
// we'll need to do something special to handle `word-spacing` properly.
let character = text[byte_range.clone()].chars().next().unwrap();
if is_bidi_control(character) {
// Don't add any glyphs for bidi control chars
} else if character == '\t' {
// Treat tabs in pre-formatted text as a fixed number of spaces.
//
// TODO: Proper tab stops.
const TAB_COLS: i32 = 8;
let (space_glyph_id, space_advance) = glyph_space_advance(self.font);
let advance = Au::from_f64_px(space_advance) * TAB_COLS;
let data = GlyphData::new(space_glyph_id,
advance,
Default::default(),
true,
true);
glyphs.add_glyph_for_byte_index(byte_idx, character, &data);
} else {
let shape = glyph_data.entry_for_glyph(glyph_span.start, &mut y_pos);
let advance = self.advance_for_shaped_glyph(shape.advance, character, options);
let data = GlyphData::new(shape.codepoint,
advance,
shape.offset,
true,
true);
glyphs.add_glyph_for_byte_index(byte_idx, character, &data);
}
} else {
// collect all glyphs to be assigned to the first character.
let mut datas = vec!();
for glyph_i in glyph_span.clone() {
let shape = glyph_data.entry_for_glyph(glyph_i, &mut y_pos);
datas.push(GlyphData::new(shape.codepoint,
shape.advance,
shape.offset,
true, // treat as cluster start
glyph_i > glyph_span.start));
// all but first are ligature continuations
}
// now add the detailed glyph entry.
glyphs.add_glyphs_for_byte_index(byte_idx, &datas);
}
glyph_span.start = glyph_span.end;
byte_range.start = byte_range.end;
}
// this must be called after adding all glyph data; it sorts the
// lookup table for finding detailed glyphs by associated char index.
glyphs.finalize_changes();
}
/// Applies CSS `letter-spacing` and `word-spacing` adjustments to the
/// shaper-reported advance of a single glyph, returning the adjusted advance.
fn advance_for_shaped_glyph(&self, mut advance: Au, character: char, options: &ShapingOptions)
-> Au {
// letter-spacing is added unconditionally to every glyph's advance.
if let Some(letter_spacing) = options.letter_spacing {
advance = advance + letter_spacing;
};
// CSS 2.1 § 16.4 states that "word spacing affects each space (U+0020) and non-breaking
// space (U+00A0) left in the text after the white space processing rules have been
// applied. The effect of the property on other word-separator characters is undefined."
// We elect to only space the two required code points.
// NOTE(review): the first comparison is presumably against U+0020 (`' '`);
// the literal appears garbled in this extraction — confirm against the real source.
if character =='' || character == '\u{a0}' {
// https://drafts.csswg.org/css-text-3/#word-spacing-property
// word-spacing is a (fixed length, percentage-of-advance) pair; both parts are applied.
let (length, percent) = options.word_spacing;
advance = (advance + length) + Au::new((advance.0 as f32 * percent.into_inner()) as i32);
}
advance
}
}
/// Callbacks from Harfbuzz when font map and glyph advance lookup needed.
struct FontFuncs(*mut hb_font_funcs_t);
unsafe impl Sync for FontFuncs {}
lazy_static! {
static ref HB_FONT_FUNCS: FontFuncs = unsafe {
let hb_funcs = hb_font_funcs_create();
hb_font_funcs_set_nominal_glyph_func(hb_funcs, Some(glyph_func), ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_advance_func(
hb_funcs, Some(glyph_h_advance_func), ptr::null_mut(), None);
hb_font_funcs_set_glyph_h_kerning_func(
hb_funcs, Some(glyph_h_kerning_func), ptr::null_mut(), None);
FontFuncs(hb_funcs)
};
}
extern fn glyph_func(_: *mut hb_font_t,
font_data: *mut c_void,
unicode: hb_codepoint_t,
glyph: *mut hb_codepoint_t,
_: *mut c_void)
-> hb_bool_t {
let font: *const Font = font_data as *const Font;
assert!(!font.is_null());
unsafe {
match (*font).glyph_index(char::from_u32(unicode).unwrap()) {
Some(g) => {
|
{
let glyph_data = ShapedGlyphData::new(buffer);
let glyph_count = glyph_data.len();
let byte_max = text.len();
debug!("Shaped text[byte count={}], got back {} glyph info records.",
byte_max,
glyph_count);
// make map of what chars have glyphs
let mut byte_to_glyph = vec![NO_GLYPH; byte_max];
debug!("(glyph idx) -> (text byte offset)");
for i in 0..glyph_data.len() {
let loc = glyph_data.byte_offset_of_glyph(i) as usize;
if loc < byte_max {
byte_to_glyph[loc] = i as i32;
} else {
debug!("ERROR: tried to set out of range byte_to_glyph: idx={}, glyph idx={}",
loc,
|
identifier_body
|
issue-60726.rs
|
use std::marker::PhantomData;
pub struct True;
pub struct False;
pub trait InterfaceType{
type Send;
}
|
pub struct DynTrait<I>{
_interface:PhantomData<fn()->I>,
_unsync_unsend:PhantomData<::std::rc::Rc<()>>,
}
unsafe impl<I> Send for DynTrait<I>
where
I:InterfaceType<Send=True>
{}
// @has issue_60726/struct.IntoIter.html
// @has - '//*[@id="synthetic-implementations-list"]//*[@class="impl has-srclink"]//h3[@class="code-header in-band"]' \
// "impl<T>!Send for IntoIter<T>"
// @has - '//*[@id="synthetic-implementations-list"]//*[@class="impl has-srclink"]//h3[@class="code-header in-band"]' \
// "impl<T>!Sync for IntoIter<T>"
pub struct IntoIter<T>{
hello:DynTrait<FooInterface<T>>,
}
|
pub struct FooInterface<T>(PhantomData<fn()->T>);
impl<T> InterfaceType for FooInterface<T> {
type Send=False;
}
|
random_line_split
|
issue-60726.rs
|
use std::marker::PhantomData;
pub struct True;
pub struct
|
;
pub trait InterfaceType{
type Send;
}
pub struct FooInterface<T>(PhantomData<fn()->T>);
impl<T> InterfaceType for FooInterface<T> {
type Send=False;
}
pub struct DynTrait<I>{
_interface:PhantomData<fn()->I>,
_unsync_unsend:PhantomData<::std::rc::Rc<()>>,
}
unsafe impl<I> Send for DynTrait<I>
where
I:InterfaceType<Send=True>
{}
// @has issue_60726/struct.IntoIter.html
// @has - '//*[@id="synthetic-implementations-list"]//*[@class="impl has-srclink"]//h3[@class="code-header in-band"]' \
// "impl<T>!Send for IntoIter<T>"
// @has - '//*[@id="synthetic-implementations-list"]//*[@class="impl has-srclink"]//h3[@class="code-header in-band"]' \
// "impl<T>!Sync for IntoIter<T>"
pub struct IntoIter<T>{
hello:DynTrait<FooInterface<T>>,
}
|
False
|
identifier_name
|
duplicate.rs
|
// Copyright 2014 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use std::collections::HashMap;
use front::ast::Rule as FRule;
use middle::analysis::ast::*;
use monad::partial::Partial::*;
use rust;
use std::ops::Deref;
pub fn rule_duplicate<'a>(cx: &'a ExtCtxt<'a>, mut grammar: Grammar,
rules: Vec<FRule>) -> Partial<Grammar>
{
DuplicateItem::analyse(cx, rules.into_iter(), String::from("rule"))
.map(|rules|
rules.into_iter().map(|(id, frule)| (id, Rule::new(frule.name, frule.def))).collect())
.map(move |rules| { grammar.rules = rules; grammar })
}
pub fn rust_functions_duplicate<'a>(cx: &'a ExtCtxt<'a>, mut grammar: Grammar,
items: Vec<RItem>) -> Partial<Grammar>
{
let mut functions = vec![];
let mut others = vec![];
for item in items {
if let &rust::Item_::ItemFn(..) = &item.node {
functions.push(item);
}
else {
others.push(item);
}
}
DuplicateItem::analyse(cx, functions.into_iter(), String::from("rust function"))
.map(move |functions| {
grammar.rust_functions = functions;
grammar.rust_items = others;
grammar
})
}
impl ItemIdent for rust::Item {
fn ident(&self) -> Ident {
self.ident.clone()
}
}
impl ItemSpan for rust::Item {
fn span(&self) -> Span {
self.span.clone()
}
}
impl<InnerItem: ItemIdent> ItemIdent for rust::P<InnerItem> {
fn ident(&self) -> Ident {
self.deref().ident()
}
}
impl<InnerItem: ItemSpan> ItemSpan for rust::P<InnerItem> {
fn span(&self) -> Span {
self.deref().span()
}
}
impl ItemIdent for Rule {
fn ident(&self) -> Ident {
self.name.node.clone()
}
}
impl ItemSpan for Rule {
fn span(&self) -> Span
|
}
struct DuplicateItem<'a, Item>
{
cx: &'a ExtCtxt<'a>,
items: HashMap<Ident, Item>,
has_duplicate: bool,
what_is_duplicated: String
}
impl<'a, Item> DuplicateItem<'a, Item> where
Item: ItemIdent + ItemSpan
{
pub fn analyse<ItemIter>(cx: &'a ExtCtxt<'a>, iter: ItemIter, item_kind: String)
-> Partial<HashMap<Ident, Item>> where
ItemIter: Iterator<Item=Item>
{
let (min_size, _) = iter.size_hint();
DuplicateItem {
cx: cx,
items: HashMap::with_capacity(min_size),
has_duplicate: false,
what_is_duplicated: item_kind
}.populate(iter)
.make()
}
fn populate<ItemIter: Iterator<Item=Item>>(mut self, iter: ItemIter)
-> DuplicateItem<'a, Item>
{
for item in iter {
let ident = item.ident();
if self.items.contains_key(&ident) {
self.duplicate_items(self.items.get(&ident).unwrap(), item);
self.has_duplicate = true;
} else {
self.items.insert(ident, item);
}
}
self
}
fn duplicate_items(&self, pre: &Item, current: Item) {
self.cx.span_err(current.span(), format!(
"duplicate definition of {} `{}`",
self.what_is_duplicated, current.ident()).as_str());
self.cx.span_note(pre.span(), format!(
"previous definition of {} `{}` here",
self.what_is_duplicated, pre.ident()).as_str());
}
fn make(self) -> Partial<HashMap<Ident, Item>> {
if self.has_duplicate {
Fake(self.items)
} else {
Value(self.items)
}
}
}
|
{
self.name.span.clone()
}
|
identifier_body
|
duplicate.rs
|
// Copyright 2014 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use std::collections::HashMap;
use front::ast::Rule as FRule;
use middle::analysis::ast::*;
use monad::partial::Partial::*;
use rust;
use std::ops::Deref;
pub fn rule_duplicate<'a>(cx: &'a ExtCtxt<'a>, mut grammar: Grammar,
rules: Vec<FRule>) -> Partial<Grammar>
{
DuplicateItem::analyse(cx, rules.into_iter(), String::from("rule"))
.map(|rules|
rules.into_iter().map(|(id, frule)| (id, Rule::new(frule.name, frule.def))).collect())
.map(move |rules| { grammar.rules = rules; grammar })
}
pub fn rust_functions_duplicate<'a>(cx: &'a ExtCtxt<'a>, mut grammar: Grammar,
items: Vec<RItem>) -> Partial<Grammar>
{
let mut functions = vec![];
let mut others = vec![];
for item in items {
if let &rust::Item_::ItemFn(..) = &item.node
|
else {
others.push(item);
}
}
DuplicateItem::analyse(cx, functions.into_iter(), String::from("rust function"))
.map(move |functions| {
grammar.rust_functions = functions;
grammar.rust_items = others;
grammar
})
}
impl ItemIdent for rust::Item {
fn ident(&self) -> Ident {
self.ident.clone()
}
}
impl ItemSpan for rust::Item {
fn span(&self) -> Span {
self.span.clone()
}
}
impl<InnerItem: ItemIdent> ItemIdent for rust::P<InnerItem> {
fn ident(&self) -> Ident {
self.deref().ident()
}
}
impl<InnerItem: ItemSpan> ItemSpan for rust::P<InnerItem> {
fn span(&self) -> Span {
self.deref().span()
}
}
impl ItemIdent for Rule {
fn ident(&self) -> Ident {
self.name.node.clone()
}
}
impl ItemSpan for Rule {
fn span(&self) -> Span {
self.name.span.clone()
}
}
struct DuplicateItem<'a, Item>
{
cx: &'a ExtCtxt<'a>,
items: HashMap<Ident, Item>,
has_duplicate: bool,
what_is_duplicated: String
}
impl<'a, Item> DuplicateItem<'a, Item> where
Item: ItemIdent + ItemSpan
{
pub fn analyse<ItemIter>(cx: &'a ExtCtxt<'a>, iter: ItemIter, item_kind: String)
-> Partial<HashMap<Ident, Item>> where
ItemIter: Iterator<Item=Item>
{
let (min_size, _) = iter.size_hint();
DuplicateItem {
cx: cx,
items: HashMap::with_capacity(min_size),
has_duplicate: false,
what_is_duplicated: item_kind
}.populate(iter)
.make()
}
fn populate<ItemIter: Iterator<Item=Item>>(mut self, iter: ItemIter)
-> DuplicateItem<'a, Item>
{
for item in iter {
let ident = item.ident();
if self.items.contains_key(&ident) {
self.duplicate_items(self.items.get(&ident).unwrap(), item);
self.has_duplicate = true;
} else {
self.items.insert(ident, item);
}
}
self
}
fn duplicate_items(&self, pre: &Item, current: Item) {
self.cx.span_err(current.span(), format!(
"duplicate definition of {} `{}`",
self.what_is_duplicated, current.ident()).as_str());
self.cx.span_note(pre.span(), format!(
"previous definition of {} `{}` here",
self.what_is_duplicated, pre.ident()).as_str());
}
fn make(self) -> Partial<HashMap<Ident, Item>> {
if self.has_duplicate {
Fake(self.items)
} else {
Value(self.items)
}
}
}
|
{
functions.push(item);
}
|
conditional_block
|
duplicate.rs
|
// Copyright 2014 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use std::collections::HashMap;
use front::ast::Rule as FRule;
use middle::analysis::ast::*;
use monad::partial::Partial::*;
use rust;
use std::ops::Deref;
/// Detects duplicate rule names among the parsed rules. On success the
/// checked rules are converted from front-end AST rules into middle-end
/// rules and stored into `grammar`.
pub fn rule_duplicate<'a>(cx: &'a ExtCtxt<'a>, mut grammar: Grammar,
  rules: Vec<FRule>) -> Partial<Grammar>
{
  DuplicateItem::analyse(cx, rules.into_iter(), String::from("rule"))
    .map(|checked| {
      checked.into_iter()
        .map(|(id, frule)| (id, Rule::new(frule.name, frule.def)))
        .collect()
    })
    .map(move |converted| {
      grammar.rules = converted;
      grammar
    })
}
/// Splits the inline Rust items of a grammar into free functions and all
/// other items, reports duplicate function names, and stores both groups
/// back into `grammar`.
pub fn rust_functions_duplicate<'a>(cx: &'a ExtCtxt<'a>, mut grammar: Grammar,
  items: Vec<RItem>) -> Partial<Grammar>
{
  // Partition: `fn` items get duplicate-checking, everything else passes through.
  let mut functions = vec![];
  let mut others = vec![];
  for item in items {
    if let &rust::Item_::ItemFn(..) = &item.node {
      functions.push(item);
    }
    else {
      others.push(item);
    }
  }
  // Only function names are analysed for duplicates; other items are kept as-is.
  DuplicateItem::analyse(cx, functions.into_iter(), String::from("rust function"))
  .map(move |functions| {
    grammar.rust_functions = functions;
    grammar.rust_items = others;
    grammar
  })
}
impl ItemIdent for rust::Item {
fn ident(&self) -> Ident {
self.ident.clone()
}
}
impl ItemSpan for rust::Item {
fn span(&self) -> Span {
self.span.clone()
}
}
impl<InnerItem: ItemIdent> ItemIdent for rust::P<InnerItem> {
fn
|
(&self) -> Ident {
self.deref().ident()
}
}
impl<InnerItem: ItemSpan> ItemSpan for rust::P<InnerItem> {
fn span(&self) -> Span {
self.deref().span()
}
}
impl ItemIdent for Rule {
fn ident(&self) -> Ident {
self.name.node.clone()
}
}
impl ItemSpan for Rule {
fn span(&self) -> Span {
self.name.span.clone()
}
}
struct DuplicateItem<'a, Item>
{
cx: &'a ExtCtxt<'a>,
items: HashMap<Ident, Item>,
has_duplicate: bool,
what_is_duplicated: String
}
impl<'a, Item> DuplicateItem<'a, Item> where
 Item: ItemIdent + ItemSpan
{
  /// Runs the duplicate analysis over `iter`. Returns `Value(map)` when all
  /// identifiers are unique, `Fake(map)` when duplicates were reported
  /// (errors have already been emitted via `cx`).
  pub fn analyse<ItemIter>(cx: &'a ExtCtxt<'a>, iter: ItemIter, item_kind: String)
   -> Partial<HashMap<Ident, Item>> where
   ItemIter: Iterator<Item=Item>
  {
    // Pre-size the map from the iterator's lower size bound.
    let (min_size, _) = iter.size_hint();
    DuplicateItem {
      cx: cx,
      items: HashMap::with_capacity(min_size),
      has_duplicate: false,
      what_is_duplicated: item_kind
    }.populate(iter)
    .make()
  }
  /// Inserts each item keyed by its identifier; the first occurrence wins
  /// and later occurrences trigger a duplicate diagnostic.
  fn populate<ItemIter: Iterator<Item=Item>>(mut self, iter: ItemIter)
    -> DuplicateItem<'a, Item>
  {
    for item in iter {
      let ident = item.ident();
      if self.items.contains_key(&ident) {
        self.duplicate_items(self.items.get(&ident).unwrap(), item);
        self.has_duplicate = true;
      } else {
        self.items.insert(ident, item);
      }
    }
    self
  }
  /// Emits an error at the duplicate definition and a note pointing back at
  /// the original definition.
  fn duplicate_items(&self, pre: &Item, current: Item) {
    self.cx.span_err(current.span(), format!(
      "duplicate definition of {} `{}`",
      self.what_is_duplicated, current.ident()).as_str());
    self.cx.span_note(pre.span(), format!(
      "previous definition of {} `{}` here",
      self.what_is_duplicated, pre.ident()).as_str());
  }
  /// Wraps the collected items: `Fake` marks the result as tainted by
  /// duplicates while still carrying the data forward.
  fn make(self) -> Partial<HashMap<Ident, Item>> {
    if self.has_duplicate {
      Fake(self.items)
    } else {
      Value(self.items)
    }
  }
}
|
ident
|
identifier_name
|
duplicate.rs
|
// Copyright 2014 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use std::collections::HashMap;
use front::ast::Rule as FRule;
use middle::analysis::ast::*;
use monad::partial::Partial::*;
use rust;
use std::ops::Deref;
pub fn rule_duplicate<'a>(cx: &'a ExtCtxt<'a>, mut grammar: Grammar,
rules: Vec<FRule>) -> Partial<Grammar>
{
DuplicateItem::analyse(cx, rules.into_iter(), String::from("rule"))
.map(|rules|
rules.into_iter().map(|(id, frule)| (id, Rule::new(frule.name, frule.def))).collect())
.map(move |rules| { grammar.rules = rules; grammar })
}
pub fn rust_functions_duplicate<'a>(cx: &'a ExtCtxt<'a>, mut grammar: Grammar,
items: Vec<RItem>) -> Partial<Grammar>
{
let mut functions = vec![];
let mut others = vec![];
for item in items {
if let &rust::Item_::ItemFn(..) = &item.node {
functions.push(item);
}
else {
others.push(item);
}
}
DuplicateItem::analyse(cx, functions.into_iter(), String::from("rust function"))
.map(move |functions| {
grammar.rust_functions = functions;
grammar.rust_items = others;
grammar
})
}
impl ItemIdent for rust::Item {
fn ident(&self) -> Ident {
self.ident.clone()
}
}
impl ItemSpan for rust::Item {
fn span(&self) -> Span {
self.span.clone()
}
}
impl<InnerItem: ItemIdent> ItemIdent for rust::P<InnerItem> {
fn ident(&self) -> Ident {
self.deref().ident()
}
}
impl<InnerItem: ItemSpan> ItemSpan for rust::P<InnerItem> {
fn span(&self) -> Span {
self.deref().span()
}
}
impl ItemIdent for Rule {
fn ident(&self) -> Ident {
self.name.node.clone()
}
}
impl ItemSpan for Rule {
fn span(&self) -> Span {
self.name.span.clone()
}
}
struct DuplicateItem<'a, Item>
{
cx: &'a ExtCtxt<'a>,
items: HashMap<Ident, Item>,
has_duplicate: bool,
what_is_duplicated: String
}
impl<'a, Item> DuplicateItem<'a, Item> where
Item: ItemIdent + ItemSpan
{
pub fn analyse<ItemIter>(cx: &'a ExtCtxt<'a>, iter: ItemIter, item_kind: String)
-> Partial<HashMap<Ident, Item>> where
ItemIter: Iterator<Item=Item>
{
let (min_size, _) = iter.size_hint();
DuplicateItem {
cx: cx,
items: HashMap::with_capacity(min_size),
has_duplicate: false,
what_is_duplicated: item_kind
}.populate(iter)
.make()
}
fn populate<ItemIter: Iterator<Item=Item>>(mut self, iter: ItemIter)
-> DuplicateItem<'a, Item>
{
for item in iter {
let ident = item.ident();
if self.items.contains_key(&ident) {
self.duplicate_items(self.items.get(&ident).unwrap(), item);
self.has_duplicate = true;
|
}
fn duplicate_items(&self, pre: &Item, current: Item) {
self.cx.span_err(current.span(), format!(
"duplicate definition of {} `{}`",
self.what_is_duplicated, current.ident()).as_str());
self.cx.span_note(pre.span(), format!(
"previous definition of {} `{}` here",
self.what_is_duplicated, pre.ident()).as_str());
}
fn make(self) -> Partial<HashMap<Ident, Item>> {
if self.has_duplicate {
Fake(self.items)
} else {
Value(self.items)
}
}
}
|
} else {
self.items.insert(ident, item);
}
}
self
|
random_line_split
|
sudoku_solve.rs
|
// Part of Cosmos by OpenGenus
const N: usize = 9;
const UNASSIGNED: u8 = 0;
type Board = [[u8; N]; N];
fn solve_sudoku(grid: &mut Board) -> bool {
let row;
let col;
if let Some((r, c)) = find_unassigned_cells(&grid) {
row = r;
col = c;
} else {
// SOLVED
return true;
}
for num in 1..=9 {
if is_safe(&grid, row, col, num) {
grid[row][col] = num;
if solve_sudoku(grid) {
return true;
}
// Failed, try again
grid[row][col] = UNASSIGNED;
}
}
false
}
fn used_in_row(grid: &Board, row: usize, num: u8) -> bool {
for col in 0..N {
if grid[row][col] == num {
return true;
}
}
false
}
fn used_in_col(grid: &Board, col: usize, num: u8) -> bool {
for row in grid.iter().take(N) {
if row[col] == num {
return true;
}
}
false
}
fn used_in_box(grid: &Board, row_start: usize, col_start: usize, num: u8) -> bool {
for row in 0..3 {
for col in 0..3 {
if grid[row + row_start][col + col_start] == num {
return true;
}
}
}
false
}
fn is_safe(grid: &Board, row: usize, col: usize, num: u8) -> bool {
!used_in_row(grid, row, num)
&&!used_in_col(grid, col, num)
&&!used_in_box(grid, row - (row % 3), col - (col % 3), num)
}
fn find_unassigned_cells(grid: &Board) -> Option<(usize, usize)>
|
fn print_grid(grid: &Board) {
for row in grid.iter().take(N) {
for col in 0..N {
print!("{} ", row[col]);
}
println!();
}
}
#[test]
fn test1() {
let mut board = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
];
// Can solve
assert!(solve_sudoku(&mut board));
let solved_board = [
[3, 1, 6, 5, 7, 8, 4, 9, 2],
[5, 2, 9, 1, 3, 4, 7, 6, 8],
[4, 8, 7, 6, 2, 9, 5, 3, 1],
[2, 6, 3, 4, 1, 5, 9, 8, 7],
[9, 7, 4, 8, 6, 3, 1, 2, 5],
[8, 5, 1, 7, 9, 2, 6, 4, 3],
[1, 3, 8, 9, 4, 7, 2, 5, 6],
[6, 9, 2, 3, 5, 1, 8, 7, 4],
[7, 4, 5, 2, 8, 6, 3, 1, 9],
];
assert_eq!(board, solved_board)
}
#[test]
fn test2() {
let mut board = [
[2, 0, 0, 9, 0, 0, 1, 0, 0],
[6, 0, 0, 0, 5, 1, 0, 0, 0],
[0, 9, 5, 2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 0, 0],
[5, 0, 9, 1, 0, 2, 4, 0, 3],
[0, 0, 8, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 5, 6, 3, 0],
[0, 0, 0, 4, 6, 0, 0, 0, 9],
[0, 0, 3, 0, 0, 9, 0, 0, 2],
];
// Can solve
assert!(solve_sudoku(&mut board));
let solved = [
[2, 3, 4, 9, 7, 6, 1, 8, 5],
[6, 8, 7, 3, 5, 1, 9, 2, 4],
[1, 9, 5, 2, 4, 8, 3, 7, 6],
[4, 1, 6, 5, 3, 7, 2, 9, 8],
[5, 7, 9, 1, 8, 2, 4, 6, 3],
[3, 2, 8, 6, 9, 4, 7, 5, 1],
[9, 4, 1, 8, 2, 5, 6, 3, 7],
[7, 5, 2, 4, 6, 3, 8, 1, 9],
[8, 6, 3, 7, 1, 9, 5, 4, 2],
];
assert_eq!(board, solved);
}
|
{
for (row, _) in grid.iter().enumerate().take(N) {
for col in 0..N {
if grid[row][col] == UNASSIGNED {
return Some((row, col));
}
}
}
None
}
|
identifier_body
|
sudoku_solve.rs
|
// Part of Cosmos by OpenGenus
const N: usize = 9;
const UNASSIGNED: u8 = 0;
type Board = [[u8; N]; N];
fn solve_sudoku(grid: &mut Board) -> bool {
let row;
let col;
if let Some((r, c)) = find_unassigned_cells(&grid) {
row = r;
col = c;
} else {
// SOLVED
return true;
}
for num in 1..=9 {
if is_safe(&grid, row, col, num) {
grid[row][col] = num;
if solve_sudoku(grid) {
return true;
}
// Failed, try again
grid[row][col] = UNASSIGNED;
}
}
false
}
fn used_in_row(grid: &Board, row: usize, num: u8) -> bool {
for col in 0..N {
if grid[row][col] == num {
return true;
}
}
false
}
fn used_in_col(grid: &Board, col: usize, num: u8) -> bool {
for row in grid.iter().take(N) {
if row[col] == num {
return true;
}
}
false
}
fn used_in_box(grid: &Board, row_start: usize, col_start: usize, num: u8) -> bool {
for row in 0..3 {
for col in 0..3 {
if grid[row + row_start][col + col_start] == num {
return true;
}
}
}
false
}
fn is_safe(grid: &Board, row: usize, col: usize, num: u8) -> bool {
!used_in_row(grid, row, num)
&&!used_in_col(grid, col, num)
&&!used_in_box(grid, row - (row % 3), col - (col % 3), num)
}
fn
|
(grid: &Board) -> Option<(usize, usize)> {
for (row, _) in grid.iter().enumerate().take(N) {
for col in 0..N {
if grid[row][col] == UNASSIGNED {
return Some((row, col));
}
}
}
None
}
fn print_grid(grid: &Board) {
for row in grid.iter().take(N) {
for col in 0..N {
print!("{} ", row[col]);
}
println!();
}
}
#[test]
fn test1() {
let mut board = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
];
// Can solve
assert!(solve_sudoku(&mut board));
let solved_board = [
[3, 1, 6, 5, 7, 8, 4, 9, 2],
[5, 2, 9, 1, 3, 4, 7, 6, 8],
[4, 8, 7, 6, 2, 9, 5, 3, 1],
[2, 6, 3, 4, 1, 5, 9, 8, 7],
[9, 7, 4, 8, 6, 3, 1, 2, 5],
[8, 5, 1, 7, 9, 2, 6, 4, 3],
[1, 3, 8, 9, 4, 7, 2, 5, 6],
[6, 9, 2, 3, 5, 1, 8, 7, 4],
[7, 4, 5, 2, 8, 6, 3, 1, 9],
];
assert_eq!(board, solved_board)
}
#[test]
fn test2() {
let mut board = [
[2, 0, 0, 9, 0, 0, 1, 0, 0],
[6, 0, 0, 0, 5, 1, 0, 0, 0],
[0, 9, 5, 2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 0, 0],
[5, 0, 9, 1, 0, 2, 4, 0, 3],
[0, 0, 8, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 5, 6, 3, 0],
[0, 0, 0, 4, 6, 0, 0, 0, 9],
[0, 0, 3, 0, 0, 9, 0, 0, 2],
];
// Can solve
assert!(solve_sudoku(&mut board));
let solved = [
[2, 3, 4, 9, 7, 6, 1, 8, 5],
[6, 8, 7, 3, 5, 1, 9, 2, 4],
[1, 9, 5, 2, 4, 8, 3, 7, 6],
[4, 1, 6, 5, 3, 7, 2, 9, 8],
[5, 7, 9, 1, 8, 2, 4, 6, 3],
[3, 2, 8, 6, 9, 4, 7, 5, 1],
[9, 4, 1, 8, 2, 5, 6, 3, 7],
[7, 5, 2, 4, 6, 3, 8, 1, 9],
[8, 6, 3, 7, 1, 9, 5, 4, 2],
];
assert_eq!(board, solved);
}
|
find_unassigned_cells
|
identifier_name
|
sudoku_solve.rs
|
// Part of Cosmos by OpenGenus
const N: usize = 9;
const UNASSIGNED: u8 = 0;
type Board = [[u8; N]; N];
/// Fills `grid` in place via recursive backtracking.
/// Returns `true` when the board has been completely and validly filled.
fn solve_sudoku(grid: &mut Board) -> bool {
    // No unassigned cell left means the puzzle is solved.
    let (row, col) = match find_unassigned_cells(&grid) {
        Some(cell) => cell,
        None => return true,
    };
    for candidate in 1..=9 {
        if !is_safe(&grid, row, col, candidate) {
            continue;
        }
        grid[row][col] = candidate;
        if solve_sudoku(grid) {
            return true;
        }
        // Dead end: undo the tentative placement and try the next digit.
        grid[row][col] = UNASSIGNED;
    }
    false
}
fn used_in_row(grid: &Board, row: usize, num: u8) -> bool {
for col in 0..N {
if grid[row][col] == num {
return true;
}
}
false
}
fn used_in_col(grid: &Board, col: usize, num: u8) -> bool {
for row in grid.iter().take(N) {
if row[col] == num {
return true;
}
}
false
}
/// Checks whether `num` occurs in the 3x3 box whose top-left corner is
/// (`row_start`, `col_start`).
fn used_in_box(grid: &Board, row_start: usize, col_start: usize, num: u8) -> bool {
    grid[row_start..row_start + 3]
        .iter()
        .any(|row| row[col_start..col_start + 3].contains(&num))
}
/// A digit may be placed at (`row`, `col`) when it collides with nothing in
/// its row, its column, or its enclosing 3x3 box.
fn is_safe(grid: &Board, row: usize, col: usize, num: u8) -> bool {
    // Top-left corner of the 3x3 box containing (row, col).
    let (box_row, box_col) = (row - row % 3, col - col % 3);
    !(used_in_row(grid, row, num)
        || used_in_col(grid, col, num)
        || used_in_box(grid, box_row, box_col, num))
}
/// Locates the first cell still holding `UNASSIGNED`, scanning rows
/// top-to-bottom and columns left-to-right.
/// Returns `None` when the board is completely filled.
fn find_unassigned_cells(grid: &Board) -> Option<(usize, usize)> {
    // `position` replaces the original pattern of enumerating rows but then
    // re-indexing `grid[row][col]`; it also drops the redundant `.take(N)`.
    for (row, cells) in grid.iter().enumerate() {
        if let Some(col) = cells.iter().position(|&cell| cell == UNASSIGNED) {
            return Some((row, col));
        }
    }
    None
}
fn print_grid(grid: &Board) {
for row in grid.iter().take(N) {
for col in 0..N {
print!("{} ", row[col]);
}
println!();
}
}
#[test]
fn test1() {
let mut board = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
];
// Can solve
assert!(solve_sudoku(&mut board));
let solved_board = [
[3, 1, 6, 5, 7, 8, 4, 9, 2],
[5, 2, 9, 1, 3, 4, 7, 6, 8],
[4, 8, 7, 6, 2, 9, 5, 3, 1],
[2, 6, 3, 4, 1, 5, 9, 8, 7],
[9, 7, 4, 8, 6, 3, 1, 2, 5],
[8, 5, 1, 7, 9, 2, 6, 4, 3],
[1, 3, 8, 9, 4, 7, 2, 5, 6],
[6, 9, 2, 3, 5, 1, 8, 7, 4],
[7, 4, 5, 2, 8, 6, 3, 1, 9],
];
assert_eq!(board, solved_board)
}
#[test]
fn test2() {
let mut board = [
[2, 0, 0, 9, 0, 0, 1, 0, 0],
[6, 0, 0, 0, 5, 1, 0, 0, 0],
[0, 9, 5, 2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 0, 0],
[5, 0, 9, 1, 0, 2, 4, 0, 3],
[0, 0, 8, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 5, 6, 3, 0],
[0, 0, 0, 4, 6, 0, 0, 0, 9],
[0, 0, 3, 0, 0, 9, 0, 0, 2],
];
// Can solve
assert!(solve_sudoku(&mut board));
let solved = [
[2, 3, 4, 9, 7, 6, 1, 8, 5],
[6, 8, 7, 3, 5, 1, 9, 2, 4],
[1, 9, 5, 2, 4, 8, 3, 7, 6],
|
[4, 1, 6, 5, 3, 7, 2, 9, 8],
[5, 7, 9, 1, 8, 2, 4, 6, 3],
[3, 2, 8, 6, 9, 4, 7, 5, 1],
[9, 4, 1, 8, 2, 5, 6, 3, 7],
[7, 5, 2, 4, 6, 3, 8, 1, 9],
[8, 6, 3, 7, 1, 9, 5, 4, 2],
];
assert_eq!(board, solved);
}
|
random_line_split
|
|
sudoku_solve.rs
|
// Part of Cosmos by OpenGenus
const N: usize = 9;
const UNASSIGNED: u8 = 0;
type Board = [[u8; N]; N];
fn solve_sudoku(grid: &mut Board) -> bool {
let row;
let col;
if let Some((r, c)) = find_unassigned_cells(&grid) {
row = r;
col = c;
} else
|
for num in 1..=9 {
if is_safe(&grid, row, col, num) {
grid[row][col] = num;
if solve_sudoku(grid) {
return true;
}
// Failed, try again
grid[row][col] = UNASSIGNED;
}
}
false
}
fn used_in_row(grid: &Board, row: usize, num: u8) -> bool {
for col in 0..N {
if grid[row][col] == num {
return true;
}
}
false
}
fn used_in_col(grid: &Board, col: usize, num: u8) -> bool {
for row in grid.iter().take(N) {
if row[col] == num {
return true;
}
}
false
}
fn used_in_box(grid: &Board, row_start: usize, col_start: usize, num: u8) -> bool {
for row in 0..3 {
for col in 0..3 {
if grid[row + row_start][col + col_start] == num {
return true;
}
}
}
false
}
fn is_safe(grid: &Board, row: usize, col: usize, num: u8) -> bool {
!used_in_row(grid, row, num)
&&!used_in_col(grid, col, num)
&&!used_in_box(grid, row - (row % 3), col - (col % 3), num)
}
fn find_unassigned_cells(grid: &Board) -> Option<(usize, usize)> {
for (row, _) in grid.iter().enumerate().take(N) {
for col in 0..N {
if grid[row][col] == UNASSIGNED {
return Some((row, col));
}
}
}
None
}
fn print_grid(grid: &Board) {
for row in grid.iter().take(N) {
for col in 0..N {
print!("{} ", row[col]);
}
println!();
}
}
#[test]
fn test1() {
let mut board = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
];
// Can solve
assert!(solve_sudoku(&mut board));
let solved_board = [
[3, 1, 6, 5, 7, 8, 4, 9, 2],
[5, 2, 9, 1, 3, 4, 7, 6, 8],
[4, 8, 7, 6, 2, 9, 5, 3, 1],
[2, 6, 3, 4, 1, 5, 9, 8, 7],
[9, 7, 4, 8, 6, 3, 1, 2, 5],
[8, 5, 1, 7, 9, 2, 6, 4, 3],
[1, 3, 8, 9, 4, 7, 2, 5, 6],
[6, 9, 2, 3, 5, 1, 8, 7, 4],
[7, 4, 5, 2, 8, 6, 3, 1, 9],
];
assert_eq!(board, solved_board)
}
#[test]
fn test2() {
let mut board = [
[2, 0, 0, 9, 0, 0, 1, 0, 0],
[6, 0, 0, 0, 5, 1, 0, 0, 0],
[0, 9, 5, 2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 0, 0],
[5, 0, 9, 1, 0, 2, 4, 0, 3],
[0, 0, 8, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 5, 6, 3, 0],
[0, 0, 0, 4, 6, 0, 0, 0, 9],
[0, 0, 3, 0, 0, 9, 0, 0, 2],
];
// Can solve
assert!(solve_sudoku(&mut board));
let solved = [
[2, 3, 4, 9, 7, 6, 1, 8, 5],
[6, 8, 7, 3, 5, 1, 9, 2, 4],
[1, 9, 5, 2, 4, 8, 3, 7, 6],
[4, 1, 6, 5, 3, 7, 2, 9, 8],
[5, 7, 9, 1, 8, 2, 4, 6, 3],
[3, 2, 8, 6, 9, 4, 7, 5, 1],
[9, 4, 1, 8, 2, 5, 6, 3, 7],
[7, 5, 2, 4, 6, 3, 8, 1, 9],
[8, 6, 3, 7, 1, 9, 5, 4, 2],
];
assert_eq!(board, solved);
}
|
{
// SOLVED
return true;
}
|
conditional_block
|
basic.rs
|
// run-pass
#![feature(trait_upcasting)]
#![allow(incomplete_features)]
// Base trait of the upcasting chain (Foo <- Bar <- Baz). The default method
// bodies return distinct sentinel values so the tests in `main` can tell
// which vtable slot was dispatched; `a` is overridden by `impl Foo for i32`.
trait Foo: PartialEq<i32> + std::fmt::Debug + Send + Sync {
// Overridden below to return 100 for i32.
fn a(&self) -> i32 {
10
}
fn z(&self) -> i32 {
11
}
fn y(&self) -> i32 {
12
}
}
trait Bar: Foo {
|
fn b(&self) -> i32 {
20
}
fn w(&self) -> i32 {
21
}
}
trait Baz: Bar {
fn c(&self) -> i32 {
30
}
}
impl Foo for i32 {
fn a(&self) -> i32 {
100
}
}
impl Bar for i32 {
fn b(&self) -> i32 {
200
}
}
impl Baz for i32 {
fn c(&self) -> i32 {
300
}
}
// Exercises `feature(trait_upcasting)`: a `&dyn Baz` is coerced to its
// supertraits (`Bar`, `Foo`) and to `Debug`, and each coercion must keep
// dispatching to the same (overridden or default) method implementations.
fn main() {
// Most-derived trait object over the concrete value 1_i32.
let baz: &dyn Baz = &1;
// Upcast to a non-auto supertrait further up the bound list.
let _: &dyn std::fmt::Debug = baz;
assert_eq!(*baz, 1);
// a/b/c are overridden in the i32 impls; z/y/w fall back to defaults.
assert_eq!(baz.a(), 100);
assert_eq!(baz.b(), 200);
assert_eq!(baz.c(), 300);
assert_eq!(baz.z(), 11);
assert_eq!(baz.y(), 12);
assert_eq!(baz.w(), 21);
// Baz -> Bar upcast: Baz-only method `c` is no longer callable, rest unchanged.
let bar: &dyn Bar = baz;
let _: &dyn std::fmt::Debug = bar;
assert_eq!(*bar, 1);
assert_eq!(bar.a(), 100);
assert_eq!(bar.b(), 200);
assert_eq!(bar.z(), 11);
assert_eq!(bar.y(), 12);
assert_eq!(bar.w(), 21);
// Baz -> Foo upcast (skipping Bar).
let foo: &dyn Foo = baz;
let _: &dyn std::fmt::Debug = foo;
assert_eq!(*foo, 1);
assert_eq!(foo.a(), 100);
assert_eq!(foo.z(), 11);
assert_eq!(foo.y(), 12);
// Bar -> Foo upcast.
let foo: &dyn Foo = bar;
let _: &dyn std::fmt::Debug = foo;
assert_eq!(*foo, 1);
assert_eq!(foo.a(), 100);
assert_eq!(foo.z(), 11);
assert_eq!(foo.y(), 12);
}
|
random_line_split
|
|
basic.rs
|
// run-pass
#![feature(trait_upcasting)]
#![allow(incomplete_features)]
trait Foo: PartialEq<i32> + std::fmt::Debug + Send + Sync {
fn a(&self) -> i32 {
10
}
fn z(&self) -> i32
|
fn y(&self) -> i32 {
12
}
}
trait Bar: Foo {
fn b(&self) -> i32 {
20
}
fn w(&self) -> i32 {
21
}
}
trait Baz: Bar {
fn c(&self) -> i32 {
30
}
}
impl Foo for i32 {
fn a(&self) -> i32 {
100
}
}
impl Bar for i32 {
fn b(&self) -> i32 {
200
}
}
impl Baz for i32 {
fn c(&self) -> i32 {
300
}
}
fn main() {
let baz: &dyn Baz = &1;
let _: &dyn std::fmt::Debug = baz;
assert_eq!(*baz, 1);
assert_eq!(baz.a(), 100);
assert_eq!(baz.b(), 200);
assert_eq!(baz.c(), 300);
assert_eq!(baz.z(), 11);
assert_eq!(baz.y(), 12);
assert_eq!(baz.w(), 21);
let bar: &dyn Bar = baz;
let _: &dyn std::fmt::Debug = bar;
assert_eq!(*bar, 1);
assert_eq!(bar.a(), 100);
assert_eq!(bar.b(), 200);
assert_eq!(bar.z(), 11);
assert_eq!(bar.y(), 12);
assert_eq!(bar.w(), 21);
let foo: &dyn Foo = baz;
let _: &dyn std::fmt::Debug = foo;
assert_eq!(*foo, 1);
assert_eq!(foo.a(), 100);
assert_eq!(foo.z(), 11);
assert_eq!(foo.y(), 12);
let foo: &dyn Foo = bar;
let _: &dyn std::fmt::Debug = foo;
assert_eq!(*foo, 1);
assert_eq!(foo.a(), 100);
assert_eq!(foo.z(), 11);
assert_eq!(foo.y(), 12);
}
|
{
11
}
|
identifier_body
|
basic.rs
|
// run-pass
#![feature(trait_upcasting)]
#![allow(incomplete_features)]
trait Foo: PartialEq<i32> + std::fmt::Debug + Send + Sync {
fn a(&self) -> i32 {
10
}
fn z(&self) -> i32 {
11
}
fn y(&self) -> i32 {
12
}
}
trait Bar: Foo {
fn b(&self) -> i32 {
20
}
fn w(&self) -> i32 {
21
}
}
trait Baz: Bar {
fn c(&self) -> i32 {
30
}
}
impl Foo for i32 {
fn
|
(&self) -> i32 {
100
}
}
impl Bar for i32 {
fn b(&self) -> i32 {
200
}
}
impl Baz for i32 {
fn c(&self) -> i32 {
300
}
}
fn main() {
let baz: &dyn Baz = &1;
let _: &dyn std::fmt::Debug = baz;
assert_eq!(*baz, 1);
assert_eq!(baz.a(), 100);
assert_eq!(baz.b(), 200);
assert_eq!(baz.c(), 300);
assert_eq!(baz.z(), 11);
assert_eq!(baz.y(), 12);
assert_eq!(baz.w(), 21);
let bar: &dyn Bar = baz;
let _: &dyn std::fmt::Debug = bar;
assert_eq!(*bar, 1);
assert_eq!(bar.a(), 100);
assert_eq!(bar.b(), 200);
assert_eq!(bar.z(), 11);
assert_eq!(bar.y(), 12);
assert_eq!(bar.w(), 21);
let foo: &dyn Foo = baz;
let _: &dyn std::fmt::Debug = foo;
assert_eq!(*foo, 1);
assert_eq!(foo.a(), 100);
assert_eq!(foo.z(), 11);
assert_eq!(foo.y(), 12);
let foo: &dyn Foo = bar;
let _: &dyn std::fmt::Debug = foo;
assert_eq!(*foo, 1);
assert_eq!(foo.a(), 100);
assert_eq!(foo.z(), 11);
assert_eq!(foo.y(), 12);
}
|
a
|
identifier_name
|
main.rs
|
extern crate paint;
use paint::{ Layer, Canvas, Projection, Color, Size };
struct BigWhiteCircle;
impl Layer for BigWhiteCircle {
fn draw(&self, projection: Projection)->Color {
use std::cmp:: { max, min };
let origin_x = std::u32::MAX / 2;
let offset_x = max(projection.x, origin_x) - min(projection.x, origin_x);
let origin_y = std::u32::MAX / 2;
let offset_y = max(projection.y, origin_y) - min(projection.y, origin_y);
let offset_x = offset_x as f64;
let offset_y = offset_y as f64;
let dis = std::num::Float::sqrt(offset_x * offset_x + offset_y * offset_y);
if dis < (std::u32::MAX / 2) as f64 {
Color::rgb(255, 255, 255)
} else {
Color::transparent()
}
}
}
struct Position(f32, f32, f32);
struct Light {
origin: Position,
}
struct Ball {
origin: Position,
radius: f32
}
struct Scene {
light: Light,
ball: Ball,
eye: Position
}
impl Scene {
fn new()->Scene
|
}
struct TwoLayer<U: Layer, V: Layer> {
a: U, b: V
}
impl<U: Layer, V: Layer> Layer for TwoLayer<U, V> {
fn draw(&self, projection: Projection)->Color {
projection.proxy_split(|p|self.a.draw(p), |p|self.b.draw(p))
}
}
fn main() {
let args = std::os::args();
if args.len() == 2 {
match std::path::Path::new_opt(&*args[1]) {
Some(ref path) => {
match std::io::fs::File::create(path) {
Ok(ref mut file) => {
let canvas = Canvas::new(Color::rgb(0, 200, 0), Size::new(260, 520));
let layer = TwoLayer { a: BigWhiteCircle, b: BigWhiteCircle };
canvas.render(layer, file);
},
Err(x)=>println!(" {}", x)
}
},
None =>println!("failed to open path")
}
} else {
println!("Usage: {} file_path \t write a ppm file", &*args[0])
}
}
|
{
let ball = Ball { origin: Position(3f32, 30f32, 3f32), radius: 3f32 };
let light = Light { origin: Position(0f32, 0f32, 0f32) };
let eye = Position(3f32, 0f32, 3f32);
Scene { ball: ball, light: light, eye: eye }
}
|
identifier_body
|
main.rs
|
extern crate paint;
use paint::{ Layer, Canvas, Projection, Color, Size };
struct BigWhiteCircle;
impl Layer for BigWhiteCircle {
fn draw(&self, projection: Projection)->Color {
use std::cmp:: { max, min };
let origin_x = std::u32::MAX / 2;
let offset_x = max(projection.x, origin_x) - min(projection.x, origin_x);
let origin_y = std::u32::MAX / 2;
let offset_y = max(projection.y, origin_y) - min(projection.y, origin_y);
let offset_x = offset_x as f64;
let offset_y = offset_y as f64;
let dis = std::num::Float::sqrt(offset_x * offset_x + offset_y * offset_y);
if dis < (std::u32::MAX / 2) as f64 {
Color::rgb(255, 255, 255)
} else {
Color::transparent()
}
}
}
struct Position(f32, f32, f32);
struct Light {
origin: Position,
}
struct Ball {
origin: Position,
radius: f32
}
struct Scene {
light: Light,
ball: Ball,
eye: Position
}
impl Scene {
fn new()->Scene {
let ball = Ball { origin: Position(3f32, 30f32, 3f32), radius: 3f32 };
let light = Light { origin: Position(0f32, 0f32, 0f32) };
let eye = Position(3f32, 0f32, 3f32);
Scene { ball: ball, light: light, eye: eye }
}
}
struct TwoLayer<U: Layer, V: Layer> {
a: U, b: V
}
impl<U: Layer, V: Layer> Layer for TwoLayer<U, V> {
fn draw(&self, projection: Projection)->Color {
projection.proxy_split(|p|self.a.draw(p), |p|self.b.draw(p))
|
}
fn main() {
let args = std::os::args();
if args.len() == 2 {
match std::path::Path::new_opt(&*args[1]) {
Some(ref path) => {
match std::io::fs::File::create(path) {
Ok(ref mut file) => {
let canvas = Canvas::new(Color::rgb(0, 200, 0), Size::new(260, 520));
let layer = TwoLayer { a: BigWhiteCircle, b: BigWhiteCircle };
canvas.render(layer, file);
},
Err(x)=>println!(" {}", x)
}
},
None =>println!("failed to open path")
}
} else {
println!("Usage: {} file_path \t write a ppm file", &*args[0])
}
}
|
}
|
random_line_split
|
main.rs
|
extern crate paint;
use paint::{ Layer, Canvas, Projection, Color, Size };
struct BigWhiteCircle;
impl Layer for BigWhiteCircle {
fn draw(&self, projection: Projection)->Color {
use std::cmp:: { max, min };
let origin_x = std::u32::MAX / 2;
let offset_x = max(projection.x, origin_x) - min(projection.x, origin_x);
let origin_y = std::u32::MAX / 2;
let offset_y = max(projection.y, origin_y) - min(projection.y, origin_y);
let offset_x = offset_x as f64;
let offset_y = offset_y as f64;
let dis = std::num::Float::sqrt(offset_x * offset_x + offset_y * offset_y);
if dis < (std::u32::MAX / 2) as f64 {
Color::rgb(255, 255, 255)
} else {
Color::transparent()
}
}
}
struct Position(f32, f32, f32);
struct Light {
origin: Position,
}
struct Ball {
origin: Position,
radius: f32
}
struct Scene {
light: Light,
ball: Ball,
eye: Position
}
impl Scene {
fn new()->Scene {
let ball = Ball { origin: Position(3f32, 30f32, 3f32), radius: 3f32 };
let light = Light { origin: Position(0f32, 0f32, 0f32) };
let eye = Position(3f32, 0f32, 3f32);
Scene { ball: ball, light: light, eye: eye }
}
}
struct TwoLayer<U: Layer, V: Layer> {
a: U, b: V
}
impl<U: Layer, V: Layer> Layer for TwoLayer<U, V> {
fn draw(&self, projection: Projection)->Color {
projection.proxy_split(|p|self.a.draw(p), |p|self.b.draw(p))
}
}
fn main() {
let args = std::os::args();
if args.len() == 2 {
match std::path::Path::new_opt(&*args[1]) {
Some(ref path) => {
match std::io::fs::File::create(path) {
Ok(ref mut file) =>
|
,
Err(x)=>println!(" {}", x)
}
},
None =>println!("failed to open path")
}
} else {
println!("Usage: {} file_path \t write a ppm file", &*args[0])
}
}
|
{
let canvas = Canvas::new(Color::rgb(0, 200, 0), Size::new(260, 520));
let layer = TwoLayer { a: BigWhiteCircle, b: BigWhiteCircle };
canvas.render(layer, file);
}
|
conditional_block
|
main.rs
|
extern crate paint;
use paint::{ Layer, Canvas, Projection, Color, Size };
struct BigWhiteCircle;
impl Layer for BigWhiteCircle {
fn draw(&self, projection: Projection)->Color {
use std::cmp:: { max, min };
let origin_x = std::u32::MAX / 2;
let offset_x = max(projection.x, origin_x) - min(projection.x, origin_x);
let origin_y = std::u32::MAX / 2;
let offset_y = max(projection.y, origin_y) - min(projection.y, origin_y);
let offset_x = offset_x as f64;
let offset_y = offset_y as f64;
let dis = std::num::Float::sqrt(offset_x * offset_x + offset_y * offset_y);
if dis < (std::u32::MAX / 2) as f64 {
Color::rgb(255, 255, 255)
} else {
Color::transparent()
}
}
}
struct Position(f32, f32, f32);
struct Light {
origin: Position,
}
struct Ball {
origin: Position,
radius: f32
}
struct Scene {
light: Light,
ball: Ball,
eye: Position
}
impl Scene {
fn
|
()->Scene {
let ball = Ball { origin: Position(3f32, 30f32, 3f32), radius: 3f32 };
let light = Light { origin: Position(0f32, 0f32, 0f32) };
let eye = Position(3f32, 0f32, 3f32);
Scene { ball: ball, light: light, eye: eye }
}
}
struct TwoLayer<U: Layer, V: Layer> {
a: U, b: V
}
impl<U: Layer, V: Layer> Layer for TwoLayer<U, V> {
fn draw(&self, projection: Projection)->Color {
projection.proxy_split(|p|self.a.draw(p), |p|self.b.draw(p))
}
}
fn main() {
let args = std::os::args();
if args.len() == 2 {
match std::path::Path::new_opt(&*args[1]) {
Some(ref path) => {
match std::io::fs::File::create(path) {
Ok(ref mut file) => {
let canvas = Canvas::new(Color::rgb(0, 200, 0), Size::new(260, 520));
let layer = TwoLayer { a: BigWhiteCircle, b: BigWhiteCircle };
canvas.render(layer, file);
},
Err(x)=>println!(" {}", x)
}
},
None =>println!("failed to open path")
}
} else {
println!("Usage: {} file_path \t write a ppm file", &*args[0])
}
}
|
new
|
identifier_name
|
common.rs
|
use std::str::FromStr;
use std::fmt::{Show, Formatter, Error};
use std::error;
use simple::parse;
#[deriving(Show)]
pub struct AddrError {
pub msg: String
}
impl error::Error for AddrError {
fn
|
(&self) -> &str {
self.msg.as_slice()
}
}
#[deriving(PartialEq)]
pub struct EmailAddress {
pub local: String,
pub domain: String,
}
impl EmailAddress {
pub fn new(string: &str) -> EmailAddress {
parse(string).unwrap()
}
}
impl Show for EmailAddress {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{}@{}", self.local, self.domain)
}
}
impl FromStr for EmailAddress {
fn from_str(string: &str) -> Option<EmailAddress> {
match parse(string) {
Ok(s) => Some(s),
Err(_) => None,
}
}
}
|
description
|
identifier_name
|
common.rs
|
use std::str::FromStr;
use std::fmt::{Show, Formatter, Error};
use std::error;
use simple::parse;
#[deriving(Show)]
pub struct AddrError {
pub msg: String
}
impl error::Error for AddrError {
fn description(&self) -> &str {
self.msg.as_slice()
}
}
#[deriving(PartialEq)]
pub struct EmailAddress {
pub local: String,
pub domain: String,
}
impl EmailAddress {
pub fn new(string: &str) -> EmailAddress {
parse(string).unwrap()
}
}
impl Show for EmailAddress {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{}@{}", self.local, self.domain)
}
}
impl FromStr for EmailAddress {
|
fn from_str(string: &str) -> Option<EmailAddress> {
match parse(string) {
Ok(s) => Some(s),
Err(_) => None,
}
}
}
|
random_line_split
|
|
common.rs
|
use std::str::FromStr;
use std::fmt::{Show, Formatter, Error};
use std::error;
use simple::parse;
#[deriving(Show)]
pub struct AddrError {
pub msg: String
}
impl error::Error for AddrError {
fn description(&self) -> &str {
self.msg.as_slice()
}
}
#[deriving(PartialEq)]
pub struct EmailAddress {
pub local: String,
pub domain: String,
}
impl EmailAddress {
pub fn new(string: &str) -> EmailAddress {
parse(string).unwrap()
}
}
impl Show for EmailAddress {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error>
|
}
impl FromStr for EmailAddress {
fn from_str(string: &str) -> Option<EmailAddress> {
match parse(string) {
Ok(s) => Some(s),
Err(_) => None,
}
}
}
|
{
write!(f, "{}@{}", self.local, self.domain)
}
|
identifier_body
|
extents.rs
|
use std::convert::TryFrom;
use std::io;
use anyhow::ensure;
use anyhow::Error;
use positioned_io::ReadAt;
use crate::assumption_failed;
use crate::read_le16;
use crate::read_le32;
#[derive(Debug)]
struct Extent {
/// The docs call this 'block' (like everything else). I've invented a different name.
part: u32,
start: u64,
len: u16,
}
pub struct TreeReader<R> {
inner: R,
pos: u64,
len: u64,
block_size: u32,
extents: Vec<Extent>,
}
impl<R> TreeReader<R>
where
R: ReadAt,
{
pub fn new(
inner: R,
block_size: u32,
size: u64,
core: [u8; crate::INODE_CORE_SIZE],
checksum_prefix: Option<u32>,
) -> Result<TreeReader<R>, Error> {
let extents = load_extent_tree(
&mut |block| crate::load_disc_bytes(&inner, block_size, block),
core,
checksum_prefix,
)?;
Ok(TreeReader::create(inner, block_size, size, extents))
}
fn create(inner: R, block_size: u32, size: u64, extents: Vec<Extent>) -> TreeReader<R> {
TreeReader {
pos: 0,
len: size,
inner,
extents,
block_size,
}
}
pub fn into_inner(self) -> R {
self.inner
}
}
enum FoundPart<'a> {
Actual(&'a Extent),
Sparse(u32),
}
fn find_part(part: u32, extents: &[Extent]) -> FoundPart {
for extent in extents {
if part < extent.part {
// we've gone past it
return FoundPart::Sparse(extent.part - part);
}
if part >= extent.part && part < extent.part + u32::from(extent.len) {
// we're inside it
return FoundPart::Actual(extent);
}
}
FoundPart::Sparse(std::u32::MAX)
}
impl<R> io::Read for TreeReader<R>
where
R: ReadAt,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if buf.is_empty() {
return Ok(0);
}
let block_size = u64::from(self.block_size);
let wanted_block = u32::try_from(self.pos / block_size).unwrap();
let read_of_this_block = self.pos % block_size;
match find_part(wanted_block, &self.extents) {
FoundPart::Actual(extent) => {
let bytes_through_extent =
(block_size * u64::from(wanted_block - extent.part)) + read_of_this_block;
let remaining_bytes_in_extent =
(u64::from(extent.len) * block_size) - bytes_through_extent;
let to_read = std::cmp::min(remaining_bytes_in_extent, buf.len() as u64) as usize;
let to_read = std::cmp::min(to_read as u64, self.len - self.pos) as usize;
let offset = extent.start * block_size + bytes_through_extent;
let read = self.inner.read_at(offset, &mut buf[0..to_read])?;
self.pos += u64::try_from(read).expect("infallible u64 conversion");
Ok(read)
}
FoundPart::Sparse(max) => {
let max_bytes = u64::from(max) * block_size;
let read = std::cmp::min(max_bytes, buf.len() as u64) as usize;
let read = std::cmp::min(read as u64, self.len - self.pos) as usize;
zero(&mut buf[0..read]);
self.pos += u64::try_from(read).expect("infallible u64 conversion");
Ok(read)
}
}
}
}
impl<R> io::Seek for TreeReader<R>
where
R: ReadAt,
{
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
match pos {
io::SeekFrom::Start(set) => self.pos = set,
io::SeekFrom::Current(diff) => self.pos = (self.pos as i64 + diff) as u64,
io::SeekFrom::End(set) => {
assert!(set >= 0);
self.pos = self.len - u64::try_from(set).unwrap()
}
}
assert!(self.pos <= self.len);
Ok(self.pos)
}
}
fn add_found_extents<F>(
load_block: &mut F,
data: &[u8],
expected_depth: u16,
extents: &mut Vec<Extent>,
checksum_prefix: Option<u32>,
first_level: bool,
) -> Result<(), Error>
where
F: FnMut(u64) -> Result<Vec<u8>, Error>,
{
ensure!(
0x0a == data[0] && 0xf3 == data[1],
assumption_failed("invalid extent magic")
);
let extent_entries = read_le16(&data[2..]);
// 4..: max; doesn't seem to be useful during read
let depth = read_le16(&data[6..]);
// 8..: generation, not used in standard ext4
ensure!(
expected_depth == depth,
assumption_failed(format!("depth incorrect: {}!= {}", expected_depth, depth))
);
if!first_level && checksum_prefix.is_some() {
let end_of_entries = data.len() - 4;
let on_disc = read_le32(&data[end_of_entries..(end_of_entries + 4)]);
let computed =
crate::parse::ext4_style_crc32c_le(checksum_prefix.unwrap(), &data[..end_of_entries]);
ensure!(
computed == on_disc,
assumption_failed(format!(
"extent checksum mismatch: {:08x}!= {:08x} @ {}",
on_disc,
computed,
data.len()
),)
);
}
if 0 == depth {
for en in 0..extent_entries {
let raw_extent = &data[12 + usize::from(en) * 12..];
let ee_block = read_le32(raw_extent);
let ee_len = read_le16(&raw_extent[4..]);
let ee_start_hi = read_le16(&raw_extent[6..]);
let ee_start_lo = read_le32(&raw_extent[8..]);
let ee_start = u64::from(ee_start_lo) + 0x1000 * u64::from(ee_start_hi);
extents.push(Extent {
part: ee_block,
start: ee_start,
len: ee_len,
});
}
return Ok(());
}
for en in 0..extent_entries {
let extent_idx = &data[12 + usize::from(en) * 12..];
// let ei_block = as_u32(extent_idx);
let ei_leaf_lo = read_le32(&extent_idx[4..]);
let ei_leaf_hi = read_le16(&extent_idx[8..]);
let ee_leaf: u64 = u64::from(ei_leaf_lo) + (u64::from(ei_leaf_hi) << 32);
let data = load_block(ee_leaf)?;
add_found_extents(
load_block,
&data,
depth - 1,
extents,
checksum_prefix,
false,
)?;
}
Ok(())
}
fn load_extent_tree<F>(
load_block: &mut F,
core: [u8; crate::INODE_CORE_SIZE],
checksum_prefix: Option<u32>,
) -> Result<Vec<Extent>, Error>
where
F: FnMut(u64) -> Result<Vec<u8>, Error>,
{
ensure!(
0x0a == core[0] && 0xf3 == core[1],
assumption_failed("invalid extent magic")
);
let extent_entries = read_le16(&core[2..]);
// 4..: max; doesn't seem to be useful during read
let depth = read_le16(&core[6..]);
ensure!(
depth <= 5,
assumption_failed(format!("initial depth too high: {}", depth))
);
let mut extents = Vec::with_capacity(usize::from(extent_entries) + usize::from(depth) * 200);
add_found_extents(
load_block,
&core,
depth,
&mut extents,
checksum_prefix,
true,
)?;
extents.sort_by_key(|e| e.part);
Ok(extents)
}
fn zero(buf: &mut [u8]) {
unsafe { std::ptr::write_bytes(buf.as_mut_ptr(), 0u8, buf.len()) }
}
#[cfg(test)]
mod tests {
use std::convert::TryFrom;
use std::io::Read;
use crate::extents::Extent;
use crate::extents::TreeReader;
#[test]
fn simple_tree() {
let data = (0..255u8).collect::<Vec<u8>>();
let size = 4 + 4 * 2;
let mut reader = TreeReader::create(
data,
4,
u64::try_from(size).expect("infallible u64 conversion"),
vec![
Extent {
part: 0,
start: 10,
len: 1,
},
Extent {
part: 1,
start: 20,
len: 2,
},
],
);
let mut res = Vec::new();
assert_eq!(size, reader.read_to_end(&mut res).unwrap());
|
assert_eq!(vec![40, 41, 42, 43, 80, 81, 82, 83, 84, 85, 86, 87], res);
}
#[test]
fn zero_buf() {
let mut buf = [7u8; 5];
assert_eq!(7, buf[0]);
crate::extents::zero(&mut buf);
for i in &buf {
assert_eq!(0, *i);
}
}
}
|
random_line_split
|
|
extents.rs
|
use std::convert::TryFrom;
use std::io;
use anyhow::ensure;
use anyhow::Error;
use positioned_io::ReadAt;
use crate::assumption_failed;
use crate::read_le16;
use crate::read_le32;
#[derive(Debug)]
struct Extent {
/// The docs call this 'block' (like everything else). I've invented a different name.
part: u32,
start: u64,
len: u16,
}
pub struct TreeReader<R> {
inner: R,
pos: u64,
len: u64,
block_size: u32,
extents: Vec<Extent>,
}
impl<R> TreeReader<R>
where
R: ReadAt,
{
pub fn new(
inner: R,
block_size: u32,
size: u64,
core: [u8; crate::INODE_CORE_SIZE],
checksum_prefix: Option<u32>,
) -> Result<TreeReader<R>, Error> {
let extents = load_extent_tree(
&mut |block| crate::load_disc_bytes(&inner, block_size, block),
core,
checksum_prefix,
)?;
Ok(TreeReader::create(inner, block_size, size, extents))
}
fn create(inner: R, block_size: u32, size: u64, extents: Vec<Extent>) -> TreeReader<R> {
TreeReader {
pos: 0,
len: size,
inner,
extents,
block_size,
}
}
pub fn into_inner(self) -> R {
self.inner
}
}
enum FoundPart<'a> {
Actual(&'a Extent),
Sparse(u32),
}
fn find_part(part: u32, extents: &[Extent]) -> FoundPart {
for extent in extents {
if part < extent.part {
// we've gone past it
return FoundPart::Sparse(extent.part - part);
}
if part >= extent.part && part < extent.part + u32::from(extent.len) {
// we're inside it
return FoundPart::Actual(extent);
}
}
FoundPart::Sparse(std::u32::MAX)
}
impl<R> io::Read for TreeReader<R>
where
R: ReadAt,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if buf.is_empty() {
return Ok(0);
}
let block_size = u64::from(self.block_size);
let wanted_block = u32::try_from(self.pos / block_size).unwrap();
let read_of_this_block = self.pos % block_size;
match find_part(wanted_block, &self.extents) {
FoundPart::Actual(extent) =>
|
FoundPart::Sparse(max) => {
let max_bytes = u64::from(max) * block_size;
let read = std::cmp::min(max_bytes, buf.len() as u64) as usize;
let read = std::cmp::min(read as u64, self.len - self.pos) as usize;
zero(&mut buf[0..read]);
self.pos += u64::try_from(read).expect("infallible u64 conversion");
Ok(read)
}
}
}
}
impl<R> io::Seek for TreeReader<R>
where
R: ReadAt,
{
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
match pos {
io::SeekFrom::Start(set) => self.pos = set,
io::SeekFrom::Current(diff) => self.pos = (self.pos as i64 + diff) as u64,
io::SeekFrom::End(set) => {
assert!(set >= 0);
self.pos = self.len - u64::try_from(set).unwrap()
}
}
assert!(self.pos <= self.len);
Ok(self.pos)
}
}
fn add_found_extents<F>(
load_block: &mut F,
data: &[u8],
expected_depth: u16,
extents: &mut Vec<Extent>,
checksum_prefix: Option<u32>,
first_level: bool,
) -> Result<(), Error>
where
F: FnMut(u64) -> Result<Vec<u8>, Error>,
{
ensure!(
0x0a == data[0] && 0xf3 == data[1],
assumption_failed("invalid extent magic")
);
let extent_entries = read_le16(&data[2..]);
// 4..: max; doesn't seem to be useful during read
let depth = read_le16(&data[6..]);
// 8..: generation, not used in standard ext4
ensure!(
expected_depth == depth,
assumption_failed(format!("depth incorrect: {}!= {}", expected_depth, depth))
);
if!first_level && checksum_prefix.is_some() {
let end_of_entries = data.len() - 4;
let on_disc = read_le32(&data[end_of_entries..(end_of_entries + 4)]);
let computed =
crate::parse::ext4_style_crc32c_le(checksum_prefix.unwrap(), &data[..end_of_entries]);
ensure!(
computed == on_disc,
assumption_failed(format!(
"extent checksum mismatch: {:08x}!= {:08x} @ {}",
on_disc,
computed,
data.len()
),)
);
}
if 0 == depth {
for en in 0..extent_entries {
let raw_extent = &data[12 + usize::from(en) * 12..];
let ee_block = read_le32(raw_extent);
let ee_len = read_le16(&raw_extent[4..]);
let ee_start_hi = read_le16(&raw_extent[6..]);
let ee_start_lo = read_le32(&raw_extent[8..]);
let ee_start = u64::from(ee_start_lo) + 0x1000 * u64::from(ee_start_hi);
extents.push(Extent {
part: ee_block,
start: ee_start,
len: ee_len,
});
}
return Ok(());
}
for en in 0..extent_entries {
let extent_idx = &data[12 + usize::from(en) * 12..];
// let ei_block = as_u32(extent_idx);
let ei_leaf_lo = read_le32(&extent_idx[4..]);
let ei_leaf_hi = read_le16(&extent_idx[8..]);
let ee_leaf: u64 = u64::from(ei_leaf_lo) + (u64::from(ei_leaf_hi) << 32);
let data = load_block(ee_leaf)?;
add_found_extents(
load_block,
&data,
depth - 1,
extents,
checksum_prefix,
false,
)?;
}
Ok(())
}
fn load_extent_tree<F>(
load_block: &mut F,
core: [u8; crate::INODE_CORE_SIZE],
checksum_prefix: Option<u32>,
) -> Result<Vec<Extent>, Error>
where
F: FnMut(u64) -> Result<Vec<u8>, Error>,
{
ensure!(
0x0a == core[0] && 0xf3 == core[1],
assumption_failed("invalid extent magic")
);
let extent_entries = read_le16(&core[2..]);
// 4..: max; doesn't seem to be useful during read
let depth = read_le16(&core[6..]);
ensure!(
depth <= 5,
assumption_failed(format!("initial depth too high: {}", depth))
);
let mut extents = Vec::with_capacity(usize::from(extent_entries) + usize::from(depth) * 200);
add_found_extents(
load_block,
&core,
depth,
&mut extents,
checksum_prefix,
true,
)?;
extents.sort_by_key(|e| e.part);
Ok(extents)
}
fn zero(buf: &mut [u8]) {
unsafe { std::ptr::write_bytes(buf.as_mut_ptr(), 0u8, buf.len()) }
}
#[cfg(test)]
mod tests {
use std::convert::TryFrom;
use std::io::Read;
use crate::extents::Extent;
use crate::extents::TreeReader;
#[test]
fn simple_tree() {
let data = (0..255u8).collect::<Vec<u8>>();
let size = 4 + 4 * 2;
let mut reader = TreeReader::create(
data,
4,
u64::try_from(size).expect("infallible u64 conversion"),
vec![
Extent {
part: 0,
start: 10,
len: 1,
},
Extent {
part: 1,
start: 20,
len: 2,
},
],
);
let mut res = Vec::new();
assert_eq!(size, reader.read_to_end(&mut res).unwrap());
assert_eq!(vec![40, 41, 42, 43, 80, 81, 82, 83, 84, 85, 86, 87], res);
}
#[test]
fn zero_buf() {
let mut buf = [7u8; 5];
assert_eq!(7, buf[0]);
crate::extents::zero(&mut buf);
for i in &buf {
assert_eq!(0, *i);
}
}
}
|
{
let bytes_through_extent =
(block_size * u64::from(wanted_block - extent.part)) + read_of_this_block;
let remaining_bytes_in_extent =
(u64::from(extent.len) * block_size) - bytes_through_extent;
let to_read = std::cmp::min(remaining_bytes_in_extent, buf.len() as u64) as usize;
let to_read = std::cmp::min(to_read as u64, self.len - self.pos) as usize;
let offset = extent.start * block_size + bytes_through_extent;
let read = self.inner.read_at(offset, &mut buf[0..to_read])?;
self.pos += u64::try_from(read).expect("infallible u64 conversion");
Ok(read)
}
|
conditional_block
|
extents.rs
|
use std::convert::TryFrom;
use std::io;
use anyhow::ensure;
use anyhow::Error;
use positioned_io::ReadAt;
use crate::assumption_failed;
use crate::read_le16;
use crate::read_le32;
#[derive(Debug)]
struct Extent {
/// The docs call this 'block' (like everything else). I've invented a different name.
part: u32,
start: u64,
len: u16,
}
pub struct TreeReader<R> {
inner: R,
pos: u64,
len: u64,
block_size: u32,
extents: Vec<Extent>,
}
impl<R> TreeReader<R>
where
R: ReadAt,
{
pub fn new(
inner: R,
block_size: u32,
size: u64,
core: [u8; crate::INODE_CORE_SIZE],
checksum_prefix: Option<u32>,
) -> Result<TreeReader<R>, Error> {
let extents = load_extent_tree(
&mut |block| crate::load_disc_bytes(&inner, block_size, block),
core,
checksum_prefix,
)?;
Ok(TreeReader::create(inner, block_size, size, extents))
}
fn create(inner: R, block_size: u32, size: u64, extents: Vec<Extent>) -> TreeReader<R> {
TreeReader {
pos: 0,
len: size,
inner,
extents,
block_size,
}
}
pub fn into_inner(self) -> R {
self.inner
}
}
enum FoundPart<'a> {
Actual(&'a Extent),
Sparse(u32),
}
fn find_part(part: u32, extents: &[Extent]) -> FoundPart {
for extent in extents {
if part < extent.part {
// we've gone past it
return FoundPart::Sparse(extent.part - part);
}
if part >= extent.part && part < extent.part + u32::from(extent.len) {
// we're inside it
return FoundPart::Actual(extent);
}
}
FoundPart::Sparse(std::u32::MAX)
}
impl<R> io::Read for TreeReader<R>
where
R: ReadAt,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if buf.is_empty() {
return Ok(0);
}
let block_size = u64::from(self.block_size);
let wanted_block = u32::try_from(self.pos / block_size).unwrap();
let read_of_this_block = self.pos % block_size;
match find_part(wanted_block, &self.extents) {
FoundPart::Actual(extent) => {
let bytes_through_extent =
(block_size * u64::from(wanted_block - extent.part)) + read_of_this_block;
let remaining_bytes_in_extent =
(u64::from(extent.len) * block_size) - bytes_through_extent;
let to_read = std::cmp::min(remaining_bytes_in_extent, buf.len() as u64) as usize;
let to_read = std::cmp::min(to_read as u64, self.len - self.pos) as usize;
let offset = extent.start * block_size + bytes_through_extent;
let read = self.inner.read_at(offset, &mut buf[0..to_read])?;
self.pos += u64::try_from(read).expect("infallible u64 conversion");
Ok(read)
}
FoundPart::Sparse(max) => {
let max_bytes = u64::from(max) * block_size;
let read = std::cmp::min(max_bytes, buf.len() as u64) as usize;
let read = std::cmp::min(read as u64, self.len - self.pos) as usize;
zero(&mut buf[0..read]);
self.pos += u64::try_from(read).expect("infallible u64 conversion");
Ok(read)
}
}
}
}
impl<R> io::Seek for TreeReader<R>
where
R: ReadAt,
{
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
match pos {
io::SeekFrom::Start(set) => self.pos = set,
io::SeekFrom::Current(diff) => self.pos = (self.pos as i64 + diff) as u64,
io::SeekFrom::End(set) => {
assert!(set >= 0);
self.pos = self.len - u64::try_from(set).unwrap()
}
}
assert!(self.pos <= self.len);
Ok(self.pos)
}
}
fn add_found_extents<F>(
load_block: &mut F,
data: &[u8],
expected_depth: u16,
extents: &mut Vec<Extent>,
checksum_prefix: Option<u32>,
first_level: bool,
) -> Result<(), Error>
where
F: FnMut(u64) -> Result<Vec<u8>, Error>,
{
ensure!(
0x0a == data[0] && 0xf3 == data[1],
assumption_failed("invalid extent magic")
);
let extent_entries = read_le16(&data[2..]);
// 4..: max; doesn't seem to be useful during read
let depth = read_le16(&data[6..]);
// 8..: generation, not used in standard ext4
ensure!(
expected_depth == depth,
assumption_failed(format!("depth incorrect: {}!= {}", expected_depth, depth))
);
if!first_level && checksum_prefix.is_some() {
let end_of_entries = data.len() - 4;
let on_disc = read_le32(&data[end_of_entries..(end_of_entries + 4)]);
let computed =
crate::parse::ext4_style_crc32c_le(checksum_prefix.unwrap(), &data[..end_of_entries]);
ensure!(
computed == on_disc,
assumption_failed(format!(
"extent checksum mismatch: {:08x}!= {:08x} @ {}",
on_disc,
computed,
data.len()
),)
);
}
if 0 == depth {
for en in 0..extent_entries {
let raw_extent = &data[12 + usize::from(en) * 12..];
let ee_block = read_le32(raw_extent);
let ee_len = read_le16(&raw_extent[4..]);
let ee_start_hi = read_le16(&raw_extent[6..]);
let ee_start_lo = read_le32(&raw_extent[8..]);
let ee_start = u64::from(ee_start_lo) + 0x1000 * u64::from(ee_start_hi);
extents.push(Extent {
part: ee_block,
start: ee_start,
len: ee_len,
});
}
return Ok(());
}
for en in 0..extent_entries {
let extent_idx = &data[12 + usize::from(en) * 12..];
// let ei_block = as_u32(extent_idx);
let ei_leaf_lo = read_le32(&extent_idx[4..]);
let ei_leaf_hi = read_le16(&extent_idx[8..]);
let ee_leaf: u64 = u64::from(ei_leaf_lo) + (u64::from(ei_leaf_hi) << 32);
let data = load_block(ee_leaf)?;
add_found_extents(
load_block,
&data,
depth - 1,
extents,
checksum_prefix,
false,
)?;
}
Ok(())
}
fn load_extent_tree<F>(
load_block: &mut F,
core: [u8; crate::INODE_CORE_SIZE],
checksum_prefix: Option<u32>,
) -> Result<Vec<Extent>, Error>
where
F: FnMut(u64) -> Result<Vec<u8>, Error>,
{
ensure!(
0x0a == core[0] && 0xf3 == core[1],
assumption_failed("invalid extent magic")
);
let extent_entries = read_le16(&core[2..]);
// 4..: max; doesn't seem to be useful during read
let depth = read_le16(&core[6..]);
ensure!(
depth <= 5,
assumption_failed(format!("initial depth too high: {}", depth))
);
let mut extents = Vec::with_capacity(usize::from(extent_entries) + usize::from(depth) * 200);
add_found_extents(
load_block,
&core,
depth,
&mut extents,
checksum_prefix,
true,
)?;
extents.sort_by_key(|e| e.part);
Ok(extents)
}
fn
|
(buf: &mut [u8]) {
unsafe { std::ptr::write_bytes(buf.as_mut_ptr(), 0u8, buf.len()) }
}
#[cfg(test)]
mod tests {
use std::convert::TryFrom;
use std::io::Read;
use crate::extents::Extent;
use crate::extents::TreeReader;
#[test]
fn simple_tree() {
let data = (0..255u8).collect::<Vec<u8>>();
let size = 4 + 4 * 2;
let mut reader = TreeReader::create(
data,
4,
u64::try_from(size).expect("infallible u64 conversion"),
vec![
Extent {
part: 0,
start: 10,
len: 1,
},
Extent {
part: 1,
start: 20,
len: 2,
},
],
);
let mut res = Vec::new();
assert_eq!(size, reader.read_to_end(&mut res).unwrap());
assert_eq!(vec![40, 41, 42, 43, 80, 81, 82, 83, 84, 85, 86, 87], res);
}
#[test]
fn zero_buf() {
let mut buf = [7u8; 5];
assert_eq!(7, buf[0]);
crate::extents::zero(&mut buf);
for i in &buf {
assert_eq!(0, *i);
}
}
}
|
zero
|
identifier_name
|
extents.rs
|
use std::convert::TryFrom;
use std::io;
use anyhow::ensure;
use anyhow::Error;
use positioned_io::ReadAt;
use crate::assumption_failed;
use crate::read_le16;
use crate::read_le32;
#[derive(Debug)]
struct Extent {
/// The docs call this 'block' (like everything else). I've invented a different name.
part: u32,
start: u64,
len: u16,
}
pub struct TreeReader<R> {
inner: R,
pos: u64,
len: u64,
block_size: u32,
extents: Vec<Extent>,
}
impl<R> TreeReader<R>
where
R: ReadAt,
{
pub fn new(
inner: R,
block_size: u32,
size: u64,
core: [u8; crate::INODE_CORE_SIZE],
checksum_prefix: Option<u32>,
) -> Result<TreeReader<R>, Error> {
let extents = load_extent_tree(
&mut |block| crate::load_disc_bytes(&inner, block_size, block),
core,
checksum_prefix,
)?;
Ok(TreeReader::create(inner, block_size, size, extents))
}
fn create(inner: R, block_size: u32, size: u64, extents: Vec<Extent>) -> TreeReader<R> {
TreeReader {
pos: 0,
len: size,
inner,
extents,
block_size,
}
}
pub fn into_inner(self) -> R {
self.inner
}
}
enum FoundPart<'a> {
Actual(&'a Extent),
Sparse(u32),
}
fn find_part(part: u32, extents: &[Extent]) -> FoundPart {
for extent in extents {
if part < extent.part {
// we've gone past it
return FoundPart::Sparse(extent.part - part);
}
if part >= extent.part && part < extent.part + u32::from(extent.len) {
// we're inside it
return FoundPart::Actual(extent);
}
}
FoundPart::Sparse(std::u32::MAX)
}
impl<R> io::Read for TreeReader<R>
where
R: ReadAt,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize>
|
self.pos += u64::try_from(read).expect("infallible u64 conversion");
Ok(read)
}
FoundPart::Sparse(max) => {
let max_bytes = u64::from(max) * block_size;
let read = std::cmp::min(max_bytes, buf.len() as u64) as usize;
let read = std::cmp::min(read as u64, self.len - self.pos) as usize;
zero(&mut buf[0..read]);
self.pos += u64::try_from(read).expect("infallible u64 conversion");
Ok(read)
}
}
}
}
impl<R> io::Seek for TreeReader<R>
where
R: ReadAt,
{
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
match pos {
io::SeekFrom::Start(set) => self.pos = set,
io::SeekFrom::Current(diff) => self.pos = (self.pos as i64 + diff) as u64,
io::SeekFrom::End(set) => {
assert!(set >= 0);
self.pos = self.len - u64::try_from(set).unwrap()
}
}
assert!(self.pos <= self.len);
Ok(self.pos)
}
}
fn add_found_extents<F>(
load_block: &mut F,
data: &[u8],
expected_depth: u16,
extents: &mut Vec<Extent>,
checksum_prefix: Option<u32>,
first_level: bool,
) -> Result<(), Error>
where
F: FnMut(u64) -> Result<Vec<u8>, Error>,
{
ensure!(
0x0a == data[0] && 0xf3 == data[1],
assumption_failed("invalid extent magic")
);
let extent_entries = read_le16(&data[2..]);
// 4..: max; doesn't seem to be useful during read
let depth = read_le16(&data[6..]);
// 8..: generation, not used in standard ext4
ensure!(
expected_depth == depth,
assumption_failed(format!("depth incorrect: {}!= {}", expected_depth, depth))
);
if!first_level && checksum_prefix.is_some() {
let end_of_entries = data.len() - 4;
let on_disc = read_le32(&data[end_of_entries..(end_of_entries + 4)]);
let computed =
crate::parse::ext4_style_crc32c_le(checksum_prefix.unwrap(), &data[..end_of_entries]);
ensure!(
computed == on_disc,
assumption_failed(format!(
"extent checksum mismatch: {:08x}!= {:08x} @ {}",
on_disc,
computed,
data.len()
),)
);
}
if 0 == depth {
for en in 0..extent_entries {
let raw_extent = &data[12 + usize::from(en) * 12..];
let ee_block = read_le32(raw_extent);
let ee_len = read_le16(&raw_extent[4..]);
let ee_start_hi = read_le16(&raw_extent[6..]);
let ee_start_lo = read_le32(&raw_extent[8..]);
let ee_start = u64::from(ee_start_lo) + 0x1000 * u64::from(ee_start_hi);
extents.push(Extent {
part: ee_block,
start: ee_start,
len: ee_len,
});
}
return Ok(());
}
for en in 0..extent_entries {
let extent_idx = &data[12 + usize::from(en) * 12..];
// let ei_block = as_u32(extent_idx);
let ei_leaf_lo = read_le32(&extent_idx[4..]);
let ei_leaf_hi = read_le16(&extent_idx[8..]);
let ee_leaf: u64 = u64::from(ei_leaf_lo) + (u64::from(ei_leaf_hi) << 32);
let data = load_block(ee_leaf)?;
add_found_extents(
load_block,
&data,
depth - 1,
extents,
checksum_prefix,
false,
)?;
}
Ok(())
}
fn load_extent_tree<F>(
load_block: &mut F,
core: [u8; crate::INODE_CORE_SIZE],
checksum_prefix: Option<u32>,
) -> Result<Vec<Extent>, Error>
where
F: FnMut(u64) -> Result<Vec<u8>, Error>,
{
ensure!(
0x0a == core[0] && 0xf3 == core[1],
assumption_failed("invalid extent magic")
);
let extent_entries = read_le16(&core[2..]);
// 4..: max; doesn't seem to be useful during read
let depth = read_le16(&core[6..]);
ensure!(
depth <= 5,
assumption_failed(format!("initial depth too high: {}", depth))
);
let mut extents = Vec::with_capacity(usize::from(extent_entries) + usize::from(depth) * 200);
add_found_extents(
load_block,
&core,
depth,
&mut extents,
checksum_prefix,
true,
)?;
extents.sort_by_key(|e| e.part);
Ok(extents)
}
fn zero(buf: &mut [u8]) {
unsafe { std::ptr::write_bytes(buf.as_mut_ptr(), 0u8, buf.len()) }
}
#[cfg(test)]
mod tests {
use std::convert::TryFrom;
use std::io::Read;
use crate::extents::Extent;
use crate::extents::TreeReader;
#[test]
fn simple_tree() {
let data = (0..255u8).collect::<Vec<u8>>();
let size = 4 + 4 * 2;
let mut reader = TreeReader::create(
data,
4,
u64::try_from(size).expect("infallible u64 conversion"),
vec![
Extent {
part: 0,
start: 10,
len: 1,
},
Extent {
part: 1,
start: 20,
len: 2,
},
],
);
let mut res = Vec::new();
assert_eq!(size, reader.read_to_end(&mut res).unwrap());
assert_eq!(vec![40, 41, 42, 43, 80, 81, 82, 83, 84, 85, 86, 87], res);
}
#[test]
fn zero_buf() {
let mut buf = [7u8; 5];
assert_eq!(7, buf[0]);
crate::extents::zero(&mut buf);
for i in &buf {
assert_eq!(0, *i);
}
}
}
|
{
if buf.is_empty() {
return Ok(0);
}
let block_size = u64::from(self.block_size);
let wanted_block = u32::try_from(self.pos / block_size).unwrap();
let read_of_this_block = self.pos % block_size;
match find_part(wanted_block, &self.extents) {
FoundPart::Actual(extent) => {
let bytes_through_extent =
(block_size * u64::from(wanted_block - extent.part)) + read_of_this_block;
let remaining_bytes_in_extent =
(u64::from(extent.len) * block_size) - bytes_through_extent;
let to_read = std::cmp::min(remaining_bytes_in_extent, buf.len() as u64) as usize;
let to_read = std::cmp::min(to_read as u64, self.len - self.pos) as usize;
let offset = extent.start * block_size + bytes_through_extent;
let read = self.inner.read_at(offset, &mut buf[0..to_read])?;
|
identifier_body
|
error.rs
|
use postgres::error::Error as PostgresError;
use std::error::Error as StdError;
use std::fmt;
use std::result::Result as StdResult;
pub type Result<T> = StdResult<T, Error>;
#[derive(Debug)]
pub enum Error {
Postgres(PostgresError),
CategoryNameEmpty,
CategoryNotFound,
ColumnNotFound,
FeedNameEmpty,
FeedUrlEmpty,
FeedNotFound,
TokenNotFound,
}
impl From<PostgresError> for Error {
fn from(err: PostgresError) -> Error {
Error::Postgres(err)
}
}
impl StdError for Error {
fn description(&self) -> &str {
|
Error::CategoryNotFound => "Category not found",
Error::ColumnNotFound => "Can't fetch a column",
Error::FeedNameEmpty => "Feed name is empty",
Error::FeedUrlEmpty => "Feed URL is empty",
Error::FeedNotFound => "Feed not found",
Error::TokenNotFound => "Token not found",
}
}
fn cause(&self) -> Option<&StdError> {
match *self {
Error::Postgres(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
write!(out, "{}", self.description())
}
}
|
match *self {
Error::Postgres(ref err) => err.description(),
Error::CategoryNameEmpty => "Category name is empty",
|
random_line_split
|
error.rs
|
use postgres::error::Error as PostgresError;
use std::error::Error as StdError;
use std::fmt;
use std::result::Result as StdResult;
pub type Result<T> = StdResult<T, Error>;
#[derive(Debug)]
pub enum Error {
Postgres(PostgresError),
CategoryNameEmpty,
CategoryNotFound,
ColumnNotFound,
FeedNameEmpty,
FeedUrlEmpty,
FeedNotFound,
TokenNotFound,
}
impl From<PostgresError> for Error {
fn from(err: PostgresError) -> Error {
Error::Postgres(err)
}
}
impl StdError for Error {
fn description(&self) -> &str
|
fn cause(&self) -> Option<&StdError> {
match *self {
Error::Postgres(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
write!(out, "{}", self.description())
}
}
|
{
match *self {
Error::Postgres(ref err) => err.description(),
Error::CategoryNameEmpty => "Category name is empty",
Error::CategoryNotFound => "Category not found",
Error::ColumnNotFound => "Can't fetch a column",
Error::FeedNameEmpty => "Feed name is empty",
Error::FeedUrlEmpty => "Feed URL is empty",
Error::FeedNotFound => "Feed not found",
Error::TokenNotFound => "Token not found",
}
}
|
identifier_body
|
error.rs
|
use postgres::error::Error as PostgresError;
use std::error::Error as StdError;
use std::fmt;
use std::result::Result as StdResult;
pub type Result<T> = StdResult<T, Error>;
#[derive(Debug)]
pub enum Error {
Postgres(PostgresError),
CategoryNameEmpty,
CategoryNotFound,
ColumnNotFound,
FeedNameEmpty,
FeedUrlEmpty,
FeedNotFound,
TokenNotFound,
}
impl From<PostgresError> for Error {
fn from(err: PostgresError) -> Error {
Error::Postgres(err)
}
}
impl StdError for Error {
fn description(&self) -> &str {
match *self {
Error::Postgres(ref err) => err.description(),
Error::CategoryNameEmpty => "Category name is empty",
Error::CategoryNotFound => "Category not found",
Error::ColumnNotFound => "Can't fetch a column",
Error::FeedNameEmpty => "Feed name is empty",
Error::FeedUrlEmpty => "Feed URL is empty",
Error::FeedNotFound => "Feed not found",
Error::TokenNotFound => "Token not found",
}
}
fn
|
(&self) -> Option<&StdError> {
match *self {
Error::Postgres(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
write!(out, "{}", self.description())
}
}
|
cause
|
identifier_name
|
abi.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::Os::*;
pub use self::Abi::*;
pub use self::Architecture::*;
pub use self::AbiArchitecture::*;
use std::fmt;
#[deriving(PartialEq)]
pub enum Os {
OsWindows,
OsMacos,
OsLinux,
OsAndroid,
OsFreebsd,
OsiOS,
OsDragonfly,
}
impl Copy for Os {}
#[deriving(PartialEq, Eq, Hash, Encodable, Decodable, Clone)]
pub enum
|
{
// NB: This ordering MUST match the AbiDatas array below.
// (This is ensured by the test indices_are_correct().)
// Single platform ABIs come first (`for_arch()` relies on this)
Cdecl,
Stdcall,
Fastcall,
Aapcs,
Win64,
// Multiplatform ABIs second
Rust,
C,
System,
RustIntrinsic,
RustCall,
}
impl Copy for Abi {}
#[allow(non_camel_case_types)]
#[deriving(PartialEq)]
pub enum Architecture {
X86,
X86_64,
Arm,
Mips,
Mipsel
}
impl Copy for Architecture {}
pub struct AbiData {
abi: Abi,
// Name of this ABI as we like it called.
name: &'static str,
}
impl Copy for AbiData {}
pub enum AbiArchitecture {
/// Not a real ABI (e.g., intrinsic)
RustArch,
/// An ABI that specifies cross-platform defaults (e.g., "C")
AllArch,
/// Multiple architectures (bitset)
Archs(u32)
}
#[allow(non_upper_case_globals)]
impl Copy for AbiArchitecture {}
#[allow(non_upper_case_globals)]
static AbiDatas: &'static [AbiData] = &[
// Platform-specific ABIs
AbiData {abi: Cdecl, name: "cdecl" },
AbiData {abi: Stdcall, name: "stdcall" },
AbiData {abi: Fastcall, name:"fastcall" },
AbiData {abi: Aapcs, name: "aapcs" },
AbiData {abi: Win64, name: "win64" },
// Cross-platform ABIs
//
// NB: Do not adjust this ordering without
// adjusting the indices below.
AbiData {abi: Rust, name: "Rust" },
AbiData {abi: C, name: "C" },
AbiData {abi: System, name: "system" },
AbiData {abi: RustIntrinsic, name: "rust-intrinsic" },
AbiData {abi: RustCall, name: "rust-call" },
];
/// Returns the ABI with the given name (if any).
pub fn lookup(name: &str) -> Option<Abi> {
AbiDatas.iter().find(|abi_data| name == abi_data.name).map(|&x| x.abi)
}
pub fn all_names() -> Vec<&'static str> {
AbiDatas.iter().map(|d| d.name).collect()
}
impl Abi {
#[inline]
pub fn index(&self) -> uint {
*self as uint
}
#[inline]
pub fn data(&self) -> &'static AbiData {
&AbiDatas[self.index()]
}
pub fn name(&self) -> &'static str {
self.data().name
}
}
impl fmt::Show for Abi {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "\"{}\"", self.name())
}
}
impl fmt::Show for Os {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
OsLinux => "linux".fmt(f),
OsWindows => "windows".fmt(f),
OsMacos => "macos".fmt(f),
OsiOS => "ios".fmt(f),
OsAndroid => "android".fmt(f),
OsFreebsd => "freebsd".fmt(f),
OsDragonfly => "dragonfly".fmt(f)
}
}
}
#[allow(non_snake_case)]
#[test]
fn lookup_Rust() {
let abi = lookup("Rust");
assert!(abi.is_some() && abi.unwrap().data().name == "Rust");
}
#[test]
fn lookup_cdecl() {
let abi = lookup("cdecl");
assert!(abi.is_some() && abi.unwrap().data().name == "cdecl");
}
#[test]
fn lookup_baz() {
let abi = lookup("baz");
assert!(abi.is_none());
}
#[test]
fn indices_are_correct() {
for (i, abi_data) in AbiDatas.iter().enumerate() {
assert_eq!(i, abi_data.abi.index());
}
}
|
Abi
|
identifier_name
|
abi.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::Os::*;
pub use self::Abi::*;
pub use self::Architecture::*;
pub use self::AbiArchitecture::*;
use std::fmt;
#[deriving(PartialEq)]
pub enum Os {
OsWindows,
OsMacos,
OsLinux,
OsAndroid,
OsFreebsd,
OsiOS,
OsDragonfly,
}
impl Copy for Os {}
#[deriving(PartialEq, Eq, Hash, Encodable, Decodable, Clone)]
pub enum Abi {
// NB: This ordering MUST match the AbiDatas array below.
// (This is ensured by the test indices_are_correct().)
// Single platform ABIs come first (`for_arch()` relies on this)
Cdecl,
Stdcall,
Fastcall,
Aapcs,
Win64,
// Multiplatform ABIs second
Rust,
C,
System,
RustIntrinsic,
RustCall,
}
impl Copy for Abi {}
#[allow(non_camel_case_types)]
#[deriving(PartialEq)]
pub enum Architecture {
X86,
|
X86_64,
Arm,
Mips,
Mipsel
}
impl Copy for Architecture {}
pub struct AbiData {
abi: Abi,
// Name of this ABI as we like it called.
name: &'static str,
}
impl Copy for AbiData {}
pub enum AbiArchitecture {
/// Not a real ABI (e.g., intrinsic)
RustArch,
/// An ABI that specifies cross-platform defaults (e.g., "C")
AllArch,
/// Multiple architectures (bitset)
Archs(u32)
}
#[allow(non_upper_case_globals)]
impl Copy for AbiArchitecture {}
#[allow(non_upper_case_globals)]
static AbiDatas: &'static [AbiData] = &[
// Platform-specific ABIs
AbiData {abi: Cdecl, name: "cdecl" },
AbiData {abi: Stdcall, name: "stdcall" },
AbiData {abi: Fastcall, name:"fastcall" },
AbiData {abi: Aapcs, name: "aapcs" },
AbiData {abi: Win64, name: "win64" },
// Cross-platform ABIs
//
// NB: Do not adjust this ordering without
// adjusting the indices below.
AbiData {abi: Rust, name: "Rust" },
AbiData {abi: C, name: "C" },
AbiData {abi: System, name: "system" },
AbiData {abi: RustIntrinsic, name: "rust-intrinsic" },
AbiData {abi: RustCall, name: "rust-call" },
];
/// Returns the ABI with the given name (if any).
pub fn lookup(name: &str) -> Option<Abi> {
AbiDatas.iter().find(|abi_data| name == abi_data.name).map(|&x| x.abi)
}
pub fn all_names() -> Vec<&'static str> {
AbiDatas.iter().map(|d| d.name).collect()
}
impl Abi {
#[inline]
pub fn index(&self) -> uint {
*self as uint
}
#[inline]
pub fn data(&self) -> &'static AbiData {
&AbiDatas[self.index()]
}
pub fn name(&self) -> &'static str {
self.data().name
}
}
impl fmt::Show for Abi {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "\"{}\"", self.name())
}
}
impl fmt::Show for Os {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
OsLinux => "linux".fmt(f),
OsWindows => "windows".fmt(f),
OsMacos => "macos".fmt(f),
OsiOS => "ios".fmt(f),
OsAndroid => "android".fmt(f),
OsFreebsd => "freebsd".fmt(f),
OsDragonfly => "dragonfly".fmt(f)
}
}
}
#[allow(non_snake_case)]
#[test]
fn lookup_Rust() {
let abi = lookup("Rust");
assert!(abi.is_some() && abi.unwrap().data().name == "Rust");
}
#[test]
fn lookup_cdecl() {
let abi = lookup("cdecl");
assert!(abi.is_some() && abi.unwrap().data().name == "cdecl");
}
#[test]
fn lookup_baz() {
let abi = lookup("baz");
assert!(abi.is_none());
}
#[test]
fn indices_are_correct() {
for (i, abi_data) in AbiDatas.iter().enumerate() {
assert_eq!(i, abi_data.abi.index());
}
}
|
random_line_split
|
|
abi.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::Os::*;
pub use self::Abi::*;
pub use self::Architecture::*;
pub use self::AbiArchitecture::*;
use std::fmt;
#[deriving(PartialEq)]
pub enum Os {
OsWindows,
OsMacos,
OsLinux,
OsAndroid,
OsFreebsd,
OsiOS,
OsDragonfly,
}
impl Copy for Os {}
#[deriving(PartialEq, Eq, Hash, Encodable, Decodable, Clone)]
pub enum Abi {
// NB: This ordering MUST match the AbiDatas array below.
// (This is ensured by the test indices_are_correct().)
// Single platform ABIs come first (`for_arch()` relies on this)
Cdecl,
Stdcall,
Fastcall,
Aapcs,
Win64,
// Multiplatform ABIs second
Rust,
C,
System,
RustIntrinsic,
RustCall,
}
impl Copy for Abi {}
#[allow(non_camel_case_types)]
#[deriving(PartialEq)]
pub enum Architecture {
X86,
X86_64,
Arm,
Mips,
Mipsel
}
impl Copy for Architecture {}
pub struct AbiData {
abi: Abi,
// Name of this ABI as we like it called.
name: &'static str,
}
impl Copy for AbiData {}
pub enum AbiArchitecture {
/// Not a real ABI (e.g., intrinsic)
RustArch,
/// An ABI that specifies cross-platform defaults (e.g., "C")
AllArch,
/// Multiple architectures (bitset)
Archs(u32)
}
#[allow(non_upper_case_globals)]
impl Copy for AbiArchitecture {}
#[allow(non_upper_case_globals)]
static AbiDatas: &'static [AbiData] = &[
// Platform-specific ABIs
AbiData {abi: Cdecl, name: "cdecl" },
AbiData {abi: Stdcall, name: "stdcall" },
AbiData {abi: Fastcall, name:"fastcall" },
AbiData {abi: Aapcs, name: "aapcs" },
AbiData {abi: Win64, name: "win64" },
// Cross-platform ABIs
//
// NB: Do not adjust this ordering without
// adjusting the indices below.
AbiData {abi: Rust, name: "Rust" },
AbiData {abi: C, name: "C" },
AbiData {abi: System, name: "system" },
AbiData {abi: RustIntrinsic, name: "rust-intrinsic" },
AbiData {abi: RustCall, name: "rust-call" },
];
/// Returns the ABI with the given name (if any).
pub fn lookup(name: &str) -> Option<Abi> {
AbiDatas.iter().find(|abi_data| name == abi_data.name).map(|&x| x.abi)
}
pub fn all_names() -> Vec<&'static str> {
AbiDatas.iter().map(|d| d.name).collect()
}
impl Abi {
#[inline]
pub fn index(&self) -> uint {
*self as uint
}
#[inline]
pub fn data(&self) -> &'static AbiData {
&AbiDatas[self.index()]
}
pub fn name(&self) -> &'static str
|
}
impl fmt::Show for Abi {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "\"{}\"", self.name())
}
}
impl fmt::Show for Os {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
OsLinux => "linux".fmt(f),
OsWindows => "windows".fmt(f),
OsMacos => "macos".fmt(f),
OsiOS => "ios".fmt(f),
OsAndroid => "android".fmt(f),
OsFreebsd => "freebsd".fmt(f),
OsDragonfly => "dragonfly".fmt(f)
}
}
}
#[allow(non_snake_case)]
#[test]
fn lookup_Rust() {
let abi = lookup("Rust");
assert!(abi.is_some() && abi.unwrap().data().name == "Rust");
}
#[test]
fn lookup_cdecl() {
let abi = lookup("cdecl");
assert!(abi.is_some() && abi.unwrap().data().name == "cdecl");
}
#[test]
fn lookup_baz() {
let abi = lookup("baz");
assert!(abi.is_none());
}
#[test]
fn indices_are_correct() {
for (i, abi_data) in AbiDatas.iter().enumerate() {
assert_eq!(i, abi_data.abi.index());
}
}
|
{
self.data().name
}
|
identifier_body
|
send_serial.rs
|
use printspool_machine::components::ControllerConfig;
use super::*;
use crate::gcode_parser::parse_gcode;
pub fn send_serial(
effects: &mut Vec<Effect>,
gcode_line: GCodeLine,
context: &mut Context,
is_polling: bool,
) {
// Allow for a byte of spacing between receiving and sending over the serial port
// The choice of 1 byte was arbitrary but sending without a spin lock seems to
// loose GCodes.
// let seconds_per_bit: u64 = (60 * 1000 * 1000 / context.baud_rate).into();
// spin_sleep::sleep(Duration::from_micros(8 * seconds_per_bit));
|
.map_err(|err| warn!("{}", err));
let ControllerConfig {
long_running_code_timeout,
fast_code_timeout,
long_running_codes,
blocking_codes,
..
} = &context.controller.model;
let mut duration = fast_code_timeout;
let mut is_blocking = false;
if let Ok(Some((mnemonic, major_number))) = parser_result {
let gcode_macro = format!("{}{}", mnemonic, major_number);
if long_running_codes.contains(&gcode_macro) {
duration = long_running_code_timeout
}
is_blocking = blocking_codes.contains(&gcode_macro)
};
effects.push(Effect::SendSerial(gcode_line));
if is_blocking {
effects.push(
Effect::CancelDelay { key: "tickle_delay".to_string() }
);
} else {
effects.push(
Effect::Delay {
key: "tickle_delay".to_string(),
// TODO: configurable delayFromGreetingToReady
duration: Duration::from_millis(*duration),
event: TickleSerialPort,
},
);
}
}
|
// eprintln!("TX: {:?}", gcode_line.gcode);
context.push_gcode_tx(gcode_line.gcode.clone(), is_polling);
let parser_result = parse_gcode(&gcode_line.gcode, context)
|
random_line_split
|
send_serial.rs
|
use printspool_machine::components::ControllerConfig;
use super::*;
use crate::gcode_parser::parse_gcode;
pub fn send_serial(
effects: &mut Vec<Effect>,
gcode_line: GCodeLine,
context: &mut Context,
is_polling: bool,
)
|
} = &context.controller.model;
let mut duration = fast_code_timeout;
let mut is_blocking = false;
if let Ok(Some((mnemonic, major_number))) = parser_result {
let gcode_macro = format!("{}{}", mnemonic, major_number);
if long_running_codes.contains(&gcode_macro) {
duration = long_running_code_timeout
}
is_blocking = blocking_codes.contains(&gcode_macro)
};
effects.push(Effect::SendSerial(gcode_line));
if is_blocking {
effects.push(
Effect::CancelDelay { key: "tickle_delay".to_string() }
);
} else {
effects.push(
Effect::Delay {
key: "tickle_delay".to_string(),
// TODO: configurable delayFromGreetingToReady
duration: Duration::from_millis(*duration),
event: TickleSerialPort,
},
);
}
}
|
{
// Allow for a byte of spacing between receiving and sending over the serial port
// The choice of 1 byte was arbitrary but sending without a spin lock seems to
// loose GCodes.
// let seconds_per_bit: u64 = (60 * 1000 * 1000 / context.baud_rate).into();
// spin_sleep::sleep(Duration::from_micros(8 * seconds_per_bit));
// eprintln!("TX: {:?}", gcode_line.gcode);
context.push_gcode_tx(gcode_line.gcode.clone(), is_polling);
let parser_result = parse_gcode(&gcode_line.gcode, context)
.map_err(|err| warn!("{}", err));
let ControllerConfig {
long_running_code_timeout,
fast_code_timeout,
long_running_codes,
blocking_codes,
..
|
identifier_body
|
send_serial.rs
|
use printspool_machine::components::ControllerConfig;
use super::*;
use crate::gcode_parser::parse_gcode;
pub fn
|
(
effects: &mut Vec<Effect>,
gcode_line: GCodeLine,
context: &mut Context,
is_polling: bool,
) {
// Allow for a byte of spacing between receiving and sending over the serial port
// The choice of 1 byte was arbitrary but sending without a spin lock seems to
// loose GCodes.
// let seconds_per_bit: u64 = (60 * 1000 * 1000 / context.baud_rate).into();
// spin_sleep::sleep(Duration::from_micros(8 * seconds_per_bit));
// eprintln!("TX: {:?}", gcode_line.gcode);
context.push_gcode_tx(gcode_line.gcode.clone(), is_polling);
let parser_result = parse_gcode(&gcode_line.gcode, context)
.map_err(|err| warn!("{}", err));
let ControllerConfig {
long_running_code_timeout,
fast_code_timeout,
long_running_codes,
blocking_codes,
..
} = &context.controller.model;
let mut duration = fast_code_timeout;
let mut is_blocking = false;
if let Ok(Some((mnemonic, major_number))) = parser_result {
let gcode_macro = format!("{}{}", mnemonic, major_number);
if long_running_codes.contains(&gcode_macro) {
duration = long_running_code_timeout
}
is_blocking = blocking_codes.contains(&gcode_macro)
};
effects.push(Effect::SendSerial(gcode_line));
if is_blocking {
effects.push(
Effect::CancelDelay { key: "tickle_delay".to_string() }
);
} else {
effects.push(
Effect::Delay {
key: "tickle_delay".to_string(),
// TODO: configurable delayFromGreetingToReady
duration: Duration::from_millis(*duration),
event: TickleSerialPort,
},
);
}
}
|
send_serial
|
identifier_name
|
send_serial.rs
|
use printspool_machine::components::ControllerConfig;
use super::*;
use crate::gcode_parser::parse_gcode;
pub fn send_serial(
effects: &mut Vec<Effect>,
gcode_line: GCodeLine,
context: &mut Context,
is_polling: bool,
) {
// Allow for a byte of spacing between receiving and sending over the serial port
// The choice of 1 byte was arbitrary but sending without a spin lock seems to
// loose GCodes.
// let seconds_per_bit: u64 = (60 * 1000 * 1000 / context.baud_rate).into();
// spin_sleep::sleep(Duration::from_micros(8 * seconds_per_bit));
// eprintln!("TX: {:?}", gcode_line.gcode);
context.push_gcode_tx(gcode_line.gcode.clone(), is_polling);
let parser_result = parse_gcode(&gcode_line.gcode, context)
.map_err(|err| warn!("{}", err));
let ControllerConfig {
long_running_code_timeout,
fast_code_timeout,
long_running_codes,
blocking_codes,
..
} = &context.controller.model;
let mut duration = fast_code_timeout;
let mut is_blocking = false;
if let Ok(Some((mnemonic, major_number))) = parser_result
|
;
effects.push(Effect::SendSerial(gcode_line));
if is_blocking {
effects.push(
Effect::CancelDelay { key: "tickle_delay".to_string() }
);
} else {
effects.push(
Effect::Delay {
key: "tickle_delay".to_string(),
// TODO: configurable delayFromGreetingToReady
duration: Duration::from_millis(*duration),
event: TickleSerialPort,
},
);
}
}
|
{
let gcode_macro = format!("{}{}", mnemonic, major_number);
if long_running_codes.contains(&gcode_macro) {
duration = long_running_code_timeout
}
is_blocking = blocking_codes.contains(&gcode_macro)
}
|
conditional_block
|
owned.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A unique pointer type
use core::any::{Any, AnyRefExt};
use core::clone::Clone;
use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::fmt;
use core::intrinsics;
use core::kinds::Send;
use core::mem;
use core::option::Option;
use core::raw::TraitObject;
use core::result::{Ok, Err, Result};
/// A value that represents the global exchange heap. This is the default
/// place that the `box` keyword allocates into when no place is supplied.
///
/// The following two examples are equivalent:
///
/// use std::owned::HEAP;
///
/// # struct Bar;
/// # impl Bar { fn new(_a: int) { } }
/// let foo = box(HEAP) Bar::new(2);
/// let foo = box Bar::new(2);
#[lang="exchange_heap"]
pub static HEAP: () = ();
/// A type that represents a uniquely-owned value.
#[lang="owned_box"]
pub struct Box<T>(*mut T);
impl<T: Default> Default for Box<T> {
fn default() -> Box<T> { box Default::default() }
}
#[unstable]
impl<T: Clone> Clone for Box<T> {
/// Return a copy of the owned box.
#[inline]
fn clone(&self) -> Box<T> { box {(**self).clone()} }
/// Perform copy-assignment from `source` by reusing the existing allocation.
#[inline]
fn clone_from(&mut self, source: &Box<T>) {
(**self).clone_from(&(**source));
}
}
// box pointers
impl<T:PartialEq> PartialEq for Box<T> {
#[inline]
fn eq(&self, other: &Box<T>) -> bool { *(*self) == *(*other) }
#[inline]
fn ne(&self, other: &Box<T>) -> bool { *(*self)!= *(*other) }
}
impl<T:PartialOrd> PartialOrd for Box<T> {
#[inline]
fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
(**self).partial_cmp(*other)
}
#[inline]
fn lt(&self, other: &Box<T>) -> bool { *(*self) < *(*other) }
#[inline]
fn le(&self, other: &Box<T>) -> bool { *(*self) <= *(*other) }
#[inline]
fn ge(&self, other: &Box<T>) -> bool { *(*self) >= *(*other) }
#[inline]
fn gt(&self, other: &Box<T>) -> bool { *(*self) > *(*other) }
}
impl<T: Ord> Ord for Box<T> {
#[inline]
fn cmp(&self, other: &Box<T>) -> Ordering { (**self).cmp(*other) }
}
impl<T: Eq> Eq for Box<T> {}
/// Extension methods for an owning `Any` trait object
pub trait AnyOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
fn move<T:'static>(self) -> Result<Box<T>, Self>;
}
impl AnyOwnExt for Box<Any> {
#[inline]
fn move<T:'static>(self) -> Result<Box<T>, Box<Any>>
|
}
/// Extension methods for an owning `Any+Send` trait object
pub trait AnySendOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
fn move_send<T:'static>(self) -> Result<Box<T>, Self>;
}
impl AnySendOwnExt for Box<Any+Send> {
#[inline]
fn move_send<T:'static>(self) -> Result<Box<T>, Box<Any+Send>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let to: TraitObject =
*mem::transmute::<&Box<Any+Send>, &TraitObject>(&self);
// Prevent destructor on self being run
intrinsics::forget(self);
// Extract the data pointer
Ok(mem::transmute(to.data))
}
} else {
Err(self)
}
}
}
impl<T: fmt::Show> fmt::Show for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
impl fmt::Show for Box<Any> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Box<Any>")
}
}
#[cfg(test)]
mod test {
#[test]
fn test_owned_clone() {
let a = box 5i;
let b: Box<int> = a.clone();
assert!(a == b);
}
#[test]
fn any_move() {
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
match a.move::<uint>() {
Ok(a) => { assert!(a == box 8u); }
Err(..) => fail!()
}
match b.move::<Test>() {
Ok(a) => { assert!(a == box Test); }
Err(..) => fail!()
}
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
assert!(a.move::<Box<Test>>().is_err());
assert!(b.move::<Box<uint>>().is_err());
}
#[test]
fn test_show() {
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
let a_str = a.to_str();
let b_str = b.to_str();
assert_eq!(a_str.as_slice(), "Box<Any>");
assert_eq!(b_str.as_slice(), "Box<Any>");
let a = &8u as &Any;
let b = &Test as &Any;
let s = format!("{}", a);
assert_eq!(s.as_slice(), "&Any");
let s = format!("{}", b);
assert_eq!(s.as_slice(), "&Any");
}
}
|
{
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let to: TraitObject =
*mem::transmute::<&Box<Any>, &TraitObject>(&self);
// Prevent destructor on self being run
intrinsics::forget(self);
// Extract the data pointer
Ok(mem::transmute(to.data))
}
} else {
Err(self)
}
}
|
identifier_body
|
owned.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A unique pointer type
use core::any::{Any, AnyRefExt};
use core::clone::Clone;
use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::fmt;
use core::intrinsics;
use core::kinds::Send;
use core::mem;
use core::option::Option;
use core::raw::TraitObject;
use core::result::{Ok, Err, Result};
/// A value that represents the global exchange heap. This is the default
/// place that the `box` keyword allocates into when no place is supplied.
///
/// The following two examples are equivalent:
///
/// use std::owned::HEAP;
///
/// # struct Bar;
/// # impl Bar { fn new(_a: int) { } }
/// let foo = box(HEAP) Bar::new(2);
/// let foo = box Bar::new(2);
#[lang="exchange_heap"]
pub static HEAP: () = ();
/// A type that represents a uniquely-owned value.
#[lang="owned_box"]
pub struct Box<T>(*mut T);
impl<T: Default> Default for Box<T> {
fn default() -> Box<T> { box Default::default() }
}
#[unstable]
impl<T: Clone> Clone for Box<T> {
/// Return a copy of the owned box.
#[inline]
fn clone(&self) -> Box<T> { box {(**self).clone()} }
/// Perform copy-assignment from `source` by reusing the existing allocation.
#[inline]
fn clone_from(&mut self, source: &Box<T>) {
(**self).clone_from(&(**source));
}
}
// box pointers
impl<T:PartialEq> PartialEq for Box<T> {
#[inline]
fn eq(&self, other: &Box<T>) -> bool { *(*self) == *(*other) }
#[inline]
fn ne(&self, other: &Box<T>) -> bool { *(*self)!= *(*other) }
}
impl<T:PartialOrd> PartialOrd for Box<T> {
#[inline]
fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
(**self).partial_cmp(*other)
}
#[inline]
fn lt(&self, other: &Box<T>) -> bool { *(*self) < *(*other) }
#[inline]
fn le(&self, other: &Box<T>) -> bool { *(*self) <= *(*other) }
#[inline]
fn ge(&self, other: &Box<T>) -> bool { *(*self) >= *(*other) }
#[inline]
fn gt(&self, other: &Box<T>) -> bool { *(*self) > *(*other) }
}
impl<T: Ord> Ord for Box<T> {
#[inline]
fn cmp(&self, other: &Box<T>) -> Ordering { (**self).cmp(*other) }
}
impl<T: Eq> Eq for Box<T> {}
/// Extension methods for an owning `Any` trait object
pub trait AnyOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
fn move<T:'static>(self) -> Result<Box<T>, Self>;
}
impl AnyOwnExt for Box<Any> {
#[inline]
fn move<T:'static>(self) -> Result<Box<T>, Box<Any>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let to: TraitObject =
*mem::transmute::<&Box<Any>, &TraitObject>(&self);
// Prevent destructor on self being run
intrinsics::forget(self);
// Extract the data pointer
Ok(mem::transmute(to.data))
}
} else {
Err(self)
}
}
}
/// Extension methods for an owning `Any+Send` trait object
pub trait AnySendOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
fn move_send<T:'static>(self) -> Result<Box<T>, Self>;
}
impl AnySendOwnExt for Box<Any+Send> {
#[inline]
fn move_send<T:'static>(self) -> Result<Box<T>, Box<Any+Send>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let to: TraitObject =
*mem::transmute::<&Box<Any+Send>, &TraitObject>(&self);
// Prevent destructor on self being run
intrinsics::forget(self);
// Extract the data pointer
Ok(mem::transmute(to.data))
}
} else {
Err(self)
}
}
}
|
}
}
impl fmt::Show for Box<Any> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Box<Any>")
}
}
#[cfg(test)]
mod test {
#[test]
fn test_owned_clone() {
let a = box 5i;
let b: Box<int> = a.clone();
assert!(a == b);
}
#[test]
fn any_move() {
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
match a.move::<uint>() {
Ok(a) => { assert!(a == box 8u); }
Err(..) => fail!()
}
match b.move::<Test>() {
Ok(a) => { assert!(a == box Test); }
Err(..) => fail!()
}
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
assert!(a.move::<Box<Test>>().is_err());
assert!(b.move::<Box<uint>>().is_err());
}
#[test]
fn test_show() {
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
let a_str = a.to_str();
let b_str = b.to_str();
assert_eq!(a_str.as_slice(), "Box<Any>");
assert_eq!(b_str.as_slice(), "Box<Any>");
let a = &8u as &Any;
let b = &Test as &Any;
let s = format!("{}", a);
assert_eq!(s.as_slice(), "&Any");
let s = format!("{}", b);
assert_eq!(s.as_slice(), "&Any");
}
}
|
impl<T: fmt::Show> fmt::Show for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
|
random_line_split
|
owned.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A unique pointer type
use core::any::{Any, AnyRefExt};
use core::clone::Clone;
use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::fmt;
use core::intrinsics;
use core::kinds::Send;
use core::mem;
use core::option::Option;
use core::raw::TraitObject;
use core::result::{Ok, Err, Result};
/// A value that represents the global exchange heap. This is the default
/// place that the `box` keyword allocates into when no place is supplied.
///
/// The following two examples are equivalent:
///
/// use std::owned::HEAP;
///
/// # struct Bar;
/// # impl Bar { fn new(_a: int) { } }
/// let foo = box(HEAP) Bar::new(2);
/// let foo = box Bar::new(2);
#[lang="exchange_heap"]
pub static HEAP: () = ();
/// A type that represents a uniquely-owned value.
#[lang="owned_box"]
pub struct Box<T>(*mut T);
impl<T: Default> Default for Box<T> {
fn default() -> Box<T> { box Default::default() }
}
#[unstable]
impl<T: Clone> Clone for Box<T> {
/// Return a copy of the owned box.
#[inline]
fn clone(&self) -> Box<T> { box {(**self).clone()} }
/// Perform copy-assignment from `source` by reusing the existing allocation.
#[inline]
fn clone_from(&mut self, source: &Box<T>) {
(**self).clone_from(&(**source));
}
}
// box pointers
impl<T:PartialEq> PartialEq for Box<T> {
#[inline]
fn eq(&self, other: &Box<T>) -> bool { *(*self) == *(*other) }
#[inline]
fn ne(&self, other: &Box<T>) -> bool { *(*self)!= *(*other) }
}
impl<T:PartialOrd> PartialOrd for Box<T> {
#[inline]
fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
(**self).partial_cmp(*other)
}
#[inline]
fn lt(&self, other: &Box<T>) -> bool { *(*self) < *(*other) }
#[inline]
fn le(&self, other: &Box<T>) -> bool { *(*self) <= *(*other) }
#[inline]
fn ge(&self, other: &Box<T>) -> bool { *(*self) >= *(*other) }
#[inline]
fn
|
(&self, other: &Box<T>) -> bool { *(*self) > *(*other) }
}
impl<T: Ord> Ord for Box<T> {
#[inline]
fn cmp(&self, other: &Box<T>) -> Ordering { (**self).cmp(*other) }
}
impl<T: Eq> Eq for Box<T> {}
/// Extension methods for an owning `Any` trait object
pub trait AnyOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
fn move<T:'static>(self) -> Result<Box<T>, Self>;
}
impl AnyOwnExt for Box<Any> {
#[inline]
fn move<T:'static>(self) -> Result<Box<T>, Box<Any>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let to: TraitObject =
*mem::transmute::<&Box<Any>, &TraitObject>(&self);
// Prevent destructor on self being run
intrinsics::forget(self);
// Extract the data pointer
Ok(mem::transmute(to.data))
}
} else {
Err(self)
}
}
}
/// Extension methods for an owning `Any+Send` trait object
pub trait AnySendOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
fn move_send<T:'static>(self) -> Result<Box<T>, Self>;
}
impl AnySendOwnExt for Box<Any+Send> {
#[inline]
fn move_send<T:'static>(self) -> Result<Box<T>, Box<Any+Send>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let to: TraitObject =
*mem::transmute::<&Box<Any+Send>, &TraitObject>(&self);
// Prevent destructor on self being run
intrinsics::forget(self);
// Extract the data pointer
Ok(mem::transmute(to.data))
}
} else {
Err(self)
}
}
}
impl<T: fmt::Show> fmt::Show for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
impl fmt::Show for Box<Any> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Box<Any>")
}
}
#[cfg(test)]
mod test {
#[test]
fn test_owned_clone() {
let a = box 5i;
let b: Box<int> = a.clone();
assert!(a == b);
}
#[test]
fn any_move() {
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
match a.move::<uint>() {
Ok(a) => { assert!(a == box 8u); }
Err(..) => fail!()
}
match b.move::<Test>() {
Ok(a) => { assert!(a == box Test); }
Err(..) => fail!()
}
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
assert!(a.move::<Box<Test>>().is_err());
assert!(b.move::<Box<uint>>().is_err());
}
#[test]
fn test_show() {
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
let a_str = a.to_str();
let b_str = b.to_str();
assert_eq!(a_str.as_slice(), "Box<Any>");
assert_eq!(b_str.as_slice(), "Box<Any>");
let a = &8u as &Any;
let b = &Test as &Any;
let s = format!("{}", a);
assert_eq!(s.as_slice(), "&Any");
let s = format!("{}", b);
assert_eq!(s.as_slice(), "&Any");
}
}
|
gt
|
identifier_name
|
owned.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A unique pointer type
use core::any::{Any, AnyRefExt};
use core::clone::Clone;
use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::fmt;
use core::intrinsics;
use core::kinds::Send;
use core::mem;
use core::option::Option;
use core::raw::TraitObject;
use core::result::{Ok, Err, Result};
/// A value that represents the global exchange heap. This is the default
/// place that the `box` keyword allocates into when no place is supplied.
///
/// The following two examples are equivalent:
///
/// use std::owned::HEAP;
///
/// # struct Bar;
/// # impl Bar { fn new(_a: int) { } }
/// let foo = box(HEAP) Bar::new(2);
/// let foo = box Bar::new(2);
#[lang="exchange_heap"]
pub static HEAP: () = ();
/// A type that represents a uniquely-owned value.
#[lang="owned_box"]
pub struct Box<T>(*mut T);
impl<T: Default> Default for Box<T> {
fn default() -> Box<T> { box Default::default() }
}
#[unstable]
impl<T: Clone> Clone for Box<T> {
/// Return a copy of the owned box.
#[inline]
fn clone(&self) -> Box<T> { box {(**self).clone()} }
/// Perform copy-assignment from `source` by reusing the existing allocation.
#[inline]
fn clone_from(&mut self, source: &Box<T>) {
(**self).clone_from(&(**source));
}
}
// box pointers
impl<T:PartialEq> PartialEq for Box<T> {
#[inline]
fn eq(&self, other: &Box<T>) -> bool { *(*self) == *(*other) }
#[inline]
fn ne(&self, other: &Box<T>) -> bool { *(*self)!= *(*other) }
}
impl<T:PartialOrd> PartialOrd for Box<T> {
#[inline]
fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
(**self).partial_cmp(*other)
}
#[inline]
fn lt(&self, other: &Box<T>) -> bool { *(*self) < *(*other) }
#[inline]
fn le(&self, other: &Box<T>) -> bool { *(*self) <= *(*other) }
#[inline]
fn ge(&self, other: &Box<T>) -> bool { *(*self) >= *(*other) }
#[inline]
fn gt(&self, other: &Box<T>) -> bool { *(*self) > *(*other) }
}
impl<T: Ord> Ord for Box<T> {
#[inline]
fn cmp(&self, other: &Box<T>) -> Ordering { (**self).cmp(*other) }
}
impl<T: Eq> Eq for Box<T> {}
/// Extension methods for an owning `Any` trait object
pub trait AnyOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
fn move<T:'static>(self) -> Result<Box<T>, Self>;
}
impl AnyOwnExt for Box<Any> {
#[inline]
fn move<T:'static>(self) -> Result<Box<T>, Box<Any>> {
if self.is::<T>()
|
else {
Err(self)
}
}
}
/// Extension methods for an owning `Any+Send` trait object
pub trait AnySendOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
fn move_send<T:'static>(self) -> Result<Box<T>, Self>;
}
impl AnySendOwnExt for Box<Any+Send> {
#[inline]
fn move_send<T:'static>(self) -> Result<Box<T>, Box<Any+Send>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
let to: TraitObject =
*mem::transmute::<&Box<Any+Send>, &TraitObject>(&self);
// Prevent destructor on self being run
intrinsics::forget(self);
// Extract the data pointer
Ok(mem::transmute(to.data))
}
} else {
Err(self)
}
}
}
impl<T: fmt::Show> fmt::Show for Box<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
impl fmt::Show for Box<Any> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Box<Any>")
}
}
#[cfg(test)]
mod test {
#[test]
fn test_owned_clone() {
let a = box 5i;
let b: Box<int> = a.clone();
assert!(a == b);
}
#[test]
fn any_move() {
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
match a.move::<uint>() {
Ok(a) => { assert!(a == box 8u); }
Err(..) => fail!()
}
match b.move::<Test>() {
Ok(a) => { assert!(a == box Test); }
Err(..) => fail!()
}
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
assert!(a.move::<Box<Test>>().is_err());
assert!(b.move::<Box<uint>>().is_err());
}
#[test]
fn test_show() {
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
let a_str = a.to_str();
let b_str = b.to_str();
assert_eq!(a_str.as_slice(), "Box<Any>");
assert_eq!(b_str.as_slice(), "Box<Any>");
let a = &8u as &Any;
let b = &Test as &Any;
let s = format!("{}", a);
assert_eq!(s.as_slice(), "&Any");
let s = format!("{}", b);
assert_eq!(s.as_slice(), "&Any");
}
}
|
{
unsafe {
// Get the raw representation of the trait object
let to: TraitObject =
*mem::transmute::<&Box<Any>, &TraitObject>(&self);
// Prevent destructor on self being run
intrinsics::forget(self);
// Extract the data pointer
Ok(mem::transmute(to.data))
}
}
|
conditional_block
|
main.rs
|
pub mod release;
use std::env::args;
use std::process::exit;
use crate::release::*;
use compat::getpid;
use config::Config;
use logger::{Level, Logger};
use networking::Server;
fn main() {
let mut config = Config::new(Logger::new(Level::Notice));
if let Some(f) = args().nth(1)
|
let (port, daemonize) = (config.port, config.daemonize);
let mut server = Server::new(config);
{
let mut db = server.get_mut_db();
db.git_sha1 = GIT_SHA1;
db.git_dirty = GIT_DIRTY;
db.version = env!("CARGO_PKG_VERSION");
db.rustc_version = RUSTC_VERSION;
}
if!daemonize {
println!("Port: {}", port);
println!("PID: {}", getpid());
}
server.run();
}
|
{
if config.parsefile(f).is_err() {
exit(1);
}
}
|
conditional_block
|
main.rs
|
pub mod release;
use std::env::args;
use std::process::exit;
use crate::release::*;
use compat::getpid;
use config::Config;
use logger::{Level, Logger};
use networking::Server;
fn main()
|
println!("PID: {}", getpid());
}
server.run();
}
|
{
let mut config = Config::new(Logger::new(Level::Notice));
if let Some(f) = args().nth(1) {
if config.parsefile(f).is_err() {
exit(1);
}
}
let (port, daemonize) = (config.port, config.daemonize);
let mut server = Server::new(config);
{
let mut db = server.get_mut_db();
db.git_sha1 = GIT_SHA1;
db.git_dirty = GIT_DIRTY;
db.version = env!("CARGO_PKG_VERSION");
db.rustc_version = RUSTC_VERSION;
}
if !daemonize {
println!("Port: {}", port);
|
identifier_body
|
main.rs
|
pub mod release;
use std::env::args;
use std::process::exit;
use crate::release::*;
use compat::getpid;
use config::Config;
use logger::{Level, Logger};
use networking::Server;
fn main() {
let mut config = Config::new(Logger::new(Level::Notice));
if let Some(f) = args().nth(1) {
if config.parsefile(f).is_err() {
exit(1);
}
}
let (port, daemonize) = (config.port, config.daemonize);
let mut server = Server::new(config);
{
|
let mut db = server.get_mut_db();
db.git_sha1 = GIT_SHA1;
db.git_dirty = GIT_DIRTY;
db.version = env!("CARGO_PKG_VERSION");
db.rustc_version = RUSTC_VERSION;
}
if!daemonize {
println!("Port: {}", port);
println!("PID: {}", getpid());
}
server.run();
}
|
random_line_split
|
|
main.rs
|
pub mod release;
use std::env::args;
use std::process::exit;
use crate::release::*;
use compat::getpid;
use config::Config;
use logger::{Level, Logger};
use networking::Server;
fn
|
() {
let mut config = Config::new(Logger::new(Level::Notice));
if let Some(f) = args().nth(1) {
if config.parsefile(f).is_err() {
exit(1);
}
}
let (port, daemonize) = (config.port, config.daemonize);
let mut server = Server::new(config);
{
let mut db = server.get_mut_db();
db.git_sha1 = GIT_SHA1;
db.git_dirty = GIT_DIRTY;
db.version = env!("CARGO_PKG_VERSION");
db.rustc_version = RUSTC_VERSION;
}
if!daemonize {
println!("Port: {}", port);
println!("PID: {}", getpid());
}
server.run();
}
|
main
|
identifier_name
|
portal.rs
|
// Copyright 2013-2014 Jeffery Olson
//
// Licensed under the 3-Clause BSD License, see LICENSE.txt
// at the top-level of this repository.
// This file may not be copied, modified, or distributed
// except according to those terms.
use uuid::Uuid;
use world::TraversalDirection;
use world::TraversalDirection::*;
#[deriving(Encodable, Decodable)]
pub struct Portal {
id: Uuid,
a_zid: Uuid,
a_exit: TraversalDirection,
b_zid: Uuid,
b_exit: TraversalDirection
}
impl Portal {
|
b_zid: Uuid, bx: TraversalDirection) -> Portal {
if ae == North && bx!= South { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == South && bx!= North { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == West && bx!= East { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == East && bx!= West { panic!("bad portal dirs a:{} b:{}", ae, bx); }
Portal { id: id, a_zid: a_zid, a_exit: ae, b_zid: b_zid, b_exit: bx }
}
pub fn info_from(&self, zid: Uuid) -> (Uuid, TraversalDirection) {
if self.a_zid == zid { (self.b_zid, self.a_exit) }
else if self.b_zid == zid { (self.a_zid, self.b_exit) }
else { panic!("zid:{} isn't in this portal!", zid) }
}
}
|
pub fn new(id: Uuid, a_zid: Uuid, ae: TraversalDirection,
|
random_line_split
|
portal.rs
|
// Copyright 2013-2014 Jeffery Olson
//
// Licensed under the 3-Clause BSD License, see LICENSE.txt
// at the top-level of this repository.
// This file may not be copied, modified, or distributed
// except according to those terms.
use uuid::Uuid;
use world::TraversalDirection;
use world::TraversalDirection::*;
#[deriving(Encodable, Decodable)]
pub struct Portal {
id: Uuid,
a_zid: Uuid,
a_exit: TraversalDirection,
b_zid: Uuid,
b_exit: TraversalDirection
}
impl Portal {
pub fn new(id: Uuid, a_zid: Uuid, ae: TraversalDirection,
b_zid: Uuid, bx: TraversalDirection) -> Portal
|
pub fn info_from(&self, zid: Uuid) -> (Uuid, TraversalDirection) {
if self.a_zid == zid { (self.b_zid, self.a_exit) }
else if self.b_zid == zid { (self.a_zid, self.b_exit) }
else { panic!("zid:{} isn't in this portal!", zid) }
}
}
|
{
if ae == North && bx != South { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == South && bx != North { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == West && bx != East { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == East && bx != West { panic!("bad portal dirs a:{} b:{}", ae, bx); }
Portal { id: id, a_zid: a_zid, a_exit: ae, b_zid: b_zid, b_exit: bx }
}
|
identifier_body
|
portal.rs
|
// Copyright 2013-2014 Jeffery Olson
//
// Licensed under the 3-Clause BSD License, see LICENSE.txt
// at the top-level of this repository.
// This file may not be copied, modified, or distributed
// except according to those terms.
use uuid::Uuid;
use world::TraversalDirection;
use world::TraversalDirection::*;
#[deriving(Encodable, Decodable)]
pub struct Portal {
id: Uuid,
a_zid: Uuid,
a_exit: TraversalDirection,
b_zid: Uuid,
b_exit: TraversalDirection
}
impl Portal {
pub fn new(id: Uuid, a_zid: Uuid, ae: TraversalDirection,
b_zid: Uuid, bx: TraversalDirection) -> Portal {
if ae == North && bx!= South
|
if ae == South && bx!= North { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == West && bx!= East { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == East && bx!= West { panic!("bad portal dirs a:{} b:{}", ae, bx); }
Portal { id: id, a_zid: a_zid, a_exit: ae, b_zid: b_zid, b_exit: bx }
}
pub fn info_from(&self, zid: Uuid) -> (Uuid, TraversalDirection) {
if self.a_zid == zid { (self.b_zid, self.a_exit) }
else if self.b_zid == zid { (self.a_zid, self.b_exit) }
else { panic!("zid:{} isn't in this portal!", zid) }
}
}
|
{ panic!("bad portal dirs a:{} b:{}", ae, bx); }
|
conditional_block
|
portal.rs
|
// Copyright 2013-2014 Jeffery Olson
//
// Licensed under the 3-Clause BSD License, see LICENSE.txt
// at the top-level of this repository.
// This file may not be copied, modified, or distributed
// except according to those terms.
use uuid::Uuid;
use world::TraversalDirection;
use world::TraversalDirection::*;
#[deriving(Encodable, Decodable)]
pub struct Portal {
id: Uuid,
a_zid: Uuid,
a_exit: TraversalDirection,
b_zid: Uuid,
b_exit: TraversalDirection
}
impl Portal {
pub fn
|
(id: Uuid, a_zid: Uuid, ae: TraversalDirection,
b_zid: Uuid, bx: TraversalDirection) -> Portal {
if ae == North && bx!= South { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == South && bx!= North { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == West && bx!= East { panic!("bad portal dirs a:{} b:{}", ae, bx); }
if ae == East && bx!= West { panic!("bad portal dirs a:{} b:{}", ae, bx); }
Portal { id: id, a_zid: a_zid, a_exit: ae, b_zid: b_zid, b_exit: bx }
}
pub fn info_from(&self, zid: Uuid) -> (Uuid, TraversalDirection) {
if self.a_zid == zid { (self.b_zid, self.a_exit) }
else if self.b_zid == zid { (self.a_zid, self.b_exit) }
else { panic!("zid:{} isn't in this portal!", zid) }
}
}
|
new
|
identifier_name
|
lib.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
|
// extern crate regex;
extern crate syntax;
extern crate regex;
#[cfg(test)] extern crate hamcrest;
pub mod builder;
pub mod node;
pub mod parser;
#[path="../../src/hal/lpc17xx/platformtree.rs"] mod lpc17xx_pt;
#[path="../../src/hal/tiva_c/platformtree.rs"] mod tiva_c_pt;
#[path="../../src/drivers/drivers_pt.rs"] mod drivers_pt;
#[cfg(test)] mod test_helpers;
#[cfg(test)] mod parser_test;
|
//! Platform tree operations crate
#![feature(quote, rustc_private)]
|
random_line_split
|
lib.rs
|
#![doc(html_logo_url = "https://avatars0.githubusercontent.com/u/7853871?s=128", html_favicon_url = "https://avatars0.githubusercontent.com/u/7853871?s=256", html_root_url = "http://ironframework.io/core/iron")]
#![cfg_attr(test, deny(warnings))]
#![deny(missing_docs)]
//! The main crate for Iron.
//!
//! ## Overview
//!
//! Iron is a high level web framework built in and for Rust, built on
//! [hyper](https://github.com/hyperium/hyper). Iron is designed to take advantage
//! of Rust's greatest features - its excellent type system and principled
|
//! writes and locking in the core framework.
//!
//! ## Hello World
//!
//! ```no_run
//! extern crate iron;
//!
//! use iron::prelude::*;
//! use iron::status;
//!
//! fn main() {
//! Iron::new(|_: &mut Request| {
//! Ok(Response::with((status::Ok, "Hello World!")))
//! }).http("localhost:3000").unwrap();
//! }
//! ```
//!
//! ## Design Philosophy
//!
//! Iron is meant to be as extensible and pluggable as possible; Iron's core is
//! concentrated and avoids unnecessary features by leaving them to middleware,
//! plugins, and modifiers.
//!
//! Middleware, Plugins, and Modifiers are the main ways to extend Iron with new
//! functionality. Most extensions that would be provided by middleware in other
//! web frameworks are instead addressed by the much simpler Modifier and Plugin
//! systems.
//!
//! Modifiers allow external code to manipulate Requests and Response in an ergonomic
//! fashion, allowing third-party extensions to get the same treatment as modifiers
//! defined in Iron itself. Plugins allow for lazily-evaluated, automatically cached
//! extensions to Requests and Responses, perfect for parsing, accessing, and
//! otherwise lazily manipulating an http connection.
//!
//! Middleware are only used when it is necessary to modify the control flow of a
//! Request flow, hijack the entire handling of a Request, check an incoming
//! Request, or to do final post-processing. This covers areas such as routing,
//! mounting, static asset serving, final template rendering, authentication, and
//! logging.
//!
//! Iron comes with only basic modifiers for setting the status, body, and various
//! headers, and the infrastructure for creating modifiers, plugins, and
//! middleware. No plugins or middleware are bundled with Iron.
//!
// Stdlib dependencies
#[macro_use] extern crate log;
// Third party packages
extern crate hyper;
extern crate typemap as tmap;
extern crate plugin;
extern crate error as err;
extern crate url;
extern crate num_cpus;
extern crate conduit_mime_types as mime_types;
#[macro_use]
extern crate lazy_static;
// Request + Response
pub use request::{Request, Url};
pub use response::Response;
// Middleware system
pub use middleware::{BeforeMiddleware, AfterMiddleware, AroundMiddleware,
Handler, Chain};
// Server
pub use iron::*;
// Extensions
pub use typemap::TypeMap;
// Headers
pub use hyper::header as headers;
pub use hyper::header::Headers;
// Expose `Pluggable` as `Plugin` so users can do `use iron::Plugin`.
pub use plugin::Pluggable as Plugin;
// Expose modifiers.
pub use modifier::Set;
// Errors
pub use error::Error;
pub use error::IronError;
// Mime types
pub use hyper::mime;
/// Iron's error type and associated utilities.
pub mod error;
/// The Result alias used throughout Iron and in clients of Iron.
pub type IronResult<T> = Result<T, IronError>;
/// A module meant to be glob imported when using Iron.
///
/// For instance:
///
/// ```
/// use iron::prelude::*;
/// ```
///
/// This module contains several important traits that provide many
/// of the convenience methods in Iron, as well as `Request`, `Response`
/// `IronResult`, `IronError` and `Iron`.
pub mod prelude {
pub use {Set, Plugin, Chain, Request, Response,
IronResult, IronError, Iron};
}
/// Re-exports from the TypeMap crate.
pub mod typemap {
pub use tmap::{TypeMap, Key};
}
/// Re-exports from the Modifier crate.
pub mod modifier {
extern crate modifier as modfier;
pub use self::modfier::*;
}
/// Status Codes
pub mod status {
pub use hyper::status::StatusCode as Status;
pub use hyper::status::StatusCode::*;
pub use hyper::status::StatusClass;
}
/// HTTP Methods
pub mod method {
pub use hyper::method::Method;
pub use hyper::method::Method::*;
}
// Publicized to show the documentation
pub mod middleware;
// Response utilities
pub mod response;
// Request utilities
pub mod request;
// Request and Response Modifiers
pub mod modifiers;
// Helper macros for error handling
mod macros;
mod iron;
|
//! approach to ownership in both single threaded and multi threaded contexts.
//!
//! Iron is highly concurrent and can scale horizontally on more machines behind a
//! load balancer or by running more threads on a more powerful machine. Iron
//! avoids the bottlenecks encountered in highly concurrent code by avoiding shared
|
random_line_split
|
fromdb.rs
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
|
use rusqlite::Row;
use crate::catalog::CatalogVersion;
/// Trait to define loading from a database.
pub trait FromDb: Sized {
/// Read one element from a database Row obtained through a query
/// build with the tables and columns provided.
/// The version of the catalog allow selecting the proper variant.
fn read_from(version: CatalogVersion, row: &Row) -> crate::Result<Self>;
/// DB tables used in select query.
fn read_db_tables(version: CatalogVersion) -> &'static str;
/// DB columns used in select query.
fn read_db_columns(version: CatalogVersion) -> &'static str;
/// WHERE clause for joining tables (doesn't include `WHERE`)
/// Default is empty
fn read_join_where(_version: CatalogVersion) -> &'static str {
""
}
}
|
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
|
random_line_split
|
fromdb.rs
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
use rusqlite::Row;
use crate::catalog::CatalogVersion;
/// Trait to define loading from a database.
pub trait FromDb: Sized {
/// Read one element from a database Row obtained through a query
/// build with the tables and columns provided.
/// The version of the catalog allow selecting the proper variant.
fn read_from(version: CatalogVersion, row: &Row) -> crate::Result<Self>;
/// DB tables used in select query.
fn read_db_tables(version: CatalogVersion) -> &'static str;
/// DB columns used in select query.
fn read_db_columns(version: CatalogVersion) -> &'static str;
/// WHERE clause for joining tables (doesn't include `WHERE`)
/// Default is empty
fn
|
(_version: CatalogVersion) -> &'static str {
""
}
}
|
read_join_where
|
identifier_name
|
fromdb.rs
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
use rusqlite::Row;
use crate::catalog::CatalogVersion;
/// Trait to define loading from a database.
pub trait FromDb: Sized {
/// Read one element from a database Row obtained through a query
/// build with the tables and columns provided.
/// The version of the catalog allow selecting the proper variant.
fn read_from(version: CatalogVersion, row: &Row) -> crate::Result<Self>;
/// DB tables used in select query.
fn read_db_tables(version: CatalogVersion) -> &'static str;
/// DB columns used in select query.
fn read_db_columns(version: CatalogVersion) -> &'static str;
/// WHERE clause for joining tables (doesn't include `WHERE`)
/// Default is empty
fn read_join_where(_version: CatalogVersion) -> &'static str
|
}
|
{
""
}
|
identifier_body
|
domtokenlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrHelpers};
use dom::bindings::codegen::Bindings::DOMTokenListBinding;
use dom::bindings::codegen::Bindings::DOMTokenListBinding::DOMTokenListMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::error::Error::{InvalidCharacter, Syntax};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::utils::{Reflector, reflect_dom_object};
use dom::element::{Element, AttributeHandlers};
use dom::node::window_from_node;
use util::str::{DOMString, HTML_SPACE_CHARACTERS, str_join};
use string_cache::Atom;
use std::borrow::ToOwned;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct DOMTokenList {
reflector_: Reflector,
element: JS<Element>,
local_name: Atom,
}
impl DOMTokenList {
pub fn new_inherited(element: &Element, local_name: Atom) -> DOMTokenList {
DOMTokenList {
reflector_: Reflector::new(),
element: JS::from_ref(element),
local_name: local_name,
}
}
pub fn new(element: &Element, local_name: &Atom) -> Root<DOMTokenList> {
let window = window_from_node(element);
reflect_dom_object(box DOMTokenList::new_inherited(element, local_name.clone()),
GlobalRef::Window(window.r()),
DOMTokenListBinding::Wrap)
}
}
trait PrivateDOMTokenListHelpers {
fn attribute(self) -> Option<Root<Attr>>;
fn check_token_exceptions(self, token: &str) -> Fallible<Atom>;
}
impl<'a> PrivateDOMTokenListHelpers for &'a DOMTokenList {
fn attribute(self) -> Option<Root<Attr>> {
let element = self.element.root();
element.r().get_attribute(&ns!(""), &self.local_name)
}
fn check_token_exceptions(self, token: &str) -> Fallible<Atom> {
match token {
"" => Err(Syntax),
slice if slice.find(HTML_SPACE_CHARACTERS).is_some() => Err(InvalidCharacter),
slice => Ok(Atom::from_slice(slice))
}
}
}
// https://dom.spec.whatwg.org/#domtokenlist
impl<'a> DOMTokenListMethods for &'a DOMTokenList {
// https://dom.spec.whatwg.org/#dom-domtokenlist-length
fn Length(self) -> u32 {
self.attribute().map(|attr| {
let attr = attr.r();
attr.value().tokens().map(|tokens| tokens.len()).unwrap_or(0)
}).unwrap_or(0) as u32
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-item
fn Item(self, index: u32) -> Option<DOMString> {
self.attribute().and_then(|attr| {
let attr = attr.r();
attr.value().tokens().and_then(|tokens| {
tokens.get(index as usize).map(|token| (**token).to_owned())
})
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-contains
fn Contains(self, token: DOMString) -> Fallible<bool> {
self.check_token_exceptions(&token).map(|token| {
self.attribute().map(|attr| {
let attr = attr.r();
attr.value()
.tokens()
.expect("Should have parsed this attribute")
.iter()
.any(|atom| *atom == token)
}).unwrap_or(false)
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-add
fn Add(self, tokens: Vec<DOMString>) -> ErrorResult {
let element = self.element.root();
let mut atoms = element.r().get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
if!atoms.iter().any(|atom| *atom == token) {
atoms.push(token);
}
}
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-remove
fn Remove(self, tokens: Vec<DOMString>) -> ErrorResult {
let element = self.element.root();
let mut atoms = element.r().get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
atoms.iter().position(|atom| *atom == token).map(|index| {
atoms.remove(index)
});
}
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-toggle
fn Toggle(self, token: DOMString, force: Option<bool>) -> Fallible<bool> {
let element = self.element.root();
let mut atoms = element.r().get_tokenlist_attribute(&self.local_name);
let token = try!(self.check_token_exceptions(&token));
match atoms.iter().position(|atom| *atom == token) {
Some(index) => match force {
Some(true) => Ok(true),
_ => {
atoms.remove(index);
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(false)
}
},
None => match force {
Some(false) => Ok(false),
|
}
}
}
}
// https://dom.spec.whatwg.org/#stringification-behavior
fn Stringifier(self) -> DOMString {
let tokenlist = self.element.root().r().get_tokenlist_attribute(&self.local_name);
str_join(&tokenlist, "\x20")
}
// check-tidy: no specs after this line
fn IndexedGetter(self, index: u32, found: &mut bool) -> Option<DOMString> {
let item = self.Item(index);
*found = item.is_some();
item
}
}
|
_ => {
atoms.push(token);
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(true)
|
random_line_split
|
domtokenlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrHelpers};
use dom::bindings::codegen::Bindings::DOMTokenListBinding;
use dom::bindings::codegen::Bindings::DOMTokenListBinding::DOMTokenListMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::error::Error::{InvalidCharacter, Syntax};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::utils::{Reflector, reflect_dom_object};
use dom::element::{Element, AttributeHandlers};
use dom::node::window_from_node;
use util::str::{DOMString, HTML_SPACE_CHARACTERS, str_join};
use string_cache::Atom;
use std::borrow::ToOwned;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct DOMTokenList {
reflector_: Reflector,
element: JS<Element>,
local_name: Atom,
}
impl DOMTokenList {
pub fn new_inherited(element: &Element, local_name: Atom) -> DOMTokenList {
DOMTokenList {
reflector_: Reflector::new(),
element: JS::from_ref(element),
local_name: local_name,
}
}
pub fn new(element: &Element, local_name: &Atom) -> Root<DOMTokenList> {
let window = window_from_node(element);
reflect_dom_object(box DOMTokenList::new_inherited(element, local_name.clone()),
GlobalRef::Window(window.r()),
DOMTokenListBinding::Wrap)
}
}
trait PrivateDOMTokenListHelpers {
fn attribute(self) -> Option<Root<Attr>>;
fn check_token_exceptions(self, token: &str) -> Fallible<Atom>;
}
impl<'a> PrivateDOMTokenListHelpers for &'a DOMTokenList {
fn attribute(self) -> Option<Root<Attr>>
|
fn check_token_exceptions(self, token: &str) -> Fallible<Atom> {
match token {
"" => Err(Syntax),
slice if slice.find(HTML_SPACE_CHARACTERS).is_some() => Err(InvalidCharacter),
slice => Ok(Atom::from_slice(slice))
}
}
}
// https://dom.spec.whatwg.org/#domtokenlist
impl<'a> DOMTokenListMethods for &'a DOMTokenList {
// https://dom.spec.whatwg.org/#dom-domtokenlist-length
fn Length(self) -> u32 {
self.attribute().map(|attr| {
let attr = attr.r();
attr.value().tokens().map(|tokens| tokens.len()).unwrap_or(0)
}).unwrap_or(0) as u32
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-item
fn Item(self, index: u32) -> Option<DOMString> {
self.attribute().and_then(|attr| {
let attr = attr.r();
attr.value().tokens().and_then(|tokens| {
tokens.get(index as usize).map(|token| (**token).to_owned())
})
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-contains
fn Contains(self, token: DOMString) -> Fallible<bool> {
self.check_token_exceptions(&token).map(|token| {
self.attribute().map(|attr| {
let attr = attr.r();
attr.value()
.tokens()
.expect("Should have parsed this attribute")
.iter()
.any(|atom| *atom == token)
}).unwrap_or(false)
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-add
fn Add(self, tokens: Vec<DOMString>) -> ErrorResult {
let element = self.element.root();
let mut atoms = element.r().get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
if!atoms.iter().any(|atom| *atom == token) {
atoms.push(token);
}
}
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-remove
fn Remove(self, tokens: Vec<DOMString>) -> ErrorResult {
let element = self.element.root();
let mut atoms = element.r().get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
atoms.iter().position(|atom| *atom == token).map(|index| {
atoms.remove(index)
});
}
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-toggle
fn Toggle(self, token: DOMString, force: Option<bool>) -> Fallible<bool> {
let element = self.element.root();
let mut atoms = element.r().get_tokenlist_attribute(&self.local_name);
let token = try!(self.check_token_exceptions(&token));
match atoms.iter().position(|atom| *atom == token) {
Some(index) => match force {
Some(true) => Ok(true),
_ => {
atoms.remove(index);
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(false)
}
},
None => match force {
Some(false) => Ok(false),
_ => {
atoms.push(token);
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(true)
}
}
}
}
// https://dom.spec.whatwg.org/#stringification-behavior
fn Stringifier(self) -> DOMString {
let tokenlist = self.element.root().r().get_tokenlist_attribute(&self.local_name);
str_join(&tokenlist, "\x20")
}
// check-tidy: no specs after this line
fn IndexedGetter(self, index: u32, found: &mut bool) -> Option<DOMString> {
let item = self.Item(index);
*found = item.is_some();
item
}
}
|
{
let element = self.element.root();
element.r().get_attribute(&ns!(""), &self.local_name)
}
|
identifier_body
|
domtokenlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrHelpers};
use dom::bindings::codegen::Bindings::DOMTokenListBinding;
use dom::bindings::codegen::Bindings::DOMTokenListBinding::DOMTokenListMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::error::Error::{InvalidCharacter, Syntax};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::utils::{Reflector, reflect_dom_object};
use dom::element::{Element, AttributeHandlers};
use dom::node::window_from_node;
use util::str::{DOMString, HTML_SPACE_CHARACTERS, str_join};
use string_cache::Atom;
use std::borrow::ToOwned;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct DOMTokenList {
reflector_: Reflector,
element: JS<Element>,
local_name: Atom,
}
impl DOMTokenList {
pub fn new_inherited(element: &Element, local_name: Atom) -> DOMTokenList {
DOMTokenList {
reflector_: Reflector::new(),
element: JS::from_ref(element),
local_name: local_name,
}
}
pub fn new(element: &Element, local_name: &Atom) -> Root<DOMTokenList> {
let window = window_from_node(element);
reflect_dom_object(box DOMTokenList::new_inherited(element, local_name.clone()),
GlobalRef::Window(window.r()),
DOMTokenListBinding::Wrap)
}
}
trait PrivateDOMTokenListHelpers {
fn attribute(self) -> Option<Root<Attr>>;
fn check_token_exceptions(self, token: &str) -> Fallible<Atom>;
}
impl<'a> PrivateDOMTokenListHelpers for &'a DOMTokenList {
fn attribute(self) -> Option<Root<Attr>> {
let element = self.element.root();
element.r().get_attribute(&ns!(""), &self.local_name)
}
fn check_token_exceptions(self, token: &str) -> Fallible<Atom> {
match token {
"" => Err(Syntax),
slice if slice.find(HTML_SPACE_CHARACTERS).is_some() => Err(InvalidCharacter),
slice => Ok(Atom::from_slice(slice))
}
}
}
// https://dom.spec.whatwg.org/#domtokenlist
impl<'a> DOMTokenListMethods for &'a DOMTokenList {
// https://dom.spec.whatwg.org/#dom-domtokenlist-length
fn
|
(self) -> u32 {
self.attribute().map(|attr| {
let attr = attr.r();
attr.value().tokens().map(|tokens| tokens.len()).unwrap_or(0)
}).unwrap_or(0) as u32
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-item
fn Item(self, index: u32) -> Option<DOMString> {
self.attribute().and_then(|attr| {
let attr = attr.r();
attr.value().tokens().and_then(|tokens| {
tokens.get(index as usize).map(|token| (**token).to_owned())
})
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-contains
fn Contains(self, token: DOMString) -> Fallible<bool> {
self.check_token_exceptions(&token).map(|token| {
self.attribute().map(|attr| {
let attr = attr.r();
attr.value()
.tokens()
.expect("Should have parsed this attribute")
.iter()
.any(|atom| *atom == token)
}).unwrap_or(false)
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-add
fn Add(self, tokens: Vec<DOMString>) -> ErrorResult {
let element = self.element.root();
let mut atoms = element.r().get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
if!atoms.iter().any(|atom| *atom == token) {
atoms.push(token);
}
}
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-remove
fn Remove(self, tokens: Vec<DOMString>) -> ErrorResult {
let element = self.element.root();
let mut atoms = element.r().get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
atoms.iter().position(|atom| *atom == token).map(|index| {
atoms.remove(index)
});
}
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-toggle
fn Toggle(self, token: DOMString, force: Option<bool>) -> Fallible<bool> {
let element = self.element.root();
let mut atoms = element.r().get_tokenlist_attribute(&self.local_name);
let token = try!(self.check_token_exceptions(&token));
match atoms.iter().position(|atom| *atom == token) {
Some(index) => match force {
Some(true) => Ok(true),
_ => {
atoms.remove(index);
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(false)
}
},
None => match force {
Some(false) => Ok(false),
_ => {
atoms.push(token);
element.r().set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(true)
}
}
}
}
// https://dom.spec.whatwg.org/#stringification-behavior
fn Stringifier(self) -> DOMString {
let tokenlist = self.element.root().r().get_tokenlist_attribute(&self.local_name);
str_join(&tokenlist, "\x20")
}
// check-tidy: no specs after this line
fn IndexedGetter(self, index: u32, found: &mut bool) -> Option<DOMString> {
let item = self.Item(index);
*found = item.is_some();
item
}
}
|
Length
|
identifier_name
|
query04.rs
|
use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
// -- $ID$
// -- TPC-H/TPC-R Order Priority Checking Query (Q4)
// -- Functional Query Definition
// -- Approved February 1998
// :x
|
// from
// orders
// where
// o_orderdate >= date ':1'
// and o_orderdate < date ':1' + interval '3' month
// and exists (
// select
// *
// from
// lineitem
// where
// l_orderkey = o_orderkey
// and l_commitdate < l_receiptdate
// )
// group by
// o_orderpriority
// order by
// o_orderpriority;
// :n -1
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.flat_map(|l| if l.commit_date < l.receipt_date { Some(l.order_key) } else { None })
.distinct_total();
collections
.orders()
.flat_map(|o|
if o.order_date >= ::types::create_date(1993, 7, 1) && o.order_date < ::types::create_date(1993, 10, 1) {
Some((o.order_key, o.order_priority))
}
else { None }
)
.semijoin(&lineitems)
.map(|(_k,v)| v)
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.flat_map(|l| if l.commit_date < l.receipt_date { Some((l.order_key, ())) } else { None })
.distinct_total()
.join_core(&arrangements.order, |_k,&(),o| {
if o.order_date >= ::types::create_date(1993, 7, 1) && o.order_date < ::types::create_date(1993, 10, 1) {
Some(o.order_priority)
}
else {
None
}
})
.count_total()
.probe_with(probe);
}
|
// :o
// select
// o_orderpriority,
// count(*) as order_count
|
random_line_split
|
query04.rs
|
use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
// -- $ID$
// -- TPC-H/TPC-R Order Priority Checking Query (Q4)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// o_orderpriority,
// count(*) as order_count
// from
// orders
// where
// o_orderdate >= date ':1'
// and o_orderdate < date ':1' + interval '3' month
// and exists (
// select
// *
// from
// lineitem
// where
// l_orderkey = o_orderkey
// and l_commitdate < l_receiptdate
// )
// group by
// o_orderpriority
// order by
// o_orderpriority;
// :n -1
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.flat_map(|l| if l.commit_date < l.receipt_date { Some(l.order_key) } else { None })
.distinct_total();
collections
.orders()
.flat_map(|o|
if o.order_date >= ::types::create_date(1993, 7, 1) && o.order_date < ::types::create_date(1993, 10, 1) {
Some((o.order_key, o.order_priority))
}
else { None }
)
.semijoin(&lineitems)
.map(|(_k,v)| v)
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
|
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.flat_map(|l| if l.commit_date < l.receipt_date { Some((l.order_key, ())) } else { None })
.distinct_total()
.join_core(&arrangements.order, |_k,&(),o| {
if o.order_date >= ::types::create_date(1993, 7, 1) && o.order_date < ::types::create_date(1993, 10, 1) {
Some(o.order_priority)
}
else {
None
}
})
.count_total()
.probe_with(probe);
}
|
identifier_body
|
|
query04.rs
|
use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
// -- $ID$
// -- TPC-H/TPC-R Order Priority Checking Query (Q4)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// o_orderpriority,
// count(*) as order_count
// from
// orders
// where
// o_orderdate >= date ':1'
// and o_orderdate < date ':1' + interval '3' month
// and exists (
// select
// *
// from
// lineitem
// where
// l_orderkey = o_orderkey
// and l_commitdate < l_receiptdate
// )
// group by
// o_orderpriority
// order by
// o_orderpriority;
// :n -1
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.flat_map(|l| if l.commit_date < l.receipt_date { Some(l.order_key) } else { None })
.distinct_total();
collections
.orders()
.flat_map(|o|
if o.order_date >= ::types::create_date(1993, 7, 1) && o.order_date < ::types::create_date(1993, 10, 1) {
Some((o.order_key, o.order_priority))
}
else { None }
)
.semijoin(&lineitems)
.map(|(_k,v)| v)
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn
|
<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.flat_map(|l| if l.commit_date < l.receipt_date { Some((l.order_key, ())) } else { None })
.distinct_total()
.join_core(&arrangements.order, |_k,&(),o| {
if o.order_date >= ::types::create_date(1993, 7, 1) && o.order_date < ::types::create_date(1993, 10, 1) {
Some(o.order_priority)
}
else {
None
}
})
.count_total()
.probe_with(probe);
}
|
query_arranged
|
identifier_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.