file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
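Each row below is one fill-in-the-middle sample: a source file split into prefix, middle, and suffix, with fim_type recording the split strategy (random_line_split, identifier_body, identifier_name, or conditional_block). A minimal sketch of how a row presumably maps back to its source file, assuming the usual FIM convention that the three text columns concatenate in order (`reassemble` is an illustrative helper, not part of the dataset):

```rust
// Minimal sketch: under the usual FIM convention, prefix + middle + suffix
// recovers the original file; `fim_type` only records how the split was chosen.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{}{}{}", prefix, middle, suffix)
}

fn main() {
    // Illustrative values, not taken from a real row.
    let full = reassemble("fn main() {\n    println!(", "\"hi\"", ");\n}\n");
    print!("{}", full);
}
```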
index.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
|
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use serde::{Deserialize, Deserializer, Error};
use serde::de::Visitor;
/// Represents usize.
#[derive(Debug, PartialEq)]
pub struct Index(usize);
impl Index {
/// Convert to usize
pub fn value(&self) -> usize {
self.0
}
}
impl Deserialize for Index {
fn deserialize<D>(deserializer: &mut D) -> Result<Index, D::Error>
where D: Deserializer {
deserializer.deserialize(IndexVisitor)
}
}
struct IndexVisitor;
impl Visitor for IndexVisitor {
type Value = Index;
fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: Error {
match value {
_ if value.starts_with("0x") => usize::from_str_radix(&value[2..], 16).map(Index).map_err(|_| Error::custom("invalid index")),
_ => value.parse::<usize>().map(Index).map_err(|_| Error::custom("invalid index"))
}
}
fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: Error {
self.visit_str(value.as_ref())
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
#[test]
fn index_deserialization() {
let s = r#"["0xa", "10"]"#;
let deserialized: Vec<Index> = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, vec![Index(10), Index(10)]);
}
}
|
random_line_split
|
|
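The IndexVisitor above accepts both 0x-prefixed hexadecimal and plain decimal strings. A standalone sketch of that parsing rule without the serde plumbing (`parse_index` is an illustrative name, not part of index.rs):

```rust
// Standalone sketch of the rule the visitor applies: a "0x" prefix selects
// base-16, otherwise the string is parsed as decimal.
fn parse_index(value: &str) -> Option<usize> {
    match value.strip_prefix("0x") {
        Some(hex) => usize::from_str_radix(hex, 16).ok(),
        None => value.parse::<usize>().ok(),
    }
}

fn main() {
    // Mirrors the test above: both forms denote the same index.
    assert_eq!(parse_index("0xa"), Some(10));
    assert_eq!(parse_index("10"), Some(10));
    assert_eq!(parse_index("0xzz"), None);
    println!("ok");
}
```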
test_result.rs
|
mod checked {
#[derive(Debug)]
pub enum MathError {
DivisionByZero,
NonPositiveLogarithm,
NegativeSquareRoot,
}
pub type MathResult = Result<f64, MathError>;
pub fn div(x: f64, y: f64) -> MathResult {
if y == 0.0 {
Err(MathError::DivisionByZero)
} else {
Ok(x / y)
}
}
pub fn sqrt(x: f64) -> MathResult {
if x < 0.0 {
Err(MathError::NegativeSquareRoot)
} else {
Ok(x.sqrt())
}
}
pub fn ln(x: f64) -> MathResult {
if x <= 0.0 {
Err(MathError::NonPositiveLogarithm)
} else {
Ok(x.ln())
}
}
}
fn op(x: f64, y: f64) -> f64 {
match checked::div(x, y) {
Err(why) => panic!("{:?}", why),
Ok(ratio) => match checked::ln(ratio) {
Err(why) => panic!("{:?}", why),
|
Ok(sqrt) => sqrt,
},
},
}
}
fn main() {
println!("{}", op(1.0, 10.0));
}
|
Ok(ln) => match checked::sqrt(ln) {
Err(why) => panic!("{:?}", why),
|
random_line_split
|
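The nested match chain in op panics on the first error; once a function itself returns a MathResult, the same chain can be flattened with the ? operator. A self-contained sketch under that assumption (`op_checked` is an illustrative name; the `checked` module is repeated from the row above so the snippet compiles on its own):

```rust
mod checked {
    #[derive(Debug)]
    pub enum MathError { DivisionByZero, NonPositiveLogarithm, NegativeSquareRoot }
    pub type MathResult = Result<f64, MathError>;
    pub fn div(x: f64, y: f64) -> MathResult {
        if y == 0.0 { Err(MathError::DivisionByZero) } else { Ok(x / y) }
    }
    pub fn sqrt(x: f64) -> MathResult {
        if x < 0.0 { Err(MathError::NegativeSquareRoot) } else { Ok(x.sqrt()) }
    }
    pub fn ln(x: f64) -> MathResult {
        if x <= 0.0 { Err(MathError::NonPositiveLogarithm) } else { Ok(x.ln()) }
    }
}

// Same chain as `op`, but propagating errors with `?` instead of panicking.
fn op_checked(x: f64, y: f64) -> checked::MathResult {
    let ratio = checked::div(x, y)?;
    let ln = checked::ln(ratio)?;
    checked::sqrt(ln)
}

fn main() {
    // 1.0 / 10.0 = 0.1, ln(0.1) < 0, so the sqrt step fails: Err(NegativeSquareRoot).
    println!("{:?}", op_checked(1.0, 10.0));
    // Division by zero is reported instead of panicking: Err(DivisionByZero).
    println!("{:?}", op_checked(1.0, 0.0));
}
```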
test_result.rs
|
mod checked {
#[derive(Debug)]
pub enum MathError {
DivisionByZero,
NonPositiveLogarithm,
NegativeSquareRoot,
}
pub type MathResult = Result<f64, MathError>;
pub fn div(x: f64, y: f64) -> MathResult
|
pub fn sqrt(x: f64) -> MathResult {
if x < 0.0 {
Err(MathError::NegativeSquareRoot)
} else {
Ok(x.sqrt())
}
}
pub fn ln(x: f64) -> MathResult {
if x <= 0.0 {
Err(MathError::NonPositiveLogarithm)
} else {
Ok(x.ln())
}
}
}
fn op(x: f64, y: f64) -> f64 {
match checked::div(x, y) {
Err(why) => panic!("{:?}", why),
Ok(ratio) => match checked::ln(ratio) {
Err(why) => panic!("{:?}", why),
Ok(ln) => match checked::sqrt(ln) {
Err(why) => panic!("{:?}", why),
Ok(sqrt) => sqrt,
},
},
}
}
fn main() {
println!("{}", op(1.0, 10.0));
}
|
{
if y == 0.0 {
Err(MathError::DivisionByZero)
} else {
Ok(x / y)
}
}
|
identifier_body
|
test_result.rs
|
mod checked {
#[derive(Debug)]
pub enum MathError {
DivisionByZero,
NonPositiveLogarithm,
NegativeSquareRoot,
}
pub type MathResult = Result<f64, MathError>;
pub fn div(x: f64, y: f64) -> MathResult {
if y == 0.0 {
Err(MathError::DivisionByZero)
} else {
Ok(x / y)
}
}
pub fn sqrt(x: f64) -> MathResult {
if x < 0.0 {
Err(MathError::NegativeSquareRoot)
} else {
Ok(x.sqrt())
}
}
pub fn ln(x: f64) -> MathResult {
if x <= 0.0 {
Err(MathError::NonPositiveLogarithm)
} else {
Ok(x.ln())
}
}
}
fn op(x: f64, y: f64) -> f64 {
match checked::div(x, y) {
Err(why) => panic!("{:?}", why),
Ok(ratio) => match checked::ln(ratio) {
Err(why) => panic!("{:?}", why),
Ok(ln) => match checked::sqrt(ln) {
Err(why) => panic!("{:?}", why),
Ok(sqrt) => sqrt,
},
},
}
}
fn
|
() {
println!("{}", op(1.0, 10.0));
}
|
main
|
identifier_name
|
client.rs
|
use hyper::{body::Body, Client};
use opentelemetry::{
global,
sdk::export::trace::stdout,
sdk::trace as sdktrace,
trace::{TraceContextExt, Tracer},
Context, KeyValue,
};
use opentelemetry_aws::XrayPropagator;
use opentelemetry_http::HeaderInjector;
fn init_tracer() -> sdktrace::Tracer
|
#[tokio::main]
async fn main() -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
let _tracer = init_tracer();
let client = Client::new();
let span = global::tracer("example/client").start("say hello");
let cx = Context::current_with_span(span);
let mut req = hyper::Request::builder().uri("http://127.0.0.1:3000");
global::get_text_map_propagator(|propagator| {
propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap()));
println!("Headers: {:?}", req.headers_ref());
});
let res = client.request(req.body(Body::from("Hallo!"))?).await?;
cx.span().add_event(
"Got response!".to_string(),
vec![KeyValue::new("status", res.status().to_string())],
);
Ok(())
}
|
{
global::set_text_map_propagator(XrayPropagator::new());
// Install stdout exporter pipeline to be able to retrieve the collected spans.
// For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. In a production
// application, use `Sampler::ParentBased` or `Sampler::TraceIdRatioBased` with a desired ratio.
stdout::new_pipeline()
.with_trace_config(
sdktrace::config()
.with_sampler(sdktrace::Sampler::AlwaysOn)
.with_id_generator(sdktrace::XrayIdGenerator::default()),
)
.install_simple()
}
|
identifier_body
|
client.rs
|
use hyper::{body::Body, Client};
use opentelemetry::{
global,
sdk::export::trace::stdout,
sdk::trace as sdktrace,
trace::{TraceContextExt, Tracer},
Context, KeyValue,
};
use opentelemetry_aws::XrayPropagator;
use opentelemetry_http::HeaderInjector;
fn
|
() -> sdktrace::Tracer {
global::set_text_map_propagator(XrayPropagator::new());
// Install stdout exporter pipeline to be able to retrieve the collected spans.
// For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. In a production
// application, use `Sampler::ParentBased` or `Sampler::TraceIdRatioBased` with a desired ratio.
stdout::new_pipeline()
.with_trace_config(
sdktrace::config()
.with_sampler(sdktrace::Sampler::AlwaysOn)
.with_id_generator(sdktrace::XrayIdGenerator::default()),
)
.install_simple()
}
#[tokio::main]
async fn main() -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
let _tracer = init_tracer();
let client = Client::new();
let span = global::tracer("example/client").start("say hello");
let cx = Context::current_with_span(span);
let mut req = hyper::Request::builder().uri("http://127.0.0.1:3000");
global::get_text_map_propagator(|propagator| {
propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap()));
println!("Headers: {:?}", req.headers_ref());
});
let res = client.request(req.body(Body::from("Hallo!"))?).await?;
cx.span().add_event(
"Got response!".to_string(),
vec![KeyValue::new("status", res.status().to_string())],
);
Ok(())
}
|
init_tracer
|
identifier_name
|
client.rs
|
use hyper::{body::Body, Client};
use opentelemetry::{
global,
sdk::export::trace::stdout,
sdk::trace as sdktrace,
|
trace::{TraceContextExt, Tracer},
Context, KeyValue,
};
use opentelemetry_aws::XrayPropagator;
use opentelemetry_http::HeaderInjector;
fn init_tracer() -> sdktrace::Tracer {
global::set_text_map_propagator(XrayPropagator::new());
// Install stdout exporter pipeline to be able to retrieve the collected spans.
// For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. In a production
// application, use `Sampler::ParentBased` or `Sampler::TraceIdRatioBased` with a desired ratio.
stdout::new_pipeline()
.with_trace_config(
sdktrace::config()
.with_sampler(sdktrace::Sampler::AlwaysOn)
.with_id_generator(sdktrace::XrayIdGenerator::default()),
)
.install_simple()
}
#[tokio::main]
async fn main() -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
let _tracer = init_tracer();
let client = Client::new();
let span = global::tracer("example/client").start("say hello");
let cx = Context::current_with_span(span);
let mut req = hyper::Request::builder().uri("http://127.0.0.1:3000");
global::get_text_map_propagator(|propagator| {
propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap()));
println!("Headers: {:?}", req.headers_ref());
});
let res = client.request(req.body(Body::from("Hallo!"))?).await?;
cx.span().add_event(
"Got response!".to_string(),
vec![KeyValue::new("status", res.status().to_string())],
);
Ok(())
}
|
random_line_split
|
|
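In the client above, the propagator's job is to write the current trace context into the outgoing request headers. A conceptual sketch of that idea using a plain map; the function, the header name, and the id values are only illustrative of the AWS X-Ray tracing-header shape, not the opentelemetry-aws API:

```rust
use std::collections::HashMap;

// Conceptual sketch only: a text-map propagator serializes the current trace
// context into carrier headers, which is what `inject_context` plus
// `HeaderInjector` do for the hyper request above. Names and values here are
// illustrative assumptions.
fn inject(carrier: &mut HashMap<String, String>, root: &str, parent: &str, sampled: bool) {
    let value = format!(
        "Root={};Parent={};Sampled={}",
        root,
        parent,
        if sampled { 1 } else { 0 }
    );
    carrier.insert("X-Amzn-Trace-Id".to_string(), value);
}

fn main() {
    let mut headers = HashMap::new();
    inject(
        &mut headers,
        "1-5759e988-bd862e3fe1be46a994272793",
        "53995c3f42cd8ad8",
        true,
    );
    println!("{:?}", headers);
}
```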
builder.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Helper module to build up a selector safely and efficiently.
//!
//! Our selector representation is designed to optimize matching, and has
//! several requirements:
//! * All simple selectors and combinators are stored inline in the same buffer
//! as Component instances.
//! * We store the top-level compound selectors from right to left, i.e. in
//! matching order.
//! * We store the simple selectors for each combinator from left to right, so
//! that we match the cheaper simple selectors first.
//!
//! Meeting all these constraints without extra memmove traffic during parsing
//! is non-trivial. This module encapsulates those details and presents an
//! easy-to-use API for the parser.
use crate::parser::{Combinator, Component, SelectorImpl};
use crate::sink::Push;
use servo_arc::{Arc, HeaderWithLength, ThinArc};
use smallvec::{self, SmallVec};
use std::cmp;
use std::iter;
use std::ptr;
use std::slice;
/// Top-level SelectorBuilder struct. This should be stack-allocated by the
/// consumer and never moved (because it contains a lot of inline data that
/// would be slow to memmove).
///
/// After instantiation, callers may call the push_simple_selector() and
/// push_combinator() methods to append selector data as it is encountered
/// (from left to right). Once the process is complete, callers should invoke
/// build(), which transforms the contents of the SelectorBuilder into a heap-
/// allocated Selector and leaves the builder in a drained state.
#[derive(Debug)]
pub struct SelectorBuilder<Impl: SelectorImpl> {
/// The entire sequence of simple selectors, from left to right, without combinators.
///
/// We make this large because the result of parsing a selector is fed into a new
/// Arc-ed allocation, so any spilled vec would be a wasted allocation. Also,
/// Components are large enough that we don't have much cache locality benefit
/// from reserving stack space for fewer of them.
simple_selectors: SmallVec<[Component<Impl>; 32]>,
/// The combinators, and the length of the compound selector to their left.
combinators: SmallVec<[(Combinator, usize); 16]>,
/// The length of the current compound selector.
current_len: usize,
}
impl<Impl: SelectorImpl> Default for SelectorBuilder<Impl> {
#[inline(always)]
fn default() -> Self {
SelectorBuilder {
simple_selectors: SmallVec::new(),
combinators: SmallVec::new(),
current_len: 0,
}
}
}
impl<Impl: SelectorImpl> Push<Component<Impl>> for SelectorBuilder<Impl> {
fn push(&mut self, value: Component<Impl>) {
self.push_simple_selector(value);
}
}
impl<Impl: SelectorImpl> SelectorBuilder<Impl> {
/// Pushes a simple selector onto the current compound selector.
#[inline(always)]
pub fn push_simple_selector(&mut self, ss: Component<Impl>) {
assert!(!ss.is_combinator());
self.simple_selectors.push(ss);
self.current_len += 1;
}
/// Completes the current compound selector and starts a new one, delimited
/// by the given combinator.
#[inline(always)]
pub fn push_combinator(&mut self, c: Combinator) {
self.combinators.push((c, self.current_len));
self.current_len = 0;
}
/// Returns true if combinators have ever been pushed to this builder.
#[inline(always)]
pub fn has_combinators(&self) -> bool {
!self.combinators.is_empty()
}
/// Consumes the builder, producing a Selector.
#[inline(always)]
pub fn build(
&mut self,
parsed_pseudo: bool,
parsed_slotted: bool,
parsed_part: bool,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// Compute the specificity and flags.
let specificity = specificity(self.simple_selectors.iter());
let mut flags = SelectorFlags::empty();
if parsed_pseudo {
flags |= SelectorFlags::HAS_PSEUDO;
}
if parsed_slotted {
flags |= SelectorFlags::HAS_SLOTTED;
}
if parsed_part {
flags |= SelectorFlags::HAS_PART;
}
self.build_with_specificity_and_flags(SpecificityAndFlags { specificity, flags })
}
/// Builds with an explicit SpecificityAndFlags. This is separated from build() so
/// that unit tests can pass an explicit specificity.
#[inline(always)]
pub fn build_with_specificity_and_flags(
&mut self,
spec: SpecificityAndFlags,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// First, compute the total number of Components we'll need to allocate
// space for.
let full_len = self.simple_selectors.len() + self.combinators.len();
// Create the header.
let header = HeaderWithLength::new(spec, full_len);
// Create the Arc using an iterator that drains our buffers.
// Use a raw pointer to be able to call set_len despite "borrowing" the slice.
// This is similar to SmallVec::drain, but we use a slice here because
// we’re gonna traverse it non-linearly.
let raw_simple_selectors: *const [Component<Impl>] = &*self.simple_selectors;
unsafe {
// Panic-safety: if SelectorBuilderIter is not iterated to the end,
// some simple selectors will safely leak.
self.simple_selectors.set_len(0)
}
let (rest, current) = split_from_end(unsafe { &*raw_simple_selectors }, self.current_len);
let iter = SelectorBuilderIter {
current_simple_selectors: current.iter(),
rest_of_simple_selectors: rest,
combinators: self.combinators.drain(..).rev(),
};
Arc::into_thin(Arc::from_header_and_iter(header, iter))
}
}
struct SelectorBuilderIter<'a, Impl: SelectorImpl> {
current_simple_selectors: slice::Iter<'a, Component<Impl>>,
rest_of_simple_selectors: &'a [Component<Impl>],
combinators: iter::Rev<smallvec::Drain<'a, [(Combinator, usize); 16]>>,
}
impl<'a, Impl: SelectorImpl> ExactSizeIterator for SelectorBuilderIter<'a, Impl> {
fn len(&self) -> usize {
self.current_simple_selectors.len() +
self.rest_of_simple_selectors.len() +
self.combinators.len()
}
}
impl<'a, Impl: SelectorImpl> Iterator for SelectorBuilderIter<'a, Impl> {
type Item = Component<Impl>;
#[inline(always)]
fn next(&mut self) -> Option<Self::Item> {
if let Some(simple_selector_ref) = self.current_simple_selectors.next() {
// Move a simple selector out of this slice iterator.
// This is safe because we’ve called SmallVec::set_len(0) above,
// so SmallVec::drop won’t drop this simple selector.
unsafe { Some(ptr::read(simple_selector_ref)) }
} else {
self.combinators.next().map(|(combinator, len)| {
let (rest, current) = split_from_end(self.rest_of_simple_selectors, len);
self.rest_of_simple_selectors = rest;
self.current_simple_selectors = current.iter();
Component::Combinator(combinator)
})
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
fn split_from_end<T>(s: &[T], at: usize) -> (&[T], &[T]) {
s.split_at(s.len() - at)
}
bitflags! {
/// Flags that indicate at which point of parsing a selector we are.
#[derive(Default, ToShmem)]
pub (crate) struct SelectorFlags : u8 {
const HAS_PSEUDO = 1 << 0;
const HAS_SLOTTED = 1 << 1;
const HAS_PART = 1 << 2;
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ToShmem)]
pub struct SpecificityAndFlags {
/// There are two free bits here, since we use ten bits for each specificity
/// kind (id, class, element).
pub(crate) specificity: u32,
/// There's padding after this field due to the size of the flags.
pub(crate) flags: SelectorFlags,
}
impl SpecificityAndFlags {
#[inline]
pub fn specificity(&self) -> u32 {
self.specificity
}
#[inline]
pub fn has_pseudo_element(&self) -> bool {
self.flags.intersects(SelectorFlags::HAS_PSEUDO)
}
#[inline]
pub fn is_slotted(&self) -> bool {
self.flags.intersects(SelectorFlags::HAS_SLOTTED)
}
#[inline]
pub fn is_part(&self) -> bool {
self.flags.intersects(SelectorFlags::HAS_PART)
}
}
const MAX_10BIT: u32 = (1u32 << 10) - 1;
#[derive(Add, AddAssign, Clone, Copy, Default, Eq, Ord, PartialEq, PartialOrd)]
struct Specificity {
id_selectors: u32,
class_like_selectors: u32,
element_selectors: u32,
}
impl From<u32> for Specificity {
#[inline]
fn from(value: u32) -> Specificity {
assert!(value <= MAX_10BIT << 20 | MAX_10BIT << 10 | MAX_10BIT);
Specificity {
id_selectors: value >> 20,
class_like_selectors: (value >> 10) & MAX_10BIT,
element_selectors: value & MAX_10BIT,
}
}
}
impl From<Specificity> for u32 {
#[inline]
fn from(specificity: Specificity) -> u32 {
cmp::min(specificity.id_selectors, MAX_10BIT) << 20 |
cmp::min(specificity.class_like_selectors, MAX_10BIT) << 10 |
cmp::min(specificity.element_selectors, MAX_10BIT)
}
}
fn specificity<Impl>(iter: slice::Iter<Component<Impl>>) -> u32
where
Impl: SelectorImpl,
{
complex_selector_specificity(iter).into()
}
fn complex_selector_specificity<Impl>(iter: slice::Iter<Component<Impl>>) -> Specificity
where
Impl: SelectorImpl,
{
fn simple_selector_specificity<Impl>(
simple_selector: &Component<Impl>,
specificity: &mut Specificity,
) where
Impl: SelectorImpl,
{
match *simple_selector {
Component::Combinator(..) => {
unreachable!("Found combinator in simple selectors vector?");
},
Component::Part(..) | Component::PseudoElement(..) | Component::LocalName(..) => {
specificity.element_selectors += 1
},
Component::Slotted(ref selector) => {
specificity.element_selectors += 1;
// Note that due to the way ::slotted works we only compete with
// other ::slotted rules, so the above rule doesn't really
// matter, but we do it still for consistency with other
// pseudo-elements.
//
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
},
Component::Host(ref selector) => {
specificity.class_like_selectors += 1;
if let Some(ref selector) = *selector {
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
}
},
Component::ID(..) => {
|
Component::Class(..) |
Component::AttributeInNoNamespace {.. } |
Component::AttributeInNoNamespaceExists {.. } |
Component::AttributeOther(..) |
Component::FirstChild |
Component::LastChild |
Component::OnlyChild |
Component::Root |
Component::Empty |
Component::Scope |
Component::NthChild(..) |
Component::NthLastChild(..) |
Component::NthOfType(..) |
Component::NthLastOfType(..) |
Component::FirstOfType |
Component::LastOfType |
Component::OnlyOfType |
Component::NonTSPseudoClass(..) => {
specificity.class_like_selectors += 1;
},
Component::Negation(ref list) | Component::Is(ref list) => {
// https://drafts.csswg.org/selectors/#specificity-rules:
//
// The specificity of an :is() pseudo-class is replaced by the
// specificity of the most specific complex selector in its
// selector list argument.
let mut max = 0;
for selector in &**list {
max = std::cmp::max(selector.specificity(), max);
}
*specificity += Specificity::from(max);
},
Component::Where(..) |
Component::ExplicitUniversalType |
Component::ExplicitAnyNamespace |
Component::ExplicitNoNamespace |
Component::DefaultNamespace(..) |
Component::Namespace(..) => {
// Does not affect specificity
},
}
}
let mut specificity = Default::default();
for simple_selector in iter {
simple_selector_specificity(&simple_selector, &mut specificity);
}
specificity
}
|
specificity.id_selectors += 1;
},
|
conditional_block
|
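The two From impls in builder.rs pack the three specificity counters into a single u32, ten bits per field, saturating each field at 1023. A standalone sketch of that packing scheme (`pack` and `unpack` are illustrative names, not part of builder.rs):

```rust
// Standalone sketch of the 10-bit packing used by the `From` conversions
// between `Specificity` and `u32` above.
const MAX_10BIT: u32 = (1u32 << 10) - 1;

fn pack(id: u32, class_like: u32, element: u32) -> u32 {
    id.min(MAX_10BIT) << 20 | class_like.min(MAX_10BIT) << 10 | element.min(MAX_10BIT)
}

fn unpack(v: u32) -> (u32, u32, u32) {
    (v >> 20, (v >> 10) & MAX_10BIT, v & MAX_10BIT)
}

fn main() {
    assert_eq!(unpack(pack(1, 2, 3)), (1, 2, 3));
    // Each field saturates at 1023, matching the cmp::min calls above.
    assert_eq!(unpack(pack(5000, 0, 0)).0, MAX_10BIT);
    println!("ok");
}
```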
builder.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Helper module to build up a selector safely and efficiently.
//!
//! Our selector representation is designed to optimize matching, and has
//! several requirements:
//! * All simple selectors and combinators are stored inline in the same buffer
//! as Component instances.
//! * We store the top-level compound selectors from right to left, i.e. in
//! matching order.
//! * We store the simple selectors for each combinator from left to right, so
//! that we match the cheaper simple selectors first.
//!
//! Meeting all these constraints without extra memmove traffic during parsing
//! is non-trivial. This module encapsulates those details and presents an
//! easy-to-use API for the parser.
use crate::parser::{Combinator, Component, SelectorImpl};
use crate::sink::Push;
use servo_arc::{Arc, HeaderWithLength, ThinArc};
use smallvec::{self, SmallVec};
use std::cmp;
use std::iter;
use std::ptr;
use std::slice;
/// Top-level SelectorBuilder struct. This should be stack-allocated by the
/// consumer and never moved (because it contains a lot of inline data that
/// would be slow to memmove).
///
/// After instantiation, callers may call the push_simple_selector() and
/// push_combinator() methods to append selector data as it is encountered
/// (from left to right). Once the process is complete, callers should invoke
/// build(), which transforms the contents of the SelectorBuilder into a heap-
/// allocated Selector and leaves the builder in a drained state.
#[derive(Debug)]
pub struct SelectorBuilder<Impl: SelectorImpl> {
/// The entire sequence of simple selectors, from left to right, without combinators.
///
/// We make this large because the result of parsing a selector is fed into a new
/// Arc-ed allocation, so any spilled vec would be a wasted allocation. Also,
/// Components are large enough that we don't have much cache locality benefit
/// from reserving stack space for fewer of them.
simple_selectors: SmallVec<[Component<Impl>; 32]>,
/// The combinators, and the length of the compound selector to their left.
combinators: SmallVec<[(Combinator, usize); 16]>,
/// The length of the current compound selector.
current_len: usize,
}
impl<Impl: SelectorImpl> Default for SelectorBuilder<Impl> {
#[inline(always)]
fn default() -> Self {
SelectorBuilder {
simple_selectors: SmallVec::new(),
combinators: SmallVec::new(),
current_len: 0,
}
}
}
impl<Impl: SelectorImpl> Push<Component<Impl>> for SelectorBuilder<Impl> {
fn push(&mut self, value: Component<Impl>) {
self.push_simple_selector(value);
}
}
impl<Impl: SelectorImpl> SelectorBuilder<Impl> {
/// Pushes a simple selector onto the current compound selector.
#[inline(always)]
pub fn push_simple_selector(&mut self, ss: Component<Impl>) {
assert!(!ss.is_combinator());
self.simple_selectors.push(ss);
self.current_len += 1;
}
/// Completes the current compound selector and starts a new one, delimited
/// by the given combinator.
#[inline(always)]
pub fn push_combinator(&mut self, c: Combinator) {
self.combinators.push((c, self.current_len));
self.current_len = 0;
}
/// Returns true if combinators have ever been pushed to this builder.
#[inline(always)]
pub fn has_combinators(&self) -> bool {
!self.combinators.is_empty()
}
/// Consumes the builder, producing a Selector.
#[inline(always)]
pub fn build(
&mut self,
parsed_pseudo: bool,
parsed_slotted: bool,
parsed_part: bool,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// Compute the specificity and flags.
let specificity = specificity(self.simple_selectors.iter());
let mut flags = SelectorFlags::empty();
if parsed_pseudo {
flags |= SelectorFlags::HAS_PSEUDO;
}
if parsed_slotted {
flags |= SelectorFlags::HAS_SLOTTED;
}
if parsed_part {
flags |= SelectorFlags::HAS_PART;
}
self.build_with_specificity_and_flags(SpecificityAndFlags { specificity, flags })
}
/// Builds with an explicit SpecificityAndFlags. This is separated from build() so
/// that unit tests can pass an explicit specificity.
#[inline(always)]
pub fn build_with_specificity_and_flags(
&mut self,
spec: SpecificityAndFlags,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// First, compute the total number of Components we'll need to allocate
// space for.
let full_len = self.simple_selectors.len() + self.combinators.len();
// Create the header.
let header = HeaderWithLength::new(spec, full_len);
// Create the Arc using an iterator that drains our buffers.
// Use a raw pointer to be able to call set_len despite "borrowing" the slice.
// This is similar to SmallVec::drain, but we use a slice here because
// we’re gonna traverse it non-linearly.
let raw_simple_selectors: *const [Component<Impl>] = &*self.simple_selectors;
unsafe {
// Panic-safety: if SelectorBuilderIter is not iterated to the end,
// some simple selectors will safely leak.
self.simple_selectors.set_len(0)
}
let (rest, current) = split_from_end(unsafe { &*raw_simple_selectors }, self.current_len);
let iter = SelectorBuilderIter {
current_simple_selectors: current.iter(),
rest_of_simple_selectors: rest,
combinators: self.combinators.drain(..).rev(),
};
Arc::into_thin(Arc::from_header_and_iter(header, iter))
}
}
struct SelectorBuilderIter<'a, Impl: SelectorImpl> {
current_simple_selectors: slice::Iter<'a, Component<Impl>>,
rest_of_simple_selectors: &'a [Component<Impl>],
combinators: iter::Rev<smallvec::Drain<'a, [(Combinator, usize); 16]>>,
}
impl<'a, Impl: SelectorImpl> ExactSizeIterator for SelectorBuilderIter<'a, Impl> {
fn len(&self) -> usize {
self.current_simple_selectors.len() +
self.rest_of_simple_selectors.len() +
self.combinators.len()
}
}
impl<'a, Impl: SelectorImpl> Iterator for SelectorBuilderIter<'a, Impl> {
type Item = Component<Impl>;
#[inline(always)]
fn next(&mut self) -> Option<Self::Item> {
if let Some(simple_selector_ref) = self.current_simple_selectors.next() {
// Move a simple selector out of this slice iterator.
// This is safe because we’ve called SmallVec::set_len(0) above,
// so SmallVec::drop won’t drop this simple selector.
unsafe { Some(ptr::read(simple_selector_ref)) }
} else {
self.combinators.next().map(|(combinator, len)| {
let (rest, current) = split_from_end(self.rest_of_simple_selectors, len);
self.rest_of_simple_selectors = rest;
self.current_simple_selectors = current.iter();
Component::Combinator(combinator)
})
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
fn split_from_end<T>(s: &[T], at: usize) -> (&[T], &[T]) {
s.split_at(s.len() - at)
}
bitflags! {
/// Flags that indicate at which point of parsing a selector we are.
#[derive(Default, ToShmem)]
pub (crate) struct SelectorFlags : u8 {
const HAS_PSEUDO = 1 << 0;
const HAS_SLOTTED = 1 << 1;
const HAS_PART = 1 << 2;
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ToShmem)]
pub struct SpecificityAndFlags {
/// There are two free bits here, since we use ten bits for each specificity
/// kind (id, class, element).
pub(crate) specificity: u32,
/// There's padding after this field due to the size of the flags.
pub(crate) flags: SelectorFlags,
}
impl SpecificityAndFlags {
#[inline]
pub fn specificity(&self) -> u32 {
self.specificity
}
#[inline]
pub fn has_pseudo_element(&self) -> bool {
self.flags.intersects(SelectorFlags::HAS_PSEUDO)
}
#[inline]
pub fn is_slotted(&self) -> bool {
self.flags.intersects(SelectorFlags::HAS_SLOTTED)
}
#[inline]
pub fn is_part(&self) -> bool {
self.flags.intersects(SelectorFlags::HAS_PART)
}
}
const MAX_10BIT: u32 = (1u32 << 10) - 1;
#[derive(Add, AddAssign, Clone, Copy, Default, Eq, Ord, PartialEq, PartialOrd)]
struct Specificity {
id_selectors: u32,
class_like_selectors: u32,
element_selectors: u32,
}
impl From<u32> for Specificity {
#[inline]
fn from(value: u32) -> Specificity {
assert!(value <= MAX_10BIT << 20 | MAX_10BIT << 10 | MAX_10BIT);
Specificity {
id_selectors: value >> 20,
class_like_selectors: (value >> 10) & MAX_10BIT,
element_selectors: value & MAX_10BIT,
}
}
}
impl From<Specificity> for u32 {
#[inline]
fn from(specificity: Specificity) -> u32 {
cmp::min(specificity.id_selectors, MAX_10BIT) << 20 |
cmp::min(specificity.class_like_selectors, MAX_10BIT) << 10 |
cmp::min(specificity.element_selectors, MAX_10BIT)
}
}
fn specificity<Impl>(iter: slice::Iter<Component<Impl>>) -> u32
where
Impl: SelectorImpl,
{
complex_selector_specificity(iter).into()
}
fn comple
|
(iter: slice::Iter<Component<Impl>>) -> Specificity
where
Impl: SelectorImpl,
{
fn simple_selector_specificity<Impl>(
simple_selector: &Component<Impl>,
specificity: &mut Specificity,
) where
Impl: SelectorImpl,
{
match *simple_selector {
Component::Combinator(..) => {
unreachable!("Found combinator in simple selectors vector?");
},
Component::Part(..) | Component::PseudoElement(..) | Component::LocalName(..) => {
specificity.element_selectors += 1
},
Component::Slotted(ref selector) => {
specificity.element_selectors += 1;
// Note that due to the way ::slotted works we only compete with
// other ::slotted rules, so the above rule doesn't really
// matter, but we do it still for consistency with other
// pseudo-elements.
//
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
},
Component::Host(ref selector) => {
specificity.class_like_selectors += 1;
if let Some(ref selector) = *selector {
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
}
},
Component::ID(..) => {
specificity.id_selectors += 1;
},
Component::Class(..) |
Component::AttributeInNoNamespace {.. } |
Component::AttributeInNoNamespaceExists {.. } |
Component::AttributeOther(..) |
Component::FirstChild |
Component::LastChild |
Component::OnlyChild |
Component::Root |
Component::Empty |
Component::Scope |
Component::NthChild(..) |
Component::NthLastChild(..) |
Component::NthOfType(..) |
Component::NthLastOfType(..) |
Component::FirstOfType |
Component::LastOfType |
Component::OnlyOfType |
Component::NonTSPseudoClass(..) => {
specificity.class_like_selectors += 1;
},
Component::Negation(ref list) | Component::Is(ref list) => {
// https://drafts.csswg.org/selectors/#specificity-rules:
//
// The specificity of an :is() pseudo-class is replaced by the
// specificity of the most specific complex selector in its
// selector list argument.
let mut max = 0;
for selector in &**list {
max = std::cmp::max(selector.specificity(), max);
}
*specificity += Specificity::from(max);
},
Component::Where(..) |
Component::ExplicitUniversalType |
Component::ExplicitAnyNamespace |
Component::ExplicitNoNamespace |
Component::DefaultNamespace(..) |
Component::Namespace(..) => {
// Does not affect specificity
},
}
}
let mut specificity = Default::default();
for simple_selector in iter {
simple_selector_specificity(&simple_selector, &mut specificity);
}
specificity
}
|
x_selector_specificity<Impl>
|
identifier_name
|
builder.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Helper module to build up a selector safely and efficiently.
//!
//! Our selector representation is designed to optimize matching, and has
//! several requirements:
//! * All simple selectors and combinators are stored inline in the same buffer
|
//! * We store the top-level compound selectors from right to left, i.e. in
//! matching order.
//! * We store the simple selectors for each combinator from left to right, so
//! that we match the cheaper simple selectors first.
//!
//! Meeting all these constraints without extra memmove traffic during parsing
//! is non-trivial. This module encapsulates those details and presents an
//! easy-to-use API for the parser.
use crate::parser::{Combinator, Component, SelectorImpl};
use crate::sink::Push;
use servo_arc::{Arc, HeaderWithLength, ThinArc};
use smallvec::{self, SmallVec};
use std::cmp;
use std::iter;
use std::ptr;
use std::slice;
/// Top-level SelectorBuilder struct. This should be stack-allocated by the
/// consumer and never moved (because it contains a lot of inline data that
/// would be slow to memmove).
///
/// After instantiation, callers may call the push_simple_selector() and
/// push_combinator() methods to append selector data as it is encountered
/// (from left to right). Once the process is complete, callers should invoke
/// build(), which transforms the contents of the SelectorBuilder into a heap-
/// allocated Selector and leaves the builder in a drained state.
#[derive(Debug)]
pub struct SelectorBuilder<Impl: SelectorImpl> {
/// The entire sequence of simple selectors, from left to right, without combinators.
///
/// We make this large because the result of parsing a selector is fed into a new
/// Arc-ed allocation, so any spilled vec would be a wasted allocation. Also,
/// Components are large enough that we don't have much cache locality benefit
/// from reserving stack space for fewer of them.
simple_selectors: SmallVec<[Component<Impl>; 32]>,
/// The combinators, and the length of the compound selector to their left.
combinators: SmallVec<[(Combinator, usize); 16]>,
/// The length of the current compound selector.
current_len: usize,
}
impl<Impl: SelectorImpl> Default for SelectorBuilder<Impl> {
#[inline(always)]
fn default() -> Self {
SelectorBuilder {
simple_selectors: SmallVec::new(),
combinators: SmallVec::new(),
current_len: 0,
}
}
}
impl<Impl: SelectorImpl> Push<Component<Impl>> for SelectorBuilder<Impl> {
fn push(&mut self, value: Component<Impl>) {
self.push_simple_selector(value);
}
}
impl<Impl: SelectorImpl> SelectorBuilder<Impl> {
/// Pushes a simple selector onto the current compound selector.
#[inline(always)]
pub fn push_simple_selector(&mut self, ss: Component<Impl>) {
assert!(!ss.is_combinator());
self.simple_selectors.push(ss);
self.current_len += 1;
}
/// Completes the current compound selector and starts a new one, delimited
/// by the given combinator.
#[inline(always)]
pub fn push_combinator(&mut self, c: Combinator) {
self.combinators.push((c, self.current_len));
self.current_len = 0;
}
/// Returns true if combinators have ever been pushed to this builder.
#[inline(always)]
pub fn has_combinators(&self) -> bool {
!self.combinators.is_empty()
}
/// Consumes the builder, producing a Selector.
#[inline(always)]
pub fn build(
&mut self,
parsed_pseudo: bool,
parsed_slotted: bool,
parsed_part: bool,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// Compute the specificity and flags.
let specificity = specificity(self.simple_selectors.iter());
let mut flags = SelectorFlags::empty();
if parsed_pseudo {
flags |= SelectorFlags::HAS_PSEUDO;
}
if parsed_slotted {
flags |= SelectorFlags::HAS_SLOTTED;
}
if parsed_part {
flags |= SelectorFlags::HAS_PART;
}
self.build_with_specificity_and_flags(SpecificityAndFlags { specificity, flags })
}
/// Builds with an explicit SpecificityAndFlags. This is separated from build() so
/// that unit tests can pass an explicit specificity.
#[inline(always)]
pub fn build_with_specificity_and_flags(
&mut self,
spec: SpecificityAndFlags,
) -> ThinArc<SpecificityAndFlags, Component<Impl>> {
// First, compute the total number of Components we'll need to allocate
// space for.
let full_len = self.simple_selectors.len() + self.combinators.len();
// Create the header.
let header = HeaderWithLength::new(spec, full_len);
// Create the Arc using an iterator that drains our buffers.
// Use a raw pointer to be able to call set_len despite "borrowing" the slice.
// This is similar to SmallVec::drain, but we use a slice here because
// we’re gonna traverse it non-linearly.
let raw_simple_selectors: *const [Component<Impl>] = &*self.simple_selectors;
unsafe {
// Panic-safety: if SelectorBuilderIter is not iterated to the end,
// some simple selectors will safely leak.
self.simple_selectors.set_len(0)
}
let (rest, current) = split_from_end(unsafe { &*raw_simple_selectors }, self.current_len);
let iter = SelectorBuilderIter {
current_simple_selectors: current.iter(),
rest_of_simple_selectors: rest,
combinators: self.combinators.drain(..).rev(),
};
Arc::into_thin(Arc::from_header_and_iter(header, iter))
}
}
struct SelectorBuilderIter<'a, Impl: SelectorImpl> {
current_simple_selectors: slice::Iter<'a, Component<Impl>>,
rest_of_simple_selectors: &'a [Component<Impl>],
combinators: iter::Rev<smallvec::Drain<'a, [(Combinator, usize); 16]>>,
}
impl<'a, Impl: SelectorImpl> ExactSizeIterator for SelectorBuilderIter<'a, Impl> {
fn len(&self) -> usize {
self.current_simple_selectors.len() +
self.rest_of_simple_selectors.len() +
self.combinators.len()
}
}
impl<'a, Impl: SelectorImpl> Iterator for SelectorBuilderIter<'a, Impl> {
type Item = Component<Impl>;
#[inline(always)]
fn next(&mut self) -> Option<Self::Item> {
if let Some(simple_selector_ref) = self.current_simple_selectors.next() {
// Move a simple selector out of this slice iterator.
// This is safe because we’ve called SmallVec::set_len(0) above,
// so SmallVec::drop won’t drop this simple selector.
unsafe { Some(ptr::read(simple_selector_ref)) }
} else {
self.combinators.next().map(|(combinator, len)| {
let (rest, current) = split_from_end(self.rest_of_simple_selectors, len);
self.rest_of_simple_selectors = rest;
self.current_simple_selectors = current.iter();
Component::Combinator(combinator)
})
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
fn split_from_end<T>(s: &[T], at: usize) -> (&[T], &[T]) {
s.split_at(s.len() - at)
}
bitflags! {
/// Flags that indicate at which point of parsing a selector we are.
#[derive(Default, ToShmem)]
pub (crate) struct SelectorFlags : u8 {
const HAS_PSEUDO = 1 << 0;
const HAS_SLOTTED = 1 << 1;
const HAS_PART = 1 << 2;
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ToShmem)]
pub struct SpecificityAndFlags {
/// There are two free bits here, since we use ten bits for each specificity
/// kind (id, class, element).
pub(crate) specificity: u32,
/// There's padding after this field due to the size of the flags.
pub(crate) flags: SelectorFlags,
}
impl SpecificityAndFlags {
#[inline]
pub fn specificity(&self) -> u32 {
self.specificity
}
#[inline]
pub fn has_pseudo_element(&self) -> bool {
self.flags.intersects(SelectorFlags::HAS_PSEUDO)
}
#[inline]
pub fn is_slotted(&self) -> bool {
self.flags.intersects(SelectorFlags::HAS_SLOTTED)
}
#[inline]
pub fn is_part(&self) -> bool {
self.flags.intersects(SelectorFlags::HAS_PART)
}
}
const MAX_10BIT: u32 = (1u32 << 10) - 1;
#[derive(Add, AddAssign, Clone, Copy, Default, Eq, Ord, PartialEq, PartialOrd)]
struct Specificity {
id_selectors: u32,
class_like_selectors: u32,
element_selectors: u32,
}
impl From<u32> for Specificity {
#[inline]
fn from(value: u32) -> Specificity {
assert!(value <= MAX_10BIT << 20 | MAX_10BIT << 10 | MAX_10BIT);
Specificity {
id_selectors: value >> 20,
class_like_selectors: (value >> 10) & MAX_10BIT,
element_selectors: value & MAX_10BIT,
}
}
}
impl From<Specificity> for u32 {
#[inline]
fn from(specificity: Specificity) -> u32 {
cmp::min(specificity.id_selectors, MAX_10BIT) << 20 |
cmp::min(specificity.class_like_selectors, MAX_10BIT) << 10 |
cmp::min(specificity.element_selectors, MAX_10BIT)
}
}
fn specificity<Impl>(iter: slice::Iter<Component<Impl>>) -> u32
where
Impl: SelectorImpl,
{
complex_selector_specificity(iter).into()
}
fn complex_selector_specificity<Impl>(iter: slice::Iter<Component<Impl>>) -> Specificity
where
Impl: SelectorImpl,
{
fn simple_selector_specificity<Impl>(
simple_selector: &Component<Impl>,
specificity: &mut Specificity,
) where
Impl: SelectorImpl,
{
match *simple_selector {
Component::Combinator(..) => {
unreachable!("Found combinator in simple selectors vector?");
},
Component::Part(..) | Component::PseudoElement(..) | Component::LocalName(..) => {
specificity.element_selectors += 1
},
Component::Slotted(ref selector) => {
specificity.element_selectors += 1;
// Note that due to the way ::slotted works we only compete with
// other ::slotted rules, so the above rule doesn't really
// matter, but we do it still for consistency with other
// pseudo-elements.
//
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
},
Component::Host(ref selector) => {
specificity.class_like_selectors += 1;
if let Some(ref selector) = *selector {
// See: https://github.com/w3c/csswg-drafts/issues/1915
*specificity += Specificity::from(selector.specificity());
}
},
Component::ID(..) => {
specificity.id_selectors += 1;
},
Component::Class(..) |
Component::AttributeInNoNamespace {.. } |
Component::AttributeInNoNamespaceExists {.. } |
Component::AttributeOther(..) |
Component::FirstChild |
Component::LastChild |
Component::OnlyChild |
Component::Root |
Component::Empty |
Component::Scope |
Component::NthChild(..) |
Component::NthLastChild(..) |
Component::NthOfType(..) |
Component::NthLastOfType(..) |
Component::FirstOfType |
Component::LastOfType |
Component::OnlyOfType |
Component::NonTSPseudoClass(..) => {
specificity.class_like_selectors += 1;
},
Component::Negation(ref list) | Component::Is(ref list) => {
// https://drafts.csswg.org/selectors/#specificity-rules:
//
// The specificity of an :is() pseudo-class is replaced by the
// specificity of the most specific complex selector in its
// selector list argument.
let mut max = 0;
for selector in &**list {
max = std::cmp::max(selector.specificity(), max);
}
*specificity += Specificity::from(max);
},
Component::Where(..) |
Component::ExplicitUniversalType |
Component::ExplicitAnyNamespace |
Component::ExplicitNoNamespace |
Component::DefaultNamespace(..) |
Component::Namespace(..) => {
// Does not affect specificity
},
}
}
let mut specificity = Default::default();
for simple_selector in iter {
simple_selector_specificity(&simple_selector, &mut specificity);
}
specificity
}
|
//! as Component instances.
|
random_line_split
|
xrwebglsubimage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::XRWebGLSubImageBinding::XRWebGLSubImageBinding::XRWebGLSubImageMethods;
use crate::dom::bindings::root::Dom;
use crate::dom::bindings::root::DomRoot;
use crate::dom::webgltexture::WebGLTexture;
use crate::dom::xrsubimage::XRSubImage;
use dom_struct::dom_struct;
use euclid::Size2D;
use webxr_api::Viewport;
#[dom_struct]
pub struct XRWebGLSubImage {
xr_sub_image: XRSubImage,
color_texture: Dom<WebGLTexture>,
depth_stencil_texture: Option<Dom<WebGLTexture>>,
image_index: Option<u32>,
size: Size2D<u32, Viewport>,
}
impl XRWebGLSubImageMethods for XRWebGLSubImage {
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-colortexture
fn ColorTexture(&self) -> DomRoot<WebGLTexture> {
DomRoot::from_ref(&self.color_texture)
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-depthstenciltexture
fn GetDepthStencilTexture(&self) -> Option<DomRoot<WebGLTexture>>
|
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-imageindex
fn GetImageIndex(&self) -> Option<u32> {
self.image_index
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-texturewidth
fn TextureWidth(&self) -> u32 {
self.size.width
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-textureheight
fn TextureHeight(&self) -> u32 {
self.size.height
}
}
|
{
self.depth_stencil_texture.as_deref().map(DomRoot::from_ref)
}
|
identifier_body
|
xrwebglsubimage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::XRWebGLSubImageBinding::XRWebGLSubImageBinding::XRWebGLSubImageMethods;
use crate::dom::bindings::root::Dom;
use crate::dom::bindings::root::DomRoot;
use crate::dom::webgltexture::WebGLTexture;
use crate::dom::xrsubimage::XRSubImage;
use dom_struct::dom_struct;
use euclid::Size2D;
use webxr_api::Viewport;
#[dom_struct]
pub struct XRWebGLSubImage {
xr_sub_image: XRSubImage,
color_texture: Dom<WebGLTexture>,
depth_stencil_texture: Option<Dom<WebGLTexture>>,
image_index: Option<u32>,
size: Size2D<u32, Viewport>,
}
impl XRWebGLSubImageMethods for XRWebGLSubImage {
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-colortexture
fn ColorTexture(&self) -> DomRoot<WebGLTexture> {
DomRoot::from_ref(&self.color_texture)
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-depthstenciltexture
fn GetDepthStencilTexture(&self) -> Option<DomRoot<WebGLTexture>> {
self.depth_stencil_texture.as_deref().map(DomRoot::from_ref)
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-imageindex
fn GetImageIndex(&self) -> Option<u32> {
self.image_index
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-texturewidth
fn
|
(&self) -> u32 {
self.size.width
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-textureheight
fn TextureHeight(&self) -> u32 {
self.size.height
}
}
|
TextureWidth
|
identifier_name
|
xrwebglsubimage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::XRWebGLSubImageBinding::XRWebGLSubImageBinding::XRWebGLSubImageMethods;
use crate::dom::bindings::root::Dom;
use crate::dom::bindings::root::DomRoot;
use crate::dom::webgltexture::WebGLTexture;
use crate::dom::xrsubimage::XRSubImage;
use dom_struct::dom_struct;
use euclid::Size2D;
use webxr_api::Viewport;
#[dom_struct]
pub struct XRWebGLSubImage {
xr_sub_image: XRSubImage,
color_texture: Dom<WebGLTexture>,
depth_stencil_texture: Option<Dom<WebGLTexture>>,
image_index: Option<u32>,
size: Size2D<u32, Viewport>,
}
impl XRWebGLSubImageMethods for XRWebGLSubImage {
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-colortexture
fn ColorTexture(&self) -> DomRoot<WebGLTexture> {
DomRoot::from_ref(&self.color_texture)
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-depthstenciltexture
fn GetDepthStencilTexture(&self) -> Option<DomRoot<WebGLTexture>> {
self.depth_stencil_texture.as_deref().map(DomRoot::from_ref)
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-imageindex
fn GetImageIndex(&self) -> Option<u32> {
self.image_index
}
|
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-texturewidth
fn TextureWidth(&self) -> u32 {
self.size.width
}
/// https://immersive-web.github.io/layers/#dom-xrwebglsubimage-textureheight
fn TextureHeight(&self) -> u32 {
self.size.height
}
}
|
random_line_split
|
|
font.rs
|
// Copyright (c) 2016-2018 Bruce Stenning. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
use std::collections::*;
use std::sync::*;
use std::boxed::Box;
use std::fmt;
use wyvern::graphics::renderer::*;
use wyvern::graphics::image::*;
use wyvern::graphics::texture::*;
use wyvern::graphics::shader::*;
use wyvern::graphics::resources::*;
use wyvern::algebra::vector::*;
use options::*;
pub struct Font {
pub shader: Box<Shader>,
pub image: Box<Image>,
pub texture: Box<Texture>,
pub char_coords: BTreeMap<char, (f32, f32, f32, f32)>,
pub char_width: u32,
pub char_height: u32,
pub scale: f32,
thread_data: ThreadData,
}
impl Font {
/// Load the shaders before construction, to allow the renderer to be finalised
///
/// resource_zip: The optional name of a zip archive to use instead of reading from the filesystem
/// renderer: The renderer object
/// resource_manager: The shader resources manager for obtaining shader details
/// filename: The filename of the font image to use
/// pad: true if the image should be padded with an alpha channel
pub fn new(
resource_zip: Option<&str>,
renderer: &mut Box<Renderer>,
resource_manager: &Arc<Mutex<Box<ResourceManager>>>,
filename: &str,
pad: bool,
) -> Font {
// Load font image
let image = Box::new(Image::load_from_png(resource_zip, filename, pad));
// Set up the font shader and texture
//
let texture = renderer.create_texture(
image.get_width(),
image.get_height(),
false, /* depth */
false, /* float */
1, /* No multisampling */
&image.get_data(),
);
let mut shader = renderer.create_shader();
// Set up shader
//
let name = "font".to_string();
let spec = resource_manager.lock().unwrap().get_shader_spec(&name);
shader.build_shader(name, resource_zip, renderer, resource_manager, &spec);
Font {
shader: shader,
image: image,
texture: texture,
char_coords: BTreeMap::new(),
char_width: 0,
char_height: 0,
scale: 1.0f32,
thread_data: ThreadData::new(0),
}
}
/// Set up the specified monospaced font
///
/// char_width: The width of a character in pixels
/// char_height: The height of a character in pixels
/// rows: The number of rows of characters
/// columns: The number of columns of characters
/// start_x: The pixel offset of the left of the first row of characters
/// start_y: The pixel offset of the top of the first row of characters
/// map: A map of the location of the characters
/// scale: Scale for character size
pub fn setup(
&mut self,
char_width: u32,
char_height: u32,
rows: u32,
columns: u32,
start_x: u32,
start_y: u32,
scale: f32,
map: &str,
) {
self.char_width = char_width;
self.char_height = char_height;
self.scale = scale;
let mut i = 0;
for y in 0..rows {
for x in 0..columns {
let l = start_x + x * self.char_width;
let r = start_x + (x + 1) * self.char_width;
let t = start_y + (y + 1) * self.char_height;
let b = start_y + y * self.char_height;
let c = map.as_bytes()[i] as char;
self.char_coords.insert(c, (
l as f32 / self.image.get_width() as f32,
t as f32 / self.image.get_height() as f32,
r as f32 / self.image.get_width() as f32,
b as f32 / self.image.get_height() as f32,
));
i += 1;
}
}
self.reconfigure_shaders();
self.thread_data.vertex_array_type = VertexArrayType::F2F2;
self.thread_data.primitive = PrimitiveType::PrimitiveTriangles;
}
/// Reconfiguration of the shader(s)
///
/// If the shader(s) are recompiled, this needs to be run in
/// addition to the per-frame setup of uniforms
fn reconfigure_shaders(&self) {
self.shader.select();
self.shader.setup_float_attribute_pointer(
"position",
2, // components
4, // stride
0, // offset
);
self.shader.setup_float_attribute_pointer(
"texture_coordinates",
2, // components
4, // stride
2, // offset
);
}
/// Check for shader file changes and rebuild if necessary
///
/// renderer: The renderer object
/// resource_manager: The shader resources manager for obtaining shader details
pub fn check_for_shader_rebuild(&mut self, renderer: &Box<Renderer>, resource_manager: &Arc<Mutex<Box<ResourceManager>>>) {
if self.shader.check_for_rebuild(
None,
renderer,
resource_manager,
)
{
self.reconfigure_shaders();
}
// Turn off shader warnings until the next time it's required
self.shader.set_generate_warnings(false);
}
/// Begin a pass of font rendering
///
/// renderer: The renderer object
pub fn begin_pass<Rend: Renderer + ?Sized>(&mut self, renderer: &mut Rend) {
renderer.begin_pass("font");
self.shader.select();
self.shader.set_uniform_int("font_texture", 0);
self.texture.bind(0);
}
/// End a pass of font rendering
///
/// renderer: The renderer object
pub fn end_pass<Rend: Renderer + ?Sized>(&mut self, renderer: &mut Rend) {
self.thread_data.flush_st(renderer);
renderer.end_pass();
}
/// Render the text to the screen
///
/// renderer: The renderer object
/// options: The options object
/// text: The text to render
/// x: The x location to start rendering at
/// y: The y location to start rendering at
pub fn render_text(&mut self, options: &Options, text: &String, x: i32, y: i32) {
let mut i: i32 = 0;
for c in text.chars() {
let l = (x as f32 + i as f32 * self.char_width as f32) / options.width as f32;
let r = (x as f32 + (i + 1) as f32 * self.char_width as f32) / options.width as f32;
let t = y as f32 / options.height as f32;
let b = (y as f32 + self.char_height as f32) / options.height as f32;
let v = vec![
Vec2 { x: l, y: t } * self.scale,
Vec2 { x: r, y: t } * self.scale,
Vec2 { x: r, y: b } * self.scale,
Vec2 { x: l, y: b } * self.scale,
];
if self.char_coords.contains_key(&c)
|
else {
println!("No char {} found", c);
}
i += 1;
}
}
}
// Output the handle associated with the font's image, for debug purposes
impl fmt::Debug for Font {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.texture)
}
}
|
{
let (il, it, ir, ib) = self.char_coords[&c];
let t = vec![
Vec2 { x: il, y: it },
Vec2 { x: ir, y: it },
Vec2 { x: ir, y: ib },
Vec2 { x: il, y: ib },
];
let i1 = self.thread_data.add_vertex_st_f2f2(&v[0], &t[0]);
let i2 = self.thread_data.add_vertex_st_f2f2(&v[1], &t[1]);
let i3 = self.thread_data.add_vertex_st_f2f2(&v[2], &t[2]);
let i4 = self.thread_data.add_vertex_st_f2f2(&v[3], &t[3]);
self.thread_data.add_triangle_st(i1, i2, i3);
self.thread_data.add_triangle_st(i3, i4, i1);
}
|
conditional_block
|
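Font::setup above turns each glyph cell of the atlas into normalized texture coordinates by dividing pixel edges by the image dimensions. A standalone sketch of that per-cell computation (`char_uv` and the atlas dimensions are illustrative assumptions, not values from font.rs):

```rust
// Sketch of the per-character texture-coordinate math in `Font::setup` above.
// The helper name and the atlas dimensions in `main` are illustrative.
fn char_uv(
    x: u32, y: u32,             // cell position in the glyph grid
    start_x: u32, start_y: u32, // pixel offset of the first cell
    cell_w: u32, cell_h: u32,   // glyph cell size in pixels
    img_w: u32, img_h: u32,     // atlas image size in pixels
) -> (f32, f32, f32, f32) {
    let l = (start_x + x * cell_w) as f32 / img_w as f32;
    let r = (start_x + (x + 1) * cell_w) as f32 / img_w as f32;
    let t = (start_y + (y + 1) * cell_h) as f32 / img_h as f32;
    let b = (start_y + y * cell_h) as f32 / img_h as f32;
    (l, t, r, b)
}

fn main() {
    // Second column, first row of a hypothetical 256x256 atlas with 16x16 glyphs.
    println!("{:?}", char_uv(1, 0, 0, 0, 16, 16, 256, 256));
}
```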
font.rs
|
// Copyright (c) 2016-2018 Bruce Stenning. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
use std::collections::*;
use std::sync::*;
use std::boxed::Box;
use std::fmt;
use wyvern::graphics::renderer::*;
use wyvern::graphics::image::*;
use wyvern::graphics::texture::*;
use wyvern::graphics::shader::*;
use wyvern::graphics::resources::*;
use wyvern::algebra::vector::*;
use options::*;
pub struct Font {
pub shader: Box<Shader>,
pub image: Box<Image>,
pub texture: Box<Texture>,
pub char_coords: BTreeMap<char, (f32, f32, f32, f32)>,
pub char_width: u32,
pub char_height: u32,
pub scale: f32,
thread_data: ThreadData,
}
impl Font {
/// Load the shaders before construction, to allow the renderer to be finalised
///
/// resource_zip: The optional name of a zip archive to use instead of reading from the filesystem
/// renderer: The renderer object
/// resource_manager: The shader resources manager for obtaining shader details
/// filename: The filename of the font image to use
/// pad: true if the image should be padded with an alpha channel
pub fn new(
resource_zip: Option<&str>,
renderer: &mut Box<Renderer>,
resource_manager: &Arc<Mutex<Box<ResourceManager>>>,
filename: &str,
pad: bool,
) -> Font {
// Load font image
let image = Box::new(Image::load_from_png(resource_zip, filename, pad));
// Set up the font shader and texture
//
let texture = renderer.create_texture(
image.get_width(),
image.get_height(),
false, /* depth */
false, /* float */
1, /* No multisampling */
&image.get_data(),
);
let mut shader = renderer.create_shader();
// Set up shader
//
let name = "font".to_string();
let spec = resource_manager.lock().unwrap().get_shader_spec(&name);
shader.build_shader(name, resource_zip, renderer, resource_manager, &spec);
Font {
shader: shader,
image: image,
texture: texture,
char_coords: BTreeMap::new(),
char_width: 0,
char_height: 0,
scale: 1.0f32,
thread_data: ThreadData::new(0),
}
}
/// Set up the specified monospaced font
///
/// char_width: The width of a character in pixels
/// char_height: The height of a character in pixels
/// rows: The number of rows of characters
/// columns: The number of columns of characters
/// start_x: The pixel offset of the left of the first row of characters
    /// start_y: The pixel offset of the top of the first row of characters
/// map: A map of the location of the characters
/// scale: Scale for character size
pub fn setup(
&mut self,
char_width: u32,
char_height: u32,
rows: u32,
columns: u32,
start_x: u32,
start_y: u32,
scale: f32,
map: &str,
) {
self.char_width = char_width;
self.char_height = char_height;
self.scale = scale;
let mut i = 0;
for y in 0..rows {
for x in 0..columns {
let l = start_x + x * self.char_width;
let r = start_x + (x + 1) * self.char_width;
let t = start_y + (y + 1) * self.char_height;
let b = start_y + y * self.char_height;
let c = map.as_bytes()[i] as char;
self.char_coords.insert(c, (
l as f32 / self.image.get_width() as f32,
t as f32 / self.image.get_height() as f32,
r as f32 / self.image.get_width() as f32,
b as f32 / self.image.get_height() as f32,
));
i += 1;
}
}
self.reconfigure_shaders();
self.thread_data.vertex_array_type = VertexArrayType::F2F2;
self.thread_data.primitive = PrimitiveType::PrimitiveTriangles;
}
/// Reconfiguration of the shader(s)
///
/// If the shader(s) are recompiled, this needs to be run in
/// addition to the per-frame setup of uniforms
fn reconfigure_shaders(&self) {
self.shader.select();
self.shader.setup_float_attribute_pointer(
"position",
2, // components
4, // stride
0, // offset
);
self.shader.setup_float_attribute_pointer(
"texture_coordinates",
2, // components
4, // stride
2, // offset
);
}
/// Check for shader file changes and rebuild if necessary
///
/// renderer: The renderer object
/// resource_manager: The shader resources manager for obtaining shader details
pub fn check_for_shader_rebuild(&mut self, renderer: &Box<Renderer>, resource_manager: &Arc<Mutex<Box<ResourceManager>>>) {
if self.shader.check_for_rebuild(
None,
renderer,
resource_manager,
)
{
self.reconfigure_shaders();
}
// Turn off shader warnings until the next time it's required
self.shader.set_generate_warnings(false);
}
/// Begin a pass of font rendering
///
/// renderer: The renderer object
    pub fn begin_pass<Rend: Renderer + ?Sized>(&mut self, renderer: &mut Rend) {
renderer.begin_pass("font");
self.shader.select();
self.shader.set_uniform_int("font_texture", 0);
self.texture.bind(0);
}
/// End a pass of font rendering
///
/// renderer: The renderer object
    pub fn end_pass<Rend: Renderer + ?Sized>(&mut self, renderer: &mut Rend) {
self.thread_data.flush_st(renderer);
renderer.end_pass();
}
/// Render the text to the screen
///
/// options: The options object
/// text: The text to render
/// x: The x location to start rendering at
/// y: The y location to start rendering at
pub fn render_text(&mut self, options: &Options, text: &String, x: i32, y: i32) {
let mut i: i32 = 0;
for c in text.chars() {
let l = (x as f32 + i as f32 * self.char_width as f32) / options.width as f32;
let r = (x as f32 + (i + 1) as f32 * self.char_width as f32) / options.width as f32;
let t = y as f32 / options.height as f32;
let b = (y as f32 + self.char_height as f32) / options.height as f32;
let v = vec![
Vec2 { x: l, y: t } * self.scale,
Vec2 { x: r, y: t } * self.scale,
Vec2 { x: r, y: b } * self.scale,
Vec2 { x: l, y: b } * self.scale,
];
|
Vec2 { x: ir, y: it },
Vec2 { x: ir, y: ib },
Vec2 { x: il, y: ib },
];
let i1 = self.thread_data.add_vertex_st_f2f2(&v[0], &t[0]);
let i2 = self.thread_data.add_vertex_st_f2f2(&v[1], &t[1]);
let i3 = self.thread_data.add_vertex_st_f2f2(&v[2], &t[2]);
let i4 = self.thread_data.add_vertex_st_f2f2(&v[3], &t[3]);
self.thread_data.add_triangle_st(i1, i2, i3);
self.thread_data.add_triangle_st(i3, i4, i1);
} else {
println!("No char {} found", c);
}
i += 1;
}
}
}
// Output the handle associated with the font's image, for debug purposes
impl fmt::Debug for Font {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.texture)
}
}
|
if self.char_coords.contains_key(&c) {
let (il, it, ir, ib) = self.char_coords[&c];
let t = vec![
Vec2 { x: il, y: it },
|
random_line_split
|
font.rs
|
// Copyright (c) 2016-2018 Bruce Stenning. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
use std::collections::*;
use std::sync::*;
use std::boxed::Box;
use std::fmt;
use wyvern::graphics::renderer::*;
use wyvern::graphics::image::*;
use wyvern::graphics::texture::*;
use wyvern::graphics::shader::*;
use wyvern::graphics::resources::*;
use wyvern::algebra::vector::*;
use options::*;
pub struct Font {
pub shader: Box<Shader>,
pub image: Box<Image>,
pub texture: Box<Texture>,
pub char_coords: BTreeMap<char, (f32, f32, f32, f32)>,
pub char_width: u32,
pub char_height: u32,
pub scale: f32,
thread_data: ThreadData,
}
impl Font {
/// Load the shaders before construction, to allow the renderer to be finalised
///
/// resource_zip: The optional name of a zip archive to use instead of reading from the filesystem
/// renderer: The renderer object
/// resource_manager: The shader resources manager for obtaining shader details
/// filename: The filename of the font image to use
/// pad: true if the image should be padded with an alpha channel
pub fn new(
resource_zip: Option<&str>,
renderer: &mut Box<Renderer>,
resource_manager: &Arc<Mutex<Box<ResourceManager>>>,
filename: &str,
pad: bool,
) -> Font {
// Load font image
let image = Box::new(Image::load_from_png(resource_zip, filename, pad));
// Set up the font shader and texture
//
let texture = renderer.create_texture(
image.get_width(),
image.get_height(),
false, /* depth */
false, /* float */
1, /* No multisampling */
&image.get_data(),
);
let mut shader = renderer.create_shader();
// Set up shader
//
let name = "font".to_string();
let spec = resource_manager.lock().unwrap().get_shader_spec(&name);
shader.build_shader(name, resource_zip, renderer, resource_manager, &spec);
Font {
shader: shader,
image: image,
texture: texture,
char_coords: BTreeMap::new(),
char_width: 0,
char_height: 0,
scale: 1.0f32,
thread_data: ThreadData::new(0),
}
}
/// Set up the specified monospaced font
///
/// char_width: The width of a character in pixels
/// char_height: The height of a character in pixels
/// rows: The number of rows of characters
/// columns: The number of columns of characters
/// start_x: The pixel offset of the left of the first row of characters
    /// start_y: The pixel offset of the top of the first row of characters
/// map: A map of the location of the characters
/// scale: Scale for character size
pub fn setup(
&mut self,
char_width: u32,
char_height: u32,
rows: u32,
columns: u32,
start_x: u32,
start_y: u32,
scale: f32,
map: &str,
) {
self.char_width = char_width;
self.char_height = char_height;
self.scale = scale;
let mut i = 0;
for y in 0..rows {
for x in 0..columns {
let l = start_x + x * self.char_width;
let r = start_x + (x + 1) * self.char_width;
let t = start_y + (y + 1) * self.char_height;
let b = start_y + y * self.char_height;
let c = map.as_bytes()[i] as char;
self.char_coords.insert(c, (
l as f32 / self.image.get_width() as f32,
t as f32 / self.image.get_height() as f32,
r as f32 / self.image.get_width() as f32,
b as f32 / self.image.get_height() as f32,
));
i += 1;
}
}
self.reconfigure_shaders();
self.thread_data.vertex_array_type = VertexArrayType::F2F2;
self.thread_data.primitive = PrimitiveType::PrimitiveTriangles;
}
/// Reconfiguration of the shader(s)
///
/// If the shader(s) are recompiled, this needs to be run in
/// addition to the per-frame setup of uniforms
fn reconfigure_shaders(&self) {
self.shader.select();
self.shader.setup_float_attribute_pointer(
"position",
2, // components
4, // stride
0, // offset
);
self.shader.setup_float_attribute_pointer(
"texture_coordinates",
2, // components
4, // stride
2, // offset
);
}
/// Check for shader file changes and rebuild if necessary
///
/// renderer: The renderer object
/// resource_manager: The shader resources manager for obtaining shader details
pub fn check_for_shader_rebuild(&mut self, renderer: &Box<Renderer>, resource_manager: &Arc<Mutex<Box<ResourceManager>>>) {
if self.shader.check_for_rebuild(
None,
renderer,
resource_manager,
)
{
self.reconfigure_shaders();
}
// Turn off shader warnings until the next time it's required
self.shader.set_generate_warnings(false);
}
/// Begin a pass of font rendering
///
/// renderer: The renderer object
    pub fn begin_pass<Rend: Renderer + ?Sized>(&mut self, renderer: &mut Rend) {
renderer.begin_pass("font");
self.shader.select();
self.shader.set_uniform_int("font_texture", 0);
self.texture.bind(0);
}
/// End a pass of font rendering
///
/// renderer: The renderer object
    pub fn end_pass<Rend: Renderer + ?Sized>(&mut self, renderer: &mut Rend) {
self.thread_data.flush_st(renderer);
renderer.end_pass();
}
/// Render the text to the screen
///
/// options: The options object
/// text: The text to render
/// x: The x location to start rendering at
/// y: The y location to start rendering at
pub fn render_text(&mut self, options: &Options, text: &String, x: i32, y: i32) {
let mut i: i32 = 0;
for c in text.chars() {
let l = (x as f32 + i as f32 * self.char_width as f32) / options.width as f32;
let r = (x as f32 + (i + 1) as f32 * self.char_width as f32) / options.width as f32;
let t = y as f32 / options.height as f32;
let b = (y as f32 + self.char_height as f32) / options.height as f32;
let v = vec![
Vec2 { x: l, y: t } * self.scale,
Vec2 { x: r, y: t } * self.scale,
Vec2 { x: r, y: b } * self.scale,
Vec2 { x: l, y: b } * self.scale,
];
if self.char_coords.contains_key(&c) {
let (il, it, ir, ib) = self.char_coords[&c];
let t = vec![
Vec2 { x: il, y: it },
Vec2 { x: ir, y: it },
Vec2 { x: ir, y: ib },
Vec2 { x: il, y: ib },
];
let i1 = self.thread_data.add_vertex_st_f2f2(&v[0], &t[0]);
let i2 = self.thread_data.add_vertex_st_f2f2(&v[1], &t[1]);
let i3 = self.thread_data.add_vertex_st_f2f2(&v[2], &t[2]);
let i4 = self.thread_data.add_vertex_st_f2f2(&v[3], &t[3]);
self.thread_data.add_triangle_st(i1, i2, i3);
self.thread_data.add_triangle_st(i3, i4, i1);
} else {
println!("No char {} found", c);
}
i += 1;
}
}
}
// Output the handle associated with the font's image, for debug purposes
impl fmt::Debug for Font {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
|
}
|
{
write!(f, "{:?}", self.texture)
}
|
identifier_body
|
font.rs
|
// Copyright (c) 2016-2018 Bruce Stenning. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
use std::collections::*;
use std::sync::*;
use std::boxed::Box;
use std::fmt;
use wyvern::graphics::renderer::*;
use wyvern::graphics::image::*;
use wyvern::graphics::texture::*;
use wyvern::graphics::shader::*;
use wyvern::graphics::resources::*;
use wyvern::algebra::vector::*;
use options::*;
pub struct Font {
pub shader: Box<Shader>,
pub image: Box<Image>,
pub texture: Box<Texture>,
pub char_coords: BTreeMap<char, (f32, f32, f32, f32)>,
pub char_width: u32,
pub char_height: u32,
pub scale: f32,
thread_data: ThreadData,
}
impl Font {
/// Load the shaders before construction, to allow the renderer to be finalised
///
/// resource_zip: The optional name of a zip archive to use instead of reading from the filesystem
/// renderer: The renderer object
/// resource_manager: The shader resources manager for obtaining shader details
/// filename: The filename of the font image to use
/// pad: true if the image should be padded with an alpha channel
pub fn new(
resource_zip: Option<&str>,
renderer: &mut Box<Renderer>,
resource_manager: &Arc<Mutex<Box<ResourceManager>>>,
filename: &str,
pad: bool,
) -> Font {
// Load font image
let image = Box::new(Image::load_from_png(resource_zip, filename, pad));
// Set up the font shader and texture
//
let texture = renderer.create_texture(
image.get_width(),
image.get_height(),
false, /* depth */
false, /* float */
1, /* No multisampling */
&image.get_data(),
);
let mut shader = renderer.create_shader();
// Set up shader
//
let name = "font".to_string();
let spec = resource_manager.lock().unwrap().get_shader_spec(&name);
shader.build_shader(name, resource_zip, renderer, resource_manager, &spec);
Font {
shader: shader,
image: image,
texture: texture,
char_coords: BTreeMap::new(),
char_width: 0,
char_height: 0,
scale: 1.0f32,
thread_data: ThreadData::new(0),
}
}
/// Set up the specified monospaced font
///
/// char_width: The width of a character in pixels
/// char_height: The height of a character in pixels
/// rows: The number of rows of characters
/// columns: The number of columns of characters
/// start_x: The pixel offset of the left of the first row of characters
    /// start_y: The pixel offset of the top of the first row of characters
/// map: A map of the location of the characters
/// scale: Scale for character size
pub fn setup(
&mut self,
char_width: u32,
char_height: u32,
rows: u32,
columns: u32,
start_x: u32,
start_y: u32,
scale: f32,
map: &str,
) {
self.char_width = char_width;
self.char_height = char_height;
self.scale = scale;
let mut i = 0;
for y in 0..rows {
for x in 0..columns {
let l = start_x + x * self.char_width;
let r = start_x + (x + 1) * self.char_width;
let t = start_y + (y + 1) * self.char_height;
let b = start_y + y * self.char_height;
let c = map.as_bytes()[i] as char;
self.char_coords.insert(c, (
l as f32 / self.image.get_width() as f32,
t as f32 / self.image.get_height() as f32,
r as f32 / self.image.get_width() as f32,
b as f32 / self.image.get_height() as f32,
));
i += 1;
}
}
self.reconfigure_shaders();
self.thread_data.vertex_array_type = VertexArrayType::F2F2;
self.thread_data.primitive = PrimitiveType::PrimitiveTriangles;
}
/// Reconfiguration of the shader(s)
///
/// If the shader(s) are recompiled, this needs to be run in
/// addition to the per-frame setup of uniforms
fn reconfigure_shaders(&self) {
self.shader.select();
self.shader.setup_float_attribute_pointer(
"position",
2, // components
4, // stride
0, // offset
);
self.shader.setup_float_attribute_pointer(
"texture_coordinates",
2, // components
4, // stride
2, // offset
);
}
/// Check for shader file changes and rebuild if necessary
///
/// renderer: The renderer object
/// resource_manager: The shader resources manager for obtaining shader details
pub fn
|
(&mut self, renderer: &Box<Renderer>, resource_manager: &Arc<Mutex<Box<ResourceManager>>>) {
if self.shader.check_for_rebuild(
None,
renderer,
resource_manager,
)
{
self.reconfigure_shaders();
}
// Turn off shader warnings until the next time it's required
self.shader.set_generate_warnings(false);
}
/// Begin a pass of font rendering
///
/// renderer: The renderer object
    pub fn begin_pass<Rend: Renderer + ?Sized>(&mut self, renderer: &mut Rend) {
renderer.begin_pass("font");
self.shader.select();
self.shader.set_uniform_int("font_texture", 0);
self.texture.bind(0);
}
/// End a pass of font rendering
///
/// renderer: The renderer object
    pub fn end_pass<Rend: Renderer + ?Sized>(&mut self, renderer: &mut Rend) {
self.thread_data.flush_st(renderer);
renderer.end_pass();
}
/// Render the text to the screen
///
/// options: The options object
/// text: The text to render
/// x: The x location to start rendering at
/// y: The y location to start rendering at
pub fn render_text(&mut self, options: &Options, text: &String, x: i32, y: i32) {
let mut i: i32 = 0;
for c in text.chars() {
let l = (x as f32 + i as f32 * self.char_width as f32) / options.width as f32;
let r = (x as f32 + (i + 1) as f32 * self.char_width as f32) / options.width as f32;
let t = y as f32 / options.height as f32;
let b = (y as f32 + self.char_height as f32) / options.height as f32;
let v = vec![
Vec2 { x: l, y: t } * self.scale,
Vec2 { x: r, y: t } * self.scale,
Vec2 { x: r, y: b } * self.scale,
Vec2 { x: l, y: b } * self.scale,
];
if self.char_coords.contains_key(&c) {
let (il, it, ir, ib) = self.char_coords[&c];
let t = vec![
Vec2 { x: il, y: it },
Vec2 { x: ir, y: it },
Vec2 { x: ir, y: ib },
Vec2 { x: il, y: ib },
];
let i1 = self.thread_data.add_vertex_st_f2f2(&v[0], &t[0]);
let i2 = self.thread_data.add_vertex_st_f2f2(&v[1], &t[1]);
let i3 = self.thread_data.add_vertex_st_f2f2(&v[2], &t[2]);
let i4 = self.thread_data.add_vertex_st_f2f2(&v[3], &t[3]);
self.thread_data.add_triangle_st(i1, i2, i3);
self.thread_data.add_triangle_st(i3, i4, i1);
} else {
println!("No char {} found", c);
}
i += 1;
}
}
}
// Output the handle associated with the font's image, for debug purposes
impl fmt::Debug for Font {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.texture)
}
}
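// --- Usage sketch (illustrative addition; not part of the original font.rs) ---
// A minimal, hedged example of driving the Font API above. The image name,
// glyph-grid dimensions, and character map are placeholder assumptions; only
// the new/setup/begin_pass/render_text/end_pass calls come from the code above.
#[allow(dead_code)]
fn example_draw_text(
    renderer: &mut Box<Renderer>,
    resource_manager: &Arc<Mutex<Box<ResourceManager>>>,
    options: &Options,
) {
    // Construct the font once, padding "font.png" with an alpha channel.
    let mut font = Font::new(None, renderer, resource_manager, "font.png", true);
    // Describe the glyph grid: 8x16 pixel cells, 2 rows of 13 columns starting at
    // the top-left of the image, mapped to the 26 upper-case letters.
    font.setup(8, 16, 2, 13, 0, 0, 1.0, "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
    // Per frame: begin the pass, queue the glyph quads, then flush them in end_pass.
    font.begin_pass(renderer.as_mut());
    font.render_text(options, &"HELLO".to_string(), 16, 16);
    font.end_pass(renderer.as_mut());
}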
|
check_for_shader_rebuild
|
identifier_name
|
dns_resolver.rs
|
//! Replacement of service's DNS resolver
use std::{
io::{self, ErrorKind},
net::SocketAddr,
};
use async_trait::async_trait;
use futures::future;
use log::{debug, trace};
use trust_dns_resolver::proto::{
op::{Message, Query},
rr::{DNSClass, Name, RData, RecordType},
};
use shadowsocks::{config::Mode, dns_resolver::DnsResolve, net::ConnectOpts};
use super::{client_cache::DnsClientCache, config::NameServerAddr};
pub struct DnsResolver {
ns: NameServerAddr,
client_cache: DnsClientCache,
mode: Mode,
ipv6_first: bool,
connect_opts: ConnectOpts,
attempts: usize,
}
impl DnsResolver {
pub fn new(ns: NameServerAddr) -> DnsResolver {
DnsResolver {
ns,
client_cache: DnsClientCache::new(5),
mode: Mode::UdpOnly,
ipv6_first: false,
connect_opts: ConnectOpts::default(),
attempts: 2,
}
}
pub fn set_mode(&mut self, mode: Mode) {
self.mode = mode;
}
pub fn set_ipv6_first(&mut self, ipv6_first: bool) {
self.ipv6_first = ipv6_first;
}
pub fn set_connect_opts(&mut self, connect_opts: ConnectOpts) {
self.connect_opts = connect_opts;
}
async fn lookup(&self, msg: Message) -> io::Result<Message> {
let mut last_err = io::Error::new(ErrorKind::InvalidData, "resolve empty");
for _ in 0..self.attempts {
match self.lookup_inner(msg.clone()).await {
Ok(m) => return Ok(m),
Err(err) => last_err = err,
}
}
Err(last_err)
}
async fn lookup_inner(&self, msg: Message) -> io::Result<Message> {
match self.ns {
NameServerAddr::SocketAddr(ns) => {
let mut last_err = io::Error::new(ErrorKind::InvalidData, "resolve empty");
// Query UDP then TCP
if self.mode.enable_udp() {
match self.client_cache.lookup_local(ns, msg.clone(), &self.connect_opts, true)
.await
{
Ok(msg) => return Ok(msg),
Err(err) => {
last_err = err.into();
}
}
}
if self.mode.enable_tcp() {
match self.client_cache.lookup_local(ns, msg, &self.connect_opts, false).await {
Ok(msg) => return Ok(msg),
Err(err) => {
last_err = err.into();
}
}
}
Err(last_err)
}
#[cfg(unix)]
NameServerAddr::UnixSocketAddr(ref path) => self
.client_cache
.lookup_unix_stream(path, msg)
.await
.map_err(From::from),
}
}
}
#[async_trait]
impl DnsResolve for DnsResolver {
async fn resolve(&self, host: &str, port: u16) -> io::Result<Vec<SocketAddr>> {
let mut name = Name::from_utf8(host)?;
name.set_fqdn(true);
let mut queryv4 = Query::new();
queryv4.set_query_class(DNSClass::IN);
queryv4.set_name(name);
let mut queryv6 = queryv4.clone();
queryv4.set_query_type(RecordType::A);
queryv6.set_query_type(RecordType::AAAA);
let mut msgv4 = Message::new();
msgv4.set_recursion_desired(true);
msgv4.add_query(queryv4);
let mut msgv6 = Message::new();
msgv6.set_recursion_desired(true);
msgv6.add_query(queryv6);
match future::join(self.lookup(msgv4), self.lookup(msgv6)).await {
(Err(res_v4), Err(res_v6)) => {
if self.ipv6_first {
Err(res_v6)
} else {
Err(res_v4)
}
}
(res_v4, res_v6) => {
let mut vaddr: Vec<SocketAddr> = vec![];
if self.ipv6_first {
match res_v6 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve AAAA records, error: {}", err),
}
match res_v4 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve A records, error: {}", err),
}
} else {
match res_v4 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve A records, error: {}", err),
}
match res_v6 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve A records, error: {}", err),
}
}
if vaddr.is_empty() {
let err = io::Error::new(ErrorKind::InvalidData, "resolve empty");
return Err(err);
}
Ok(vaddr)
}
}
}
}
fn store_dns(res: Message, port: u16) -> Vec<SocketAddr>
|
{
let mut vaddr = Vec::new();
for record in res.answers() {
match *record.rdata() {
RData::A(addr) => vaddr.push(SocketAddr::new(addr.into(), port)),
RData::AAAA(addr) => vaddr.push(SocketAddr::new(addr.into(), port)),
ref rdata => {
trace!("skipped rdata {:?}", rdata);
}
}
}
vaddr
}
|
identifier_body
|
|
dns_resolver.rs
|
//! Replacement of service's DNS resolver
use std::{
io::{self, ErrorKind},
net::SocketAddr,
};
use async_trait::async_trait;
use futures::future;
use log::{debug, trace};
use trust_dns_resolver::proto::{
op::{Message, Query},
rr::{DNSClass, Name, RData, RecordType},
};
use shadowsocks::{config::Mode, dns_resolver::DnsResolve, net::ConnectOpts};
use super::{client_cache::DnsClientCache, config::NameServerAddr};
pub struct DnsResolver {
ns: NameServerAddr,
client_cache: DnsClientCache,
|
}
impl DnsResolver {
pub fn new(ns: NameServerAddr) -> DnsResolver {
DnsResolver {
ns,
client_cache: DnsClientCache::new(5),
mode: Mode::UdpOnly,
ipv6_first: false,
connect_opts: ConnectOpts::default(),
attempts: 2,
}
}
pub fn set_mode(&mut self, mode: Mode) {
self.mode = mode;
}
pub fn set_ipv6_first(&mut self, ipv6_first: bool) {
self.ipv6_first = ipv6_first;
}
pub fn set_connect_opts(&mut self, connect_opts: ConnectOpts) {
self.connect_opts = connect_opts;
}
async fn lookup(&self, msg: Message) -> io::Result<Message> {
let mut last_err = io::Error::new(ErrorKind::InvalidData, "resolve empty");
for _ in 0..self.attempts {
match self.lookup_inner(msg.clone()).await {
Ok(m) => return Ok(m),
Err(err) => last_err = err,
}
}
Err(last_err)
}
async fn lookup_inner(&self, msg: Message) -> io::Result<Message> {
match self.ns {
NameServerAddr::SocketAddr(ns) => {
let mut last_err = io::Error::new(ErrorKind::InvalidData, "resolve empty");
// Query UDP then TCP
if self.mode.enable_udp() {
match self.client_cache.lookup_local(ns, msg.clone(), &self.connect_opts, true)
.await
{
Ok(msg) => return Ok(msg),
Err(err) => {
last_err = err.into();
}
}
}
if self.mode.enable_tcp() {
match self.client_cache.lookup_local(ns, msg, &self.connect_opts, false).await {
Ok(msg) => return Ok(msg),
Err(err) => {
last_err = err.into();
}
}
}
Err(last_err)
}
#[cfg(unix)]
NameServerAddr::UnixSocketAddr(ref path) => self
.client_cache
.lookup_unix_stream(path, msg)
.await
.map_err(From::from),
}
}
}
#[async_trait]
impl DnsResolve for DnsResolver {
async fn resolve(&self, host: &str, port: u16) -> io::Result<Vec<SocketAddr>> {
let mut name = Name::from_utf8(host)?;
name.set_fqdn(true);
let mut queryv4 = Query::new();
queryv4.set_query_class(DNSClass::IN);
queryv4.set_name(name);
let mut queryv6 = queryv4.clone();
queryv4.set_query_type(RecordType::A);
queryv6.set_query_type(RecordType::AAAA);
let mut msgv4 = Message::new();
msgv4.set_recursion_desired(true);
msgv4.add_query(queryv4);
let mut msgv6 = Message::new();
msgv6.set_recursion_desired(true);
msgv6.add_query(queryv6);
match future::join(self.lookup(msgv4), self.lookup(msgv6)).await {
(Err(res_v4), Err(res_v6)) => {
if self.ipv6_first {
Err(res_v6)
} else {
Err(res_v4)
}
}
(res_v4, res_v6) => {
let mut vaddr: Vec<SocketAddr> = vec![];
if self.ipv6_first {
match res_v6 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve AAAA records, error: {}", err),
}
match res_v4 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve A records, error: {}", err),
}
} else {
match res_v4 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve A records, error: {}", err),
}
match res_v6 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve A records, error: {}", err),
}
}
if vaddr.is_empty() {
let err = io::Error::new(ErrorKind::InvalidData, "resolve empty");
return Err(err);
}
Ok(vaddr)
}
}
}
}
fn store_dns(res: Message, port: u16) -> Vec<SocketAddr> {
let mut vaddr = Vec::new();
for record in res.answers() {
match *record.rdata() {
RData::A(addr) => vaddr.push(SocketAddr::new(addr.into(), port)),
RData::AAAA(addr) => vaddr.push(SocketAddr::new(addr.into(), port)),
ref rdata => {
trace!("skipped rdata {:?}", rdata);
}
}
}
vaddr
}
|
mode: Mode,
ipv6_first: bool,
connect_opts: ConnectOpts,
attempts: usize,
|
random_line_split
|
dns_resolver.rs
|
//! Replacement of service's DNS resolver
use std::{
io::{self, ErrorKind},
net::SocketAddr,
};
use async_trait::async_trait;
use futures::future;
use log::{debug, trace};
use trust_dns_resolver::proto::{
op::{Message, Query},
rr::{DNSClass, Name, RData, RecordType},
};
use shadowsocks::{config::Mode, dns_resolver::DnsResolve, net::ConnectOpts};
use super::{client_cache::DnsClientCache, config::NameServerAddr};
pub struct DnsResolver {
ns: NameServerAddr,
client_cache: DnsClientCache,
mode: Mode,
ipv6_first: bool,
connect_opts: ConnectOpts,
attempts: usize,
}
impl DnsResolver {
pub fn
|
(ns: NameServerAddr) -> DnsResolver {
DnsResolver {
ns,
client_cache: DnsClientCache::new(5),
mode: Mode::UdpOnly,
ipv6_first: false,
connect_opts: ConnectOpts::default(),
attempts: 2,
}
}
pub fn set_mode(&mut self, mode: Mode) {
self.mode = mode;
}
pub fn set_ipv6_first(&mut self, ipv6_first: bool) {
self.ipv6_first = ipv6_first;
}
pub fn set_connect_opts(&mut self, connect_opts: ConnectOpts) {
self.connect_opts = connect_opts;
}
async fn lookup(&self, msg: Message) -> io::Result<Message> {
let mut last_err = io::Error::new(ErrorKind::InvalidData, "resolve empty");
for _ in 0..self.attempts {
match self.lookup_inner(msg.clone()).await {
Ok(m) => return Ok(m),
Err(err) => last_err = err,
}
}
Err(last_err)
}
async fn lookup_inner(&self, msg: Message) -> io::Result<Message> {
match self.ns {
NameServerAddr::SocketAddr(ns) => {
let mut last_err = io::Error::new(ErrorKind::InvalidData, "resolve empty");
// Query UDP then TCP
if self.mode.enable_udp() {
match self.client_cache.lookup_local(ns, msg.clone(), &self.connect_opts, true)
.await
{
Ok(msg) => return Ok(msg),
Err(err) => {
last_err = err.into();
}
}
}
if self.mode.enable_tcp() {
match self.client_cache.lookup_local(ns, msg, &self.connect_opts, false).await {
Ok(msg) => return Ok(msg),
Err(err) => {
last_err = err.into();
}
}
}
Err(last_err)
}
#[cfg(unix)]
NameServerAddr::UnixSocketAddr(ref path) => self
.client_cache
.lookup_unix_stream(path, msg)
.await
.map_err(From::from),
}
}
}
#[async_trait]
impl DnsResolve for DnsResolver {
async fn resolve(&self, host: &str, port: u16) -> io::Result<Vec<SocketAddr>> {
let mut name = Name::from_utf8(host)?;
name.set_fqdn(true);
let mut queryv4 = Query::new();
queryv4.set_query_class(DNSClass::IN);
queryv4.set_name(name);
let mut queryv6 = queryv4.clone();
queryv4.set_query_type(RecordType::A);
queryv6.set_query_type(RecordType::AAAA);
let mut msgv4 = Message::new();
msgv4.set_recursion_desired(true);
msgv4.add_query(queryv4);
let mut msgv6 = Message::new();
msgv6.set_recursion_desired(true);
msgv6.add_query(queryv6);
match future::join(self.lookup(msgv4), self.lookup(msgv6)).await {
(Err(res_v4), Err(res_v6)) => {
if self.ipv6_first {
Err(res_v6)
} else {
Err(res_v4)
}
}
(res_v4, res_v6) => {
let mut vaddr: Vec<SocketAddr> = vec![];
if self.ipv6_first {
match res_v6 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve AAAA records, error: {}", err),
}
match res_v4 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve A records, error: {}", err),
}
} else {
match res_v4 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve A records, error: {}", err),
}
match res_v6 {
Ok(res) => vaddr = store_dns(res, port),
Err(err) => debug!("failed to resolve A records, error: {}", err),
}
}
if vaddr.is_empty() {
let err = io::Error::new(ErrorKind::InvalidData, "resolve empty");
return Err(err);
}
Ok(vaddr)
}
}
}
}
fn store_dns(res: Message, port: u16) -> Vec<SocketAddr> {
let mut vaddr = Vec::new();
for record in res.answers() {
match *record.rdata() {
RData::A(addr) => vaddr.push(SocketAddr::new(addr.into(), port)),
RData::AAAA(addr) => vaddr.push(SocketAddr::new(addr.into(), port)),
ref rdata => {
trace!("skipped rdata {:?}", rdata);
}
}
}
vaddr
}
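// --- Usage sketch (illustrative addition; not part of the original module) ---
// A hedged example of wiring the resolver above together. The nameserver
// address and the host being resolved are placeholder assumptions.
#[allow(dead_code)]
async fn example_resolve() -> io::Result<Vec<SocketAddr>> {
    let ns: SocketAddr = "127.0.0.1:53".parse().unwrap();
    let mut resolver = DnsResolver::new(NameServerAddr::SocketAddr(ns));
    // Prefer AAAA answers when both record families resolve successfully.
    resolver.set_ipv6_first(true);
    // `resolve` comes from the `DnsResolve` trait implemented above.
    resolver.resolve("example.com", 443).await
}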
|
new
|
identifier_name
|
runtime.rs
|
// use std::ffi::CString;
// use std::io;
// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, Instant};
use trust_dns_client::op::Query;
use trust_dns_client::rr::{LowerName, Name, RecordType};
use trust_dns_proto::rr::domain::Name as DomainName;
use trust_dns_proto::{
rr::rdata::mx, rr::rdata::soa, rr::rdata::srv, rr::rdata::txt, rr::RData, rr::Record,
};
// use trust_dns_resolver::config::NameServerConfigGroup;
use trust_dns_resolver::error::{ResolveError, ResolveErrorKind};
use trust_dns_resolver::lookup::Lookup;
use doh_dns::{client::DnsClient, error::DnsError, status::RCode, Dns, DnsAnswer, DnsHttpsServer};
use tokio::runtime::Handle;
use tokio::sync::{RwLock, Semaphore};
use crate::connector::{new_default, new_tunneled, TunneledHyperClient};
enum ResolverSelection {
DefaultRoute(Dns),
Tunneled(Dns<TunneledHyperClient>),
None,
}
pub struct Resolver {
name_servers: Vec<DnsHttpsServer>,
inner: RwLock<ResolverSelection>,
permit: Semaphore,
}
impl Resolver {
pub fn new(
name_servers: Vec<DnsHttpsServer>,
_runtime: &Handle,
) -> Result<Resolver, ResolveError> {
Ok(Resolver {
inner: RwLock::new(ResolverSelection::DefaultRoute(new_default(
name_servers.clone(),
))),
permit: Semaphore::new(50),
name_servers,
})
}
pub async fn lookup(&self, name: LowerName, rtype: RecordType) -> Result<Lookup, ResolveError> {
debug!("resolver waiting for permit");
// Avoid hitting the memory limit by throttling pending lookups.
        let _permit = self.permit.acquire().await;
debug!("resolver waiting for read lock");
let name = Name::from(name);
let result: Result<Vec<DnsAnswer>, DnsError> = match &*self.inner.read().await {
ResolverSelection::None => {
error!("lookup while resolver is disabled");
return Err(ResolveError::from(ResolveErrorKind::Message(
"resolver is disabled",
)));
}
ResolverSelection::Tunneled(resolver) => {
debug!("resolver performing tunneled lookup");
lookup_to_doh_resolve(resolver, name.to_ascii(), rtype).await
}
ResolverSelection::DefaultRoute(resolver) => {
debug!("resolver performing default lookup");
lookup_to_doh_resolve(resolver, name.to_ascii(), rtype).await
}
};
let q = Query::query(name, rtype);
match result {
Ok(records) => {
let lookup = doh_records_to_lookup(q, records)?;
for record in lookup.record_iter() {
debug!("return record: {:?}", record);
}
Ok(lookup)
}
Err(e) => match e {
DnsError::Status(RCode::NXDomain) => {
Err(ResolveError::from(ResolveErrorKind::NoRecordsFound {
query: q,
valid_until: None,
}))
}
_ => Err(ResolveError::from(ResolveErrorKind::Msg(format!(
"error resolving: {}",
e
)))),
},
}
}
pub async fn toggle(&self, tunneled: Option<bool>) {
info!("toggle waiting for read lock");
let needs_update = match &*self.inner.read().await {
ResolverSelection::Tunneled(_) => match tunneled {
Some(tunneled) => tunneled == false,
None => true,
},
ResolverSelection::DefaultRoute(_) => match tunneled {
Some(tunneled) => tunneled == true,
None => true,
},
            ResolverSelection::None => tunneled != None,
};
        if !needs_update {
info!("toggle noop");
return;
}
info!("toggle waiting for write lock");
let selected = &mut *self.inner.write().await;
*selected = match tunneled {
Some(tunneled) => {
if tunneled {
info!("toggle tunneled DNS");
ResolverSelection::Tunneled(Dns::new(new_tunneled(self.name_servers.clone())))
} else {
info!("toggle default DNS");
ResolverSelection::DefaultRoute(new_default(self.name_servers.clone()))
}
}
None => {
info!("toggle disabled DNS");
ResolverSelection::None
}
}
}
}
async fn lookup_to_doh_resolve<C: DnsClient>(
resolver: &Dns<C>,
name: String,
rtype: RecordType,
) -> Result<Vec<DnsAnswer>, DnsError> {
match rtype {
RecordType::A => resolver.resolve_a(&name).await,
RecordType::AAAA => resolver.resolve_aaaa(&name).await,
RecordType::CNAME => resolver.resolve_cname(&name).await,
RecordType::MX => resolver.resolve_mx(&name).await,
RecordType::NAPTR => resolver.resolve_naptr(&name).await,
RecordType::NS => resolver.resolve_ns(&name).await,
RecordType::PTR => resolver.resolve_ptr(&name).await,
RecordType::SOA => resolver.resolve_soa(&name).await,
RecordType::SRV => resolver.resolve_srv(&name).await,
RecordType::TXT => resolver.resolve_txt(&name).await,
RecordType::TLSA => resolver.resolve_tlsa(&name).await,
RecordType::CAA => resolver.resolve_caa(&name).await,
RecordType::SSHFP => resolver.resolve_sshfp(&name).await,
// TODO: dnssec types
// RecordType::DNSSEC(rtype) => match rtype {
// }
RecordType::Unknown(rtype) => {
if rtype == 65 {
// Reduce error log spam for this known issue on iOS 14.
debug!("HTTPS record type not implemented");
return Ok(vec![]);
} else {
error!("unknown or invalid record type: {:?}", rtype);
}
return Err(DnsError::InvalidRecordType);
}
_ => {
info!("valid but not yet supported record type: {:?}", rtype);
return Err(DnsError::InvalidRecordType);
}
}
}
fn doh_records_to_lookup(
query: Query,
doh_records: Vec<DnsAnswer>,
) -> Result<Lookup, ResolveError>
|
let mname = parts
.next()
.and_then(|n| n.parse::<Name>().ok())
.ok_or("invalid mname")?;
let rname = parts
.next()
.and_then(|n| n.parse::<Name>().ok())
.ok_or("invalid rname")?;
let serial = parts
.next()
.and_then(|n| n.parse::<u32>().ok())
.ok_or("invalid serial")?;
let refresh = parts
.next()
.and_then(|n| n.parse::<i32>().ok())
.ok_or("invalid refresh")?;
let retry = parts
.next()
.and_then(|n| n.parse::<i32>().ok())
.ok_or("invalid retry")?;
let expire = parts
.next()
.and_then(|n| n.parse::<i32>().ok())
.ok_or("invalid expire")?;
let minimum = parts
.next()
.and_then(|n| n.parse::<u32>().ok())
.ok_or("invalid minimum")?;
Ok(RData::SOA(soa::SOA::new(
mname, rname, serial, refresh, retry, expire, minimum,
)))
}
RecordType::TXT => Ok(RData::TXT(txt::TXT::new(vec![data
.trim_matches('"')
.to_string()]))),
RecordType::SRV => {
// priority: u16, weight: u16, port: u16, target: Name
let mut parts = data.split_ascii_whitespace();
let priority = parts
.next()
.and_then(|n| n.parse::<u16>().ok())
.ok_or("invalid priority")?;
let weight = parts
.next()
.and_then(|n| n.parse::<u16>().ok())
.ok_or("invalid weight")?;
let port = parts
.next()
.and_then(|n| n.parse::<u16>().ok())
.ok_or("invalid port")?;
let target = parts
.next()
.and_then(|n| n.parse::<Name>().ok())
.ok_or("invalid target")?;
Ok(RData::SRV(srv::SRV::new(priority, weight, port, target)))
}
// RecordType::TLSA => rr.set_rdata(RData::TLSA(r.data.parse().unwrap())),
// RecordType::CAA => rr.set_rdata(RData::CAA(r.data.parse().unwrap())),
// RecordType::SSHFP => rr.set_rdata(RData::SSHFP(r.data.parse().unwrap())),
_ => {
debug!("{}", data);
Err("unsupported record type".to_string())
}
};
let records: Vec<Record> = doh_records
.iter()
.filter_map(|r| {
let rtype = RecordType::from(r.r#type as u16);
let mut rr = Record::new();
let name = match Name::from_str(&r.name) {
Ok(name) => name,
Err(e) => {
debug!("could not parse domain name: {}", e);
return None;
}
};
rr.set_name(name).set_ttl(r.TTL).set_record_type(rtype);
match rdata(&r.data, rtype) {
Ok(rdata) => {
rr.set_rdata(rdata);
Some(rr)
}
Err(e) => {
debug!("could not convert record data: {}", e);
None
}
}
})
.collect();
let ttl = match doh_records.iter().min_by_key(|r| r.TTL) {
Some(r) => r.TTL,
None => 10,
};
let valid_until = Instant::now() + Duration::from_secs(ttl.into());
Ok(Lookup::new_with_deadline(
query,
Arc::new(records),
valid_until,
))
}
// async fn connect(addr: SocketAddr) -> Result<AsyncIo02As03<TimeoutStream<TcpStream>>, io::Error> {
// let stream = timeout(Duration::from_secs(5), connect_tunnel(addr)).await?;
// let mut stream = TimeoutStream::new(stream?);
// // Work around for avoiding stalled connections when switching source IPs.
// stream.set_read_timeout(Some(Duration::from_secs(5)));
// stream.set_write_timeout(Some(Duration::from_secs(5)));
// Ok(AsyncIo02As03(stream))
// }
// }
// async fn connect_tunnel(addr: SocketAddr) -> Result<TcpStream, io::Error> {
// task::spawn_blocking(move || {
// // Only bind if we're not connecting to a DNS server on a private network.
// let is_private = match addr.ip() {
// IpAddr::V4(ip) => ip.is_private(),
// IpAddr::V6(_) => false, // uncommon, assume it's not.
// };
// // Figure out which tunnel interface to bind on.
// let nics = network::IfAddrs::get()?;
// let tunnel = nics
// .iter()
// .filter(|nic| nic.name().contains("tun"))
// .find(|nic| match nic.addr() {
// Some(IpAddr::V4(ip)) => !ip.is_loopback(),
// Some(IpAddr::V6(_)) => false,
// None => false,
// })
// .map(|nic| (nic.name(), nic.addr()));
// match tunnel {
// Some((name, Some(addr))) => info!("binding nameserver connection to {} ({})", name, addr),
// Some((name, None)) => warn!("binding nameserver connection to {} (no addr set?)", name),
// None => warn!("couldn't figure out a tun interface to bind on"),
// }
// let index = |name| {
// let name = CString::new(name)?;
// let index = unsafe { libc::if_nametoindex(name.as_ptr()) };
// if index == 0 {
// Err(io::Error::new(
// io::ErrorKind::NotFound,
// "interface was not found",
// ))
// } else {
// Ok(index)
// }
// };
// let socket = if addr.is_ipv4() {
// let s = Socket::new(Domain::IPV4, Type::STREAM, None)?;
// // Only bind to tunnel interface if we're not targeting a private network.
// if !is_private {
// if let Some((name, _)) = tunnel {
// s.set_bound_interface(index(name)?)?;
// }
// }
// s
// } else {
// let s = Socket::new(Domain::IPV6, Type::STREAM, None)?;
// // TODO: check if private network.
// if let Some((name, _)) = tunnel {
// s.set_bound_interface_v6(index(name)?)?;
// }
// s
// };
// socket.connect(&addr.into())?;
// TcpStream::from_std(socket.into_tcp_stream())
// })
// .await?
// }
#[cfg(test)]
mod test {
use doh_dns::{Dns, DnsHttpsServer};
use std::time::Duration;
#[tokio::test]
async fn doh_servers() {
let cloudflare = (
"cloudflare",
Some(DnsHttpsServer::new(
"cloudflare-dns.com".into(),
"dns-query".into(),
vec![
"1.1.1.1".parse().unwrap(),
"2606:4700:4700::1111".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let cloudflare_malware = (
"cloudflare malware",
Some(DnsHttpsServer::new(
"cloudflare-dns.com".into(),
"dns-query".into(),
vec![
"1.1.1.2".parse().unwrap(),
"2606:4700:4700::1112".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let cloudflare_adult = (
"cloudflare adult",
Some(DnsHttpsServer::new(
"cloudflare-dns.com".into(),
"dns-query".into(),
vec![
"1.1.1.3".parse().unwrap(),
"2606:4700:4700::1113".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let gesellschaft = (
"gesellschaft",
Some(DnsHttpsServer::new(
"dns.digitale-gesellschaft.ch".into(),
"dns-query".into(),
vec![
"185.95.218.42".parse().unwrap(),
"2a05:fc84::4".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let opendns = (
"opendns",
Some(DnsHttpsServer::new(
"doh.opendns.com".into(),
"dns-query".into(),
vec!["208.67.220.220".parse().unwrap()],
Duration::from_secs(5),
)),
);
let opendns_family = (
"opendns family",
Some(DnsHttpsServer::new(
"doh.opendns.com".into(),
"dns-query".into(),
vec!["208.67.222.123".parse().unwrap()],
Duration::from_secs(5),
)),
);
let opennic_usa = (
"opennic usa",
Some(DnsHttpsServer::new(
"ns03.dns.tin-fan.com".into(),
"dns-query".into(),
vec![
"155.138.240.237".parse().unwrap(),
"2001:19f0:6401:b3d:5400:2ff:fe5a:fb9f".parse().unwrap(),
],
Duration::from_secs(5),
)),
);
let opennic_eu = (
"opennic eu",
Some(DnsHttpsServer::new(
"ns01.dns.tin-fan.com".into(),
"dns-query".into(),
vec![
"95.217.16.205".parse().unwrap(),
"2a01:4f9:c010:6093::3485".parse().unwrap(),
],
Duration::from_secs(5),
)),
);
let mut servers = [
cloudflare,
cloudflare_malware,
cloudflare_adult,
gesellschaft,
// opendns,
// opendns_family,
opennic_eu,
opennic_usa,
];
for server in &mut servers {
println!("using: {}", server.0);
let resolver = Dns::with_servers(vec![server.1.take().unwrap()]).unwrap();
let res = resolver.resolve_a("google.com").await.unwrap();
assert_ne!(0, res.len())
}
}
#[tokio::test]
async fn test_txt() {
let resolver = Dns::default();
let result = resolver.resolve_txt("blokada.org").await.unwrap();
for txt in result {
println!("txt: '{}'", txt.data);
}
}
}
|
{
let parse_error = |e| format!("error parsing: {}", e);
let rdata = |data: &str, rtype: RecordType| match rtype {
RecordType::A => Ok(RData::A(data.parse().map_err(parse_error)?)),
RecordType::AAAA => Ok(RData::AAAA(data.parse().map_err(parse_error)?)),
RecordType::CNAME => Ok(RData::CNAME(DomainName::from_str(data)?)),
RecordType::MX => {
let mut parts = data.split_ascii_whitespace();
if let (Some(part_1), Some(part_2)) = (parts.next(), parts.next()) {
if let (Ok(prio), Ok(name)) = (part_1.parse::<u16>(), part_2.parse::<Name>()) {
return Ok(RData::MX(mx::MX::new(prio, name)));
}
}
Err("invalid MX data".to_string())
}
// RecordType::NAPTR => rr.set_rdata(RData::NAPTR(r.data.parse().unwrap())),
RecordType::NS => Ok(RData::NS(data.parse()?)),
RecordType::PTR => Ok(RData::PTR(data.parse()?)),
RecordType::SOA => {
let mut parts = data.split_ascii_whitespace();
|
identifier_body
|
runtime.rs
|
// use std::ffi::CString;
// use std::io;
// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, Instant};
use trust_dns_client::op::Query;
use trust_dns_client::rr::{LowerName, Name, RecordType};
use trust_dns_proto::rr::domain::Name as DomainName;
use trust_dns_proto::{
rr::rdata::mx, rr::rdata::soa, rr::rdata::srv, rr::rdata::txt, rr::RData, rr::Record,
};
// use trust_dns_resolver::config::NameServerConfigGroup;
use trust_dns_resolver::error::{ResolveError, ResolveErrorKind};
use trust_dns_resolver::lookup::Lookup;
use doh_dns::{client::DnsClient, error::DnsError, status::RCode, Dns, DnsAnswer, DnsHttpsServer};
use tokio::runtime::Handle;
use tokio::sync::{RwLock, Semaphore};
use crate::connector::{new_default, new_tunneled, TunneledHyperClient};
enum ResolverSelection {
DefaultRoute(Dns),
Tunneled(Dns<TunneledHyperClient>),
None,
}
pub struct Resolver {
name_servers: Vec<DnsHttpsServer>,
inner: RwLock<ResolverSelection>,
permit: Semaphore,
}
impl Resolver {
pub fn new(
name_servers: Vec<DnsHttpsServer>,
_runtime: &Handle,
) -> Result<Resolver, ResolveError> {
Ok(Resolver {
inner: RwLock::new(ResolverSelection::DefaultRoute(new_default(
name_servers.clone(),
))),
permit: Semaphore::new(50),
name_servers,
})
}
pub async fn lookup(&self, name: LowerName, rtype: RecordType) -> Result<Lookup, ResolveError> {
debug!("resolver waiting for permit");
// Avoid hitting the memory limit by throttling pending lookups.
        let _permit = self.permit.acquire().await;
debug!("resolver waiting for read lock");
let name = Name::from(name);
let result: Result<Vec<DnsAnswer>, DnsError> = match &*self.inner.read().await {
ResolverSelection::None => {
error!("lookup while resolver is disabled");
return Err(ResolveError::from(ResolveErrorKind::Message(
"resolver is disabled",
)));
}
ResolverSelection::Tunneled(resolver) => {
debug!("resolver performing tunneled lookup");
lookup_to_doh_resolve(resolver, name.to_ascii(), rtype).await
}
ResolverSelection::DefaultRoute(resolver) => {
debug!("resolver performing default lookup");
lookup_to_doh_resolve(resolver, name.to_ascii(), rtype).await
}
};
let q = Query::query(name, rtype);
match result {
Ok(records) => {
let lookup = doh_records_to_lookup(q, records)?;
for record in lookup.record_iter() {
debug!("return record: {:?}", record);
}
Ok(lookup)
}
Err(e) => match e {
DnsError::Status(RCode::NXDomain) => {
Err(ResolveError::from(ResolveErrorKind::NoRecordsFound {
query: q,
valid_until: None,
}))
}
_ => Err(ResolveError::from(ResolveErrorKind::Msg(format!(
"error resolving: {}",
e
)))),
},
}
}
pub async fn toggle(&self, tunneled: Option<bool>) {
info!("toggle waiting for read lock");
let needs_update = match &*self.inner.read().await {
ResolverSelection::Tunneled(_) => match tunneled {
Some(tunneled) => tunneled == false,
None => true,
},
ResolverSelection::DefaultRoute(_) => match tunneled {
Some(tunneled) => tunneled == true,
None => true,
},
            ResolverSelection::None => tunneled != None,
};
        if !needs_update {
info!("toggle noop");
return;
}
info!("toggle waiting for write lock");
let selected = &mut *self.inner.write().await;
*selected = match tunneled {
Some(tunneled) => {
if tunneled {
info!("toggle tunneled DNS");
ResolverSelection::Tunneled(Dns::new(new_tunneled(self.name_servers.clone())))
} else {
info!("toggle default DNS");
ResolverSelection::DefaultRoute(new_default(self.name_servers.clone()))
}
}
None => {
info!("toggle disabled DNS");
ResolverSelection::None
}
}
}
}
async fn lookup_to_doh_resolve<C: DnsClient>(
resolver: &Dns<C>,
name: String,
rtype: RecordType,
) -> Result<Vec<DnsAnswer>, DnsError> {
match rtype {
RecordType::A => resolver.resolve_a(&name).await,
RecordType::AAAA => resolver.resolve_aaaa(&name).await,
RecordType::CNAME => resolver.resolve_cname(&name).await,
RecordType::MX => resolver.resolve_mx(&name).await,
RecordType::NAPTR => resolver.resolve_naptr(&name).await,
RecordType::NS => resolver.resolve_ns(&name).await,
RecordType::PTR => resolver.resolve_ptr(&name).await,
RecordType::SOA => resolver.resolve_soa(&name).await,
RecordType::SRV => resolver.resolve_srv(&name).await,
RecordType::TXT => resolver.resolve_txt(&name).await,
RecordType::TLSA => resolver.resolve_tlsa(&name).await,
RecordType::CAA => resolver.resolve_caa(&name).await,
RecordType::SSHFP => resolver.resolve_sshfp(&name).await,
// TODO: dnssec types
// RecordType::DNSSEC(rtype) => match rtype {
// }
RecordType::Unknown(rtype) => {
if rtype == 65 {
// Reduce error log spam for this known issue on iOS 14.
debug!("HTTPS record type not implemented");
return Ok(vec![]);
} else {
error!("unknown or invalid record type: {:?}", rtype);
}
return Err(DnsError::InvalidRecordType);
}
_ => {
info!("valid but not yet supported record type: {:?}", rtype);
return Err(DnsError::InvalidRecordType);
}
}
}
fn doh_records_to_lookup(
query: Query,
doh_records: Vec<DnsAnswer>,
) -> Result<Lookup, ResolveError> {
let parse_error = |e| format!("error parsing: {}", e);
let rdata = |data: &str, rtype: RecordType| match rtype {
RecordType::A => Ok(RData::A(data.parse().map_err(parse_error)?)),
RecordType::AAAA => Ok(RData::AAAA(data.parse().map_err(parse_error)?)),
RecordType::CNAME => Ok(RData::CNAME(DomainName::from_str(data)?)),
RecordType::MX => {
let mut parts = data.split_ascii_whitespace();
if let (Some(part_1), Some(part_2)) = (parts.next(), parts.next()) {
if let (Ok(prio), Ok(name)) = (part_1.parse::<u16>(), part_2.parse::<Name>()) {
return Ok(RData::MX(mx::MX::new(prio, name)));
}
}
Err("invalid MX data".to_string())
}
// RecordType::NAPTR => rr.set_rdata(RData::NAPTR(r.data.parse().unwrap())),
RecordType::NS => Ok(RData::NS(data.parse()?)),
RecordType::PTR => Ok(RData::PTR(data.parse()?)),
RecordType::SOA => {
let mut parts = data.split_ascii_whitespace();
let mname = parts
.next()
.and_then(|n| n.parse::<Name>().ok())
.ok_or("invalid mname")?;
let rname = parts
.next()
.and_then(|n| n.parse::<Name>().ok())
.ok_or("invalid rname")?;
let serial = parts
.next()
.and_then(|n| n.parse::<u32>().ok())
.ok_or("invalid serial")?;
let refresh = parts
.next()
.and_then(|n| n.parse::<i32>().ok())
.ok_or("invalid refresh")?;
let retry = parts
.next()
.and_then(|n| n.parse::<i32>().ok())
.ok_or("invalid retry")?;
let expire = parts
.next()
.and_then(|n| n.parse::<i32>().ok())
.ok_or("invalid expire")?;
let minimum = parts
.next()
.and_then(|n| n.parse::<u32>().ok())
.ok_or("invalid minimum")?;
Ok(RData::SOA(soa::SOA::new(
mname, rname, serial, refresh, retry, expire, minimum,
)))
}
RecordType::TXT => Ok(RData::TXT(txt::TXT::new(vec![data
.trim_matches('"')
.to_string()]))),
RecordType::SRV => {
// priority: u16, weight: u16, port: u16, target: Name
let mut parts = data.split_ascii_whitespace();
let priority = parts
.next()
.and_then(|n| n.parse::<u16>().ok())
.ok_or("invalid priority")?;
let weight = parts
.next()
.and_then(|n| n.parse::<u16>().ok())
.ok_or("invalid weight")?;
let port = parts
.next()
.and_then(|n| n.parse::<u16>().ok())
.ok_or("invalid port")?;
let target = parts
.next()
.and_then(|n| n.parse::<Name>().ok())
.ok_or("invalid target")?;
Ok(RData::SRV(srv::SRV::new(priority, weight, port, target)))
}
// RecordType::TLSA => rr.set_rdata(RData::TLSA(r.data.parse().unwrap())),
// RecordType::CAA => rr.set_rdata(RData::CAA(r.data.parse().unwrap())),
// RecordType::SSHFP => rr.set_rdata(RData::SSHFP(r.data.parse().unwrap())),
_ => {
debug!("{}", data);
Err("unsupported record type".to_string())
}
};
let records: Vec<Record> = doh_records
.iter()
.filter_map(|r| {
let rtype = RecordType::from(r.r#type as u16);
let mut rr = Record::new();
let name = match Name::from_str(&r.name) {
Ok(name) => name,
Err(e) => {
debug!("could not parse domain name: {}", e);
return None;
}
};
rr.set_name(name).set_ttl(r.TTL).set_record_type(rtype);
match rdata(&r.data, rtype) {
Ok(rdata) => {
rr.set_rdata(rdata);
Some(rr)
}
Err(e) => {
debug!("could not convert record data: {}", e);
None
}
}
})
.collect();
let ttl = match doh_records.iter().min_by_key(|r| r.TTL) {
Some(r) => r.TTL,
None => 10,
};
let valid_until = Instant::now() + Duration::from_secs(ttl.into());
Ok(Lookup::new_with_deadline(
query,
Arc::new(records),
valid_until,
))
}
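// --- Illustrative note (added; not part of the original runtime.rs) ---
// The `rdata` closure above parses the textual `data` field of a DoH answer.
// For reference, the whitespace-separated layouts it expects are assumed to be:
//   MX:  "<preference> <exchange>",             e.g. "10 mail.example.org."
//   SOA: "<mname> <rname> <serial> <refresh> <retry> <expire> <minimum>"
//   SRV: "<priority> <weight> <port> <target>", e.g. "0 5 443 svc.example.org."
// The example values are placeholders, not taken from the original source.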
// async fn connect(addr: SocketAddr) -> Result<AsyncIo02As03<TimeoutStream<TcpStream>>, io::Error> {
// let stream = timeout(Duration::from_secs(5), connect_tunnel(addr)).await?;
// let mut stream = TimeoutStream::new(stream?);
// // Work around for avoiding stalled connections when switching source IPs.
// stream.set_read_timeout(Some(Duration::from_secs(5)));
// stream.set_write_timeout(Some(Duration::from_secs(5)));
// Ok(AsyncIo02As03(stream))
// }
// }
// async fn connect_tunnel(addr: SocketAddr) -> Result<TcpStream, io::Error> {
// task::spawn_blocking(move || {
// // Only bind if we're not connecting to a DNS server on a private network.
// let is_private = match addr.ip() {
// IpAddr::V4(ip) => ip.is_private(),
// IpAddr::V6(_) => false, // uncommon, assume it's not.
// };
// // Figure out which tunnel interface to bind on.
// let nics = network::IfAddrs::get()?;
// let tunnel = nics
// .iter()
// .filter(|nic| nic.name().contains("tun"))
// .find(|nic| match nic.addr() {
// Some(IpAddr::V4(ip)) => !ip.is_loopback(),
// Some(IpAddr::V6(_)) => false,
// None => false,
// })
// .map(|nic| (nic.name(), nic.addr()));
// match tunnel {
// Some((name, Some(addr))) => info!("binding nameserver connection to {} ({})", name, addr),
// Some((name, None)) => warn!("binding nameserver connection to {} (no addr set?)", name),
// None => warn!("couldn't figure out a tun interface to bind on"),
// }
// let index = |name| {
// let name = CString::new(name)?;
// let index = unsafe { libc::if_nametoindex(name.as_ptr()) };
// if index == 0 {
// Err(io::Error::new(
// io::ErrorKind::NotFound,
// "interface was not found",
// ))
// } else {
// Ok(index)
// }
// };
// let socket = if addr.is_ipv4() {
// let s = Socket::new(Domain::IPV4, Type::STREAM, None)?;
// // Only bind to tunnel interface if we're not targeting a private network.
// if !is_private {
// if let Some((name, _)) = tunnel {
// s.set_bound_interface(index(name)?)?;
// }
// }
// s
// } else {
// let s = Socket::new(Domain::IPV6, Type::STREAM, None)?;
// // TODO: check if private network.
// if let Some((name, _)) = tunnel {
// s.set_bound_interface_v6(index(name)?)?;
// }
// s
// };
// socket.connect(&addr.into())?;
// TcpStream::from_std(socket.into_tcp_stream())
// })
// .await?
|
mod test {
use doh_dns::{Dns, DnsHttpsServer};
use std::time::Duration;
#[tokio::test]
async fn doh_servers() {
let cloudflare = (
"cloudflare",
Some(DnsHttpsServer::new(
"cloudflare-dns.com".into(),
"dns-query".into(),
vec![
"1.1.1.1".parse().unwrap(),
"2606:4700:4700::1111".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let cloudflare_malware = (
"cloudflare malware",
Some(DnsHttpsServer::new(
"cloudflare-dns.com".into(),
"dns-query".into(),
vec![
"1.1.1.2".parse().unwrap(),
"2606:4700:4700::1112".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let cloudflare_adult = (
"cloudflare adult",
Some(DnsHttpsServer::new(
"cloudflare-dns.com".into(),
"dns-query".into(),
vec![
"1.1.1.3".parse().unwrap(),
"2606:4700:4700::1113".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let gesellschaft = (
"gesellschaft",
Some(DnsHttpsServer::new(
"dns.digitale-gesellschaft.ch".into(),
"dns-query".into(),
vec![
"185.95.218.42".parse().unwrap(),
"2a05:fc84::4".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let opendns = (
"opendns",
Some(DnsHttpsServer::new(
"doh.opendns.com".into(),
"dns-query".into(),
vec!["208.67.220.220".parse().unwrap()],
Duration::from_secs(5),
)),
);
let opendns_family = (
"opendns family",
Some(DnsHttpsServer::new(
"doh.opendns.com".into(),
"dns-query".into(),
vec!["208.67.222.123".parse().unwrap()],
Duration::from_secs(5),
)),
);
let opennic_usa = (
"opennic usa",
Some(DnsHttpsServer::new(
"ns03.dns.tin-fan.com".into(),
"dns-query".into(),
vec![
"155.138.240.237".parse().unwrap(),
"2001:19f0:6401:b3d:5400:2ff:fe5a:fb9f".parse().unwrap(),
],
Duration::from_secs(5),
)),
);
let opennic_eu = (
"opennic eu",
Some(DnsHttpsServer::new(
"ns01.dns.tin-fan.com".into(),
"dns-query".into(),
vec![
"95.217.16.205".parse().unwrap(),
"2a01:4f9:c010:6093::3485".parse().unwrap(),
],
Duration::from_secs(5),
)),
);
let mut servers = [
cloudflare,
cloudflare_malware,
cloudflare_adult,
gesellschaft,
// opendns,
// opendns_family,
opennic_eu,
opennic_usa,
];
for server in &mut servers {
println!("using: {}", server.0);
let resolver = Dns::with_servers(vec![server.1.take().unwrap()]).unwrap();
let res = resolver.resolve_a("google.com").await.unwrap();
assert_ne!(0, res.len())
}
}
#[tokio::test]
async fn test_txt() {
let resolver = Dns::default();
let result = resolver.resolve_txt("blokada.org").await.unwrap();
for txt in result {
println!("txt: '{}'", txt.data);
}
}
}
|
// }
#[cfg(test)]
|
random_line_split
|
runtime.rs
|
// use std::ffi::CString;
// use std::io;
// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, Instant};
use trust_dns_client::op::Query;
use trust_dns_client::rr::{LowerName, Name, RecordType};
use trust_dns_proto::rr::domain::Name as DomainName;
use trust_dns_proto::{
rr::rdata::mx, rr::rdata::soa, rr::rdata::srv, rr::rdata::txt, rr::RData, rr::Record,
};
// use trust_dns_resolver::config::NameServerConfigGroup;
use trust_dns_resolver::error::{ResolveError, ResolveErrorKind};
use trust_dns_resolver::lookup::Lookup;
use doh_dns::{client::DnsClient, error::DnsError, status::RCode, Dns, DnsAnswer, DnsHttpsServer};
use tokio::runtime::Handle;
use tokio::sync::{RwLock, Semaphore};
use crate::connector::{new_default, new_tunneled, TunneledHyperClient};
enum ResolverSelection {
DefaultRoute(Dns),
Tunneled(Dns<TunneledHyperClient>),
None,
}
pub struct Resolver {
name_servers: Vec<DnsHttpsServer>,
inner: RwLock<ResolverSelection>,
permit: Semaphore,
}
impl Resolver {
pub fn new(
name_servers: Vec<DnsHttpsServer>,
_runtime: &Handle,
) -> Result<Resolver, ResolveError> {
Ok(Resolver {
inner: RwLock::new(ResolverSelection::DefaultRoute(new_default(
name_servers.clone(),
))),
permit: Semaphore::new(50),
name_servers,
})
}
pub async fn lookup(&self, name: LowerName, rtype: RecordType) -> Result<Lookup, ResolveError> {
debug!("resolver waiting for permit");
// Avoid hitting the memory limit by throttling pending lookups.
let _permit = self.permit.acquire();
debug!("resolver waiting for read lock");
let name = Name::from(name);
let result: Result<Vec<DnsAnswer>, DnsError> = match &*self.inner.read().await {
ResolverSelection::None => {
error!("lookup while resolver is disabled");
return Err(ResolveError::from(ResolveErrorKind::Message(
"resolver is disabled",
)));
}
ResolverSelection::Tunneled(resolver) => {
debug!("resolver performing tunneled lookup");
lookup_to_doh_resolve(resolver, name.to_ascii(), rtype).await
}
ResolverSelection::DefaultRoute(resolver) => {
debug!("resolver performing default lookup");
lookup_to_doh_resolve(resolver, name.to_ascii(), rtype).await
}
};
let q = Query::query(name, rtype);
match result {
Ok(records) => {
let lookup = doh_records_to_lookup(q, records)?;
for record in lookup.record_iter() {
debug!("return record: {:?}", record);
}
Ok(lookup)
}
Err(e) => match e {
DnsError::Status(RCode::NXDomain) => {
Err(ResolveError::from(ResolveErrorKind::NoRecordsFound {
query: q,
valid_until: None,
}))
}
_ => Err(ResolveError::from(ResolveErrorKind::Msg(format!(
"error resolving: {}",
e
)))),
},
}
}
pub async fn toggle(&self, tunneled: Option<bool>) {
info!("toggle waiting for read lock");
let needs_update = match &*self.inner.read().await {
ResolverSelection::Tunneled(_) => match tunneled {
Some(tunneled) => tunneled == false,
None => true,
},
ResolverSelection::DefaultRoute(_) => match tunneled {
Some(tunneled) => tunneled == true,
None => true,
},
ResolverSelection::None => tunneled != None,
};
if !needs_update {
info!("toggle noop");
return;
}
info!("toggle waiting for write lock");
let selected = &mut *self.inner.write().await;
*selected = match tunneled {
Some(tunneled) => {
if tunneled {
info!("toggle tunneled DNS");
ResolverSelection::Tunneled(Dns::new(new_tunneled(self.name_servers.clone())))
} else {
info!("toggle default DNS");
ResolverSelection::DefaultRoute(new_default(self.name_servers.clone()))
}
}
None => {
info!("toggle disabled DNS");
ResolverSelection::None
}
}
}
}
async fn lookup_to_doh_resolve<C: DnsClient>(
resolver: &Dns<C>,
name: String,
rtype: RecordType,
) -> Result<Vec<DnsAnswer>, DnsError> {
match rtype {
RecordType::A => resolver.resolve_a(&name).await,
RecordType::AAAA => resolver.resolve_aaaa(&name).await,
RecordType::CNAME => resolver.resolve_cname(&name).await,
RecordType::MX => resolver.resolve_mx(&name).await,
RecordType::NAPTR => resolver.resolve_naptr(&name).await,
RecordType::NS => resolver.resolve_ns(&name).await,
RecordType::PTR => resolver.resolve_ptr(&name).await,
RecordType::SOA => resolver.resolve_soa(&name).await,
RecordType::SRV => resolver.resolve_srv(&name).await,
RecordType::TXT => resolver.resolve_txt(&name).await,
RecordType::TLSA => resolver.resolve_tlsa(&name).await,
RecordType::CAA => resolver.resolve_caa(&name).await,
RecordType::SSHFP => resolver.resolve_sshfp(&name).await,
// TODO: dnssec types
// RecordType::DNSSEC(rtype) => match rtype {
// }
RecordType::Unknown(rtype) => {
if rtype == 65 {
// Reduce error log spam for this known issue on iOS 14.
debug!("HTTPS record type not implemented");
return Ok(vec![]);
} else {
error!("unknown or invalid record type: {:?}", rtype);
}
return Err(DnsError::InvalidRecordType);
}
_ => {
info!("valid but not yet supported record type: {:?}", rtype);
return Err(DnsError::InvalidRecordType);
}
}
}
fn doh_records_to_lookup(
query: Query,
doh_records: Vec<DnsAnswer>,
) -> Result<Lookup, ResolveError> {
let parse_error = |e| format!("error parsing: {}", e);
let rdata = |data: &str, rtype: RecordType| match rtype {
RecordType::A => Ok(RData::A(data.parse().map_err(parse_error)?)),
RecordType::AAAA => Ok(RData::AAAA(data.parse().map_err(parse_error)?)),
RecordType::CNAME => Ok(RData::CNAME(DomainName::from_str(data)?)),
RecordType::MX => {
let mut parts = data.split_ascii_whitespace();
if let (Some(part_1), Some(part_2)) = (parts.next(), parts.next()) {
if let (Ok(prio), Ok(name)) = (part_1.parse::<u16>(), part_2.parse::<Name>()) {
return Ok(RData::MX(mx::MX::new(prio, name)));
}
}
Err("invalid MX data".to_string())
}
// RecordType::NAPTR => rr.set_rdata(RData::NAPTR(r.data.parse().unwrap())),
RecordType::NS => Ok(RData::NS(data.parse()?)),
RecordType::PTR => Ok(RData::PTR(data.parse()?)),
RecordType::SOA => {
let mut parts = data.split_ascii_whitespace();
let mname = parts
.next()
.and_then(|n| n.parse::<Name>().ok())
.ok_or("invalid mname")?;
let rname = parts
.next()
.and_then(|n| n.parse::<Name>().ok())
.ok_or("invalid rname")?;
let serial = parts
.next()
.and_then(|n| n.parse::<u32>().ok())
.ok_or("invalid serial")?;
let refresh = parts
.next()
.and_then(|n| n.parse::<i32>().ok())
.ok_or("invalid refresh")?;
let retry = parts
.next()
.and_then(|n| n.parse::<i32>().ok())
.ok_or("invalid retry")?;
let expire = parts
.next()
.and_then(|n| n.parse::<i32>().ok())
.ok_or("invalid expire")?;
let minimum = parts
.next()
.and_then(|n| n.parse::<u32>().ok())
.ok_or("invalid minimum")?;
Ok(RData::SOA(soa::SOA::new(
mname, rname, serial, refresh, retry, expire, minimum,
)))
}
RecordType::TXT => Ok(RData::TXT(txt::TXT::new(vec![data
.trim_matches('"')
.to_string()]))),
RecordType::SRV => {
// priority: u16, weight: u16, port: u16, target: Name
let mut parts = data.split_ascii_whitespace();
let priority = parts
.next()
.and_then(|n| n.parse::<u16>().ok())
.ok_or("invalid priority")?;
let weight = parts
.next()
.and_then(|n| n.parse::<u16>().ok())
.ok_or("invalid weight")?;
let port = parts
.next()
.and_then(|n| n.parse::<u16>().ok())
.ok_or("invalid port")?;
let target = parts
.next()
.and_then(|n| n.parse::<Name>().ok())
.ok_or("invalid target")?;
Ok(RData::SRV(srv::SRV::new(priority, weight, port, target)))
}
// RecordType::TLSA => rr.set_rdata(RData::TLSA(r.data.parse().unwrap())),
// RecordType::CAA => rr.set_rdata(RData::CAA(r.data.parse().unwrap())),
// RecordType::SSHFP => rr.set_rdata(RData::SSHFP(r.data.parse().unwrap())),
_ => {
debug!("{}", data);
Err("unsupported record type".to_string())
}
};
let records: Vec<Record> = doh_records
.iter()
.filter_map(|r| {
let rtype = RecordType::from(r.r#type as u16);
let mut rr = Record::new();
let name = match Name::from_str(&r.name) {
Ok(name) => name,
Err(e) => {
debug!("could not parse domain name: {}", e);
return None;
}
};
rr.set_name(name).set_ttl(r.TTL).set_record_type(rtype);
match rdata(&r.data, rtype) {
Ok(rdata) => {
rr.set_rdata(rdata);
Some(rr)
}
Err(e) => {
debug!("could not convert record data: {}", e);
None
}
}
})
.collect();
let ttl = match doh_records.iter().min_by_key(|r| r.TTL) {
Some(r) => r.TTL,
None => 10,
};
let valid_until = Instant::now() + Duration::from_secs(ttl.into());
Ok(Lookup::new_with_deadline(
query,
Arc::new(records),
valid_until,
))
}
// async fn connect(addr: SocketAddr) -> Result<AsyncIo02As03<TimeoutStream<TcpStream>>, io::Error> {
// let stream = timeout(Duration::from_secs(5), connect_tunnel(addr)).await?;
// let mut stream = TimeoutStream::new(stream?);
// // Work around for avoiding stalled connections when switching source IPs.
// stream.set_read_timeout(Some(Duration::from_secs(5)));
// stream.set_write_timeout(Some(Duration::from_secs(5)));
// Ok(AsyncIo02As03(stream))
// }
// }
// async fn connect_tunnel(addr: SocketAddr) -> Result<TcpStream, io::Error> {
// task::spawn_blocking(move || {
// // Only bind if we're not connecting to a DNS server on a private network.
// let is_private = match addr.ip() {
// IpAddr::V4(ip) => ip.is_private(),
// IpAddr::V6(_) => false, // uncommon, assume it's not.
// };
// // Figure out which tunnel interface to bind on.
// let nics = network::IfAddrs::get()?;
// let tunnel = nics
// .iter()
// .filter(|nic| nic.name().contains("tun"))
// .find(|nic| match nic.addr() {
// Some(IpAddr::V4(ip)) => !ip.is_loopback(),
// Some(IpAddr::V6(_)) => false,
// None => false,
// })
// .map(|nic| (nic.name(), nic.addr()));
// match tunnel {
// Some((name, Some(addr))) => info!("binding nameserver connection to {} ({})", name, addr),
// Some((name, None)) => warn!("binding nameserver connection to {} (no addr set?)", name),
// None => warn!("couldn't figure out a tun interface to bind on"),
// }
// let index = |name| {
// let name = CString::new(name)?;
// let index = unsafe { libc::if_nametoindex(name.as_ptr()) };
// if index == 0 {
// Err(io::Error::new(
// io::ErrorKind::NotFound,
// "interface was not found",
// ))
// } else {
// Ok(index)
// }
// };
// let socket = if addr.is_ipv4() {
// let s = Socket::new(Domain::IPV4, Type::STREAM, None)?;
// // Only bind to tunnel interface if we're not targeting a private network.
// if !is_private {
// if let Some((name, _)) = tunnel {
// s.set_bound_interface(index(name)?)?;
// }
// }
// s
// } else {
// let s = Socket::new(Domain::IPV6, Type::STREAM, None)?;
// // TODO: check if private network.
// if let Some((name, _)) = tunnel {
// s.set_bound_interface_v6(index(name)?)?;
// }
// s
// };
// socket.connect(&addr.into())?;
// TcpStream::from_std(socket.into_tcp_stream())
// })
// .await?
// }
#[cfg(test)]
mod test {
use doh_dns::{Dns, DnsHttpsServer};
use std::time::Duration;
#[tokio::test]
async fn doh_servers() {
let cloudflare = (
"cloudflare",
Some(DnsHttpsServer::new(
"cloudflare-dns.com".into(),
"dns-query".into(),
vec![
"1.1.1.1".parse().unwrap(),
"2606:4700:4700::1111".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let cloudflare_malware = (
"cloudflare malware",
Some(DnsHttpsServer::new(
"cloudflare-dns.com".into(),
"dns-query".into(),
vec![
"1.1.1.2".parse().unwrap(),
"2606:4700:4700::1112".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let cloudflare_adult = (
"cloudflare adult",
Some(DnsHttpsServer::new(
"cloudflare-dns.com".into(),
"dns-query".into(),
vec![
"1.1.1.3".parse().unwrap(),
"2606:4700:4700::1113".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let gesellschaft = (
"gesellschaft",
Some(DnsHttpsServer::new(
"dns.digitale-gesellschaft.ch".into(),
"dns-query".into(),
vec![
"185.95.218.42".parse().unwrap(),
"2a05:fc84::4".parse().unwrap(),
],
Duration::from_secs(1),
)),
);
let opendns = (
"opendns",
Some(DnsHttpsServer::new(
"doh.opendns.com".into(),
"dns-query".into(),
vec!["208.67.220.220".parse().unwrap()],
Duration::from_secs(5),
)),
);
let opendns_family = (
"opendns family",
Some(DnsHttpsServer::new(
"doh.opendns.com".into(),
"dns-query".into(),
vec!["208.67.222.123".parse().unwrap()],
Duration::from_secs(5),
)),
);
let opennic_usa = (
"opennic usa",
Some(DnsHttpsServer::new(
"ns03.dns.tin-fan.com".into(),
"dns-query".into(),
vec![
"155.138.240.237".parse().unwrap(),
"2001:19f0:6401:b3d:5400:2ff:fe5a:fb9f".parse().unwrap(),
],
Duration::from_secs(5),
)),
);
let opennic_eu = (
"opennic eu",
Some(DnsHttpsServer::new(
"ns01.dns.tin-fan.com".into(),
"dns-query".into(),
vec![
"95.217.16.205".parse().unwrap(),
"2a01:4f9:c010:6093::3485".parse().unwrap(),
],
Duration::from_secs(5),
)),
);
let mut servers = [
cloudflare,
cloudflare_malware,
cloudflare_adult,
gesellschaft,
// opendns,
// opendns_family,
opennic_eu,
opennic_usa,
];
for server in &mut servers {
println!("using: {}", server.0);
let resolver = Dns::with_servers(vec![server.1.take().unwrap()]).unwrap();
let res = resolver.resolve_a("google.com").await.unwrap();
assert_ne!(0, res.len())
}
}
#[tokio::test]
async fn
|
() {
let resolver = Dns::default();
let result = resolver.resolve_txt("blokada.org").await.unwrap();
for txt in result {
println!("txt: '{}'", txt.data);
}
}
}
|
test_txt
|
identifier_name
|
platform_x86_64.rs
|
#[cfg(not(test))]
use alloc::format;
#[cfg(not(test))]
use alloc::prelude::v1::*;
#[cfg(test)]
use std::prelude::v1::*;
use crate::debugshell::{Command, DebugShell};
pub fn builtin_exit_qemu(_sh: &mut DebugShell, _: Vec<String>) -> String {
|
format!("asked qemu nicely to exit!\n")
}
pub fn builtin_vga_scrollback_data(_sh: &mut DebugShell, _: Vec<String>) -> String {
use x86_64::instructions::interrupts::without_interrupts;
without_interrupts(|| {
use crate::platform::x86_64::vga::VGA_WRITER;
let vga = VGA_WRITER.get();
if let Some(ref sb) = vga.scrollback {
let mut scrollback = sb.clone();
let len = scrollback.len();
format!(
"vga scrollback: len={} last={:?}\n",
len,
scrollback.remove(len - 2)
)
} else {
"vga scrollback is not enabled!\n".into()
}
})
}
pub fn register_platform_builtins(sh: &mut DebugShell) {
sh.commands.push(Command {
name: String::from("vga-scrollback?"),
func: builtin_vga_scrollback_data,
});
sh.commands.push(Command {
name: String::from("exit-qemu!"),
func: builtin_exit_qemu,
});
}
|
use crate::platform::x86_64::exit_qemu;
unsafe {
exit_qemu();
}
|
random_line_split
|
platform_x86_64.rs
|
#[cfg(not(test))]
use alloc::format;
#[cfg(not(test))]
use alloc::prelude::v1::*;
#[cfg(test)]
use std::prelude::v1::*;
use crate::debugshell::{Command, DebugShell};
pub fn builtin_exit_qemu(_sh: &mut DebugShell, _: Vec<String>) -> String {
use crate::platform::x86_64::exit_qemu;
unsafe {
exit_qemu();
}
format!("asked qemu nicely to exit!\n")
}
pub fn builtin_vga_scrollback_data(_sh: &mut DebugShell, _: Vec<String>) -> String {
use x86_64::instructions::interrupts::without_interrupts;
without_interrupts(|| {
use crate::platform::x86_64::vga::VGA_WRITER;
let vga = VGA_WRITER.get();
if let Some(ref sb) = vga.scrollback {
let mut scrollback = sb.clone();
let len = scrollback.len();
format!(
"vga scrollback: len={} last={:?}\n",
len,
scrollback.remove(len - 2)
)
} else
|
})
}
pub fn register_platform_builtins(sh: &mut DebugShell) {
sh.commands.push(Command {
name: String::from("vga-scrollback?"),
func: builtin_vga_scrollback_data,
});
sh.commands.push(Command {
name: String::from("exit-qemu!"),
func: builtin_exit_qemu,
});
}
|
{
"vga scrollback is not enabled!\n".into()
}
|
conditional_block
|
platform_x86_64.rs
|
#[cfg(not(test))]
use alloc::format;
#[cfg(not(test))]
use alloc::prelude::v1::*;
#[cfg(test)]
use std::prelude::v1::*;
use crate::debugshell::{Command, DebugShell};
pub fn builtin_exit_qemu(_sh: &mut DebugShell, _: Vec<String>) -> String
|
pub fn builtin_vga_scrollback_data(_sh: &mut DebugShell, _: Vec<String>) -> String {
use x86_64::instructions::interrupts::without_interrupts;
without_interrupts(|| {
use crate::platform::x86_64::vga::VGA_WRITER;
let vga = VGA_WRITER.get();
if let Some(ref sb) = vga.scrollback {
let mut scrollback = sb.clone();
let len = scrollback.len();
format!(
"vga scrollback: len={} last={:?}\n",
len,
scrollback.remove(len - 2)
)
} else {
"vga scrollback is not enabled!\n".into()
}
})
}
pub fn register_platform_builtins(sh: &mut DebugShell) {
sh.commands.push(Command {
name: String::from("vga-scrollback?"),
func: builtin_vga_scrollback_data,
});
sh.commands.push(Command {
name: String::from("exit-qemu!"),
func: builtin_exit_qemu,
});
}
|
{
use crate::platform::x86_64::exit_qemu;
unsafe {
exit_qemu();
}
format!("asked qemu nicely to exit!\n")
}
|
identifier_body
|
platform_x86_64.rs
|
#[cfg(not(test))]
use alloc::format;
#[cfg(not(test))]
use alloc::prelude::v1::*;
#[cfg(test)]
use std::prelude::v1::*;
use crate::debugshell::{Command, DebugShell};
pub fn builtin_exit_qemu(_sh: &mut DebugShell, _: Vec<String>) -> String {
use crate::platform::x86_64::exit_qemu;
unsafe {
exit_qemu();
}
format!("asked qemu nicely to exit!\n")
}
pub fn builtin_vga_scrollback_data(_sh: &mut DebugShell, _: Vec<String>) -> String {
use x86_64::instructions::interrupts::without_interrupts;
without_interrupts(|| {
use crate::platform::x86_64::vga::VGA_WRITER;
let vga = VGA_WRITER.get();
if let Some(ref sb) = vga.scrollback {
let mut scrollback = sb.clone();
let len = scrollback.len();
format!(
"vga scrollback: len={} last={:?}\n",
len,
scrollback.remove(len - 2)
)
} else {
"vga scrollback is not enabled!\n".into()
}
})
}
pub fn
|
(sh: &mut DebugShell) {
sh.commands.push(Command {
name: String::from("vga-scrollback?"),
func: builtin_vga_scrollback_data,
});
sh.commands.push(Command {
name: String::from("exit-qemu!"),
func: builtin_exit_qemu,
});
}
|
register_platform_builtins
|
identifier_name
|
one_time_server.rs
|
//! A very simple HTTP server which responds to only one connection with the plain text "Hello, World!".
#![crate_name = "one_time_server"]
#![allow(unstable)]
extern crate time;
extern crate http;
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::Writer;
use http::server::{Config, Server, Request, ResponseWriter};
use http::headers::content_type::MediaType;
#[derive(Clone)]
struct HelloWorldServer;
impl Server for HelloWorldServer {
fn get_config(&self) -> Config {
Config { bind_address: SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: 8001 } }
}
fn handle_request(&self, _r: Request, w: &mut ResponseWriter) {
w.headers.date = Some(time::now_utc());
w.headers.content_length = Some(14);
w.headers.content_type = Some(MediaType {
type_: String::from_str("text"),
subtype: String::from_str("plain"),
parameters: vec!((String::from_str("charset"), String::from_str("UTF-8")))
});
w.headers.server = Some(String::from_str("Example"));
w.write(b"Hello, World!\n").unwrap();
|
Ok(_) => println!("done serving"),
Err(e) => println!("failed to serve: {}", e)
}
}
|
}
}
fn main() {
match HelloWorldServer.serve_once(true, None) {
|
random_line_split
|
one_time_server.rs
|
//! A very simple HTTP server which responds to only one connection with the plain text "Hello, World!".
#![crate_name = "one_time_server"]
#![allow(unstable)]
extern crate time;
extern crate http;
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::Writer;
use http::server::{Config, Server, Request, ResponseWriter};
use http::headers::content_type::MediaType;
#[derive(Clone)]
struct HelloWorldServer;
impl Server for HelloWorldServer {
fn get_config(&self) -> Config
|
fn handle_request(&self, _r: Request, w: &mut ResponseWriter) {
w.headers.date = Some(time::now_utc());
w.headers.content_length = Some(14);
w.headers.content_type = Some(MediaType {
type_: String::from_str("text"),
subtype: String::from_str("plain"),
parameters: vec!((String::from_str("charset"), String::from_str("UTF-8")))
});
w.headers.server = Some(String::from_str("Example"));
w.write(b"Hello, World!\n").unwrap();
}
}
fn main() {
match HelloWorldServer.serve_once(true, None) {
Ok(_) => println!("done serving"),
Err(e) => println!("failed to serve: {}", e)
}
}
|
{
Config { bind_address: SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: 8001 } }
}
|
identifier_body
|
one_time_server.rs
|
//! A very simple HTTP server which responds to only one connection with the plain text "Hello, World!".
#![crate_name = "one_time_server"]
#![allow(unstable)]
extern crate time;
extern crate http;
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::Writer;
use http::server::{Config, Server, Request, ResponseWriter};
use http::headers::content_type::MediaType;
#[derive(Clone)]
struct HelloWorldServer;
impl Server for HelloWorldServer {
fn get_config(&self) -> Config {
Config { bind_address: SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: 8001 } }
}
fn handle_request(&self, _r: Request, w: &mut ResponseWriter) {
w.headers.date = Some(time::now_utc());
w.headers.content_length = Some(14);
w.headers.content_type = Some(MediaType {
type_: String::from_str("text"),
subtype: String::from_str("plain"),
parameters: vec!((String::from_str("charset"), String::from_str("UTF-8")))
});
w.headers.server = Some(String::from_str("Example"));
w.write(b"Hello, World!\n").unwrap();
}
}
fn
|
() {
match HelloWorldServer.serve_once(true, None) {
Ok(_) => println!("done serving"),
Err(e) => println!("failed to serve: {}", e)
}
}
|
main
|
identifier_name
|
server.rs
|
use rocket;
use rocket::http::RawStr;
use rocket::request::{FromParam, Request};
use rocket::response::{self, NamedFile, Redirect, Responder};
use scheduled_executor::ThreadPoolExecutor;
use cache::Cache;
use config::Config;
use error::*;
use live_consumer::{self, LiveConsumerStore};
use metadata::ClusterId;
use utils::{GZip, RequestLogger};
use web_server::api;
use web_server::pages;
use std;
use std::path::{Path, PathBuf};
#[get("/")]
fn index() -> Redirect {
Redirect::to("/clusters")
}
// Make ClusterId a valid parameter
impl<'a> FromParam<'a> for ClusterId {
type Error = ();
fn from_param(param: &'a RawStr) -> std::result::Result<Self, Self::Error> {
Ok(param.as_str().into())
}
}
#[get("/public/<file..>")]
fn files(file: PathBuf) -> Option<CachedFile> {
NamedFile::open(Path::new("resources/web_server/public/").join(file))
.map(CachedFile::from)
.ok()
}
#[get("/public/<file..>?<version>")]
fn files_v(file: PathBuf, version: &RawStr) -> Option<CachedFile>
|
pub struct CachedFile {
ttl: usize,
file: NamedFile,
}
impl CachedFile {
pub fn from(file: NamedFile) -> CachedFile {
CachedFile::with_ttl(1800, file)
}
pub fn with_ttl(ttl: usize, file: NamedFile) -> CachedFile {
CachedFile { ttl, file }
}
}
impl<'a> Responder<'a> for CachedFile {
fn respond_to(self, request: &Request) -> response::Result<'a> {
let inner_response = self.file.respond_to(request).unwrap(); // fixme
response::Response::build_from(inner_response)
.raw_header(
"Cache-Control",
format!("max-age={}, must-revalidate", self.ttl),
)
.ok()
}
}
pub fn run_server(executor: &ThreadPoolExecutor, cache: Cache, config: &Config) -> Result<()> {
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("?");
info!(
"Starting kafka-view v{}, listening on {}:{}.",
version, config.listen_host, config.listen_port
);
let rocket_env = rocket::config::Environment::active()
.chain_err(|| "Invalid ROCKET_ENV environment variable")?;
let rocket_config = rocket::config::Config::build(rocket_env)
.address(config.listen_host.to_owned())
.port(config.listen_port)
.workers(4)
.finalize()
.chain_err(|| "Invalid rocket configuration")?;
rocket::custom(rocket_config)
.attach(GZip)
.attach(RequestLogger)
.manage(cache)
.manage(config.clone())
.manage(LiveConsumerStore::new(executor.clone()))
.mount(
"/",
routes![
index,
files,
files_v,
pages::cluster::cluster_page,
pages::cluster::broker_page,
pages::clusters::clusters_page,
pages::group::group_page,
pages::internals::caches_page,
pages::internals::live_consumers_page,
pages::omnisearch::consumer_search,
pages::omnisearch::consumer_search_p,
pages::omnisearch::omnisearch,
pages::omnisearch::omnisearch_p,
pages::omnisearch::topic_search,
pages::omnisearch::topic_search_p,
pages::topic::topic_page,
api::brokers,
api::cache_brokers,
api::cache_metrics,
api::cache_offsets,
api::cluster_reassignment,
api::live_consumers,
api::cluster_groups,
api::cluster_topics,
api::consumer_search,
api::group_members,
api::group_offsets,
api::topic_groups,
api::topic_search,
api::topic_topology,
live_consumer::topic_tailer_api,
],
)
.launch();
Ok(())
}
|
{
let _ = version; // just ignore version
NamedFile::open(Path::new("resources/web_server/public/").join(file))
.map(CachedFile::from)
.ok()
}
|
identifier_body
|
server.rs
|
use rocket;
use rocket::http::RawStr;
use rocket::request::{FromParam, Request};
use rocket::response::{self, NamedFile, Redirect, Responder};
use scheduled_executor::ThreadPoolExecutor;
use cache::Cache;
use config::Config;
use error::*;
use live_consumer::{self, LiveConsumerStore};
use metadata::ClusterId;
use utils::{GZip, RequestLogger};
use web_server::api;
use web_server::pages;
use std;
use std::path::{Path, PathBuf};
#[get("/")]
fn index() -> Redirect {
Redirect::to("/clusters")
}
// Make ClusterId a valid parameter
impl<'a> FromParam<'a> for ClusterId {
type Error = ();
fn from_param(param: &'a RawStr) -> std::result::Result<Self, Self::Error> {
Ok(param.as_str().into())
}
}
#[get("/public/<file..>")]
fn files(file: PathBuf) -> Option<CachedFile> {
NamedFile::open(Path::new("resources/web_server/public/").join(file))
.map(CachedFile::from)
.ok()
}
#[get("/public/<file..>?<version>")]
fn files_v(file: PathBuf, version: &RawStr) -> Option<CachedFile> {
let _ = version; // just ignore version
NamedFile::open(Path::new("resources/web_server/public/").join(file))
.map(CachedFile::from)
.ok()
}
pub struct CachedFile {
ttl: usize,
file: NamedFile,
}
impl CachedFile {
pub fn from(file: NamedFile) -> CachedFile {
CachedFile::with_ttl(1800, file)
}
pub fn with_ttl(ttl: usize, file: NamedFile) -> CachedFile {
CachedFile { ttl, file }
}
}
impl<'a> Responder<'a> for CachedFile {
fn respond_to(self, request: &Request) -> response::Result<'a> {
let inner_response = self.file.respond_to(request).unwrap(); // fixme
response::Response::build_from(inner_response)
.raw_header(
|
}
pub fn run_server(executor: &ThreadPoolExecutor, cache: Cache, config: &Config) -> Result<()> {
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("?");
info!(
"Starting kafka-view v{}, listening on {}:{}.",
version, config.listen_host, config.listen_port
);
let rocket_env = rocket::config::Environment::active()
.chain_err(|| "Invalid ROCKET_ENV environment variable")?;
let rocket_config = rocket::config::Config::build(rocket_env)
.address(config.listen_host.to_owned())
.port(config.listen_port)
.workers(4)
.finalize()
.chain_err(|| "Invalid rocket configuration")?;
rocket::custom(rocket_config)
.attach(GZip)
.attach(RequestLogger)
.manage(cache)
.manage(config.clone())
.manage(LiveConsumerStore::new(executor.clone()))
.mount(
"/",
routes![
index,
files,
files_v,
pages::cluster::cluster_page,
pages::cluster::broker_page,
pages::clusters::clusters_page,
pages::group::group_page,
pages::internals::caches_page,
pages::internals::live_consumers_page,
pages::omnisearch::consumer_search,
pages::omnisearch::consumer_search_p,
pages::omnisearch::omnisearch,
pages::omnisearch::omnisearch_p,
pages::omnisearch::topic_search,
pages::omnisearch::topic_search_p,
pages::topic::topic_page,
api::brokers,
api::cache_brokers,
api::cache_metrics,
api::cache_offsets,
api::cluster_reassignment,
api::live_consumers,
api::cluster_groups,
api::cluster_topics,
api::consumer_search,
api::group_members,
api::group_offsets,
api::topic_groups,
api::topic_search,
api::topic_topology,
live_consumer::topic_tailer_api,
],
)
.launch();
Ok(())
}
|
"Cache-Control",
format!("max-age={}, must-revalidate", self.ttl),
)
.ok()
}
|
random_line_split
|
server.rs
|
use rocket;
use rocket::http::RawStr;
use rocket::request::{FromParam, Request};
use rocket::response::{self, NamedFile, Redirect, Responder};
use scheduled_executor::ThreadPoolExecutor;
use cache::Cache;
use config::Config;
use error::*;
use live_consumer::{self, LiveConsumerStore};
use metadata::ClusterId;
use utils::{GZip, RequestLogger};
use web_server::api;
use web_server::pages;
use std;
use std::path::{Path, PathBuf};
#[get("/")]
fn index() -> Redirect {
Redirect::to("/clusters")
}
// Make ClusterId a valid parameter
impl<'a> FromParam<'a> for ClusterId {
type Error = ();
fn from_param(param: &'a RawStr) -> std::result::Result<Self, Self::Error> {
Ok(param.as_str().into())
}
}
#[get("/public/<file..>")]
fn files(file: PathBuf) -> Option<CachedFile> {
NamedFile::open(Path::new("resources/web_server/public/").join(file))
.map(CachedFile::from)
.ok()
}
#[get("/public/<file..>?<version>")]
fn files_v(file: PathBuf, version: &RawStr) -> Option<CachedFile> {
let _ = version; // just ignore version
NamedFile::open(Path::new("resources/web_server/public/").join(file))
.map(CachedFile::from)
.ok()
}
pub struct
|
{
ttl: usize,
file: NamedFile,
}
impl CachedFile {
pub fn from(file: NamedFile) -> CachedFile {
CachedFile::with_ttl(1800, file)
}
pub fn with_ttl(ttl: usize, file: NamedFile) -> CachedFile {
CachedFile { ttl, file }
}
}
impl<'a> Responder<'a> for CachedFile {
fn respond_to(self, request: &Request) -> response::Result<'a> {
let inner_response = self.file.respond_to(request).unwrap(); // fixme
response::Response::build_from(inner_response)
.raw_header(
"Cache-Control",
format!("max-age={}, must-revalidate", self.ttl),
)
.ok()
}
}
pub fn run_server(executor: &ThreadPoolExecutor, cache: Cache, config: &Config) -> Result<()> {
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("?");
info!(
"Starting kafka-view v{}, listening on {}:{}.",
version, config.listen_host, config.listen_port
);
let rocket_env = rocket::config::Environment::active()
.chain_err(|| "Invalid ROCKET_ENV environment variable")?;
let rocket_config = rocket::config::Config::build(rocket_env)
.address(config.listen_host.to_owned())
.port(config.listen_port)
.workers(4)
.finalize()
.chain_err(|| "Invalid rocket configuration")?;
rocket::custom(rocket_config)
.attach(GZip)
.attach(RequestLogger)
.manage(cache)
.manage(config.clone())
.manage(LiveConsumerStore::new(executor.clone()))
.mount(
"/",
routes![
index,
files,
files_v,
pages::cluster::cluster_page,
pages::cluster::broker_page,
pages::clusters::clusters_page,
pages::group::group_page,
pages::internals::caches_page,
pages::internals::live_consumers_page,
pages::omnisearch::consumer_search,
pages::omnisearch::consumer_search_p,
pages::omnisearch::omnisearch,
pages::omnisearch::omnisearch_p,
pages::omnisearch::topic_search,
pages::omnisearch::topic_search_p,
pages::topic::topic_page,
api::brokers,
api::cache_brokers,
api::cache_metrics,
api::cache_offsets,
api::cluster_reassignment,
api::live_consumers,
api::cluster_groups,
api::cluster_topics,
api::consumer_search,
api::group_members,
api::group_offsets,
api::topic_groups,
api::topic_search,
api::topic_topology,
live_consumer::topic_tailer_api,
],
)
.launch();
Ok(())
}
|
CachedFile
|
identifier_name
|
structure.rs
|
extern crate rand;
use self::rand::Rng;
// Read files
use std::io::prelude::*;
use std::fs;
use core::renderer::RGB;
use super::Filter;
use core::world::dungeon::map::{self, Measurable, tile, Tile};
///
/// Structure placer
///
/// Generate prefab structures based on files and place them on the grid
///
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Structure {}
impl Structure {
///
/// Add a random structure
///
fn add_rand_struct(&mut self, grid: &mut map::Grid<Tile>) {
// RNG
let mut rng = rand::thread_rng();
// Create a vector out of collecting the read_dir by mapping the unwrapped paths
let paths : Vec<_> = fs::read_dir("./strct").unwrap().map(|res| res.unwrap().path()).collect();
// Choose a random element (aka file from paths)
let fname = rng.choose(&paths).unwrap();
let mut file = fs::File::open(fname).unwrap();
debugln!("struct", format!("adding struct {}", fname.display()));
// Create empty string and read to it
let mut string = String::new();
file.read_to_string(&mut string).unwrap();
// Prepare method to store data read from file
let mut strct : map::Grid<Tile> = vec![];
let mut line : Vec<Tile> = vec![];
// Read file as characters
for ch in string.chars() {
// If not a newline
if ch != '\n' {
// Match tile based on character
let tile = {
match ch {
'#' => Tile::new("Wall", ' ', RGB(40, 40, 40), RGB(33, 33, 33), tile::Type::Wall(tile::Wall::Normal)),
// So on windows I've noticed that in the text files for structures, random escape characters get added in so might as well
// just make them floors.
'.' | ' ' | '\t' | '\r' => Tile::new("Floor", ' ', RGB(27, 27, 27), RGB(20, 20, 20), tile::Type::Floor(tile::Floor::Normal)),
'"' => Tile::new("Tall Grass", '"', RGB(76, 74, 75), RGB(20, 20, 20), tile::Type::TallGrass),
'&' => Tile::new("Fountain", '&', RGB(201, 195, 195), RGB(20, 20, 20), tile::Type::ArtStructure),
'<' => Tile::new("Up Stair", '<', RGB(255, 255, 255), RGB(0, 0, 0), tile::Type::Stair(tile::Stair::UpStair(tile::UpStair::Normal))),
'>' => Tile::new("Down Stair", '>', RGB(255, 255, 255), RGB(0, 0, 0), tile::Type::Stair(tile::Stair::DownStair(tile::DownStair::Normal))),
_ => panic!("Unknown character: {}", ch)
}
};
// Push character to the line
line.push(tile);
// If we hit a new line we need to push the line to the tile struct, and empty the line
} else {
strct.push(line);
line = vec![];
}
}
// Rotate randomly
// (x, y) rotated 90 degrees around (0, 0) is (-y, x).
// However, vectors are sized in a way that won't allow for negative indexing.
// Our formula for point tranformation should be:
// (-y + total x length, x)
let rot90 = | grid: map::Grid<Tile> | -> map::Grid<Tile> {
// We could clone but I feel like this way is faster
let mut rot_grid = map::Grid::<Tile>::new();
// Measure x on y axis
for x in 0..grid.height() {
// Fill new vecs with init
let mut vec = Vec::<Tile>::new();
// Measure y on x axis
for y in 0..grid.width() {
vec.push(
// Rotation performed by following above function
grid[grid.width() - 1 - y][x].clone()
);
}
rot_grid.push(vec);
}
return rot_grid;
};
// Perform 1 - 4 rotations
// NOTE: 4 rotations = starting positon. Might be a good idea to improve this
for _ in 0..rng.gen_range(0, 4) {
strct = rot90(strct);
}
// Read details of vector
let w = strct.width();
let h = strct.height();
// Read details of map
let total_w = grid.width();
let total_h = grid.height();
// Add to map if possible
let x = rng.gen_range(0, w + 1);
let y = rng.gen_range(0, h + 1);
// Break with no change
if x + w > total_w - 1 || y + h > total_h - 1 { return; }
// Apply change
for tx in x..x+w {
for ty in y..y+h {
grid[tx][ty] = strct[tx-x][ty-y].clone();
}
}
}
///
/// Return a new `Structure`
///
pub fn new() -> Self
|
}
impl Filter for Structure {
type Output = Tile;
fn apply(&mut self, grid: &mut map::Grid<Self::Output>) {
self.add_rand_struct(grid);
}
}
|
{
Structure {}
}
|
identifier_body
|
structure.rs
|
extern crate rand;
use self::rand::Rng;
// Read files
use std::io::prelude::*;
use std::fs;
use core::renderer::RGB;
use super::Filter;
use core::world::dungeon::map::{self, Measurable, tile, Tile};
///
/// Structure placer
///
/// Generate prefab structures based on files and place them on the grid
///
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Structure {}
impl Structure {
///
/// Add a random structure
///
fn add_rand_struct(&mut self, grid: &mut map::Grid<Tile>) {
// RNG
let mut rng = rand::thread_rng();
// Create a vector out of collecting the read_dir by mapping the unwrapped paths
let paths : Vec<_> = fs::read_dir("./strct").unwrap().map(|res| res.unwrap().path()).collect();
// Choose a random element (aka file from paths)
let fname = rng.choose(&paths).unwrap();
let mut file = fs::File::open(fname).unwrap();
debugln!("struct", format!("adding struct {}", fname.display()));
// Create empty string and read to it
let mut string = String::new();
file.read_to_string(&mut string).unwrap();
// Prepare method to store data read from file
let mut strct : map::Grid<Tile> = vec![];
let mut line : Vec<Tile> = vec![];
// Read file as characters
for ch in string.chars() {
// If not a newline
if ch != '\n' {
// Match tile based on character
let tile = {
match ch {
'#' => Tile::new("Wall", ' ', RGB(40, 40, 40), RGB(33, 33, 33), tile::Type::Wall(tile::Wall::Normal)),
// So on windows I've noticed that in the text files for structures, random escape characters get added in so might as well
// just make them floors.
'.' | ' ' | '\t' | '\r' => Tile::new("Floor", ' ', RGB(27, 27, 27), RGB(20, 20, 20), tile::Type::Floor(tile::Floor::Normal)),
'"' => Tile::new("Tall Grass", '"', RGB(76, 74, 75), RGB(20, 20, 20), tile::Type::TallGrass),
'&' => Tile::new("Fountain", '&', RGB(201, 195, 195), RGB(20, 20, 20), tile::Type::ArtStructure),
'<' => Tile::new("Up Stair", '<', RGB(255, 255, 255), RGB(0, 0, 0), tile::Type::Stair(tile::Stair::UpStair(tile::UpStair::Normal))),
'>' => Tile::new("Down Stair", '>', RGB(255, 255, 255), RGB(0, 0, 0), tile::Type::Stair(tile::Stair::DownStair(tile::DownStair::Normal))),
_ => panic!("Unknown character: {}", ch)
}
};
// Push character to the line
line.push(tile);
// If we hit a new line we need to push the line to the tile struct, and empty the line
} else {
strct.push(line);
line = vec![];
}
}
// Rotate randomly
// (x, y) rotated 90 degrees around (0, 0) is (-y, x).
// However, vectors are sized in a way that won't allow for negative indexing.
// Our formula for point tranformation should be:
// (-y + total x length, x)
let rot90 = | grid: map::Grid<Tile> | -> map::Grid<Tile> {
// We could clone but I feel like this way is faster
let mut rot_grid = map::Grid::<Tile>::new();
// Measure x on y axis
for x in 0..grid.height() {
// Fill new vecs with init
let mut vec = Vec::<Tile>::new();
// Measure y on x axis
for y in 0..grid.width() {
vec.push(
// Rotation performed by following above function
grid[grid.width() - 1 - y][x].clone()
);
}
rot_grid.push(vec);
}
return rot_grid;
};
// Perform 1 - 4 rotations
// NOTE: 4 rotations = starting positon. Might be a good idea to improve this
for _ in 0..rng.gen_range(0, 4) {
strct = rot90(strct);
}
// Read details of vector
let w = strct.width();
let h = strct.height();
// Read details of map
let total_w = grid.width();
let total_h = grid.height();
// Add to map if possible
let x = rng.gen_range(0, w + 1);
let y = rng.gen_range(0, h + 1);
// Break with no change
if x + w > total_w - 1 || y + h > total_h - 1 { return; }
// Apply change
for tx in x..x+w {
for ty in y..y+h {
grid[tx][ty] = strct[tx-x][ty-y].clone();
}
}
}
///
/// Return a new `Structure`
///
pub fn new() -> Self {
Structure {}
}
}
impl Filter for Structure {
type Output = Tile;
fn
|
(&mut self, grid: &mut map::Grid<Self::Output>) {
self.add_rand_struct(grid);
}
}
|
apply
|
identifier_name
|
structure.rs
|
extern crate rand;
use self::rand::Rng;
// Read files
use std::io::prelude::*;
use std::fs;
use core::renderer::RGB;
use super::Filter;
use core::world::dungeon::map::{self, Measurable, tile, Tile};
///
/// Structure placer
///
/// Generate prefab structures based on files and place them on the grid
///
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Structure {}
impl Structure {
///
/// Add a random structure
///
fn add_rand_struct(&mut self, grid: &mut map::Grid<Tile>) {
// RNG
let mut rng = rand::thread_rng();
// Create a vector out of collecting the read_dir by mapping the unwrapped paths
let paths : Vec<_> = fs::read_dir("./strct").unwrap().map(|res| res.unwrap().path()).collect();
// Choose a random element (aka file from paths)
let fname = rng.choose(&paths).unwrap();
let mut file = fs::File::open(fname).unwrap();
debugln!("struct", format!("adding struct {}", fname.display()));
// Create empty string and read to it
let mut string = String::new();
file.read_to_string(&mut string).unwrap();
// Prepare method to store data read from file
let mut strct : map::Grid<Tile> = vec![];
let mut line : Vec<Tile> = vec![];
// Read file as characters
for ch in string.chars() {
// If not a newline
if ch != '\n' {
// Match tile based on character
let tile = {
match ch {
'#' => Tile::new("Wall", ' ', RGB(40, 40, 40), RGB(33, 33, 33), tile::Type::Wall(tile::Wall::Normal)),
// So on windows I've noticed that in the text files for structures, random escape characters get added in so might as well
// just make them floors.
'.' | ' ' | '\t' | '\r' => Tile::new("Floor", ' ', RGB(27, 27, 27), RGB(20, 20, 20), tile::Type::Floor(tile::Floor::Normal)),
'"' => Tile::new("Tall Grass", '"', RGB(76, 74, 75), RGB(20, 20, 20), tile::Type::TallGrass),
'&' => Tile::new("Fountain", '&', RGB(201, 195, 195), RGB(20, 20, 20), tile::Type::ArtStructure),
'<' => Tile::new("Up Stair", '<', RGB(255, 255, 255), RGB(0, 0, 0), tile::Type::Stair(tile::Stair::UpStair(tile::UpStair::Normal))),
'>' => Tile::new("Down Stair", '>', RGB(255, 255, 255), RGB(0, 0, 0), tile::Type::Stair(tile::Stair::DownStair(tile::DownStair::Normal))),
_ => panic!("Unknown character: {}", ch)
}
};
// Push character to the line
line.push(tile);
// If we hit a new line we need to push the line to the tile struct, and empty the line
} else {
strct.push(line);
line = vec![];
}
}
// Rotate randomly
// (x, y) rotated 90 degrees around (0, 0) is (-y, x).
// However, vectors are sized in a way that won't allow for negative indexing.
// Our formula for point tranformation should be:
// (-y + total x length, x)
let rot90 = | grid: map::Grid<Tile> | -> map::Grid<Tile> {
// We could clone but I feel like this way is faster
let mut rot_grid = map::Grid::<Tile>::new();
// Measure x on y axis
for x in 0..grid.height() {
// Fill new vecs with init
let mut vec = Vec::<Tile>::new();
// Measure y on x axis
for y in 0..grid.width() {
vec.push(
// Rotation performed by following above function
grid[grid.width() - 1 - y][x].clone()
);
}
rot_grid.push(vec);
}
return rot_grid;
};
// Perform 1 - 4 rotations
// NOTE: 4 rotations = starting positon. Might be a good idea to improve this
for _ in 0..rng.gen_range(0, 4) {
|
strct = rot90(strct);
}
// Read details of vector
let w = strct.width();
let h = strct.height();
// Read details of map
let total_w = grid.width();
let total_h = grid.height();
// Add to map if possible
let x = rng.gen_range(0, w + 1);
let y = rng.gen_range(0, h + 1);
// Break with no change
if x + w > total_w - 1 || y + h > total_h - 1 { return; }
// Apply change
for tx in x..x+w {
for ty in y..y+h {
grid[tx][ty] = strct[tx-x][ty-y].clone();
}
}
}
///
/// Return a new `Structure`
///
pub fn new() -> Self {
Structure {}
}
}
impl Filter for Structure {
type Output = Tile;
fn apply(&mut self, grid: &mut map::Grid<Self::Output>) {
self.add_rand_struct(grid);
}
}
|
random_line_split
|
|
promiserejectionevent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::PromiseRejectionEventBinding;
use crate::dom::bindings::codegen::Bindings::PromiseRejectionEventBinding::PromiseRejectionEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::globalscope::GlobalScope;
use crate::dom::promise::Promise;
use crate::script_runtime::JSContext;
use dom_struct::dom_struct;
use js::jsapi::Heap;
use js::jsval::JSVal;
use js::rust::HandleValue;
use servo_atoms::Atom;
use std::rc::Rc;
#[dom_struct]
pub struct PromiseRejectionEvent {
event: Event,
#[ignore_malloc_size_of = "Rc"]
promise: Rc<Promise>,
#[ignore_malloc_size_of = "Defined in rust-mozjs"]
reason: Heap<JSVal>,
}
impl PromiseRejectionEvent {
#[allow(unrooted_must_root)]
fn new_inherited(promise: Rc<Promise>) -> Self {
PromiseRejectionEvent {
event: Event::new_inherited(),
promise,
reason: Heap::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: EventBubbles,
cancelable: EventCancelable,
promise: Rc<Promise>,
reason: HandleValue,
) -> DomRoot<Self> {
let ev = reflect_dom_object(
Box::new(PromiseRejectionEvent::new_inherited(promise)),
global,
PromiseRejectionEventBinding::Wrap,
);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bool::from(bubbles), bool::from(cancelable));
ev.reason.set(reason.get());
}
ev
}
#[allow(unrooted_must_root)]
pub fn Constructor(
|
let promise = init.promise.clone();
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = PromiseRejectionEvent::new(
global,
Atom::from(type_),
bubbles,
cancelable,
promise,
reason,
);
Ok(event)
}
}
impl PromiseRejectionEventMethods for PromiseRejectionEvent {
// https://html.spec.whatwg.org/multipage/#dom-promiserejectionevent-promise
fn Promise(&self) -> Rc<Promise> {
self.promise.clone()
}
// https://html.spec.whatwg.org/multipage/#dom-promiserejectionevent-reason
fn Reason(&self, _cx: JSContext) -> JSVal {
self.reason.get()
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
|
global: &GlobalScope,
type_: DOMString,
init: RootedTraceableBox<PromiseRejectionEventBinding::PromiseRejectionEventInit>,
) -> Fallible<DomRoot<Self>> {
let reason = init.reason.handle();
|
random_line_split
|
promiserejectionevent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::PromiseRejectionEventBinding;
use crate::dom::bindings::codegen::Bindings::PromiseRejectionEventBinding::PromiseRejectionEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::globalscope::GlobalScope;
use crate::dom::promise::Promise;
use crate::script_runtime::JSContext;
use dom_struct::dom_struct;
use js::jsapi::Heap;
use js::jsval::JSVal;
use js::rust::HandleValue;
use servo_atoms::Atom;
use std::rc::Rc;
#[dom_struct]
pub struct PromiseRejectionEvent {
event: Event,
#[ignore_malloc_size_of = "Rc"]
promise: Rc<Promise>,
#[ignore_malloc_size_of = "Defined in rust-mozjs"]
reason: Heap<JSVal>,
}
impl PromiseRejectionEvent {
#[allow(unrooted_must_root)]
fn new_inherited(promise: Rc<Promise>) -> Self {
PromiseRejectionEvent {
event: Event::new_inherited(),
promise,
reason: Heap::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: EventBubbles,
cancelable: EventCancelable,
promise: Rc<Promise>,
reason: HandleValue,
) -> DomRoot<Self> {
let ev = reflect_dom_object(
Box::new(PromiseRejectionEvent::new_inherited(promise)),
global,
PromiseRejectionEventBinding::Wrap,
);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bool::from(bubbles), bool::from(cancelable));
ev.reason.set(reason.get());
}
ev
}
#[allow(unrooted_must_root)]
pub fn Constructor(
global: &GlobalScope,
type_: DOMString,
init: RootedTraceableBox<PromiseRejectionEventBinding::PromiseRejectionEventInit>,
) -> Fallible<DomRoot<Self>> {
let reason = init.reason.handle();
let promise = init.promise.clone();
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = PromiseRejectionEvent::new(
global,
Atom::from(type_),
bubbles,
cancelable,
promise,
reason,
);
Ok(event)
}
}
impl PromiseRejectionEventMethods for PromiseRejectionEvent {
// https://html.spec.whatwg.org/multipage/#dom-promiserejectionevent-promise
fn Promise(&self) -> Rc<Promise> {
self.promise.clone()
}
// https://html.spec.whatwg.org/multipage/#dom-promiserejectionevent-reason
fn Reason(&self, _cx: JSContext) -> JSVal
|
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
|
{
self.reason.get()
}
|
identifier_body
|
promiserejectionevent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::PromiseRejectionEventBinding;
use crate::dom::bindings::codegen::Bindings::PromiseRejectionEventBinding::PromiseRejectionEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::globalscope::GlobalScope;
use crate::dom::promise::Promise;
use crate::script_runtime::JSContext;
use dom_struct::dom_struct;
use js::jsapi::Heap;
use js::jsval::JSVal;
use js::rust::HandleValue;
use servo_atoms::Atom;
use std::rc::Rc;
#[dom_struct]
pub struct PromiseRejectionEvent {
event: Event,
#[ignore_malloc_size_of = "Rc"]
promise: Rc<Promise>,
#[ignore_malloc_size_of = "Defined in rust-mozjs"]
reason: Heap<JSVal>,
}
impl PromiseRejectionEvent {
#[allow(unrooted_must_root)]
fn new_inherited(promise: Rc<Promise>) -> Self {
PromiseRejectionEvent {
event: Event::new_inherited(),
promise,
reason: Heap::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: EventBubbles,
cancelable: EventCancelable,
promise: Rc<Promise>,
reason: HandleValue,
) -> DomRoot<Self> {
let ev = reflect_dom_object(
Box::new(PromiseRejectionEvent::new_inherited(promise)),
global,
PromiseRejectionEventBinding::Wrap,
);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bool::from(bubbles), bool::from(cancelable));
ev.reason.set(reason.get());
}
ev
}
#[allow(unrooted_must_root)]
pub fn
|
(
global: &GlobalScope,
type_: DOMString,
init: RootedTraceableBox<PromiseRejectionEventBinding::PromiseRejectionEventInit>,
) -> Fallible<DomRoot<Self>> {
let reason = init.reason.handle();
let promise = init.promise.clone();
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = PromiseRejectionEvent::new(
global,
Atom::from(type_),
bubbles,
cancelable,
promise,
reason,
);
Ok(event)
}
}
impl PromiseRejectionEventMethods for PromiseRejectionEvent {
// https://html.spec.whatwg.org/multipage/#dom-promiserejectionevent-promise
fn Promise(&self) -> Rc<Promise> {
self.promise.clone()
}
// https://html.spec.whatwg.org/multipage/#dom-promiserejectionevent-reason
fn Reason(&self, _cx: JSContext) -> JSVal {
self.reason.get()
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
|
Constructor
|
identifier_name
|
lib.rs
|
#[derive(Debug, PartialEq)]
pub enum Comparison {
Unequal,
Equal,
Sublist,
Superlist
}
pub fn sublist<T>(first: &[T], second: &[T]) -> Comparison where T: PartialEq {
let (shortest, longest) =
if first.len() < second.len() { (first, second) }
else { (second, first) };
let mut anchors = Vec::new();
let mut position = 0;
let mut done = first.len() * second.len() == 0;
while !done && position < longest.len() {
if let Some(start) = anchors.first().cloned() {
if longest[position] != shortest[position - start] {
anchors.remove(0);
position = anchors.first().cloned().unwrap_or(position);
}
}
if longest[position] == shortest[0]
&& anchors.last().map(|&last| last < position).unwrap_or(true) {
anchors.push(position);
}
position += 1;
done = anchors.first().map(|&start| position - start)
|
.map(|length| length >= shortest.len()).unwrap_or(false);
}
if !done { Comparison::Unequal }
else if first.len() < second.len() { Comparison::Sublist }
else if shortest.len() == longest.len() { Comparison::Equal }
else { Comparison::Superlist }
}
|
random_line_split
|
|
lib.rs
|
#[derive(Debug, PartialEq)]
pub enum Comparison {
Unequal,
Equal,
Sublist,
Superlist
}
pub fn sublist<T>(first: &[T], second: &[T]) -> Comparison where T: PartialEq
|
position += 1;
done = anchors.first().map(|&start| position - start)
.map(|length| length >= shortest.len()).unwrap_or(false);
}
if !done { Comparison::Unequal }
else if first.len() < second.len() { Comparison::Sublist }
else if shortest.len() == longest.len() { Comparison::Equal }
else { Comparison::Superlist }
}
|
{
let (shortest, longest) =
if first.len() < second.len() { (first, second) }
else { (second, first) };
let mut anchors = Vec::new();
let mut position = 0;
let mut done = first.len() * second.len() == 0;
while !done && position < longest.len() {
if let Some(start) = anchors.first().cloned() {
if longest[position] != shortest[position - start] {
anchors.remove(0);
position = anchors.first().cloned().unwrap_or(position);
}
}
if longest[position] == shortest[0]
&& anchors.last().map(|&last| last < position).unwrap_or(true) {
anchors.push(position);
}
|
identifier_body
|
lib.rs
|
#[derive(Debug, PartialEq)]
pub enum
|
{
Unequal,
Equal,
Sublist,
Superlist
}
pub fn sublist<T>(first: &[T], second: &[T]) -> Comparison where T: PartialEq {
let (shortest, longest) =
if first.len() < second.len() { (first, second) }
else { (second, first) };
let mut anchors = Vec::new();
let mut position = 0;
let mut done = first.len() * second.len() == 0;
while !done && position < longest.len() {
if let Some(start) = anchors.first().cloned() {
if longest[position] != shortest[position - start] {
anchors.remove(0);
position = anchors.first().cloned().unwrap_or(position);
}
}
if longest[position] == shortest[0]
&& anchors.last().map(|&last| last < position).unwrap_or(true) {
anchors.push(position);
}
position += 1;
done = anchors.first().map(|&start| position - start)
.map(|length| length >= shortest.len()).unwrap_or(false);
}
if !done { Comparison::Unequal }
else if first.len() < second.len() { Comparison::Sublist }
else if shortest.len() == longest.len() { Comparison::Equal }
else { Comparison::Superlist }
}
|
Comparison
|
identifier_name
|
grid.rs
|
/* ===============================================================================
Simulation of the evolution of the animal world.
Grid of world.
14 Feb 2021.
----------------------------------------------------------------------------
Licensed under the terms of the GPL version 3.
http://www.gnu.org/licenses/gpl-3.0.html
Copyright (c) 2013-2022 by Artem Khomenko [email protected].
=============================================================================== */
use iced::{
canvas::event::{self, Event},
canvas::{self, Cache, Canvas, Cursor, Frame, Geometry, Path, Text, Stroke, },
mouse, Color, Element, Length, Point, Rectangle,
Size, Vector, VerticalAlignment,
};
use std::{ops::RangeInclusive, rc::Rc, cell::RefCell};
use crate::world::{World, };
use crate::update_rate::*;
pub struct Grid {
interaction: Interaction,
life_cache: Cache,
grid_cache: Cache,
translation: Vector,
scaling: f32,
world: Rc<RefCell<World>>,
fps: RefCell<FPS>, // screen refresh rate
tps: TPS, // model time rate, ticks per second
illumination: bool,
}
#[derive(Debug, Clone)]
pub enum Message {
}
impl Grid {
// Default size of one cell
const CELL_SIZE: f32 = 30.0;
// Minimum size to draw a text
const CELL_SIZE_FOR_TEXT: f32 = 120.0;
// Height for line of text
const CELL_TEXT_HEIGHT: f32 = 21.0;
const MIN_SCALING: f32 = 0.1;
const MAX_SCALING: f32 = 200.0;
const STATUS_BAR_HEIGHT: f32 = 30.0;
pub fn new(world: Rc<RefCell<World>>) -> Self {
Self {
interaction: Interaction::None,
life_cache: Cache::default(),
grid_cache: Cache::default(),
translation: Vector::default(),
scaling: 1.0,
world,
fps: RefCell::new(FPS::default()),
tps: TPS::default(),
illumination: false,
}
}
pub fn view<'a>(&'a mut self) -> Element<'a, Message> {
Canvas::new(self)
.width(Length::Fill)
.height(Length::Fill)
.into()
}
fn visible_region(&self, size: Size) -> Region {
let width = size.width / self.scaling;
let height = size.height / self.scaling;
Region {
x: -self.translation.x,
y: -self.translation.y,
width,
height,
}
}
fn project(&self, position: Point, size: Size) -> Point {
let region = self.visible_region(size);
Point::new(
position.x / self.scaling + region.x,
position.y / self.scaling + region.y,
)
}
fn
|
(&self, position_in_units: Point) -> (isize, isize) {
let x = (position_in_units.x / Self::CELL_SIZE).floor() as isize;
let y = (position_in_units.y / Self::CELL_SIZE).floor() as isize;
(x, y)
}
fn set_translation(&mut self, new_translation: Vector) {
self.translation = new_translation;
// World size in pixels
let crate::geom::Size {x: w, y: h} = self.world.borrow().size();
let w = w as f32 * Self::CELL_SIZE;
let h = h as f32 * Self::CELL_SIZE;
// To prevent translation overflow in the continuous world
if self.translation.x <= 0.0 {self.translation.x += w}
else if self.translation.x >= w {self.translation.x -= w}
if self.translation.y <= 0.0 {self.translation.y += h}
else if self.translation.y >= h {self.translation.y -= h}
}
// Update rate counters
pub fn clock_chime(&mut self) {
self.fps.borrow_mut().clock_chime();
self.tps.clock_chime(self.world.borrow().ticks_elapsed())
}
pub fn set_illumination(&mut self, checked: bool) {
self.illumination = checked;
}
}
impl<'a> canvas::Program<Message> for Grid {
fn update(
&mut self,
event: Event,
bounds: Rectangle,
cursor: Cursor,
) -> (event::Status, Option<Message>) {
if let Event::Mouse(mouse::Event::ButtonReleased(_)) = event {
self.interaction = Interaction::None;
}
let cursor_position = if let Some(position) = cursor.position_in(&bounds) {position}
else {
return (event::Status::Ignored, None);
};
match event {
Event::Mouse(mouse_event) => match mouse_event {
mouse::Event::ButtonPressed(button) => {
match button {
mouse::Button::Right => {
self.interaction = Interaction::Panning {
translation: self.translation,
start: cursor_position,
};
}
_ => (),
};
(event::Status::Captured, None)
}
mouse::Event::CursorMoved { .. } => {
match self.interaction {
Interaction::Panning { translation, start } => {
self.set_translation(translation + (cursor_position - start) * (1.0 / self.scaling));
self.life_cache.clear();
self.grid_cache.clear();
}
_ => (),
};
let event_status = match self.interaction {
Interaction::None => event::Status::Ignored,
_ => event::Status::Captured,
};
(event_status, None)
}
mouse::Event::WheelScrolled { delta } => match delta {
mouse::ScrollDelta::Lines { y, .. } | mouse::ScrollDelta::Pixels { y, .. } => {
if y < 0.0 && self.scaling > Self::MIN_SCALING || y > 0.0 && self.scaling < Self::MAX_SCALING {
let old_scaling = self.scaling;
self.scaling = (self.scaling * (1.0 + y / Self::CELL_SIZE))
.max(Self::MIN_SCALING)
.min(Self::MAX_SCALING);
if let Some(cursor_to_center) = cursor.position() {
let factor = self.scaling - old_scaling;
self.set_translation(self.translation - Vector::new(
cursor_to_center.x * factor / (old_scaling * old_scaling),
cursor_to_center.y * factor / (old_scaling * old_scaling),
));
}
self.life_cache.clear();
self.grid_cache.clear();
}
(event::Status::Captured, None)
}
},
_ => (event::Status::Ignored, None),
},
_ => (event::Status::Ignored, None),
}
}
fn draw(&self, bounds: Rectangle, cursor: Cursor) -> Vec<Geometry> {
let life = {
let mut frame = Frame::new(bounds.size());
let background = Path::rectangle(Point::ORIGIN, frame.size());
frame.fill(&background, Color::BLACK);
frame.with_save(|frame| {
frame.scale(self.scaling);
frame.translate(self.translation);
frame.scale(Self::CELL_SIZE);
// Region to draw
let region = self.visible_region(frame.size());
// The max number of lines of text to fit
let pixels = self.scaling * Self::CELL_SIZE;
let lines_number = if pixels > Self::CELL_SIZE_FOR_TEXT {
(pixels / Self::CELL_TEXT_HEIGHT) as usize
} else {0};
// Draw each point from the region
for point in itertools::iproduct!(region.columns(), region.rows()) {
// Get dot for point (allow display dot outside its real x and y)
let (x, y) = point;
let dot = self.world.borrow().dot(x, y);
let mut color = dot.color;
if self.illumination {
color.a = 1.0;
}
// Fill cell's area with a primary color
frame.fill_rectangle(
Point::new(x as f32, y as f32),
Size::UNIT,
color,
);
// Draw the text if it fits
if lines_number > 0 {
frame.with_save(|frame| {
frame.translate(Vector::new(0.03, 0.03));
frame.fill_text(Text {
content: self.world.borrow().description(&dot, lines_number, '\n'),
position: Point::new(x as f32, y as f32),
..Text::default()
});
});
}
}
});
frame.into_geometry()
};
// Update FPS, once upon refresh
self.fps.borrow_mut().make_tick();
let overlay = {
let mut frame = Frame::new(bounds.size());
// Translucent bar at the bottom of the window
let frame_width = frame.width();
let frame_height = frame.height();
frame.fill_rectangle(
Point::new(0.0, frame_height - Self::STATUS_BAR_HEIGHT),
Size::new(frame_width, Self::STATUS_BAR_HEIGHT),
Color {
a: 0.9,
..Color::BLACK
}
);
// Text object
let text = Text {
color: Color::WHITE,
vertical_alignment: VerticalAlignment::Bottom,
..Text::default()
};
// Print FPS and model time
let (years, days) = self.world.borrow().date();
frame.fill_text(Text{
position: Point::new(3.0, frame_height - 3.0),
content: format!("{}Y:{}D {} FPS {} TPS", years, days, self.fps.borrow().rate, self.tps.rate),
..text
});
// Get dot below cursor
if let Some(cursor_position) = cursor.position_in(&bounds) {
// Cursor at world coordinates
let (x, y) = self.project_to_world(self.project(cursor_position, bounds.size()));
// Tune scale and offset
frame.with_save(|frame| {
frame.scale(self.scaling); // scale to user's choice
frame.translate(self.translation); // consider the offset of the displayed area
frame.scale(Self::CELL_SIZE); // scale so that the cell with its dimensions occupies exactly one unit
// Paint over a square of unit size
frame.fill_rectangle(
Point::new(x as f32, y as f32),
Size::UNIT,
Color {
a: 0.5,
..Color::BLACK
},
);
});
// Output info at bottom left edge
let dot = self.world.borrow().dot(x as isize, y as isize);
let description = self.world.borrow().description(&dot, 30, ' ');
frame.fill_text(Text{
position: Point::new(210.0, frame_height - 3.0),
content: format!("{}:{} {}", dot.x, dot.y, description),
..text
});
}
frame.into_geometry()
};
let grid = self.grid_cache.draw(bounds.size(), |frame| {
frame.scale(self.scaling);
frame.translate(self.translation);
frame.scale(Self::CELL_SIZE);
let region = self.visible_region(frame.size());
let rows = region.rows();
let columns = region.columns();
let (total_rows, total_columns) = (rows.clone().count(), columns.clone().count());
let (rows_start, columns_start) = (*rows.start() as f32, *columns.start() as f32);
let crate::geom::Size {x: world_width, y: world_height} = self.world.borrow().size();
// Amount of lines for border around the world
let outer_rows = total_rows / world_height;
let outer_columns = total_columns / world_width;
// Color for world's borders
let special_color = Color::from_rgb8(255, 74, 83);
// No grid at small scale, only outer border
if self.scaling < 0.2 {
// Prepare style
let stroke = Stroke {
width: 1.0,
color: special_color,
..Stroke::default()
};
// Draw horizontal lines
for row in 0..=outer_rows {
let from = Point::new(columns_start, (row * world_height) as f32);
let to = Point::new(total_columns as f32, (row * world_height) as f32);
frame.stroke(&Path::line(from, to), stroke);
}
// Draw vertical lines
for column in 0..=outer_columns {
let from = Point::new((column * world_width) as f32, rows_start);
let to = Point::new((column * world_width) as f32, total_rows as f32);
frame.stroke(&Path::line(from, to), stroke);
}
} else {
// Draw the inner grid
let width = 2.0 / Self::CELL_SIZE;
let color = Color::from_rgb8(70, 74, 83);
frame.translate(Vector::new(-width / 2.0, -width / 2.0));
// Draw horizontal lines
for row in rows {
// There must be a special border when crossing the edge of the world, skipping for optimization
if row != 0 {
frame.fill_rectangle(
Point::new(columns_start, row as f32),
Size::new(total_columns as f32, width),
color,
);
}
}
// Draw vertical lines
for column in columns {
// There must be a special border when crossing the edge of the world, skipping for optimization
if column != 0 {
frame.fill_rectangle(
Point::new(column as f32, rows_start),
Size::new(width, total_rows as f32),
color,
);
}
}
// Draw outer borders - horizontal lines
for row in 0..=outer_rows {
frame.fill_rectangle(
Point::new(columns_start, (row * world_height) as f32),
Size::new(total_columns as f32, width),
special_color,
);
}
// Draw outer borders - vertical lines
for column in 0..=outer_columns {
frame.fill_rectangle(
Point::new((column * world_width) as f32, rows_start),
Size::new(width, total_rows as f32),
special_color,
);
}
}
});
vec![life, grid, overlay]
}
fn mouse_interaction(
&self,
_bounds: Rectangle,
_cursor: Cursor,
) -> mouse::Interaction {
match self.interaction {
Interaction::Panning { .. } => mouse::Interaction::Grabbing,
_ => mouse::Interaction::default(),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Cell {
i: isize,
j: isize,
}
pub struct Region {
x: f32,
y: f32,
width: f32,
height: f32,
}
impl Region {
fn rows(&self) -> RangeInclusive<isize> {
let first_row = (self.y / Grid::CELL_SIZE).floor() as isize;
let visible_rows =
(self.height / Grid::CELL_SIZE).ceil() as isize;
first_row..=first_row + visible_rows
}
fn columns(&self) -> RangeInclusive<isize> {
let first_column = (self.x / Grid::CELL_SIZE).floor() as isize;
let visible_columns =
(self.width / Grid::CELL_SIZE).ceil() as isize;
first_column..=first_column + visible_columns
}
}
enum Interaction {
None,
Panning { translation: Vector, start: Point },
}
|
project_to_world
|
identifier_name
|
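The grid.rs record above composes project (screen pixels to world units) with project_to_world (world units to cell indices). Below is a standalone sketch of that mapping, with the iced types replaced by plain tuples; the parameter names mirror the Grid fields but are otherwise illustrative.

fn cursor_to_cell(
    cursor: (f32, f32),
    translation: (f32, f32),
    scaling: f32,
    cell_size: f32,
) -> (isize, isize) {
    // Undo the zoom, then undo the pan: screen pixels -> world units (as in project).
    let wx = cursor.0 / scaling - translation.0;
    let wy = cursor.1 / scaling - translation.1;
    // World units -> integer cell indices; floor keeps negative coordinates consistent.
    ((wx / cell_size).floor() as isize, (wy / cell_size).floor() as isize)
}

fn main() {
    // With no pan, 2x zoom and 30-unit cells, pixel (90, 150) falls into cell (1, 2).
    assert_eq!(cursor_to_cell((90.0, 150.0), (0.0, 0.0), 2.0, 30.0), (1, 2));
}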
grid.rs
|
/* ===============================================================================
Simulation of the evolution of the animal world.
Grid of world.
14 Feb 2021.
----------------------------------------------------------------------------
Licensed under the terms of the GPL version 3.
http://www.gnu.org/licenses/gpl-3.0.html
Copyright (c) 2013-2022 by Artem Khomenko [email protected].
=============================================================================== */
use iced::{
canvas::event::{self, Event},
canvas::{self, Cache, Canvas, Cursor, Frame, Geometry, Path, Text, Stroke, },
mouse, Color, Element, Length, Point, Rectangle,
Size, Vector, VerticalAlignment,
};
use std::{ops::RangeInclusive, rc::Rc, cell::RefCell};
use crate::world::{World, };
use crate::update_rate::*;
pub struct Grid {
interaction: Interaction,
life_cache: Cache,
grid_cache: Cache,
translation: Vector,
scaling: f32,
world: Rc<RefCell<World>>,
fps: RefCell<FPS>, // screen refresh rate
tps: TPS, // model time rate, ticks per second
illumination: bool,
}
#[derive(Debug, Clone)]
pub enum Message {
}
impl Grid {
// Default size of one cell
const CELL_SIZE: f32 = 30.0;
// Minimum size to draw a text
const CELL_SIZE_FOR_TEXT: f32 = 120.0;
// Height for line of text
const CELL_TEXT_HEIGHT: f32 = 21.0;
const MIN_SCALING: f32 = 0.1;
const MAX_SCALING: f32 = 200.0;
const STATUS_BAR_HEIGHT: f32 = 30.0;
pub fn new(world: Rc<RefCell<World>>) -> Self {
Self {
interaction: Interaction::None,
life_cache: Cache::default(),
grid_cache: Cache::default(),
translation: Vector::default(),
scaling: 1.0,
world,
fps: RefCell::new(FPS::default()),
tps: TPS::default(),
illumination: false,
}
}
pub fn view<'a>(&'a mut self) -> Element<'a, Message> {
Canvas::new(self)
.width(Length::Fill)
.height(Length::Fill)
.into()
}
fn visible_region(&self, size: Size) -> Region {
let width = size.width / self.scaling;
let height = size.height / self.scaling;
Region {
x: -self.translation.x,
y: -self.translation.y,
width,
height,
}
}
|
let region = self.visible_region(size);
Point::new(
position.x / self.scaling + region.x,
position.y / self.scaling + region.y,
)
}
fn project_to_world(&self, position_in_units: Point) -> (isize, isize) {
let x = (position_in_units.x / Self::CELL_SIZE).floor() as isize;
let y = (position_in_units.y / Self::CELL_SIZE).floor() as isize;
(x, y)
}
fn set_translation(&mut self, new_translation: Vector) {
self.translation = new_translation;
// World size in pixels
let crate::geom::Size {x: w, y: h} = self.world.borrow().size();
let w = w as f32 * Self::CELL_SIZE;
let h = h as f32 * Self::CELL_SIZE;
// To prevent translation overflow in the continuous world
if self.translation.x <= 0.0 {self.translation.x += w}
else if self.translation.x >= w {self.translation.x -= w}
if self.translation.y <= 0.0 {self.translation.y += h}
else if self.translation.y >= h {self.translation.y -= h}
}
// Update rate counters
pub fn clock_chime(&mut self) {
self.fps.borrow_mut().clock_chime();
self.tps.clock_chime(self.world.borrow().ticks_elapsed())
}
pub fn set_illumination(&mut self, checked: bool) {
self.illumination = checked;
}
}
impl<'a> canvas::Program<Message> for Grid {
fn update(
&mut self,
event: Event,
bounds: Rectangle,
cursor: Cursor,
) -> (event::Status, Option<Message>) {
if let Event::Mouse(mouse::Event::ButtonReleased(_)) = event {
self.interaction = Interaction::None;
}
let cursor_position = if let Some(position) = cursor.position_in(&bounds) {position}
else {
return (event::Status::Ignored, None);
};
match event {
Event::Mouse(mouse_event) => match mouse_event {
mouse::Event::ButtonPressed(button) => {
match button {
mouse::Button::Right => {
self.interaction = Interaction::Panning {
translation: self.translation,
start: cursor_position,
};
}
_ => (),
};
(event::Status::Captured, None)
}
mouse::Event::CursorMoved { .. } => {
match self.interaction {
Interaction::Panning { translation, start } => {
self.set_translation(translation + (cursor_position - start) * (1.0 / self.scaling));
self.life_cache.clear();
self.grid_cache.clear();
}
_ => (),
};
let event_status = match self.interaction {
Interaction::None => event::Status::Ignored,
_ => event::Status::Captured,
};
(event_status, None)
}
mouse::Event::WheelScrolled { delta } => match delta {
mouse::ScrollDelta::Lines { y, .. } | mouse::ScrollDelta::Pixels { y, .. } => {
if y < 0.0 && self.scaling > Self::MIN_SCALING || y > 0.0 && self.scaling < Self::MAX_SCALING {
let old_scaling = self.scaling;
self.scaling = (self.scaling * (1.0 + y / Self::CELL_SIZE))
.max(Self::MIN_SCALING)
.min(Self::MAX_SCALING);
if let Some(cursor_to_center) = cursor.position() {
let factor = self.scaling - old_scaling;
self.set_translation(self.translation - Vector::new(
cursor_to_center.x * factor / (old_scaling * old_scaling),
cursor_to_center.y * factor / (old_scaling * old_scaling),
));
}
self.life_cache.clear();
self.grid_cache.clear();
}
(event::Status::Captured, None)
}
},
_ => (event::Status::Ignored, None),
},
_ => (event::Status::Ignored, None),
}
}
fn draw(&self, bounds: Rectangle, cursor: Cursor) -> Vec<Geometry> {
let life = {
let mut frame = Frame::new(bounds.size());
let background = Path::rectangle(Point::ORIGIN, frame.size());
frame.fill(&background, Color::BLACK);
frame.with_save(|frame| {
frame.scale(self.scaling);
frame.translate(self.translation);
frame.scale(Self::CELL_SIZE);
// Region to draw
let region = self.visible_region(frame.size());
// The max number of lines of text to fit
let pixels = self.scaling * Self::CELL_SIZE;
let lines_number = if pixels > Self::CELL_SIZE_FOR_TEXT {
(pixels / Self::CELL_TEXT_HEIGHT) as usize
} else {0};
// Draw each point from the region
for point in itertools::iproduct!(region.columns(), region.rows()) {
// Get dot for point (allow display dot outside its real x and y)
let (x, y) = point;
let dot = self.world.borrow().dot(x, y);
let mut color = dot.color;
if self.illumination {
color.a = 1.0;
}
// Fill cell's area with a primary color
frame.fill_rectangle(
Point::new(x as f32, y as f32),
Size::UNIT,
color,
);
// Draw the text if it fits
if lines_number > 0 {
frame.with_save(|frame| {
frame.translate(Vector::new(0.03, 0.03));
frame.fill_text(Text {
content: self.world.borrow().description(&dot, lines_number, '\n'),
position: Point::new(x as f32, y as f32),
..Text::default()
});
});
}
}
});
frame.into_geometry()
};
// Update FPS, once upon refresh
self.fps.borrow_mut().make_tick();
let overlay = {
let mut frame = Frame::new(bounds.size());
// Translucent bar at the bottom of the window
let frame_width = frame.width();
let frame_height = frame.height();
frame.fill_rectangle(
Point::new(0.0, frame_height - Self::STATUS_BAR_HEIGHT),
Size::new(frame_width, Self::STATUS_BAR_HEIGHT),
Color {
a: 0.9,
..Color::BLACK
}
);
// Text object
let text = Text {
color: Color::WHITE,
vertical_alignment: VerticalAlignment::Bottom,
..Text::default()
};
// Print FPS and model time
let (years, days) = self.world.borrow().date();
frame.fill_text(Text{
position: Point::new(3.0, frame_height - 3.0),
content: format!("{}Y:{}D {} FPS {} TPS", years, days, self.fps.borrow().rate, self.tps.rate),
..text
});
// Get dot below cursor
if let Some(cursor_position) = cursor.position_in(&bounds) {
// Cursor at world coordinates
let (x, y) = self.project_to_world(self.project(cursor_position, bounds.size()));
// Tune scale and offset
frame.with_save(|frame| {
frame.scale(self.scaling); // scale to user's choice
frame.translate(self.translation); // consider the offset of the displayed area
frame.scale(Self::CELL_SIZE); // scale so that the cell with its dimensions occupies exactly one unit
// Paint over a square of unit size
frame.fill_rectangle(
Point::new(x as f32, y as f32),
Size::UNIT,
Color {
a: 0.5,
..Color::BLACK
},
);
});
// Output info at bottom left edge
let dot = self.world.borrow().dot(x as isize, y as isize);
let description = self.world.borrow().description(&dot, 30, ' ');
frame.fill_text(Text{
position: Point::new(210.0, frame_height - 3.0),
content: format!("{}:{} {}", dot.x, dot.y, description),
..text
});
}
frame.into_geometry()
};
let grid = self.grid_cache.draw(bounds.size(), |frame| {
frame.scale(self.scaling);
frame.translate(self.translation);
frame.scale(Self::CELL_SIZE);
let region = self.visible_region(frame.size());
let rows = region.rows();
let columns = region.columns();
let (total_rows, total_columns) = (rows.clone().count(), columns.clone().count());
let (rows_start, columns_start) = (*rows.start() as f32, *columns.start() as f32);
let crate::geom::Size {x: world_width, y: world_height} = self.world.borrow().size();
// Amount of lines for border around the world
let outer_rows = total_rows / world_height;
let outer_columns = total_columns / world_width;
// Color for world's borders
let special_color = Color::from_rgb8(255, 74, 83);
// No grid at small scale, only outer border
if self.scaling < 0.2 {
// Prepare style
let stroke = Stroke {
width: 1.0,
color: special_color,
..Stroke::default()
};
// Draw horizontal lines
for row in 0..=outer_rows {
let from = Point::new(columns_start, (row * world_height) as f32);
let to = Point::new(total_columns as f32, (row * world_height) as f32);
frame.stroke(&Path::line(from, to), stroke);
}
// Draw vertical lines
for column in 0..=outer_columns {
let from = Point::new((column * world_width) as f32, rows_start);
let to = Point::new((column * world_width) as f32, total_rows as f32);
frame.stroke(&Path::line(from, to), stroke);
}
} else {
// Draw the inner grid
let width = 2.0 / Self::CELL_SIZE;
let color = Color::from_rgb8(70, 74, 83);
frame.translate(Vector::new(-width / 2.0, -width / 2.0));
// Draw horizontal lines
for row in rows {
// There must be a special border when crossing the edge of the world, skipping for optimization
if row != 0 {
frame.fill_rectangle(
Point::new(columns_start, row as f32),
Size::new(total_columns as f32, width),
color,
);
}
}
// Draw vertical lines
for column in columns {
// There must be a special border when crossing the edge of the world, skipping for optimization
if column != 0 {
frame.fill_rectangle(
Point::new(column as f32, rows_start),
Size::new(width, total_rows as f32),
color,
);
}
}
// Draw outer borders - horizontal lines
for row in 0..=outer_rows {
frame.fill_rectangle(
Point::new(columns_start, (row * world_height) as f32),
Size::new(total_columns as f32, width),
special_color,
);
}
// Draw outer borders - vertical lines
for column in 0..=outer_columns {
frame.fill_rectangle(
Point::new((column * world_width) as f32, rows_start),
Size::new(width, total_rows as f32),
special_color,
);
}
}
});
vec![life, grid, overlay]
}
fn mouse_interaction(
&self,
_bounds: Rectangle,
_cursor: Cursor,
) -> mouse::Interaction {
match self.interaction {
Interaction::Panning { .. } => mouse::Interaction::Grabbing,
_ => mouse::Interaction::default(),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Cell {
i: isize,
j: isize,
}
pub struct Region {
x: f32,
y: f32,
width: f32,
height: f32,
}
impl Region {
fn rows(&self) -> RangeInclusive<isize> {
let first_row = (self.y / Grid::CELL_SIZE).floor() as isize;
let visible_rows =
(self.height / Grid::CELL_SIZE).ceil() as isize;
first_row..=first_row + visible_rows
}
fn columns(&self) -> RangeInclusive<isize> {
let first_column = (self.x / Grid::CELL_SIZE).floor() as isize;
let visible_columns =
(self.width / Grid::CELL_SIZE).ceil() as isize;
first_column..=first_column + visible_columns
}
}
enum Interaction {
None,
Panning { translation: Vector, start: Point },
}
|
fn project(&self, position: Point, size: Size) -> Point {
|
random_line_split
|
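set_translation in the grid.rs records folds the pan offset back by one world width or height whenever it leaves the world's pixel extent, so panning the continuous (toroidal) world wraps around instead of overflowing. A minimal one-axis sketch of that wrap, with illustrative names not taken from the crate:

fn wrap_translation(mut t: f32, world_size_px: f32) -> f32 {
    // Mirror of the per-axis check in set_translation: shift by one world size when out of range.
    if t <= 0.0 {
        t += world_size_px;
    } else if t >= world_size_px {
        t -= world_size_px;
    }
    t
}

fn main() {
    let world = 100.0 * 30.0; // e.g. 100 cells of 30 px each
    assert_eq!(wrap_translation(-10.0, world), world - 10.0);
    assert_eq!(wrap_translation(world + 5.0, world), 5.0);
}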
grid.rs
|
/* ===============================================================================
Simulation of the evolution of the animal world.
Grid of world.
14 Feb 2021.
----------------------------------------------------------------------------
Licensed under the terms of the GPL version 3.
http://www.gnu.org/licenses/gpl-3.0.html
Copyright (c) 2013-2022 by Artem Khomenko [email protected].
=============================================================================== */
use iced::{
canvas::event::{self, Event},
canvas::{self, Cache, Canvas, Cursor, Frame, Geometry, Path, Text, Stroke, },
mouse, Color, Element, Length, Point, Rectangle,
Size, Vector, VerticalAlignment,
};
use std::{ops::RangeInclusive, rc::Rc, cell::RefCell};
use crate::world::{World, };
use crate::update_rate::*;
pub struct Grid {
interaction: Interaction,
life_cache: Cache,
grid_cache: Cache,
translation: Vector,
scaling: f32,
world: Rc<RefCell<World>>,
fps: RefCell<FPS>, // screen refresh rate
tps: TPS, // model time rate, ticks per second
illumination: bool,
}
#[derive(Debug, Clone)]
pub enum Message {
}
impl Grid {
// Default size of one cell
const CELL_SIZE: f32 = 30.0;
// Minimum size to draw a text
const CELL_SIZE_FOR_TEXT: f32 = 120.0;
// Height for line of text
const CELL_TEXT_HEIGHT: f32 = 21.0;
const MIN_SCALING: f32 = 0.1;
const MAX_SCALING: f32 = 200.0;
const STATUS_BAR_HEIGHT: f32 = 30.0;
pub fn new(world: Rc<RefCell<World>>) -> Self {
Self {
interaction: Interaction::None,
life_cache: Cache::default(),
grid_cache: Cache::default(),
translation: Vector::default(),
scaling: 1.0,
world,
fps: RefCell::new(FPS::default()),
tps: TPS::default(),
illumination: false,
}
}
pub fn view<'a>(&'a mut self) -> Element<'a, Message> {
Canvas::new(self)
.width(Length::Fill)
.height(Length::Fill)
.into()
}
fn visible_region(&self, size: Size) -> Region {
let width = size.width / self.scaling;
let height = size.height / self.scaling;
Region {
x: -self.translation.x,
y: -self.translation.y,
width,
height,
}
}
fn project(&self, position: Point, size: Size) -> Point {
let region = self.visible_region(size);
Point::new(
position.x / self.scaling + region.x,
position.y / self.scaling + region.y,
)
}
fn project_to_world(&self, position_in_units: Point) -> (isize, isize) {
let x = (position_in_units.x / Self::CELL_SIZE).floor() as isize;
let y = (position_in_units.y / Self::CELL_SIZE).floor() as isize;
(x, y)
}
fn set_translation(&mut self, new_translation: Vector) {
self.translation = new_translation;
// World size in pixels
let crate::geom::Size {x: w, y: h} = self.world.borrow().size();
let w = w as f32 * Self::CELL_SIZE;
let h = h as f32 * Self::CELL_SIZE;
// To prevent translation overflow in the continuous world
if self.translation.x <= 0.0 {self.translation.x += w}
else if self.translation.x >= w {self.translation.x -= w}
if self.translation.y <= 0.0 {self.translation.y += h}
else if self.translation.y >= h {self.translation.y -= h}
}
// Update rate counters
pub fn clock_chime(&mut self) {
self.fps.borrow_mut().clock_chime();
self.tps.clock_chime(self.world.borrow().ticks_elapsed())
}
pub fn set_illumination(&mut self, checked: bool) {
self.illumination = checked;
}
}
impl<'a> canvas::Program<Message> for Grid {
fn update(
&mut self,
event: Event,
bounds: Rectangle,
cursor: Cursor,
) -> (event::Status, Option<Message>) {
if let Event::Mouse(mouse::Event::ButtonReleased(_)) = event {
self.interaction = Interaction::None;
}
let cursor_position = if let Some(position) = cursor.position_in(&bounds) {position}
else {
return (event::Status::Ignored, None);
};
match event {
Event::Mouse(mouse_event) => match mouse_event {
mouse::Event::ButtonPressed(button) => {
match button {
mouse::Button::Right => {
self.interaction = Interaction::Panning {
translation: self.translation,
start: cursor_position,
};
}
_ => (),
};
(event::Status::Captured, None)
}
mouse::Event::CursorMoved { .. } => {
match self.interaction {
Interaction::Panning { translation, start } => {
self.set_translation(translation + (cursor_position - start) * (1.0 / self.scaling));
self.life_cache.clear();
self.grid_cache.clear();
}
_ => (),
};
let event_status = match self.interaction {
Interaction::None => event::Status::Ignored,
_ => event::Status::Captured,
};
(event_status, None)
}
mouse::Event::WheelScrolled { delta } => match delta {
mouse::ScrollDelta::Lines { y, .. } | mouse::ScrollDelta::Pixels { y, .. } => {
if y < 0.0 && self.scaling > Self::MIN_SCALING || y > 0.0 && self.scaling < Self::MAX_SCALING {
let old_scaling = self.scaling;
self.scaling = (self.scaling * (1.0 + y / Self::CELL_SIZE))
.max(Self::MIN_SCALING)
.min(Self::MAX_SCALING);
if let Some(cursor_to_center) = cursor.position() {
let factor = self.scaling - old_scaling;
self.set_translation(self.translation - Vector::new(
cursor_to_center.x * factor / (old_scaling * old_scaling),
cursor_to_center.y * factor / (old_scaling * old_scaling),
));
}
self.life_cache.clear();
self.grid_cache.clear();
}
(event::Status::Captured, None)
}
},
_ => (event::Status::Ignored, None),
},
_ => (event::Status::Ignored, None),
}
}
fn draw(&self, bounds: Rectangle, cursor: Cursor) -> Vec<Geometry> {
let life = {
let mut frame = Frame::new(bounds.size());
let background = Path::rectangle(Point::ORIGIN, frame.size());
frame.fill(&background, Color::BLACK);
frame.with_save(|frame| {
frame.scale(self.scaling);
frame.translate(self.translation);
frame.scale(Self::CELL_SIZE);
// Region to draw
let region = self.visible_region(frame.size());
// The max number of lines of text to fit
let pixels = self.scaling * Self::CELL_SIZE;
let lines_number = if pixels > Self::CELL_SIZE_FOR_TEXT {
(pixels / Self::CELL_TEXT_HEIGHT) as usize
} else {0};
// Draw each point from the region
for point in itertools::iproduct!(region.columns(), region.rows()) {
// Get dot for point (allow display dot outside its real x and y)
let (x, y) = point;
let dot = self.world.borrow().dot(x, y);
let mut color = dot.color;
if self.illumination {
color.a = 1.0;
}
// Fill cell's area with a primary color
frame.fill_rectangle(
Point::new(x as f32, y as f32),
Size::UNIT,
color,
);
// Draw the text if it fits
if lines_number > 0 {
frame.with_save(|frame| {
frame.translate(Vector::new(0.03, 0.03));
frame.fill_text(Text {
content: self.world.borrow().description(&dot, lines_number, '\n'),
position: Point::new(x as f32, y as f32),
..Text::default()
});
});
}
}
});
frame.into_geometry()
};
// Update FPS, once upon refresh
self.fps.borrow_mut().make_tick();
let overlay = {
let mut frame = Frame::new(bounds.size());
// Translucent bar at the bottom of the window
let frame_width = frame.width();
let frame_height = frame.height();
frame.fill_rectangle(
Point::new(0.0, frame_height - Self::STATUS_BAR_HEIGHT),
Size::new(frame_width, Self::STATUS_BAR_HEIGHT),
Color {
a: 0.9,
..Color::BLACK
}
);
// Text object
let text = Text {
color: Color::WHITE,
vertical_alignment: VerticalAlignment::Bottom,
..Text::default()
};
// Print FPS and model time
let (years, days) = self.world.borrow().date();
frame.fill_text(Text{
position: Point::new(3.0, frame_height - 3.0),
content: format!("{}Y:{}D {} FPS {} TPS", years, days, self.fps.borrow().rate, self.tps.rate),
..text
});
// Get dot below cursor
if let Some(cursor_position) = cursor.position_in(&bounds) {
// Cursor at world coordinates
let (x, y) = self.project_to_world(self.project(cursor_position, bounds.size()));
// Tune scale and offset
frame.with_save(|frame| {
frame.scale(self.scaling); // scale to user's choice
frame.translate(self.translation); // consider the offset of the displayed area
frame.scale(Self::CELL_SIZE); // scale so that the cell with its dimensions occupies exactly one unit
// Paint over a square of unit size
frame.fill_rectangle(
Point::new(x as f32, y as f32),
Size::UNIT,
Color {
a: 0.5,
..Color::BLACK
},
);
});
// Output info at bottom left edge
let dot = self.world.borrow().dot(x as isize, y as isize);
let description = self.world.borrow().description(&dot, 30, ' ');
frame.fill_text(Text{
position: Point::new(210.0, frame_height - 3.0),
content: format!("{}:{} {}", dot.x, dot.y, description),
..text
});
}
frame.into_geometry()
};
let grid = self.grid_cache.draw(bounds.size(), |frame| {
frame.scale(self.scaling);
frame.translate(self.translation);
frame.scale(Self::CELL_SIZE);
let region = self.visible_region(frame.size());
let rows = region.rows();
let columns = region.columns();
let (total_rows, total_columns) = (rows.clone().count(), columns.clone().count());
let (rows_start, columns_start) = (*rows.start() as f32, *columns.start() as f32);
let crate::geom::Size {x: world_width, y: world_height} = self.world.borrow().size();
// Amount of lines for border around the world
let outer_rows = total_rows / world_height;
let outer_columns = total_columns / world_width;
// Color for world's borders
let special_color = Color::from_rgb8(255, 74, 83);
// No grid at small scale, only outer border
if self.scaling < 0.2 {
// Prepare style
let stroke = Stroke {
width: 1.0,
color: special_color,
..Stroke::default()
};
// Draw horizontal lines
for row in 0..=outer_rows {
let from = Point::new(columns_start, (row * world_height) as f32);
let to = Point::new(total_columns as f32, (row * world_height) as f32);
frame.stroke(&Path::line(from, to), stroke);
}
// Draw vertical lines
for column in 0..=outer_columns {
let from = Point::new((column * world_width) as f32, rows_start);
let to = Point::new((column * world_width) as f32, total_rows as f32);
frame.stroke(&Path::line(from, to), stroke);
}
} else
|
for column in columns {
// There must be a special border when crossing the edge of the world, skipping for optimization
if column != 0 {
frame.fill_rectangle(
Point::new(column as f32, rows_start),
Size::new(width, total_rows as f32),
color,
);
}
}
// Draw outer borders - horizontal lines
for row in 0..=outer_rows {
frame.fill_rectangle(
Point::new(columns_start, (row * world_height) as f32),
Size::new(total_columns as f32, width),
special_color,
);
}
// Draw outer borders - vertical lines
for column in 0..=outer_columns {
frame.fill_rectangle(
Point::new((column * world_width) as f32, rows_start),
Size::new(width, total_rows as f32),
special_color,
);
}
}
});
vec![life, grid, overlay]
}
fn mouse_interaction(
&self,
_bounds: Rectangle,
_cursor: Cursor,
) -> mouse::Interaction {
match self.interaction {
Interaction::Panning { .. } => mouse::Interaction::Grabbing,
_ => mouse::Interaction::default(),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Cell {
i: isize,
j: isize,
}
pub struct Region {
x: f32,
y: f32,
width: f32,
height: f32,
}
impl Region {
fn rows(&self) -> RangeInclusive<isize> {
let first_row = (self.y / Grid::CELL_SIZE).floor() as isize;
let visible_rows =
(self.height / Grid::CELL_SIZE).ceil() as isize;
first_row..=first_row + visible_rows
}
fn columns(&self) -> RangeInclusive<isize> {
let first_column = (self.x / Grid::CELL_SIZE).floor() as isize;
let visible_columns =
(self.width / Grid::CELL_SIZE).ceil() as isize;
first_column..=first_column + visible_columns
}
}
enum Interaction {
None,
Panning { translation: Vector, start: Point },
}
|
{
// Draw the inner grid
let width = 2.0 / Self::CELL_SIZE;
let color = Color::from_rgb8(70, 74, 83);
frame.translate(Vector::new(-width / 2.0, -width / 2.0));
// Draw horizontal lines
for row in rows {
// There must be a special border when crossing the edge of the world, skipping for optimization
if row != 0 {
frame.fill_rectangle(
Point::new(columns_start, row as f32),
Size::new(total_columns as f32, width),
color,
);
}
}
// Draw vertical lines
|
conditional_block
|
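Region::rows() and Region::columns() in the grid.rs records turn a visible span in world units into an inclusive range of cell indices by flooring the start and ceiling the extent. A small standalone restatement of that computation (the function name is illustrative):

fn visible_cells(origin: f32, extent: f32, cell_size: f32) -> std::ops::RangeInclusive<isize> {
    // First visible cell index, rounded toward negative infinity.
    let first = (origin / cell_size).floor() as isize;
    // Number of additional cells needed to cover the extent.
    let count = (extent / cell_size).ceil() as isize;
    first..=first + count
}

fn main() {
    // A viewport starting at 45 world units, 100 units wide, with 30-unit cells
    // touches cells 1 through 5 (1 + ceil(100 / 30) = 1 + 4).
    assert_eq!(visible_cells(45.0, 100.0, 30.0), 1..=5);
}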
grid.rs
|
/* ===============================================================================
Simulation of the evolution of the animal world.
Grid of world.
14 Feb 2021.
----------------------------------------------------------------------------
Licensed under the terms of the GPL version 3.
http://www.gnu.org/licenses/gpl-3.0.html
Copyright (c) 2013-2022 by Artem Khomenko [email protected].
=============================================================================== */
use iced::{
canvas::event::{self, Event},
canvas::{self, Cache, Canvas, Cursor, Frame, Geometry, Path, Text, Stroke, },
mouse, Color, Element, Length, Point, Rectangle,
Size, Vector, VerticalAlignment,
};
use std::{ops::RangeInclusive, rc::Rc, cell::RefCell};
use crate::world::{World, };
use crate::update_rate::*;
pub struct Grid {
interaction: Interaction,
life_cache: Cache,
grid_cache: Cache,
translation: Vector,
scaling: f32,
world: Rc<RefCell<World>>,
fps: RefCell<FPS>, // screen refresh rate
tps: TPS, // model time rate, ticks per second
illumination: bool,
}
#[derive(Debug, Clone)]
pub enum Message {
}
impl Grid {
// Default size of one cell
const CELL_SIZE: f32 = 30.0;
// Minimum size to draw a text
const CELL_SIZE_FOR_TEXT: f32 = 120.0;
// Height for line of text
const CELL_TEXT_HEIGHT: f32 = 21.0;
const MIN_SCALING: f32 = 0.1;
const MAX_SCALING: f32 = 200.0;
const STATUS_BAR_HEIGHT: f32 = 30.0;
pub fn new(world: Rc<RefCell<World>>) -> Self {
Self {
interaction: Interaction::None,
life_cache: Cache::default(),
grid_cache: Cache::default(),
translation: Vector::default(),
scaling: 1.0,
world,
fps: RefCell::new(FPS::default()),
tps: TPS::default(),
illumination: false,
}
}
pub fn view<'a>(&'a mut self) -> Element<'a, Message> {
Canvas::new(self)
.width(Length::Fill)
.height(Length::Fill)
.into()
}
fn visible_region(&self, size: Size) -> Region
|
fn project(&self, position: Point, size: Size) -> Point {
let region = self.visible_region(size);
Point::new(
position.x / self.scaling + region.x,
position.y / self.scaling + region.y,
)
}
fn project_to_world(&self, position_in_units: Point) -> (isize, isize) {
let x = (position_in_units.x / Self::CELL_SIZE).floor() as isize;
let y = (position_in_units.y / Self::CELL_SIZE).floor() as isize;
(x, y)
}
fn set_translation(&mut self, new_translation: Vector) {
self.translation = new_translation;
// World size in pixels
let crate::geom::Size {x: w, y: h} = self.world.borrow().size();
let w = w as f32 * Self::CELL_SIZE;
let h = h as f32 * Self::CELL_SIZE;
// To prevent translation overflow in the continuous world
if self.translation.x <= 0.0 {self.translation.x += w}
else if self.translation.x >= w {self.translation.x -= w}
if self.translation.y <= 0.0 {self.translation.y += h}
else if self.translation.y >= h {self.translation.y -= h}
}
// Update rate counters
pub fn clock_chime(&mut self) {
self.fps.borrow_mut().clock_chime();
self.tps.clock_chime(self.world.borrow().ticks_elapsed())
}
pub fn set_illumination(&mut self, checked: bool) {
self.illumination = checked;
}
}
impl<'a> canvas::Program<Message> for Grid {
fn update(
&mut self,
event: Event,
bounds: Rectangle,
cursor: Cursor,
) -> (event::Status, Option<Message>) {
if let Event::Mouse(mouse::Event::ButtonReleased(_)) = event {
self.interaction = Interaction::None;
}
let cursor_position = if let Some(position) = cursor.position_in(&bounds) {position}
else {
return (event::Status::Ignored, None);
};
match event {
Event::Mouse(mouse_event) => match mouse_event {
mouse::Event::ButtonPressed(button) => {
match button {
mouse::Button::Right => {
self.interaction = Interaction::Panning {
translation: self.translation,
start: cursor_position,
};
}
_ => (),
};
(event::Status::Captured, None)
}
mouse::Event::CursorMoved { .. } => {
match self.interaction {
Interaction::Panning { translation, start } => {
self.set_translation(translation + (cursor_position - start) * (1.0 / self.scaling));
self.life_cache.clear();
self.grid_cache.clear();
}
_ => (),
};
let event_status = match self.interaction {
Interaction::None => event::Status::Ignored,
_ => event::Status::Captured,
};
(event_status, None)
}
mouse::Event::WheelScrolled { delta } => match delta {
mouse::ScrollDelta::Lines { y, .. } | mouse::ScrollDelta::Pixels { y, .. } => {
if y < 0.0 && self.scaling > Self::MIN_SCALING || y > 0.0 && self.scaling < Self::MAX_SCALING {
let old_scaling = self.scaling;
self.scaling = (self.scaling * (1.0 + y / Self::CELL_SIZE))
.max(Self::MIN_SCALING)
.min(Self::MAX_SCALING);
if let Some(cursor_to_center) = cursor.position() {
let factor = self.scaling - old_scaling;
self.set_translation(self.translation - Vector::new(
cursor_to_center.x * factor / (old_scaling * old_scaling),
cursor_to_center.y * factor / (old_scaling * old_scaling),
));
}
self.life_cache.clear();
self.grid_cache.clear();
}
(event::Status::Captured, None)
}
},
_ => (event::Status::Ignored, None),
},
_ => (event::Status::Ignored, None),
}
}
fn draw(&self, bounds: Rectangle, cursor: Cursor) -> Vec<Geometry> {
let life = {
let mut frame = Frame::new(bounds.size());
let background = Path::rectangle(Point::ORIGIN, frame.size());
frame.fill(&background, Color::BLACK);
frame.with_save(|frame| {
frame.scale(self.scaling);
frame.translate(self.translation);
frame.scale(Self::CELL_SIZE);
// Region to draw
let region = self.visible_region(frame.size());
// The max number of lines of text to fit
let pixels = self.scaling * Self::CELL_SIZE;
let lines_number = if pixels > Self::CELL_SIZE_FOR_TEXT {
(pixels / Self::CELL_TEXT_HEIGHT) as usize
} else {0};
// Draw each point from the region
for point in itertools::iproduct!(region.columns(), region.rows()) {
// Get dot for point (allow display dot outside its real x and y)
let (x, y) = point;
let dot = self.world.borrow().dot(x, y);
let mut color = dot.color;
if self.illumination {
color.a = 1.0;
}
// Fill cell's area with a primary color
frame.fill_rectangle(
Point::new(x as f32, y as f32),
Size::UNIT,
color,
);
// Draw the text if it fits
if lines_number > 0 {
frame.with_save(|frame| {
frame.translate(Vector::new(0.03, 0.03));
frame.fill_text(Text {
content: self.world.borrow().description(&dot, lines_number, '\n'),
position: Point::new(x as f32, y as f32),
..Text::default()
});
});
}
}
});
frame.into_geometry()
};
// Update FPS, once upon refresh
self.fps.borrow_mut().make_tick();
let overlay = {
let mut frame = Frame::new(bounds.size());
// Translucent bar at the bottom of the window
let frame_width = frame.width();
let frame_height = frame.height();
frame.fill_rectangle(
Point::new(0.0, frame_height - Self::STATUS_BAR_HEIGHT),
Size::new(frame_width, Self::STATUS_BAR_HEIGHT),
Color {
a: 0.9,
..Color::BLACK
}
);
// Text object
let text = Text {
color: Color::WHITE,
vertical_alignment: VerticalAlignment::Bottom,
..Text::default()
};
// Print FPS and model time
let (years, days) = self.world.borrow().date();
frame.fill_text(Text{
position: Point::new(3.0, frame_height - 3.0),
content: format!("{}Y:{}D {} FPS {} TPS", years, days, self.fps.borrow().rate, self.tps.rate),
..text
});
// Get dot below cursor
if let Some(cursor_position) = cursor.position_in(&bounds) {
// Cursor at world coordinates
let (x, y) = self.project_to_world(self.project(cursor_position, bounds.size()));
// Tune scale and offset
frame.with_save(|frame| {
frame.scale(self.scaling); // scale to user's choice
frame.translate(self.translation); // consider the offset of the displayed area
frame.scale(Self::CELL_SIZE); // scale so that the cell with its dimensions occupies exactly one unit
// Paint over a square of unit size
frame.fill_rectangle(
Point::new(x as f32, y as f32),
Size::UNIT,
Color {
a: 0.5,
..Color::BLACK
},
);
});
// Output info at bottom left edge
let dot = self.world.borrow().dot(x as isize, y as isize);
let description = self.world.borrow().description(&dot, 30, ' ');
frame.fill_text(Text{
position: Point::new(210.0, frame_height - 3.0),
content: format!("{}:{} {}", dot.x, dot.y, description),
..text
});
}
frame.into_geometry()
};
let grid = self.grid_cache.draw(bounds.size(), |frame| {
frame.scale(self.scaling);
frame.translate(self.translation);
frame.scale(Self::CELL_SIZE);
let region = self.visible_region(frame.size());
let rows = region.rows();
let columns = region.columns();
let (total_rows, total_columns) = (rows.clone().count(), columns.clone().count());
let (rows_start, columns_start) = (*rows.start() as f32, *columns.start() as f32);
let crate::geom::Size {x: world_width, y: world_height} = self.world.borrow().size();
// Amount of lines for border around the world
let outer_rows = total_rows / world_height;
let outer_columns = total_columns / world_width;
// Color for world's borders
let special_color = Color::from_rgb8(255, 74, 83);
// No grid at small scale, only outer border
if self.scaling < 0.2 {
// Prepare style
let stroke = Stroke {
width: 1.0,
color: special_color,
..Stroke::default()
};
// Draw horizontal lines
for row in 0..=outer_rows {
let from = Point::new(columns_start, (row * world_height) as f32);
let to = Point::new(total_columns as f32, (row * world_height) as f32);
frame.stroke(&Path::line(from, to), stroke);
}
// Draw vertical lines
for column in 0..=outer_columns {
let from = Point::new((column * world_width) as f32, rows_start);
let to = Point::new((column * world_width) as f32, total_rows as f32);
frame.stroke(&Path::line(from, to), stroke);
}
} else {
// Draw the inner grid
let width = 2.0 / Self::CELL_SIZE;
let color = Color::from_rgb8(70, 74, 83);
frame.translate(Vector::new(-width / 2.0, -width / 2.0));
// Draw horizontal lines
for row in rows {
// There must be a special border when crossing the edge of the world, skipping for optimization
if row != 0 {
frame.fill_rectangle(
Point::new(columns_start, row as f32),
Size::new(total_columns as f32, width),
color,
);
}
}
// Draw vertical lines
for column in columns {
// There must be a special border when crossing the edge of the world, skipping for optimization
if column != 0 {
frame.fill_rectangle(
Point::new(column as f32, rows_start),
Size::new(width, total_rows as f32),
color,
);
}
}
// Draw outer borders - horizontal lines
for row in 0..=outer_rows {
frame.fill_rectangle(
Point::new(columns_start, (row * world_height) as f32),
Size::new(total_columns as f32, width),
special_color,
);
}
// Draw outer borders - vertical lines
for column in 0..=outer_columns {
frame.fill_rectangle(
Point::new((column * world_width) as f32, rows_start),
Size::new(width, total_rows as f32),
special_color,
);
}
}
});
vec![life, grid, overlay]
}
fn mouse_interaction(
&self,
_bounds: Rectangle,
_cursor: Cursor,
) -> mouse::Interaction {
match self.interaction {
Interaction::Panning { .. } => mouse::Interaction::Grabbing,
_ => mouse::Interaction::default(),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Cell {
i: isize,
j: isize,
}
pub struct Region {
x: f32,
y: f32,
width: f32,
height: f32,
}
impl Region {
fn rows(&self) -> RangeInclusive<isize> {
let first_row = (self.y / Grid::CELL_SIZE).floor() as isize;
let visible_rows =
(self.height / Grid::CELL_SIZE).ceil() as isize;
first_row..=first_row + visible_rows
}
fn columns(&self) -> RangeInclusive<isize> {
let first_column = (self.x / Grid::CELL_SIZE).floor() as isize;
let visible_columns =
(self.width / Grid::CELL_SIZE).ceil() as isize;
first_column..=first_column + visible_columns
}
}
enum Interaction {
None,
Panning { translation: Vector, start: Point },
}
|
{
let width = size.width / self.scaling;
let height = size.height / self.scaling;
Region {
x: -self.translation.x,
y: -self.translation.y,
width,
height,
}
}
|
identifier_body
|
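The WheelScrolled branch in the grid.rs records rescales the view and then shifts the pan so the content under the cursor stays roughly in place. Under the mapping used by project (world = screen / scaling - translation), the exact compensation is the one sketched below; the handler's division by old_scaling squared agrees with it to first order for the small per-tick scale steps a wheel produces. This is an illustrative derivation, not code from the crate.

fn zoom_translation(t_old: f32, cursor_px: f32, s_old: f32, s_new: f32) -> f32 {
    // Keep the world point under the cursor, cursor_px / s - t, constant across the zoom.
    t_old - cursor_px * (s_new - s_old) / (s_new * s_old)
}

fn main() {
    // Zooming from 1.0x to 2.0x with the cursor at 300 px and no prior pan
    // requires shifting the pan by -150 world units.
    let t_new = zoom_translation(0.0, 300.0, 1.0, 2.0);
    assert!((t_new + 150.0).abs() < 1e-4);
    // Invariant check: the world point under the cursor is unchanged.
    let world_before = 300.0_f32 / 1.0 - 0.0;
    let world_after = 300.0_f32 / 2.0 - t_new;
    assert!((world_before - world_after).abs() < 1e-4);
}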
restrict-type-argument.rs
|
fn
|
<T: Send>(val: T) {}
fn use_impl_sync(val: impl Sync) {
is_send(val); //~ ERROR `impl Sync` cannot be sent between threads safely
}
fn use_where<S>(val: S) where S: Sync {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_bound<S: Sync>(val: S) {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_bound_2<
S // Make sure we can synthesize a correct suggestion span for this case
:
Sync
>(val: S) {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_bound_and_where<S: Sync>(val: S) where S: std::fmt::Debug {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_unbound<S>(val: S) {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn main() {}
|
is_send
|
identifier_name
|
restrict-type-argument.rs
|
fn is_send<T: Send>(val: T) {}
fn use_impl_sync(val: impl Sync) {
is_send(val); //~ ERROR `impl Sync` cannot be sent between threads safely
}
|
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_bound_2<
S // Make sure we can synthesize a correct suggestion span for this case
:
Sync
>(val: S) {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_bound_and_where<S: Sync>(val: S) where S: std::fmt::Debug {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_unbound<S>(val: S) {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn main() {}
|
fn use_where<S>(val: S) where S: Sync {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_bound<S: Sync>(val: S) {
|
random_line_split
|
restrict-type-argument.rs
|
fn is_send<T: Send>(val: T)
|
fn use_impl_sync(val: impl Sync) {
is_send(val); //~ ERROR `impl Sync` cannot be sent between threads safely
}
fn use_where<S>(val: S) where S: Sync {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_bound<S: Sync>(val: S) {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_bound_2<
S // Make sure we can synthesize a correct suggestion span for this case
:
Sync
>(val: S) {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_bound_and_where<S: Sync>(val: S) where S: std::fmt::Debug {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn use_unbound<S>(val: S) {
is_send(val); //~ ERROR `S` cannot be sent between threads safely
}
fn main() {}
|
{}
|
identifier_body
|
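The restrict-type-argument.rs records above are rustc UI-test fixtures: every call fails because the generic parameter is only bounded by Sync while is_send requires Send. A hedged sketch of the kind of fix the diagnostics point toward, widening one bound so the call compiles (illustrative, not part of the test file):

fn is_send<T: Send>(val: T) { let _ = val; }

// Adding `+ Send` (or an equivalent `where S: Sync + Send` clause) satisfies the requirement.
fn use_bound_fixed<S: Sync + Send>(val: S) {
    is_send(val);
}

fn main() {
    use_bound_fixed(42_u32); // u32 is both Sync and Send
}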
glyph.rs
|
u8 = 0x2;
fn break_flag_to_enum(flag: u8) -> BreakType {
if (flag & BREAK_TYPE_NORMAL) != 0 {
BreakType::Normal
} else if (flag & BREAK_TYPE_HYPHEN) != 0 {
BreakType::Hyphen
} else {
BreakType::None
}
}
fn break_enum_to_flag(e: BreakType) -> u8 {
match e {
BreakType::None => BREAK_TYPE_NONE,
BreakType::Normal => BREAK_TYPE_NORMAL,
BreakType::Hyphen => BREAK_TYPE_HYPHEN,
}
}
// TODO: make this more type-safe.
static FLAG_CHAR_IS_SPACE: u32 = 0x10000000;
// These two bits store some BREAK_TYPE_* flags
static FLAG_CAN_BREAK_MASK: u32 = 0x60000000;
static FLAG_CAN_BREAK_SHIFT: u32 = 29;
static FLAG_IS_SIMPLE_GLYPH: u32 = 0x80000000;
// glyph advance; in Au's.
static GLYPH_ADVANCE_MASK: u32 = 0x0FFF0000;
static GLYPH_ADVANCE_SHIFT: u32 = 16;
static GLYPH_ID_MASK: u32 = 0x0000FFFF;
// Non-simple glyphs (more than one glyph per char; missing glyph,
// newline, tab, large advance, or nonzero x/y offsets) may have one
// or more detailed glyphs associated with them. They are stored in a
// side array so that there is a 1:1 mapping of GlyphEntry to
// unicode char.
// The number of detailed glyphs for this char. If the char couldn't
// be mapped to a glyph (!FLAG_NOT_MISSING), then this actually holds
// the UTF8 code point instead.
static GLYPH_COUNT_MASK: u32 = 0x00FFFF00;
static GLYPH_COUNT_SHIFT: u32 = 8;
// N.B. following Gecko, these are all inverted so that a lot of
// missing chars can be memset with zeros in one fell swoop.
static FLAG_NOT_MISSING: u32 = 0x00000001;
static FLAG_NOT_CLUSTER_START: u32 = 0x00000002;
static FLAG_NOT_LIGATURE_GROUP_START: u32 = 0x00000004;
static FLAG_CHAR_IS_TAB: u32 = 0x00000008;
static FLAG_CHAR_IS_NEWLINE: u32 = 0x00000010;
//static FLAG_CHAR_IS_LOW_SURROGATE: u32 = 0x00000020;
//static CHAR_IDENTITY_FLAGS_MASK: u32 = 0x00000038;
fn is_simple_glyph_id(id: GlyphId) -> bool {
((id as u32) & GLYPH_ID_MASK) == id
}
fn is_simple_advance(advance: Au) -> bool {
advance >= Au(0) && {
let unsigned_au = advance.0 as u32;
(unsigned_au & (GLYPH_ADVANCE_MASK >> GLYPH_ADVANCE_SHIFT)) == unsigned_au
}
}
type DetailedGlyphCount = u16;
// Getters and setters for GlyphEntry. Setter methods are functional,
// because GlyphEntry is immutable and only a u32 in size.
impl GlyphEntry {
// getter methods
#[inline(always)]
fn advance(&self) -> Au {
Au(((self.value & GLYPH_ADVANCE_MASK) >> GLYPH_ADVANCE_SHIFT) as i32)
}
fn id(&self) -> GlyphId {
self.value & GLYPH_ID_MASK
}
fn is_ligature_start(&self) -> bool {
self.has_flag(!FLAG_NOT_LIGATURE_GROUP_START)
}
fn is_cluster_start(&self) -> bool {
self.has_flag(!FLAG_NOT_CLUSTER_START)
}
// True if original char was normal (U+0020) space. Other chars may
// map to space glyph, but this does not account for them.
fn char_is_space(&self) -> bool {
self.has_flag(FLAG_CHAR_IS_SPACE)
}
fn char_is_tab(&self) -> bool {
!self.is_simple() && self.has_flag(FLAG_CHAR_IS_TAB)
}
fn char_is_newline(&self) -> bool {
!self.is_simple() && self.has_flag(FLAG_CHAR_IS_NEWLINE)
}
fn can_break_before(&self) -> BreakType {
let flag = ((self.value & FLAG_CAN_BREAK_MASK) >> FLAG_CAN_BREAK_SHIFT) as u8;
break_flag_to_enum(flag)
}
// setter methods
#[inline(always)]
fn set_char_is_space(&self) -> GlyphEntry {
GlyphEntry::new(self.value | FLAG_CHAR_IS_SPACE)
}
#[inline(always)]
fn set_char_is_tab(&self) -> GlyphEntry {
assert!(!self.is_simple());
GlyphEntry::new(self.value | FLAG_CHAR_IS_TAB)
}
#[inline(always)]
fn set_char_is_newline(&self) -> GlyphEntry {
assert!(!self.is_simple());
GlyphEntry::new(self.value | FLAG_CHAR_IS_NEWLINE)
}
#[inline(always)]
fn set_can_break_before(&self, e: BreakType) -> GlyphEntry {
let flag = (break_enum_to_flag(e) as u32) << FLAG_CAN_BREAK_SHIFT;
GlyphEntry::new(self.value | flag)
}
// helper methods
fn glyph_count(&self) -> u16 {
assert!(!self.is_simple());
((self.value & GLYPH_COUNT_MASK) >> GLYPH_COUNT_SHIFT) as u16
}
#[inline(always)]
fn is_simple(&self) -> bool {
self.has_flag(FLAG_IS_SIMPLE_GLYPH)
}
#[inline(always)]
fn has_flag(&self, flag: u32) -> bool {
(self.value & flag) != 0
}
#[inline(always)]
fn adapt_character_flags_of_entry(&self, other: GlyphEntry) -> GlyphEntry {
GlyphEntry { value: self.value | other.value }
}
}
// Stores data for a detailed glyph, in the case that several glyphs
// correspond to one character, or the glyph's data couldn't be packed.
#[derive(Clone, Debug, Copy)]
struct DetailedGlyph {
id: GlyphId,
// glyph's advance, in the text's direction (LTR or RTL)
advance: Au,
// glyph's offset from the font's em-box (from top-left)
offset: Point2D<Au>,
}
impl DetailedGlyph {
fn new(id: GlyphId, advance: Au, offset: Point2D<Au>) -> DetailedGlyph {
DetailedGlyph {
id: id,
advance: advance,
offset: offset,
}
}
}
#[derive(PartialEq, Clone, Eq, Debug, Copy)]
struct DetailedGlyphRecord {
// source string offset/GlyphEntry offset in the TextRun
entry_offset: CharIndex,
// offset into the detailed glyphs buffer
detail_offset: usize,
}
impl PartialOrd for DetailedGlyphRecord {
fn partial_cmp(&self, other: &DetailedGlyphRecord) -> Option<Ordering> {
self.entry_offset.partial_cmp(&other.entry_offset)
}
}
impl Ord for DetailedGlyphRecord {
fn cmp(&self, other: &DetailedGlyphRecord) -> Ordering {
self.entry_offset.cmp(&other.entry_offset)
}
}
// Manages the lookup table for detailed glyphs. Sorting is deferred
// until a lookup is actually performed; this matches the expected
// usage pattern of setting/appending all the detailed glyphs, and
// then querying without setting.
#[derive(Clone)]
struct DetailedGlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_buffer: Vec<DetailedGlyph>,
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_lookup: Vec<DetailedGlyphRecord>,
lookup_is_sorted: bool,
}
impl<'a> DetailedGlyphStore {
fn new() -> DetailedGlyphStore {
DetailedGlyphStore {
detail_buffer: vec!(), // TODO: default size?
detail_lookup: vec!(),
lookup_is_sorted: false,
}
}
fn add_detailed_glyphs_for_entry(&mut self, entry_offset: CharIndex, glyphs: &[DetailedGlyph]) {
let entry = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: self.detail_buffer.len(),
};
debug!("Adding entry[off={:?}] for detailed glyphs: {:?}", entry_offset, glyphs);
/* TODO: don't actually assert this until asserts are compiled
in/out based on severity, debug/release, etc. This assertion
would wreck the complexity of the lookup.
See Rust Issue #3647, #2228, #3627 for related information.
do self.detail_lookup.borrow |arr| {
assert !arr.contains(entry)
}
*/
self.detail_lookup.push(entry);
self.detail_buffer.push_all(glyphs);
self.lookup_is_sorted = false;
}
fn get_detailed_glyphs_for_entry(&'a self, entry_offset: CharIndex, count: u16)
-> &'a [DetailedGlyph] {
debug!("Requesting detailed glyphs[n={}] for entry[off={:?}]", count, entry_offset);
// FIXME: Is this right? --pcwalton
// TODO: should fix this somewhere else
if count == 0 {
return &self.detail_buffer[0..0];
}
assert!((count as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (count as usize) <= self.detail_buffer.len());
// return a slice into the buffer
&self.detail_buffer[i .. i + count as usize]
}
fn get_detailed_glyph_with_index(&'a self,
entry_offset: CharIndex,
detail_offset: u16)
-> &'a DetailedGlyph {
assert!((detail_offset as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (detail_offset as usize) < self.detail_buffer.len());
&self.detail_buffer[i + (detail_offset as usize)]
}
fn ensure_sorted(&mut self) {
if self.lookup_is_sorted {
return;
}
// Sorting a unique vector is surprisingly hard. The following
// code is a good argument for using DVecs, but they require
// immutable locations thus don't play well with freezing.
// Thar be dragons here. You have been warned. (Tips accepted.)
let mut unsorted_records: Vec<DetailedGlyphRecord> = vec!();
mem::swap(&mut self.detail_lookup, &mut unsorted_records);
let mut mut_records : Vec<DetailedGlyphRecord> = unsorted_records;
mut_records.sort_by(|a, b| {
if a < b {
Ordering::Less
} else {
Ordering::Greater
}
});
let mut sorted_records = mut_records;
mem::swap(&mut self.detail_lookup, &mut sorted_records);
self.lookup_is_sorted = true;
}
}
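
// A minimal, self-contained sketch of the deferred-sort lookup pattern used by
// `DetailedGlyphStore` above: records are appended unsorted, the table is sorted
// once before the first query, and lookups binary-search on the entry offset.
// Plain tuples stand in for `DetailedGlyphRecord`; illustrative only, not part
// of the store's API.
#[allow(dead_code)]
fn deferred_sort_lookup_sketch() {
    // (entry_offset, detail_offset) pairs, appended in arbitrary order.
    let mut lookup: Vec<(usize, usize)> = vec![(5, 2), (1, 0), (3, 1)];
    // Sort lazily, once, before the first lookup.
    lookup.sort_by_key(|&(entry_offset, _)| entry_offset);
    // Binary-search by entry offset to recover the detail offset.
    let detail_offset = lookup
        .binary_search_by_key(&3, |&(entry_offset, _)| entry_offset)
        .map(|i| lookup[i].1);
    assert_eq!(detail_offset, Ok(1));
}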
// This struct is used by GlyphStore clients to provide new glyph data.
// It should be allocated on the stack and passed by reference to GlyphStore.
#[derive(Copy, Clone)]
pub struct GlyphData {
id: GlyphId,
advance: Au,
offset: Point2D<Au>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool,
}
impl GlyphData {
/// Creates a new entry for one glyph.
pub fn new(id: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool)
-> GlyphData {
GlyphData {
id: id,
advance: advance,
offset: offset.unwrap_or(Point2D::zero()),
is_missing: is_missing,
cluster_start: cluster_start,
ligature_start: ligature_start,
}
}
}
// This enum is a proxy that's provided to GlyphStore clients when iterating
// through glyphs (either for a particular TextRun offset, or all glyphs).
// Rather than eagerly assembling and copying glyph data, it only retrieves
// values as they are needed from the GlyphStore, using provided offsets.
#[derive(Copy, Clone)]
pub enum GlyphInfo<'a> {
Simple(&'a GlyphStore, CharIndex),
Detail(&'a GlyphStore, CharIndex, u16),
}
impl<'a> GlyphInfo<'a> {
pub fn id(self) -> GlyphId {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].id(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).id
}
}
}
#[inline(always)]
// FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
pub fn advance(self) -> Au {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].advance(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).advance
}
}
}
pub fn offset(self) -> Option<Point2D<Au>> {
match self {
GlyphInfo::Simple(_, _) => None,
GlyphInfo::Detail(store, entry_i, detail_j) => {
Some(store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).offset)
}
}
}
}
/// Stores the glyph data belonging to a text run.
///
/// Simple glyphs are stored inline in the `entry_buffer`, detailed glyphs are
/// stored as pointers into the `detail_store`.
///
/// ~~~ignore
/// +- GlyphStore --------------------------------+
/// | +---+---+---+---+---+---+---+ |
/// | entry_buffer: | | s | | s | | s | s | | d = detailed
/// | +-|-+---+-|-+---+-|-+---+---+ | s = simple
/// | | | | |
/// | | +---+-------+ |
/// | | | |
/// | +-V-+-V-+ |
/// | detail_store: | d | d | |
/// | +---+---+ |
/// +---------------------------------------------+
/// ~~~
#[derive(Clone)]
pub struct GlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
/// A buffer of glyphs within the text run, in the order in which they
/// appear in the input text
entry_buffer: Vec<GlyphEntry>,
/// A store of the detailed glyph data. Detailed glyphs contained in the
/// `entry_buffer` point to locations in this data structure.
detail_store: DetailedGlyphStore,
is_whitespace: bool,
}
int_range_index! {
#[derive(RustcEncodable)]
#[doc = "An index that refers to a character in a text run. This could \
point to the middle of a glyph."]
struct CharIndex(isize)
}
impl<'a> GlyphStore {
// Initializes the glyph store, but doesn't actually shape anything.
// Use the set_glyph, set_glyphs() methods to store glyph data.
pub fn new(length: usize, is_whitespace: bool) -> GlyphStore {
assert!(length > 0);
GlyphStore {
entry_buffer: repeat(GlyphEntry::initial()).take(length)
.collect(),
detail_store: DetailedGlyphStore::new(),
is_whitespace: is_whitespace,
}
}
pub fn char_len(&self) -> CharIndex {
CharIndex(self.entry_buffer.len() as isize)
}
pub fn is_whitespace(&self) -> bool {
self.is_whitespace
}
pub fn finalize_changes(&mut self) {
self.detail_store.ensure_sorted();
}
/// Adds a single glyph. If `character` is present, this represents a single character;
/// otherwise, this glyph represents multiple characters.
pub fn add_glyph_for_char_index(&mut self,
i: CharIndex,
character: Option<char>,
data: &GlyphData) {
fn glyph_is_compressible(data: &GlyphData) -> bool {
is_simple_glyph_id(data.id)
&& is_simple_advance(data.advance)
&& data.offset == Point2D::zero()
&& data.cluster_start // others are stored in detail buffer
}
debug_assert!(data.ligature_start); // can't compress ligature continuation glyphs.
debug_assert!(i < self.char_len());
let mut entry = match (data.is_missing, glyph_is_compressible(data)) {
(true, _) => GlyphEntry::missing(1),
(false, true) => GlyphEntry::simple(data.id, data.advance),
(false, false) => {
let glyph = &[DetailedGlyph::new(data.id, data.advance, data.offset)];
self.detail_store.add_detailed_glyphs_for_entry(i, glyph);
GlyphEntry::complex(data.cluster_start, data.ligature_start, 1)
}
};
// FIXME(pcwalton): Is this necessary? I think it's a no-op.
entry = entry.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
if character == Some(' ') {
|
entry = entry.set_char_is_space()
}
self.entry_buffer[i.to_usize()] = entry;
}
pub fn add_glyphs_for_char_index(&mut self, i: CharIndex, data_for_glyphs: &[GlyphData]) {
assert!(i < self.char_len());
assert!(data_for_glyphs.len() > 0);
let glyph_count = data_for_glyphs.len();
let first_glyph_data = data_for_glyphs[0];
let entry = match first_glyph_data.is_missing {
true => GlyphEntry::missing(glyph_count),
false => {
let glyphs_vec: Vec<DetailedGlyph> = (0..glyph_count).map(|i| {
DetailedGlyph::new(data_for_glyphs[i].id,
data_for_glyphs[i].advance,
data_for_glyphs[i].offset)
}).collect();
self.detail_store.add_detailed_glyphs_for_entry(i, &glyphs_vec);
GlyphEntry::complex(first_glyph_data.cluster_start,
first_glyph_data.ligature_start,
glyph_count)
}
}.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
self.entry_buffer[i.to_usize()] = entry;
}
// used when a character index has no associated glyph---for example, a ligature continuation.
pub fn add_nonglyph_for_char_index(&mut self, i: CharIndex, cluster_start: bool, ligature_start: bool) {
assert!(i < self.char_len());
let entry = GlyphEntry::complex(cluster_start, ligature_start, 0);
debug!("adding spacer for chracter without associated glyph[idx={:?}]", i);
self.entry_buffer[i.to_usize()] = entry;
}
pub fn iter_glyphs_for_char_index(&'a self, i: CharIndex) -> GlyphIterator<'a> {
self.iter_glyphs_for_char_range(&Range::new(i, CharIndex(1)))
}
#[inline]
pub fn iter_glyphs_for_char_range(&'a self, rang: &Range<CharIndex>) -> GlyphIterator<'a> {
if rang.begin() >= self.char_len() {
panic!("iter_glyphs_for_range: range.begin beyond length!");
}
if rang.end() > self.char_len() {
panic!("iter_glyphs_for_range: range.end beyond length!");
}
GlyphIterator {
store: self,
char_index: rang.begin(),
char_range: rang.each_index(),
glyph_range: None,
}
}
#[inline]
pub fn advance_for_char_range(&self, rang: &Range<CharIndex>) -> Au {
self.iter_glyphs_for_char_
|
random_line_split
|
|
glyph.rs
|
8 = 0x2;
fn break_flag_to_enum(flag: u8) -> BreakType {
    if (flag & BREAK_TYPE_NORMAL) != 0 {
        BreakType::Normal
    } else if (flag & BREAK_TYPE_HYPHEN) != 0 {
BreakType::Hyphen
} else {
BreakType::None
}
}
fn break_enum_to_flag(e: BreakType) -> u8 {
match e {
BreakType::None => BREAK_TYPE_NONE,
BreakType::Normal => BREAK_TYPE_NORMAL,
BreakType::Hyphen => BREAK_TYPE_HYPHEN,
}
}
// TODO: make this more type-safe.
static FLAG_CHAR_IS_SPACE: u32 = 0x10000000;
// These two bits store some BREAK_TYPE_* flags
static FLAG_CAN_BREAK_MASK: u32 = 0x60000000;
static FLAG_CAN_BREAK_SHIFT: u32 = 29;
static FLAG_IS_SIMPLE_GLYPH: u32 = 0x80000000;
// glyph advance; in Au's.
static GLYPH_ADVANCE_MASK: u32 = 0x0FFF0000;
static GLYPH_ADVANCE_SHIFT: u32 = 16;
static GLYPH_ID_MASK: u32 = 0x0000FFFF;
// Non-simple glyphs (more than one glyph per char; missing glyph,
// newline, tab, large advance, or nonzero x/y offsets) may have one
// or more detailed glyphs associated with them. They are stored in a
// side array so that there is a 1:1 mapping of GlyphEntry to
// unicode char.
// The number of detailed glyphs for this char. If the char couldn't
// be mapped to a glyph (!FLAG_NOT_MISSING), then this actually holds
// the UTF8 code point instead.
static GLYPH_COUNT_MASK: u32 = 0x00FFFF00;
static GLYPH_COUNT_SHIFT: u32 = 8;
// N.B. following Gecko, these are all inverted so that a lot of
// missing chars can be memset with zeros in one fell swoop.
static FLAG_NOT_MISSING: u32 = 0x00000001;
static FLAG_NOT_CLUSTER_START: u32 = 0x00000002;
static FLAG_NOT_LIGATURE_GROUP_START: u32 = 0x00000004;
static FLAG_CHAR_IS_TAB: u32 = 0x00000008;
static FLAG_CHAR_IS_NEWLINE: u32 = 0x00000010;
//static FLAG_CHAR_IS_LOW_SURROGATE: u32 = 0x00000020;
//static CHAR_IDENTITY_FLAGS_MASK: u32 = 0x00000038;
fn is_simple_glyph_id(id: GlyphId) -> bool {
((id as u32) & GLYPH_ID_MASK) == id
}
fn is_simple_advance(advance: Au) -> bool {
advance >= Au(0) && {
let unsigned_au = advance.0 as u32;
(unsigned_au & (GLYPH_ADVANCE_MASK >> GLYPH_ADVANCE_SHIFT)) == unsigned_au
}
}
type DetailedGlyphCount = u16;
// Getters and setters for GlyphEntry. Setter methods are functional,
// because GlyphEntry is immutable and only a u32 in size.
impl GlyphEntry {
// getter methods
#[inline(always)]
fn advance(&self) -> Au {
Au(((self.value & GLYPH_ADVANCE_MASK) >> GLYPH_ADVANCE_SHIFT) as i32)
}
fn id(&self) -> GlyphId {
self.value & GLYPH_ID_MASK
}
fn is_ligature_start(&self) -> bool {
self.has_flag(!FLAG_NOT_LIGATURE_GROUP_START)
}
fn is_cluster_start(&self) -> bool {
self.has_flag(!FLAG_NOT_CLUSTER_START)
}
// True if original char was normal (U+0020) space. Other chars may
// map to space glyph, but this does not account for them.
fn char_is_space(&self) -> bool {
self.has_flag(FLAG_CHAR_IS_SPACE)
}
fn char_is_tab(&self) -> bool {
!self.is_simple() && self.has_flag(FLAG_CHAR_IS_TAB)
}
fn char_is_newline(&self) -> bool {
!self.is_simple() && self.has_flag(FLAG_CHAR_IS_NEWLINE)
}
fn can_break_before(&self) -> BreakType {
let flag = ((self.value & FLAG_CAN_BREAK_MASK) >> FLAG_CAN_BREAK_SHIFT) as u8;
break_flag_to_enum(flag)
}
// setter methods
#[inline(always)]
fn set_char_is_space(&self) -> GlyphEntry {
GlyphEntry::new(self.value | FLAG_CHAR_IS_SPACE)
}
#[inline(always)]
fn set_char_is_tab(&self) -> GlyphEntry {
assert!(!self.is_simple());
GlyphEntry::new(self.value | FLAG_CHAR_IS_TAB)
}
#[inline(always)]
fn set_char_is_newline(&self) -> GlyphEntry {
assert!(!self.is_simple());
GlyphEntry::new(self.value | FLAG_CHAR_IS_NEWLINE)
}
#[inline(always)]
fn set_can_break_before(&self, e: BreakType) -> GlyphEntry {
let flag = (break_enum_to_flag(e) as u32) << FLAG_CAN_BREAK_SHIFT;
GlyphEntry::new(self.value | flag)
}
// helper methods
fn glyph_count(&self) -> u16 {
assert!(!self.is_simple());
((self.value & GLYPH_COUNT_MASK) >> GLYPH_COUNT_SHIFT) as u16
}
#[inline(always)]
fn is_simple(&self) -> bool {
self.has_flag(FLAG_IS_SIMPLE_GLYPH)
}
#[inline(always)]
fn has_flag(&self, flag: u32) -> bool {
        (self.value & flag) != 0
}
#[inline(always)]
fn adapt_character_flags_of_entry(&self, other: GlyphEntry) -> GlyphEntry {
GlyphEntry { value: self.value | other.value }
}
}
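
// A short sketch of the bit layout described by the masks above: a "simple"
// entry keeps the glyph id in the low 16 bits, a 12-bit advance (in Au) in
// bits 16..28, and sets FLAG_IS_SIMPLE_GLYPH. Setting FLAG_NOT_MISSING here is
// an assumption (the real constructor is not shown in this excerpt); this is
// illustrative only, not the actual `GlyphEntry::simple`.
#[allow(dead_code)]
fn pack_simple_entry_sketch(glyph_id: u32, advance_au: u32) -> u32 {
    debug_assert!((glyph_id & GLYPH_ID_MASK) == glyph_id);
    debug_assert!((advance_au & (GLYPH_ADVANCE_MASK >> GLYPH_ADVANCE_SHIFT)) == advance_au);
    let advance_bits = (advance_au << GLYPH_ADVANCE_SHIFT) & GLYPH_ADVANCE_MASK;
    FLAG_IS_SIMPLE_GLYPH | FLAG_NOT_MISSING | advance_bits | (glyph_id & GLYPH_ID_MASK)
}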
// Stores data for a detailed glyph, in the case that several glyphs
// correspond to one character, or the glyph's data couldn't be packed.
#[derive(Clone, Debug, Copy)]
struct DetailedGlyph {
id: GlyphId,
// glyph's advance, in the text's direction (LTR or RTL)
advance: Au,
// glyph's offset from the font's em-box (from top-left)
offset: Point2D<Au>,
}
impl DetailedGlyph {
fn new(id: GlyphId, advance: Au, offset: Point2D<Au>) -> DetailedGlyph {
DetailedGlyph {
id: id,
advance: advance,
offset: offset,
}
}
}
#[derive(PartialEq, Clone, Eq, Debug, Copy)]
struct DetailedGlyphRecord {
// source string offset/GlyphEntry offset in the TextRun
entry_offset: CharIndex,
// offset into the detailed glyphs buffer
detail_offset: usize,
}
impl PartialOrd for DetailedGlyphRecord {
fn partial_cmp(&self, other: &DetailedGlyphRecord) -> Option<Ordering> {
self.entry_offset.partial_cmp(&other.entry_offset)
}
}
impl Ord for DetailedGlyphRecord {
fn cmp(&self, other: &DetailedGlyphRecord) -> Ordering {
self.entry_offset.cmp(&other.entry_offset)
}
}
// Manages the lookup table for detailed glyphs. Sorting is deferred
// until a lookup is actually performed; this matches the expected
// usage pattern of setting/appending all the detailed glyphs, and
// then querying without setting.
#[derive(Clone)]
struct DetailedGlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_buffer: Vec<DetailedGlyph>,
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_lookup: Vec<DetailedGlyphRecord>,
lookup_is_sorted: bool,
}
impl<'a> DetailedGlyphStore {
fn new() -> DetailedGlyphStore {
DetailedGlyphStore {
detail_buffer: vec!(), // TODO: default size?
detail_lookup: vec!(),
lookup_is_sorted: false,
}
}
fn add_detailed_glyphs_for_entry(&mut self, entry_offset: CharIndex, glyphs: &[DetailedGlyph]) {
let entry = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: self.detail_buffer.len(),
};
debug!("Adding entry[off={:?}] for detailed glyphs: {:?}", entry_offset, glyphs);
/* TODO: don't actually assert this until asserts are compiled
in/out based on severity, debug/release, etc. This assertion
would wreck the complexity of the lookup.
See Rust Issue #3647, #2228, #3627 for related information.
do self.detail_lookup.borrow |arr| {
assert!arr.contains(entry)
}
*/
self.detail_lookup.push(entry);
self.detail_buffer.push_all(glyphs);
self.lookup_is_sorted = false;
}
fn get_detailed_glyphs_for_entry(&'a self, entry_offset: CharIndex, count: u16)
-> &'a [DetailedGlyph] {
debug!("Requesting detailed glyphs[n={}] for entry[off={:?}]", count, entry_offset);
// FIXME: Is this right? --pcwalton
// TODO: should fix this somewhere else
if count == 0 {
return &self.detail_buffer[0..0];
}
assert!((count as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (count as usize) <= self.detail_buffer.len());
// return a slice into the buffer
&self.detail_buffer[i.. i + count as usize]
}
fn get_detailed_glyph_with_index(&'a self,
entry_offset: CharIndex,
detail_offset: u16)
-> &'a DetailedGlyph {
assert!((detail_offset as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (detail_offset as usize) < self.detail_buffer.len());
&self.detail_buffer[i + (detail_offset as usize)]
}
fn ensure_sorted(&mut self) {
if self.lookup_is_sorted {
return;
}
// Sorting a unique vector is surprisingly hard. The following
// code is a good argument for using DVecs, but they require
// immutable locations thus don't play well with freezing.
// Thar be dragons here. You have been warned. (Tips accepted.)
let mut unsorted_records: Vec<DetailedGlyphRecord> = vec!();
mem::swap(&mut self.detail_lookup, &mut unsorted_records);
let mut mut_records : Vec<DetailedGlyphRecord> = unsorted_records;
mut_records.sort_by(|a, b| {
if a < b {
Ordering::Less
} else {
Ordering::Greater
}
});
let mut sorted_records = mut_records;
mem::swap(&mut self.detail_lookup, &mut sorted_records);
self.lookup_is_sorted = true;
}
}
// This struct is used by GlyphStore clients to provide new glyph data.
// It should be allocated on the stack and passed by reference to GlyphStore.
#[derive(Copy, Clone)]
pub struct GlyphData {
id: GlyphId,
advance: Au,
offset: Point2D<Au>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool,
}
impl GlyphData {
/// Creates a new entry for one glyph.
pub fn new(id: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool)
-> GlyphData {
GlyphData {
id: id,
advance: advance,
offset: offset.unwrap_or(Point2D::zero()),
is_missing: is_missing,
cluster_start: cluster_start,
ligature_start: ligature_start,
}
}
}
// This enum is a proxy that's provided to GlyphStore clients when iterating
// through glyphs (either for a particular TextRun offset, or all glyphs).
// Rather than eagerly assembling and copying glyph data, it only retrieves
// values as they are needed from the GlyphStore, using provided offsets.
#[derive(Copy, Clone)]
pub enum GlyphInfo<'a> {
Simple(&'a GlyphStore, CharIndex),
Detail(&'a GlyphStore, CharIndex, u16),
}
impl<'a> GlyphInfo<'a> {
pub fn id(self) -> GlyphId {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].id(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).id
}
}
}
#[inline(always)]
// FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
pub fn advance(self) -> Au {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].advance(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).advance
}
}
}
pub fn offset(self) -> Option<Point2D<Au>> {
match self {
GlyphInfo::Simple(_, _) => None,
GlyphInfo::Detail(store, entry_i, detail_j) => {
Some(store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).offset)
}
}
}
}
/// Stores the glyph data belonging to a text run.
///
/// Simple glyphs are stored inline in the `entry_buffer`, detailed glyphs are
/// stored as pointers into the `detail_store`.
///
/// ~~~ignore
/// +- GlyphStore --------------------------------+
/// | +---+---+---+---+---+---+---+ |
/// | entry_buffer: | | s | | s | | s | s | | d = detailed
/// | +-|-+---+-|-+---+-|-+---+---+ | s = simple
/// | | | | |
/// | | +---+-------+ |
/// | | | |
/// | +-V-+-V-+ |
/// | detail_store: | d | d | |
/// | +---+---+ |
/// +---------------------------------------------+
/// ~~~
#[derive(Clone)]
pub struct GlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
/// A buffer of glyphs within the text run, in the order in which they
/// appear in the input text
entry_buffer: Vec<GlyphEntry>,
/// A store of the detailed glyph data. Detailed glyphs contained in the
/// `entry_buffer` point to locations in this data structure.
detail_store: DetailedGlyphStore,
is_whitespace: bool,
}
int_range_index! {
#[derive(RustcEncodable)]
#[doc = "An index that refers to a character in a text run. This could \
point to the middle of a glyph."]
struct CharIndex(isize)
}
impl<'a> GlyphStore {
// Initializes the glyph store, but doesn't actually shape anything.
// Use the set_glyph, set_glyphs() methods to store glyph data.
pub fn new(length: usize, is_whitespace: bool) -> GlyphStore {
assert!(length > 0);
GlyphStore {
entry_buffer: repeat(GlyphEntry::initial()).take(length)
.collect(),
detail_store: DetailedGlyphStore::new(),
is_whitespace: is_whitespace,
}
}
pub fn char_len(&self) -> CharIndex {
CharIndex(self.entry_buffer.len() as isize)
}
pub fn is_whitespace(&self) -> bool {
self.is_whitespace
}
pub fn finalize_changes(&mut self) {
self.detail_store.ensure_sorted();
}
/// Adds a single glyph. If `character` is present, this represents a single character;
/// otherwise, this glyph represents multiple characters.
pub fn add_glyph_for_char_index(&mut self,
i: CharIndex,
character: Option<char>,
data: &GlyphData) {
fn glyph_is_compressible(data: &GlyphData) -> bool {
is_simple_glyph_id(data.id)
&& is_simple_advance(data.advance)
&& data.offset == Point2D::zero()
&& data.cluster_start // others are stored in detail buffer
}
debug_assert!(data.ligature_start); // can't compress ligature continuation glyphs.
debug_assert!(i < self.char_len());
let mut entry = match (data.is_missing, glyph_is_compressible(data)) {
(true, _) => GlyphEntry::missing(1),
(false, true) => GlyphEntry::simple(data.id, data.advance),
(false, false) => {
let glyph = &[DetailedGlyph::new(data.id, data.advance, data.offset)];
self.detail_store.add_detailed_glyphs_for_entry(i, glyph);
GlyphEntry::complex(data.cluster_start, data.ligature_start, 1)
}
};
// FIXME(pcwalton): Is this necessary? I think it's a no-op.
entry = entry.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
if character == Some(' ') {
entry = entry.set_char_is_space()
}
self.entry_buffer[i.to_usize()] = entry;
}
pub fn add_glyphs_for_char_index(&mut self, i: CharIndex, data_for_glyphs: &[GlyphData]) {
assert!(i < self.char_len());
assert!(data_for_glyphs.len() > 0);
let glyph_count = data_for_glyphs.len();
let first_glyph_data = data_for_glyphs[0];
let entry = match first_glyph_data.is_missing {
true => GlyphEntry::missing(glyph_count),
false => {
let glyphs_vec: Vec<DetailedGlyph> = (0..glyph_count).map(|i| {
DetailedGlyph::new(data_for_glyphs[i].id,
data_for_glyphs[i].advance,
data_for_glyphs[i].offset)
}).collect();
self.detail_store.add_detailed_glyphs_for_entry(i, &glyphs_vec);
GlyphEntry::complex(first_glyph_data.cluster_start,
first_glyph_data.ligature_start,
glyph_count)
}
}.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
self.entry_buffer[i.to_usize()] = entry;
}
// used when a character index has no associated glyph---for example, a ligature continuation.
pub fn add_nonglyph_for_char_index(&mut self, i: CharIndex, cluster_start: bool, ligature_start: bool) {
assert!(i < self.char_len());
let entry = GlyphEntry::complex(cluster_start, ligature_start, 0);
debug!("adding spacer for chracter without associated glyph[idx={:?}]", i);
self.entry_buffer[i.to_usize()] = entry;
}
pub fn iter_glyphs_for_char_index(&'a self, i: CharIndex) -> GlyphIterator<'a> {
self.iter_glyphs_for_char_range(&Range::new(i, CharIndex(1)))
}
#[inline]
pub fn iter_glyphs_for_char_range(&'a self, rang: &Range<CharIndex>) -> GlyphIterator<'a> {
if rang.begin() >= self.char_len() {
panic!("iter_glyphs_for_range: range.begin beyond length!");
}
if rang.end() > self.char_len() {
panic!("iter_glyphs_for_range: range.end beyond length!");
}
GlyphIterator {
store: self,
char_index: rang.begin(),
char_range: rang.each_index(),
glyph_range: None,
}
}
#[inline]
pub fn
|
(&self, rang: &Range<CharIndex>) -> Au {
self.iter_glyphs_for_char
|
advance_for_char_range
|
identifier_name
|
glyph.rs
|
0 {
BreakType::Normal
    } else if (flag & BREAK_TYPE_HYPHEN) != 0 {
BreakType::Hyphen
} else {
BreakType::None
}
}
fn break_enum_to_flag(e: BreakType) -> u8 {
match e {
BreakType::None => BREAK_TYPE_NONE,
BreakType::Normal => BREAK_TYPE_NORMAL,
BreakType::Hyphen => BREAK_TYPE_HYPHEN,
}
}
// TODO: make this more type-safe.
static FLAG_CHAR_IS_SPACE: u32 = 0x10000000;
// These two bits store some BREAK_TYPE_* flags
static FLAG_CAN_BREAK_MASK: u32 = 0x60000000;
static FLAG_CAN_BREAK_SHIFT: u32 = 29;
static FLAG_IS_SIMPLE_GLYPH: u32 = 0x80000000;
// glyph advance; in Au's.
static GLYPH_ADVANCE_MASK: u32 = 0x0FFF0000;
static GLYPH_ADVANCE_SHIFT: u32 = 16;
static GLYPH_ID_MASK: u32 = 0x0000FFFF;
// Non-simple glyphs (more than one glyph per char; missing glyph,
// newline, tab, large advance, or nonzero x/y offsets) may have one
// or more detailed glyphs associated with them. They are stored in a
// side array so that there is a 1:1 mapping of GlyphEntry to
// unicode char.
// The number of detailed glyphs for this char. If the char couldn't
// be mapped to a glyph (!FLAG_NOT_MISSING), then this actually holds
// the UTF8 code point instead.
static GLYPH_COUNT_MASK: u32 = 0x00FFFF00;
static GLYPH_COUNT_SHIFT: u32 = 8;
// N.B. following Gecko, these are all inverted so that a lot of
// missing chars can be memset with zeros in one fell swoop.
static FLAG_NOT_MISSING: u32 = 0x00000001;
static FLAG_NOT_CLUSTER_START: u32 = 0x00000002;
static FLAG_NOT_LIGATURE_GROUP_START: u32 = 0x00000004;
static FLAG_CHAR_IS_TAB: u32 = 0x00000008;
static FLAG_CHAR_IS_NEWLINE: u32 = 0x00000010;
//static FLAG_CHAR_IS_LOW_SURROGATE: u32 = 0x00000020;
//static CHAR_IDENTITY_FLAGS_MASK: u32 = 0x00000038;
fn is_simple_glyph_id(id: GlyphId) -> bool {
((id as u32) & GLYPH_ID_MASK) == id
}
fn is_simple_advance(advance: Au) -> bool {
advance >= Au(0) && {
let unsigned_au = advance.0 as u32;
(unsigned_au & (GLYPH_ADVANCE_MASK >> GLYPH_ADVANCE_SHIFT)) == unsigned_au
}
}
type DetailedGlyphCount = u16;
// Getters and setters for GlyphEntry. Setter methods are functional,
// because GlyphEntry is immutable and only a u32 in size.
impl GlyphEntry {
// getter methods
#[inline(always)]
fn advance(&self) -> Au {
Au(((self.value & GLYPH_ADVANCE_MASK) >> GLYPH_ADVANCE_SHIFT) as i32)
}
fn id(&self) -> GlyphId {
self.value & GLYPH_ID_MASK
}
fn is_ligature_start(&self) -> bool {
self.has_flag(!FLAG_NOT_LIGATURE_GROUP_START)
}
fn is_cluster_start(&self) -> bool {
self.has_flag(!FLAG_NOT_CLUSTER_START)
}
// True if original char was normal (U+0020) space. Other chars may
// map to space glyph, but this does not account for them.
fn char_is_space(&self) -> bool {
self.has_flag(FLAG_CHAR_IS_SPACE)
}
fn char_is_tab(&self) -> bool {
!self.is_simple() && self.has_flag(FLAG_CHAR_IS_TAB)
}
fn char_is_newline(&self) -> bool {
!self.is_simple() && self.has_flag(FLAG_CHAR_IS_NEWLINE)
}
fn can_break_before(&self) -> BreakType {
let flag = ((self.value & FLAG_CAN_BREAK_MASK) >> FLAG_CAN_BREAK_SHIFT) as u8;
break_flag_to_enum(flag)
}
// setter methods
#[inline(always)]
fn set_char_is_space(&self) -> GlyphEntry {
GlyphEntry::new(self.value | FLAG_CHAR_IS_SPACE)
}
#[inline(always)]
fn set_char_is_tab(&self) -> GlyphEntry {
assert!(!self.is_simple());
GlyphEntry::new(self.value | FLAG_CHAR_IS_TAB)
}
#[inline(always)]
fn set_char_is_newline(&self) -> GlyphEntry {
assert!(!self.is_simple());
GlyphEntry::new(self.value | FLAG_CHAR_IS_NEWLINE)
}
#[inline(always)]
fn set_can_break_before(&self, e: BreakType) -> GlyphEntry {
let flag = (break_enum_to_flag(e) as u32) << FLAG_CAN_BREAK_SHIFT;
GlyphEntry::new(self.value | flag)
}
// helper methods
fn glyph_count(&self) -> u16 {
assert!(!self.is_simple());
((self.value & GLYPH_COUNT_MASK) >> GLYPH_COUNT_SHIFT) as u16
}
#[inline(always)]
fn is_simple(&self) -> bool {
self.has_flag(FLAG_IS_SIMPLE_GLYPH)
}
#[inline(always)]
fn has_flag(&self, flag: u32) -> bool {
        (self.value & flag) != 0
}
#[inline(always)]
fn adapt_character_flags_of_entry(&self, other: GlyphEntry) -> GlyphEntry {
GlyphEntry { value: self.value | other.value }
}
}
// Stores data for a detailed glyph, in the case that several glyphs
// correspond to one character, or the glyph's data couldn't be packed.
#[derive(Clone, Debug, Copy)]
struct DetailedGlyph {
id: GlyphId,
// glyph's advance, in the text's direction (LTR or RTL)
advance: Au,
// glyph's offset from the font's em-box (from top-left)
offset: Point2D<Au>,
}
impl DetailedGlyph {
fn new(id: GlyphId, advance: Au, offset: Point2D<Au>) -> DetailedGlyph {
DetailedGlyph {
id: id,
advance: advance,
offset: offset,
}
}
}
#[derive(PartialEq, Clone, Eq, Debug, Copy)]
struct DetailedGlyphRecord {
// source string offset/GlyphEntry offset in the TextRun
entry_offset: CharIndex,
// offset into the detailed glyphs buffer
detail_offset: usize,
}
impl PartialOrd for DetailedGlyphRecord {
fn partial_cmp(&self, other: &DetailedGlyphRecord) -> Option<Ordering> {
self.entry_offset.partial_cmp(&other.entry_offset)
}
}
impl Ord for DetailedGlyphRecord {
fn cmp(&self, other: &DetailedGlyphRecord) -> Ordering {
self.entry_offset.cmp(&other.entry_offset)
}
}
// Manages the lookup table for detailed glyphs. Sorting is deferred
// until a lookup is actually performed; this matches the expected
// usage pattern of setting/appending all the detailed glyphs, and
// then querying without setting.
#[derive(Clone)]
struct DetailedGlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_buffer: Vec<DetailedGlyph>,
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_lookup: Vec<DetailedGlyphRecord>,
lookup_is_sorted: bool,
}
impl<'a> DetailedGlyphStore {
fn new() -> DetailedGlyphStore {
DetailedGlyphStore {
detail_buffer: vec!(), // TODO: default size?
detail_lookup: vec!(),
lookup_is_sorted: false,
}
}
fn add_detailed_glyphs_for_entry(&mut self, entry_offset: CharIndex, glyphs: &[DetailedGlyph]) {
let entry = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: self.detail_buffer.len(),
};
debug!("Adding entry[off={:?}] for detailed glyphs: {:?}", entry_offset, glyphs);
/* TODO: don't actually assert this until asserts are compiled
in/out based on severity, debug/release, etc. This assertion
would wreck the complexity of the lookup.
See Rust Issue #3647, #2228, #3627 for related information.
do self.detail_lookup.borrow |arr| {
assert!arr.contains(entry)
}
*/
self.detail_lookup.push(entry);
self.detail_buffer.push_all(glyphs);
self.lookup_is_sorted = false;
}
fn get_detailed_glyphs_for_entry(&'a self, entry_offset: CharIndex, count: u16)
-> &'a [DetailedGlyph] {
debug!("Requesting detailed glyphs[n={}] for entry[off={:?}]", count, entry_offset);
// FIXME: Is this right? --pcwalton
// TODO: should fix this somewhere else
if count == 0 {
return &self.detail_buffer[0..0];
}
assert!((count as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (count as usize) <= self.detail_buffer.len());
// return a slice into the buffer
&self.detail_buffer[i.. i + count as usize]
}
fn get_detailed_glyph_with_index(&'a self,
entry_offset: CharIndex,
detail_offset: u16)
-> &'a DetailedGlyph {
assert!((detail_offset as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (detail_offset as usize) < self.detail_buffer.len());
&self.detail_buffer[i + (detail_offset as usize)]
}
fn ensure_sorted(&mut self) {
if self.lookup_is_sorted {
return;
}
// Sorting a unique vector is surprisingly hard. The following
// code is a good argument for using DVecs, but they require
// immutable locations thus don't play well with freezing.
// Thar be dragons here. You have been warned. (Tips accepted.)
let mut unsorted_records: Vec<DetailedGlyphRecord> = vec!();
mem::swap(&mut self.detail_lookup, &mut unsorted_records);
let mut mut_records : Vec<DetailedGlyphRecord> = unsorted_records;
mut_records.sort_by(|a, b| {
if a < b {
Ordering::Less
} else {
Ordering::Greater
}
});
let mut sorted_records = mut_records;
mem::swap(&mut self.detail_lookup, &mut sorted_records);
self.lookup_is_sorted = true;
}
}
// This struct is used by GlyphStore clients to provide new glyph data.
// It should be allocated on the stack and passed by reference to GlyphStore.
#[derive(Copy, Clone)]
pub struct GlyphData {
id: GlyphId,
advance: Au,
offset: Point2D<Au>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool,
}
impl GlyphData {
/// Creates a new entry for one glyph.
pub fn new(id: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool)
-> GlyphData {
GlyphData {
id: id,
advance: advance,
offset: offset.unwrap_or(Point2D::zero()),
is_missing: is_missing,
cluster_start: cluster_start,
ligature_start: ligature_start,
}
}
}
// This enum is a proxy that's provided to GlyphStore clients when iterating
// through glyphs (either for a particular TextRun offset, or all glyphs).
// Rather than eagerly assembling and copying glyph data, it only retrieves
// values as they are needed from the GlyphStore, using provided offsets.
#[derive(Copy, Clone)]
pub enum GlyphInfo<'a> {
Simple(&'a GlyphStore, CharIndex),
Detail(&'a GlyphStore, CharIndex, u16),
}
impl<'a> GlyphInfo<'a> {
pub fn id(self) -> GlyphId {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].id(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).id
}
}
}
#[inline(always)]
// FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
pub fn advance(self) -> Au {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].advance(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).advance
}
}
}
pub fn offset(self) -> Option<Point2D<Au>> {
match self {
GlyphInfo::Simple(_, _) => None,
GlyphInfo::Detail(store, entry_i, detail_j) => {
Some(store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).offset)
}
}
}
}
/// Stores the glyph data belonging to a text run.
///
/// Simple glyphs are stored inline in the `entry_buffer`, detailed glyphs are
/// stored as pointers into the `detail_store`.
///
/// ~~~ignore
/// +- GlyphStore --------------------------------+
/// | +---+---+---+---+---+---+---+ |
/// | entry_buffer: | | s | | s | | s | s | | d = detailed
/// | +-|-+---+-|-+---+-|-+---+---+ | s = simple
/// | | | | |
/// | | +---+-------+ |
/// | | | |
/// | +-V-+-V-+ |
/// | detail_store: | d | d | |
/// | +---+---+ |
/// +---------------------------------------------+
/// ~~~
#[derive(Clone)]
pub struct GlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
/// A buffer of glyphs within the text run, in the order in which they
/// appear in the input text
entry_buffer: Vec<GlyphEntry>,
/// A store of the detailed glyph data. Detailed glyphs contained in the
/// `entry_buffer` point to locations in this data structure.
detail_store: DetailedGlyphStore,
is_whitespace: bool,
}
int_range_index! {
#[derive(RustcEncodable)]
#[doc = "An index that refers to a character in a text run. This could \
point to the middle of a glyph."]
struct CharIndex(isize)
}
impl<'a> GlyphStore {
// Initializes the glyph store, but doesn't actually shape anything.
// Use the set_glyph, set_glyphs() methods to store glyph data.
pub fn new(length: usize, is_whitespace: bool) -> GlyphStore {
assert!(length > 0);
GlyphStore {
entry_buffer: repeat(GlyphEntry::initial()).take(length)
.collect(),
detail_store: DetailedGlyphStore::new(),
is_whitespace: is_whitespace,
}
}
pub fn char_len(&self) -> CharIndex {
CharIndex(self.entry_buffer.len() as isize)
}
pub fn is_whitespace(&self) -> bool {
self.is_whitespace
}
pub fn finalize_changes(&mut self) {
self.detail_store.ensure_sorted();
}
/// Adds a single glyph. If `character` is present, this represents a single character;
/// otherwise, this glyph represents multiple characters.
pub fn add_glyph_for_char_index(&mut self,
i: CharIndex,
character: Option<char>,
data: &GlyphData) {
fn glyph_is_compressible(data: &GlyphData) -> bool {
is_simple_glyph_id(data.id)
&& is_simple_advance(data.advance)
&& data.offset == Point2D::zero()
&& data.cluster_start // others are stored in detail buffer
}
debug_assert!(data.ligature_start); // can't compress ligature continuation glyphs.
debug_assert!(i < self.char_len());
let mut entry = match (data.is_missing, glyph_is_compressible(data)) {
(true, _) => GlyphEntry::missing(1),
(false, true) => GlyphEntry::simple(data.id, data.advance),
(false, false) => {
let glyph = &[DetailedGlyph::new(data.id, data.advance, data.offset)];
self.detail_store.add_detailed_glyphs_for_entry(i, glyph);
GlyphEntry::complex(data.cluster_start, data.ligature_start, 1)
}
};
// FIXME(pcwalton): Is this necessary? I think it's a no-op.
entry = entry.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
if character == Some(' ') {
entry = entry.set_char_is_space()
}
self.entry_buffer[i.to_usize()] = entry;
}
pub fn add_glyphs_for_char_index(&mut self, i: CharIndex, data_for_glyphs: &[GlyphData]) {
assert!(i < self.char_len());
assert!(data_for_glyphs.len() > 0);
let glyph_count = data_for_glyphs.len();
let first_glyph_data = data_for_glyphs[0];
let entry = match first_glyph_data.is_missing {
true => GlyphEntry::missing(glyph_count),
false => {
let glyphs_vec: Vec<DetailedGlyph> = (0..glyph_count).map(|i| {
DetailedGlyph::new(data_for_glyphs[i].id,
data_for_glyphs[i].advance,
data_for_glyphs[i].offset)
}).collect();
self.detail_store.add_detailed_glyphs_for_entry(i, &glyphs_vec);
GlyphEntry::complex(first_glyph_data.cluster_start,
first_glyph_data.ligature_start,
glyph_count)
}
}.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
self.entry_buffer[i.to_usize()] = entry;
}
// used when a character index has no associated glyph---for example, a ligature continuation.
pub fn add_nonglyph_for_char_index(&mut self, i: CharIndex, cluster_start: bool, ligature_start: bool) {
assert!(i < self.char_len());
let entry = GlyphEntry::complex(cluster_start, ligature_start, 0);
debug!("adding spacer for chracter without associated glyph[idx={:?}]", i);
self.entry_buffer[i.to_usize()] = entry;
}
pub fn iter_glyphs_for_char_index(&'a self, i: CharIndex) -> GlyphIterator<'a> {
self.iter_glyphs_for_char_range(&Range::new(i, CharIndex(1)))
}
#[inline]
pub fn iter_glyphs_for_char_range(&'a self, rang: &Range<CharIndex>) -> GlyphIterator<'a> {
if rang.begin() >= self.char_len() {
panic!("iter_glyphs_for_range: range.begin beyond length!");
}
if rang.end() > self.char_len() {
panic!("iter_glyphs_for_range: range.end beyond length!");
}
GlyphIterator {
store: self,
char_index: rang.begin(),
char_range: rang.each_index(),
glyph_range: None,
}
}
#[inline]
pub fn advance_for_char_range(&self, rang: &Range<CharIndex>) -> Au
|
{
self.iter_glyphs_for_char_range(rang)
.fold(Au(0), |advance, (_, glyph)| advance + glyph.advance())
}
|
identifier_body
|
|
glyph.rs
|
bool {
self.has_flag(FLAG_IS_SIMPLE_GLYPH)
}
#[inline(always)]
fn has_flag(&self, flag: u32) -> bool {
        (self.value & flag) != 0
}
#[inline(always)]
fn adapt_character_flags_of_entry(&self, other: GlyphEntry) -> GlyphEntry {
GlyphEntry { value: self.value | other.value }
}
}
// Stores data for a detailed glyph, in the case that several glyphs
// correspond to one character, or the glyph's data couldn't be packed.
#[derive(Clone, Debug, Copy)]
struct DetailedGlyph {
id: GlyphId,
// glyph's advance, in the text's direction (LTR or RTL)
advance: Au,
// glyph's offset from the font's em-box (from top-left)
offset: Point2D<Au>,
}
impl DetailedGlyph {
fn new(id: GlyphId, advance: Au, offset: Point2D<Au>) -> DetailedGlyph {
DetailedGlyph {
id: id,
advance: advance,
offset: offset,
}
}
}
#[derive(PartialEq, Clone, Eq, Debug, Copy)]
struct DetailedGlyphRecord {
// source string offset/GlyphEntry offset in the TextRun
entry_offset: CharIndex,
// offset into the detailed glyphs buffer
detail_offset: usize,
}
impl PartialOrd for DetailedGlyphRecord {
fn partial_cmp(&self, other: &DetailedGlyphRecord) -> Option<Ordering> {
self.entry_offset.partial_cmp(&other.entry_offset)
}
}
impl Ord for DetailedGlyphRecord {
fn cmp(&self, other: &DetailedGlyphRecord) -> Ordering {
self.entry_offset.cmp(&other.entry_offset)
}
}
// Manages the lookup table for detailed glyphs. Sorting is deferred
// until a lookup is actually performed; this matches the expected
// usage pattern of setting/appending all the detailed glyphs, and
// then querying without setting.
#[derive(Clone)]
struct DetailedGlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_buffer: Vec<DetailedGlyph>,
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
detail_lookup: Vec<DetailedGlyphRecord>,
lookup_is_sorted: bool,
}
impl<'a> DetailedGlyphStore {
fn new() -> DetailedGlyphStore {
DetailedGlyphStore {
detail_buffer: vec!(), // TODO: default size?
detail_lookup: vec!(),
lookup_is_sorted: false,
}
}
fn add_detailed_glyphs_for_entry(&mut self, entry_offset: CharIndex, glyphs: &[DetailedGlyph]) {
let entry = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: self.detail_buffer.len(),
};
debug!("Adding entry[off={:?}] for detailed glyphs: {:?}", entry_offset, glyphs);
/* TODO: don't actually assert this until asserts are compiled
in/out based on severity, debug/release, etc. This assertion
would wreck the complexity of the lookup.
See Rust Issue #3647, #2228, #3627 for related information.
do self.detail_lookup.borrow |arr| {
assert!arr.contains(entry)
}
*/
self.detail_lookup.push(entry);
self.detail_buffer.push_all(glyphs);
self.lookup_is_sorted = false;
}
fn get_detailed_glyphs_for_entry(&'a self, entry_offset: CharIndex, count: u16)
-> &'a [DetailedGlyph] {
debug!("Requesting detailed glyphs[n={}] for entry[off={:?}]", count, entry_offset);
// FIXME: Is this right? --pcwalton
// TODO: should fix this somewhere else
if count == 0 {
return &self.detail_buffer[0..0];
}
assert!((count as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (count as usize) <= self.detail_buffer.len());
// return a slice into the buffer
&self.detail_buffer[i.. i + count as usize]
}
fn get_detailed_glyph_with_index(&'a self,
entry_offset: CharIndex,
detail_offset: u16)
-> &'a DetailedGlyph {
assert!((detail_offset as usize) <= self.detail_buffer.len());
assert!(self.lookup_is_sorted);
let key = DetailedGlyphRecord {
entry_offset: entry_offset,
detail_offset: 0, // unused
};
let i = self.detail_lookup.binary_search_index(&key)
.expect("Invalid index not found in detailed glyph lookup table!");
assert!(i + (detail_offset as usize) < self.detail_buffer.len());
&self.detail_buffer[i + (detail_offset as usize)]
}
fn ensure_sorted(&mut self) {
if self.lookup_is_sorted {
return;
}
// Sorting a unique vector is surprisingly hard. The following
// code is a good argument for using DVecs, but they require
// immutable locations thus don't play well with freezing.
// Thar be dragons here. You have been warned. (Tips accepted.)
let mut unsorted_records: Vec<DetailedGlyphRecord> = vec!();
mem::swap(&mut self.detail_lookup, &mut unsorted_records);
let mut mut_records : Vec<DetailedGlyphRecord> = unsorted_records;
mut_records.sort_by(|a, b| {
if a < b {
Ordering::Less
} else {
Ordering::Greater
}
});
let mut sorted_records = mut_records;
mem::swap(&mut self.detail_lookup, &mut sorted_records);
self.lookup_is_sorted = true;
}
}
// This struct is used by GlyphStore clients to provide new glyph data.
// It should be allocated on the stack and passed by reference to GlyphStore.
#[derive(Copy, Clone)]
pub struct GlyphData {
id: GlyphId,
advance: Au,
offset: Point2D<Au>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool,
}
impl GlyphData {
/// Creates a new entry for one glyph.
pub fn new(id: GlyphId,
advance: Au,
offset: Option<Point2D<Au>>,
is_missing: bool,
cluster_start: bool,
ligature_start: bool)
-> GlyphData {
GlyphData {
id: id,
advance: advance,
offset: offset.unwrap_or(Point2D::zero()),
is_missing: is_missing,
cluster_start: cluster_start,
ligature_start: ligature_start,
}
}
}
// This enum is a proxy that's provided to GlyphStore clients when iterating
// through glyphs (either for a particular TextRun offset, or all glyphs).
// Rather than eagerly assembling and copying glyph data, it only retrieves
// values as they are needed from the GlyphStore, using provided offsets.
#[derive(Copy, Clone)]
pub enum GlyphInfo<'a> {
Simple(&'a GlyphStore, CharIndex),
Detail(&'a GlyphStore, CharIndex, u16),
}
impl<'a> GlyphInfo<'a> {
pub fn id(self) -> GlyphId {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].id(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).id
}
}
}
#[inline(always)]
// FIXME: Resolution conflicts with IteratorUtil trait so adding trailing _
pub fn advance(self) -> Au {
match self {
GlyphInfo::Simple(store, entry_i) => store.entry_buffer[entry_i.to_usize()].advance(),
GlyphInfo::Detail(store, entry_i, detail_j) => {
store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).advance
}
}
}
pub fn offset(self) -> Option<Point2D<Au>> {
match self {
GlyphInfo::Simple(_, _) => None,
GlyphInfo::Detail(store, entry_i, detail_j) => {
Some(store.detail_store.get_detailed_glyph_with_index(entry_i, detail_j).offset)
}
}
}
}
/// Stores the glyph data belonging to a text run.
///
/// Simple glyphs are stored inline in the `entry_buffer`, detailed glyphs are
/// stored as pointers into the `detail_store`.
///
/// ~~~ignore
/// +- GlyphStore --------------------------------+
/// | +---+---+---+---+---+---+---+ |
/// | entry_buffer: | | s | | s | | s | s | | d = detailed
/// | +-|-+---+-|-+---+-|-+---+---+ | s = simple
/// | | | | |
/// | | +---+-------+ |
/// | | | |
/// | +-V-+-V-+ |
/// | detail_store: | d | d | |
/// | +---+---+ |
/// +---------------------------------------------+
/// ~~~
#[derive(Clone)]
pub struct GlyphStore {
// TODO(pcwalton): Allocation of this buffer is expensive. Consider a small-vector
// optimization.
/// A buffer of glyphs within the text run, in the order in which they
/// appear in the input text
entry_buffer: Vec<GlyphEntry>,
/// A store of the detailed glyph data. Detailed glyphs contained in the
/// `entry_buffer` point to locations in this data structure.
detail_store: DetailedGlyphStore,
is_whitespace: bool,
}
int_range_index! {
#[derive(RustcEncodable)]
#[doc = "An index that refers to a character in a text run. This could \
point to the middle of a glyph."]
struct CharIndex(isize)
}
impl<'a> GlyphStore {
// Initializes the glyph store, but doesn't actually shape anything.
// Use the set_glyph, set_glyphs() methods to store glyph data.
pub fn new(length: usize, is_whitespace: bool) -> GlyphStore {
assert!(length > 0);
GlyphStore {
entry_buffer: repeat(GlyphEntry::initial()).take(length)
.collect(),
detail_store: DetailedGlyphStore::new(),
is_whitespace: is_whitespace,
}
}
pub fn char_len(&self) -> CharIndex {
CharIndex(self.entry_buffer.len() as isize)
}
pub fn is_whitespace(&self) -> bool {
self.is_whitespace
}
pub fn finalize_changes(&mut self) {
self.detail_store.ensure_sorted();
}
/// Adds a single glyph. If `character` is present, this represents a single character;
/// otherwise, this glyph represents multiple characters.
pub fn add_glyph_for_char_index(&mut self,
i: CharIndex,
character: Option<char>,
data: &GlyphData) {
fn glyph_is_compressible(data: &GlyphData) -> bool {
is_simple_glyph_id(data.id)
&& is_simple_advance(data.advance)
&& data.offset == Point2D::zero()
&& data.cluster_start // others are stored in detail buffer
}
debug_assert!(data.ligature_start); // can't compress ligature continuation glyphs.
debug_assert!(i < self.char_len());
let mut entry = match (data.is_missing, glyph_is_compressible(data)) {
(true, _) => GlyphEntry::missing(1),
(false, true) => GlyphEntry::simple(data.id, data.advance),
(false, false) => {
let glyph = &[DetailedGlyph::new(data.id, data.advance, data.offset)];
self.detail_store.add_detailed_glyphs_for_entry(i, glyph);
GlyphEntry::complex(data.cluster_start, data.ligature_start, 1)
}
};
// FIXME(pcwalton): Is this necessary? I think it's a no-op.
entry = entry.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
if character == Some(' ') {
entry = entry.set_char_is_space()
}
self.entry_buffer[i.to_usize()] = entry;
}
pub fn add_glyphs_for_char_index(&mut self, i: CharIndex, data_for_glyphs: &[GlyphData]) {
assert!(i < self.char_len());
assert!(data_for_glyphs.len() > 0);
let glyph_count = data_for_glyphs.len();
let first_glyph_data = data_for_glyphs[0];
let entry = match first_glyph_data.is_missing {
true => GlyphEntry::missing(glyph_count),
false => {
let glyphs_vec: Vec<DetailedGlyph> = (0..glyph_count).map(|i| {
DetailedGlyph::new(data_for_glyphs[i].id,
data_for_glyphs[i].advance,
data_for_glyphs[i].offset)
}).collect();
self.detail_store.add_detailed_glyphs_for_entry(i, &glyphs_vec);
GlyphEntry::complex(first_glyph_data.cluster_start,
first_glyph_data.ligature_start,
glyph_count)
}
}.adapt_character_flags_of_entry(self.entry_buffer[i.to_usize()]);
debug!("Adding multiple glyphs[idx={:?}, count={}]: {:?}", i, glyph_count, entry);
self.entry_buffer[i.to_usize()] = entry;
}
// used when a character index has no associated glyph---for example, a ligature continuation.
pub fn add_nonglyph_for_char_index(&mut self, i: CharIndex, cluster_start: bool, ligature_start: bool) {
assert!(i < self.char_len());
let entry = GlyphEntry::complex(cluster_start, ligature_start, 0);
debug!("adding spacer for chracter without associated glyph[idx={:?}]", i);
self.entry_buffer[i.to_usize()] = entry;
}
pub fn iter_glyphs_for_char_index(&'a self, i: CharIndex) -> GlyphIterator<'a> {
self.iter_glyphs_for_char_range(&Range::new(i, CharIndex(1)))
}
#[inline]
pub fn iter_glyphs_for_char_range(&'a self, rang: &Range<CharIndex>) -> GlyphIterator<'a> {
if rang.begin() >= self.char_len() {
panic!("iter_glyphs_for_range: range.begin beyond length!");
}
if rang.end() > self.char_len() {
panic!("iter_glyphs_for_range: range.end beyond length!");
}
GlyphIterator {
store: self,
char_index: rang.begin(),
char_range: rang.each_index(),
glyph_range: None,
}
}
#[inline]
pub fn advance_for_char_range(&self, rang: &Range<CharIndex>) -> Au {
self.iter_glyphs_for_char_range(rang)
.fold(Au(0), |advance, (_, glyph)| advance + glyph.advance())
}
// getter methods
pub fn char_is_space(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_usize()].char_is_space()
}
pub fn char_is_tab(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_usize()].char_is_tab()
}
pub fn char_is_newline(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_usize()].char_is_newline()
}
pub fn is_ligature_start(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_usize()].is_ligature_start()
}
pub fn is_cluster_start(&self, i: CharIndex) -> bool {
assert!(i < self.char_len());
self.entry_buffer[i.to_usize()].is_cluster_start()
}
pub fn can_break_before(&self, i: CharIndex) -> BreakType {
assert!(i < self.char_len());
self.entry_buffer[i.to_usize()].can_break_before()
}
// setter methods
pub fn set_char_is_space(&mut self, i: CharIndex) {
assert!(i < self.char_len());
let entry = self.entry_buffer[i.to_usize()];
self.entry_buffer[i.to_usize()] = entry.set_char_is_space();
}
pub fn set_char_is_tab(&mut self, i: CharIndex) {
assert!(i < self.char_len());
let entry = self.entry_buffer[i.to_usize()];
self.entry_buffer[i.to_usize()] = entry.set_char_is_tab();
}
pub fn set_char_is_newline(&mut self, i: CharIndex) {
assert!(i < self.char_len());
let entry = self.entry_buffer[i.to_usize()];
self.entry_buffer[i.to_usize()] = entry.set_char_is_newline();
}
pub fn set_can_break_before(&mut self, i: CharIndex, t: BreakType) {
assert!(i < self.char_len());
let entry = self.entry_buffer[i.to_usize()];
self.entry_buffer[i.to_usize()] = entry.set_can_break_before(t);
}
pub fn space_count_in_range(&self, range: &Range<CharIndex>) -> u32 {
let mut spaces = 0;
for index in range.each_index() {
if self.char_is_space(index) {
spaces += 1
}
}
spaces
}
pub fn distribute_extra_space_in_range(&mut self, range: &Range<CharIndex>, space: f64) {
debug_assert!(space >= 0.0);
if range.is_empty() {
return
}
for index in range.each_index() {
// TODO(pcwalton): Handle spaces that are detailed glyphs -- these are uncommon but
// possible.
let entry = &mut self.entry_buffer[index.to_usize()];
if entry.is_simple() && entry.char_is_space() {
// FIXME(pcwalton): This can overflow for very large font-sizes.
let advance =
((entry.value & GLYPH_ADVANCE_MASK) >> GLYPH_ADVANCE_SHIFT) +
Au::from_f64_px(space).0 as u32;
                entry.value = (entry.value & !GLYPH_ADVANCE_MASK) |
(advance << GLYPH_ADVANCE_SHIFT);
}
}
}
}
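
// A brief usage sketch of the store API above (illustrative only, assuming the
// module's usual imports for `Au`, `Range`, `Point2D`, and friends): size the
// store to the character count, push one glyph per character index, seal the
// detail lookup with `finalize_changes()`, then query advances over a range.
#[allow(dead_code)]
fn glyph_store_usage_sketch() {
    let mut store = GlyphStore::new(2, /* is_whitespace */ false);
    let data = GlyphData::new(42, Au(160), None,
                              /* is_missing */ false,
                              /* cluster_start */ true,
                              /* ligature_start */ true);
    store.add_glyph_for_char_index(CharIndex(0), Some('a'), &data);
    store.add_glyph_for_char_index(CharIndex(1), Some(' '), &data);
    store.finalize_changes();
    // Two simple glyphs at Au(160) each; the folded advance should be Au(320).
    let _total = store.advance_for_char_range(&Range::new(CharIndex(0), CharIndex(2)));
}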
/// An iterator over the glyphs in a character range in a `GlyphStore`.
pub struct GlyphIterator<'a> {
store: &'a GlyphStore,
char_index: CharIndex,
char_range: EachIndex<isize, CharIndex>,
glyph_range: Option<EachIndex<isize, CharIndex>>,
}
impl<'a> GlyphIterator<'a> {
// Slow path when there is a glyph range.
#[inline(never)]
fn next_glyph_range(&mut self) -> Option<(CharIndex, GlyphInfo<'a>)> {
match self.glyph_range.as_mut().unwrap().next() {
Some(j) => Some((self.char_index,
GlyphInfo::Detail(self.store, self.char_index, j.get() as u16 /*??? */))),
None =>
|
{
// No more glyphs for current character. Try to get another.
self.glyph_range = None;
self.next()
}
|
conditional_block
|
|
args.rs
|
use super::abi::usercalls::{alloc, raw::ByteBuffer};
use crate::ffi::OsString;
use crate::fmt;
use crate::slice;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::os_str::Buf;
use crate::sys_common::FromInner;
#[cfg_attr(test, linkage = "available_externally")]
#[export_name = "_ZN16__rust_internals3std3sys3sgx4args4ARGSE"]
static ARGS: AtomicUsize = AtomicUsize::new(0);
type ArgsStore = Vec<OsString>;
#[cfg_attr(test, allow(dead_code))]
pub unsafe fn init(argc: isize, argv: *const *const u8) {
    if argc != 0
|
}
pub fn args() -> Args {
let args = unsafe { (ARGS.load(Ordering::Relaxed) as *const ArgsStore).as_ref() };
if let Some(args) = args { Args(args.iter()) } else { Args([].iter()) }
}
pub struct Args(slice::Iter<'static, OsString>);
impl fmt::Debug for Args {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.as_slice().fmt(f)
}
}
impl Iterator for Args {
type Item = OsString;
fn next(&mut self) -> Option<OsString> {
self.0.next().cloned()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.0.size_hint()
}
}
impl ExactSizeIterator for Args {
fn len(&self) -> usize {
self.0.len()
}
}
impl DoubleEndedIterator for Args {
fn next_back(&mut self) -> Option<OsString> {
self.0.next_back().cloned()
}
}
|
{
let args = unsafe { alloc::User::<[ByteBuffer]>::from_raw_parts(argv as _, argc as _) };
let args = args
.iter()
.map(|a| OsString::from_inner(Buf { inner: a.copy_user_buffer() }))
.collect::<ArgsStore>();
ARGS.store(Box::into_raw(Box::new(args)) as _, Ordering::Relaxed);
}
|
conditional_block
|
args.rs
|
use super::abi::usercalls::{alloc, raw::ByteBuffer};
use crate::ffi::OsString;
use crate::fmt;
use crate::slice;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::os_str::Buf;
use crate::sys_common::FromInner;
#[cfg_attr(test, linkage = "available_externally")]
#[export_name = "_ZN16__rust_internals3std3sys3sgx4args4ARGSE"]
static ARGS: AtomicUsize = AtomicUsize::new(0);
type ArgsStore = Vec<OsString>;
#[cfg_attr(test, allow(dead_code))]
pub unsafe fn init(argc: isize, argv: *const *const u8) {
    if argc != 0 {
let args = unsafe { alloc::User::<[ByteBuffer]>::from_raw_parts(argv as _, argc as _) };
let args = args
.iter()
.map(|a| OsString::from_inner(Buf { inner: a.copy_user_buffer() }))
.collect::<ArgsStore>();
ARGS.store(Box::into_raw(Box::new(args)) as _, Ordering::Relaxed);
}
}
pub fn args() -> Args
|
pub struct Args(slice::Iter<'static, OsString>);
impl fmt::Debug for Args {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.as_slice().fmt(f)
}
}
impl Iterator for Args {
type Item = OsString;
fn next(&mut self) -> Option<OsString> {
self.0.next().cloned()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.0.size_hint()
}
}
impl ExactSizeIterator for Args {
fn len(&self) -> usize {
self.0.len()
}
}
impl DoubleEndedIterator for Args {
fn next_back(&mut self) -> Option<OsString> {
self.0.next_back().cloned()
}
}
|
{
let args = unsafe { (ARGS.load(Ordering::Relaxed) as *const ArgsStore).as_ref() };
if let Some(args) = args { Args(args.iter()) } else { Args([].iter()) }
}
|
identifier_body
|
args.rs
|
use super::abi::usercalls::{alloc, raw::ByteBuffer};
use crate::ffi::OsString;
use crate::fmt;
use crate::slice;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::os_str::Buf;
use crate::sys_common::FromInner;
#[cfg_attr(test, linkage = "available_externally")]
#[export_name = "_ZN16__rust_internals3std3sys3sgx4args4ARGSE"]
static ARGS: AtomicUsize = AtomicUsize::new(0);
type ArgsStore = Vec<OsString>;
|
#[cfg_attr(test, allow(dead_code))]
pub unsafe fn init(argc: isize, argv: *const *const u8) {
if argc != 0 {
let args = unsafe { alloc::User::<[ByteBuffer]>::from_raw_parts(argv as _, argc as _) };
let args = args
.iter()
.map(|a| OsString::from_inner(Buf { inner: a.copy_user_buffer() }))
.collect::<ArgsStore>();
ARGS.store(Box::into_raw(Box::new(args)) as _, Ordering::Relaxed);
}
}
pub fn args() -> Args {
let args = unsafe { (ARGS.load(Ordering::Relaxed) as *const ArgsStore).as_ref() };
if let Some(args) = args { Args(args.iter()) } else { Args([].iter()) }
}
pub struct Args(slice::Iter<'static, OsString>);
impl fmt::Debug for Args {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.as_slice().fmt(f)
}
}
impl Iterator for Args {
type Item = OsString;
fn next(&mut self) -> Option<OsString> {
self.0.next().cloned()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.0.size_hint()
}
}
impl ExactSizeIterator for Args {
fn len(&self) -> usize {
self.0.len()
}
}
impl DoubleEndedIterator for Args {
fn next_back(&mut self) -> Option<OsString> {
self.0.next_back().cloned()
}
}
|
random_line_split
|
|
args.rs
|
use super::abi::usercalls::{alloc, raw::ByteBuffer};
use crate::ffi::OsString;
use crate::fmt;
use crate::slice;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::os_str::Buf;
use crate::sys_common::FromInner;
#[cfg_attr(test, linkage = "available_externally")]
#[export_name = "_ZN16__rust_internals3std3sys3sgx4args4ARGSE"]
static ARGS: AtomicUsize = AtomicUsize::new(0);
type ArgsStore = Vec<OsString>;
#[cfg_attr(test, allow(dead_code))]
pub unsafe fn
|
(argc: isize, argv: *const *const u8) {
if argc != 0 {
let args = unsafe { alloc::User::<[ByteBuffer]>::from_raw_parts(argv as _, argc as _) };
let args = args
.iter()
.map(|a| OsString::from_inner(Buf { inner: a.copy_user_buffer() }))
.collect::<ArgsStore>();
ARGS.store(Box::into_raw(Box::new(args)) as _, Ordering::Relaxed);
}
}
pub fn args() -> Args {
let args = unsafe { (ARGS.load(Ordering::Relaxed) as *const ArgsStore).as_ref() };
if let Some(args) = args { Args(args.iter()) } else { Args([].iter()) }
}
pub struct Args(slice::Iter<'static, OsString>);
impl fmt::Debug for Args {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.as_slice().fmt(f)
}
}
impl Iterator for Args {
type Item = OsString;
fn next(&mut self) -> Option<OsString> {
self.0.next().cloned()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.0.size_hint()
}
}
impl ExactSizeIterator for Args {
fn len(&self) -> usize {
self.0.len()
}
}
impl DoubleEndedIterator for Args {
fn next_back(&mut self) -> Option<OsString> {
self.0.next_back().cloned()
}
}
|
init
|
identifier_name
|
client.rs
|
// Copyright (c) 2016 P.Y. Laligand
use hyper::Client as HttpClient;
use serde_json::{self, Map, Value};
use std::io::Read;
use super::clip::Clip;
use super::error::XboxError;
use super::xuid::Xuid;
/// Header definition for the API key.
header! { (XAuth, "X-AUTH") => [String] }
/// Interface to the xboxapi.com API.
pub struct Client {
/// The API key to authenticate with the service.
api_key: String,
}
/// Base URL for API calls.
const BASE_URL: &'static str = "https://xboxapi.com/v2";
impl Client {
/// Creates a new instance with the given API key.
pub fn new(api_key: String) -> Client {
Client {api_key: api_key}
}
/// Matches a gamertag with its internal user identifier.
|
.and_then(|json| { json.as_u64().ok_or(XboxError::from("Unexpected id format")) })
.map(|value| { Xuid(value) })
}
/// Builds a clip object from its JSON representation.
fn create_clip(json: &mut Map<String, Value>) -> Result<Clip, XboxError> {
let get_value = |key: &str| -> Result<String, XboxError> {
json.get(key)
.and_then(|value| { value.as_str() })
.map(|string| { string.to_owned() })
.ok_or(XboxError::new(format!("Could not get key {}", key)))
};
let id = try!(get_value("gameClipId"));
let date = try!(get_value("datePublished"));
let url = try!(json.get("gameClipUris")
.and_then(|uris_value| { uris_value.as_array() })
.and_then(|uris| { uris[0].as_object() })
.and_then(|clip_uri| { clip_uri.get("uri") })
.and_then(|uri_value| { uri_value.as_str() })
.map(|uri| { uri.to_owned() })
.ok_or(XboxError::from("Could not find clip URIs")));
Ok(Clip { id: id, url: url, date: date })
}
/// Returns the list of available clips for the given user.
pub fn get_clips(&self, xuid: &Xuid) -> Result<Vec<Clip>, XboxError> {
let path = format!("{}/game-clips", xuid);
let mut json = try!(self.send_request(&path));
let json_string = json.to_string();
let clips = try!(json.as_array_mut().ok_or(
XboxError::new(format!("Unexpected clip array: {}", json_string))));
clips.iter_mut().map(|clip| {
let clip_string = clip.to_string();
let content = try!(clip.as_object_mut().ok_or(
XboxError::new(format!("Unexpected clip: {}", clip_string))));
Client::create_clip(content)
}).collect()
}
/// Issues a request to the xboxapi.com API.
///
/// Returns an error if the request failed.
fn send_request(&self, path: &String) -> Result<Value, XboxError> {
let url = format!("{}/{}", BASE_URL, path);
let client = HttpClient::new();
let mut response = try!(client.get(&url)
.header(XAuth(self.api_key.clone()))
.send());
let mut buffer = String::new();
try!(response.read_to_string(&mut buffer));
let result: Value = try!(serde_json::from_str(&buffer));
Ok(result)
}
}
|
pub fn get_xuid(&self, gamertag: &String) -> Result<Xuid, XboxError> {
let path = format!("xuid/{}", gamertag);
self.send_request(&path)
|
random_line_split
|
client.rs
|
// Copyright (c) 2016 P.Y. Laligand
use hyper::Client as HttpClient;
use serde_json::{self, Map, Value};
use std::io::Read;
use super::clip::Clip;
use super::error::XboxError;
use super::xuid::Xuid;
/// Header definition for the API key.
header! { (XAuth, "X-AUTH") => [String] }
/// Interface to the xboxapi.com API.
pub struct Client {
/// The API key to authenticate with the service.
api_key: String,
}
/// Base URL for API calls.
const BASE_URL: &'static str = "https://xboxapi.com/v2";
impl Client {
/// Creates a new instance with the given API key.
pub fn new(api_key: String) -> Client {
Client {api_key: api_key}
}
/// Matches a gamertag with its internal user identifier.
pub fn get_xuid(&self, gamertag: &String) -> Result<Xuid, XboxError> {
let path = format!("xuid/{}", gamertag);
self.send_request(&path)
.and_then(|json| { json.as_u64().ok_or(XboxError::from("Unexpected id format")) })
.map(|value| { Xuid(value) })
}
/// Builds a clip object from its JSON representation.
fn create_clip(json: &mut Map<String, Value>) -> Result<Clip, XboxError>
|
/// Returns the list of available clips for the given user.
pub fn get_clips(&self, xuid: &Xuid) -> Result<Vec<Clip>, XboxError> {
let path = format!("{}/game-clips", xuid);
let mut json = try!(self.send_request(&path));
let json_string = json.to_string();
let clips = try!(json.as_array_mut().ok_or(
XboxError::new(format!("Unexpected clip array: {}", json_string))));
clips.iter_mut().map(|clip| {
let clip_string = clip.to_string();
let content = try!(clip.as_object_mut().ok_or(
XboxError::new(format!("Unexpected clip: {}", clip_string))));
Client::create_clip(content)
}).collect()
}
/// Issues a request to the xboxapi.com API.
///
/// Returns an error if the request failed.
fn send_request(&self, path: &String) -> Result<Value, XboxError> {
let url = format!("{}/{}", BASE_URL, path);
let client = HttpClient::new();
let mut response = try!(client.get(&url)
.header(XAuth(self.api_key.clone()))
.send());
let mut buffer = String::new();
try!(response.read_to_string(&mut buffer));
let result: Value = try!(serde_json::from_str(&buffer));
Ok(result)
}
}
|
{
let get_value = |key: &str| -> Result<String, XboxError> {
json.get(key)
.and_then(|value| { value.as_str() })
.map(|string| { string.to_owned() })
.ok_or(XboxError::new(format!("Could not get key {}", key)))
};
let id = try!(get_value("gameClipId"));
let date = try!(get_value("datePublished"));
let url = try!(json.get("gameClipUris")
.and_then(|uris_value| { uris_value.as_array() })
.and_then(|uris| { uris[0].as_object() })
.and_then(|clip_uri| { clip_uri.get("uri") })
.and_then(|uri_value| { uri_value.as_str() })
.map(|uri| { uri.to_owned() })
.ok_or(XboxError::from("Could not find clip URIs")));
Ok(Clip { id: id, url: url, date: date })
}
|
identifier_body
|
client.rs
|
// Copyright (c) 2016 P.Y. Laligand
use hyper::Client as HttpClient;
use serde_json::{self, Map, Value};
use std::io::Read;
use super::clip::Clip;
use super::error::XboxError;
use super::xuid::Xuid;
/// Header definition for the API key.
header! { (XAuth, "X-AUTH") => [String] }
/// Interface to the xboxapi.com API.
pub struct
|
{
/// The API key to authenticate with the service.
api_key: String,
}
/// Base URL for API calls.
const BASE_URL: &'static str = "https://xboxapi.com/v2";
impl Client {
/// Creates a new instance with the given API key.
pub fn new(api_key: String) -> Client {
Client {api_key: api_key}
}
/// Matches a gamertag with its internal user identifier.
pub fn get_xuid(&self, gamertag: &String) -> Result<Xuid, XboxError> {
let path = format!("xuid/{}", gamertag);
self.send_request(&path)
.and_then(|json| { json.as_u64().ok_or(XboxError::from("Unexpected id format")) })
.map(|value| { Xuid(value) })
}
/// Builds a clip object from its JSON representation.
fn create_clip(json: &mut Map<String, Value>) -> Result<Clip, XboxError> {
let get_value = |key: &str| -> Result<String, XboxError> {
json.get(key)
.and_then(|value| { value.as_str() })
.map(|string| { string.to_owned() })
.ok_or(XboxError::new(format!("Could not get key {}", key)))
};
let id = try!(get_value("gameClipId"));
let date = try!(get_value("datePublished"));
let url = try!(json.get("gameClipUris")
.and_then(|uris_value| { uris_value.as_array() })
.and_then(|uris| { uris[0].as_object() })
.and_then(|clip_uri| { clip_uri.get("uri") })
.and_then(|uri_value| { uri_value.as_str() })
.map(|uri| { uri.to_owned() })
.ok_or(XboxError::from("Could not find clip URIs")));
Ok(Clip { id: id, url: url, date: date })
}
/// Returns the list of available clips for the given user.
pub fn get_clips(&self, xuid: &Xuid) -> Result<Vec<Clip>, XboxError> {
let path = format!("{}/game-clips", xuid);
let mut json = try!(self.send_request(&path));
let json_string = json.to_string();
let clips = try!(json.as_array_mut().ok_or(
XboxError::new(format!("Unexpected clip array: {}", json_string))));
clips.iter_mut().map(|clip| {
let clip_string = clip.to_string();
let content = try!(clip.as_object_mut().ok_or(
XboxError::new(format!("Unexpected clip: {}", clip_string))));
Client::create_clip(content)
}).collect()
}
/// Issues a request to the xboxapi.com API.
///
/// Returns an error if the request failed.
fn send_request(&self, path: &String) -> Result<Value, XboxError> {
let url = format!("{}/{}", BASE_URL, path);
let client = HttpClient::new();
let mut response = try!(client.get(&url)
.header(XAuth(self.api_key.clone()))
.send());
let mut buffer = String::new();
try!(response.read_to_string(&mut buffer));
let result: Value = try!(serde_json::from_str(&buffer));
Ok(result)
}
}
|
Client
|
identifier_name
|
replay.rs
|
const REPLAY_BUFFER_SIZE: usize = 256;
const EMPTY_ENTRY: u64 = 0xFFFFFFFFFFFFFFFF;
pub struct ReplayProtection {
most_recent_sequence: u64,
received_packet: [u64; REPLAY_BUFFER_SIZE]
}
impl Clone for ReplayProtection {
fn clone(&self) -> ReplayProtection {
ReplayProtection {
most_recent_sequence: self.most_recent_sequence,
received_packet: self.received_packet
}
}
}
impl ReplayProtection {
pub fn
|
() -> ReplayProtection {
ReplayProtection {
most_recent_sequence: 0,
received_packet: [EMPTY_ENTRY; REPLAY_BUFFER_SIZE]
}
}
pub fn packet_already_received(&mut self, sequence: u64) -> bool {
if sequence & (1 << 63) == (1 << 63) {
return false;
}
if sequence + (REPLAY_BUFFER_SIZE as u64) <= self.most_recent_sequence {
return true
}
if sequence > self.most_recent_sequence {
self.most_recent_sequence = sequence;
}
let index = sequence as usize % REPLAY_BUFFER_SIZE;
if self.received_packet[index] == EMPTY_ENTRY {
self.received_packet[index] = sequence;
return false
}
if self.received_packet[index] >= sequence {
return true
}
self.received_packet[index] = sequence;
false
}
}
#[test]
fn test_replay_protection() {
for _ in 0..2 {
let mut replay_protection = ReplayProtection::new();
assert_eq!(replay_protection.most_recent_sequence, 0);
// sequence numbers with high bit set should be ignored
assert!(!replay_protection.packet_already_received(1<<63));
assert_eq!(replay_protection.most_recent_sequence, 0);
// the first time we receive packets, they should not be already received
const MAX_SEQUENCE: u64 = REPLAY_BUFFER_SIZE as u64 * 4;
for sequence in 0..MAX_SEQUENCE {
assert!(!replay_protection.packet_already_received(sequence));
}
// old packets outside buffer should be considered already received
assert!(replay_protection.packet_already_received(0));
// packets received a second time should be flagged already received
for sequence in MAX_SEQUENCE-10..MAX_SEQUENCE {
assert!(replay_protection.packet_already_received(sequence));
}
// jumping ahead to a much higher sequence should be considered not already received
assert!(!replay_protection.packet_already_received(MAX_SEQUENCE + REPLAY_BUFFER_SIZE as u64));
// old packets should be considered already received
for sequence in 0..MAX_SEQUENCE {
assert!(replay_protection.packet_already_received(sequence));
}
}
}
|
new
|
identifier_name
|
replay.rs
|
const REPLAY_BUFFER_SIZE: usize = 256;
const EMPTY_ENTRY: u64 = 0xFFFFFFFFFFFFFFFF;
pub struct ReplayProtection {
most_recent_sequence: u64,
received_packet: [u64; REPLAY_BUFFER_SIZE]
}
impl Clone for ReplayProtection {
fn clone(&self) -> ReplayProtection {
ReplayProtection {
most_recent_sequence: self.most_recent_sequence,
received_packet: self.received_packet
}
}
}
impl ReplayProtection {
pub fn new() -> ReplayProtection {
ReplayProtection {
most_recent_sequence: 0,
received_packet: [EMPTY_ENTRY; REPLAY_BUFFER_SIZE]
}
}
|
pub fn packet_already_received(&mut self, sequence: u64) -> bool {
if sequence & (1 << 63) == (1 << 63) {
return false;
}
if sequence + (REPLAY_BUFFER_SIZE as u64) <= self.most_recent_sequence {
return true
}
if sequence > self.most_recent_sequence {
self.most_recent_sequence = sequence;
}
let index = sequence as usize % REPLAY_BUFFER_SIZE;
if self.received_packet[index] == EMPTY_ENTRY {
self.received_packet[index] = sequence;
return false
}
if self.received_packet[index] >= sequence {
return true
}
self.received_packet[index] = sequence;
false
}
}
#[test]
fn test_replay_protection() {
for _ in 0..2 {
let mut replay_protection = ReplayProtection::new();
assert_eq!(replay_protection.most_recent_sequence, 0);
// sequence numbers with high bit set should be ignored
assert!(!replay_protection.packet_already_received(1<<63));
assert_eq!(replay_protection.most_recent_sequence, 0);
// the first time we receive packets, they should not be already received
const MAX_SEQUENCE: u64 = REPLAY_BUFFER_SIZE as u64 * 4;
for sequence in 0..MAX_SEQUENCE {
assert!(!replay_protection.packet_already_received(sequence));
}
// old packets outside buffer should be considered already received
assert!(replay_protection.packet_already_received(0));
// packets received a second time should be flagged already received
for sequence in MAX_SEQUENCE-10..MAX_SEQUENCE {
assert!(replay_protection.packet_already_received(sequence));
}
// jumping ahead to a much higher sequence should be considered not already received
assert!(!replay_protection.packet_already_received(MAX_SEQUENCE + REPLAY_BUFFER_SIZE as u64));
// old packets should be considered already received
for sequence in 0..MAX_SEQUENCE {
assert!(replay_protection.packet_already_received(sequence));
}
}
}
|
random_line_split
|
|
replay.rs
|
const REPLAY_BUFFER_SIZE: usize = 256;
const EMPTY_ENTRY: u64 = 0xFFFFFFFFFFFFFFFF;
pub struct ReplayProtection {
most_recent_sequence: u64,
received_packet: [u64; REPLAY_BUFFER_SIZE]
}
impl Clone for ReplayProtection {
fn clone(&self) -> ReplayProtection {
ReplayProtection {
most_recent_sequence: self.most_recent_sequence,
received_packet: self.received_packet
}
}
}
impl ReplayProtection {
pub fn new() -> ReplayProtection {
ReplayProtection {
most_recent_sequence: 0,
received_packet: [EMPTY_ENTRY; REPLAY_BUFFER_SIZE]
}
}
pub fn packet_already_received(&mut self, sequence: u64) -> bool {
if sequence & (1 << 63) == (1 << 63) {
return false;
}
if sequence + (REPLAY_BUFFER_SIZE as u64) <= self.most_recent_sequence
|
if sequence > self.most_recent_sequence {
self.most_recent_sequence = sequence;
}
let index = sequence as usize % REPLAY_BUFFER_SIZE;
if self.received_packet[index] == EMPTY_ENTRY {
self.received_packet[index] = sequence;
return false
}
if self.received_packet[index] >= sequence {
return true
}
self.received_packet[index] = sequence;
false
}
}
#[test]
fn test_replay_protection() {
for _ in 0..2 {
let mut replay_protection = ReplayProtection::new();
assert_eq!(replay_protection.most_recent_sequence, 0);
// sequence numbers with high bit set should be ignored
assert!(!replay_protection.packet_already_received(1<<63));
assert_eq!(replay_protection.most_recent_sequence, 0);
// the first time we receive packets, they should not be already received
const MAX_SEQUENCE: u64 = REPLAY_BUFFER_SIZE as u64 * 4;
for sequence in 0..MAX_SEQUENCE {
assert!(!replay_protection.packet_already_received(sequence));
}
// old packets outside buffer should be considered already received
assert!(replay_protection.packet_already_received(0));
// packets received a second time should be flagged already received
for sequence in MAX_SEQUENCE-10..MAX_SEQUENCE {
assert!(replay_protection.packet_already_received(sequence));
}
// jumping ahead to a much higher sequence should be considered not already received
assert!(!replay_protection.packet_already_received(MAX_SEQUENCE + REPLAY_BUFFER_SIZE as u64));
// old packets should be considered already received
for sequence in 0..MAX_SEQUENCE {
assert!(replay_protection.packet_already_received(sequence));
}
}
}
|
{
return true
}
|
conditional_block
|
replay.rs
|
const REPLAY_BUFFER_SIZE: usize = 256;
const EMPTY_ENTRY: u64 = 0xFFFFFFFFFFFFFFFF;
pub struct ReplayProtection {
most_recent_sequence: u64,
received_packet: [u64; REPLAY_BUFFER_SIZE]
}
impl Clone for ReplayProtection {
fn clone(&self) -> ReplayProtection
|
}
impl ReplayProtection {
pub fn new() -> ReplayProtection {
ReplayProtection {
most_recent_sequence: 0,
received_packet: [EMPTY_ENTRY; REPLAY_BUFFER_SIZE]
}
}
pub fn packet_already_received(&mut self, sequence: u64) -> bool {
if sequence & (1 << 63) == (1 << 63) {
return false;
}
if sequence + (REPLAY_BUFFER_SIZE as u64) <= self.most_recent_sequence {
return true
}
if sequence > self.most_recent_sequence {
self.most_recent_sequence = sequence;
}
let index = sequence as usize % REPLAY_BUFFER_SIZE;
if self.received_packet[index] == EMPTY_ENTRY {
self.received_packet[index] = sequence;
return false
}
if self.received_packet[index] >= sequence {
return true
}
self.received_packet[index] = sequence;
false
}
}
#[test]
fn test_replay_protection() {
for _ in 0..2 {
let mut replay_protection = ReplayProtection::new();
assert_eq!(replay_protection.most_recent_sequence, 0);
// sequence numbers with high bit set should be ignored
assert!(!replay_protection.packet_already_received(1<<63));
assert_eq!(replay_protection.most_recent_sequence, 0);
// the first time we receive packets, they should not be already received
const MAX_SEQUENCE: u64 = REPLAY_BUFFER_SIZE as u64 * 4;
for sequence in 0..MAX_SEQUENCE {
assert!(!replay_protection.packet_already_received(sequence));
}
// old packets outside buffer should be considered already received
assert!(replay_protection.packet_already_received(0));
// packets received a second time should be flagged already received
for sequence in MAX_SEQUENCE-10..MAX_SEQUENCE {
assert!(replay_protection.packet_already_received(sequence));
}
// jumping ahead to a much higher sequence should be considered not already received
assert!(!replay_protection.packet_already_received(MAX_SEQUENCE + REPLAY_BUFFER_SIZE as u64));
// old packets should be considered already received
for sequence in 0..MAX_SEQUENCE {
assert!(replay_protection.packet_already_received(sequence));
}
}
}
|
{
ReplayProtection {
most_recent_sequence: self.most_recent_sequence,
received_packet: self.received_packet
}
}
|
identifier_body
|
draw.rs
|
use std::f32::consts::FRAC_PI_2 as QUARTER_TAU;
use glium::texture::{Texture2d, Texture2dDataSource};
use glium::Display;
use glium::Surface as SurfaceTrait;
use ui::{BB, Px};
use ui::color::Color;
use ui::render::{Renderer, Surface, Buffer, XYAndUV};
use ui::text::{FontFace, FontFaces};
pub use glium::glutin::MouseCursor;
pub trait Draw {
fn draw(&self, _cx: &mut DrawCx) {}
}
impl<A, B> Draw for (A, B) where
A: Draw,
B: Draw {
fn draw(&self, cx: &mut DrawCx) {
self.0.draw(cx);
self.1.draw(cx);
}
}
pub struct DrawCx<'a> {
renderer: &'a mut Renderer,
facade: &'a Display,
surface: Surface,
cursor: MouseCursor,
overlay_requested: bool,
overlay_drawing: bool,
inside_overlay: bool
}
impl<'a> DrawCx<'a> {
pub fn new(renderer: &'a mut Renderer, facade: &'a Display, surface: Surface)
-> DrawCx<'a> {
DrawCx {
renderer: renderer,
facade: facade,
surface: surface,
cursor: MouseCursor::Default,
overlay_requested: false,
overlay_drawing: false,
inside_overlay: false
}
}
pub fn fonts(&mut self) -> &mut FontFaces {
&mut self.renderer.fonts
}
pub fn dimensions(&mut self) -> [Px; 2] {
let (w, h) = self.surface.get_dimensions();
[w as Px, h as Px]
}
pub fn draw<T: Draw>(&mut self, x: &T) {
x.draw(self);
if self.overlay_requested {
self.overlay_drawing = true;
x.draw(self);
self.overlay_drawing = false;
self.overlay_requested = false;
}
}
pub fn draw_overlay<F, T>(&mut self, f: F) -> T where F: FnOnce(&mut DrawCx) -> T {
assert!(!self.inside_overlay);
self.inside_overlay = true;
let r = f(self);
self.inside_overlay = false;
self.overlay_requested = true;
r
}
pub fn with_surface<F, T>(&mut self, f: F) -> Option<T> where F: FnOnce(&mut Self) -> T {
if self.inside_overlay == self.overlay_drawing {
Some(f(self))
} else {
None
}
}
pub fn get_cursor(&self) -> MouseCursor {
self.cursor
}
pub fn cursor(&mut self, cursor: MouseCursor) {
self.with_surface(|this| this.cursor = cursor);
}
pub fn clear(&mut self, color: Color) {
self.renderer.clear(&mut self.surface, color);
}
// TODO make DrawCx linear to ensure this method gets called.
pub fn finish(self)
|
pub fn line(&mut self, from: [Px; 2], to: [Px; 2], width: Px, color: Color) {
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
buffer.line(from, to, width);
}));
}
pub fn fill(&mut self, bb: BB<Px>, color: Color/*, corner_radius: Px*/) {
let corner_radius: Px = 0.0;
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
if corner_radius == 0.0 {
buffer.rect(bb);
} else {
let resolution = (QUARTER_TAU * corner_radius).ceil() as u32;
buffer.rect_round(bb, resolution, corner_radius);
}
}));
}
pub fn border(&mut self, bb: BB<Px>, color: Color, border_size: Px, corner_radius: Px) {
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
if corner_radius == 0.0 {
buffer.rect_border(bb, border_size);
} else {
let resolution = (QUARTER_TAU * corner_radius).ceil() as u32;
buffer.rect_border_round(bb, border_size, resolution, corner_radius);
}
}));
}
pub fn textured<'d, D: Texture2dDataSource<'d>>(&mut self, bb: BB<Px>, image: D) {
self.with_surface(|this| {
let texture = Texture2d::new(this.facade, image).unwrap();
let uv = BB::rect(0.0, 1.0, 1.0, -1.0);
this.renderer.textured(&mut this.surface, [1.0, 1.0, 1.0, 1.0], &texture, |buffer| {
buffer.rect(bb.zip(uv).map(|(a, b)| XYAndUV(a, b)))
});
});
}
pub fn text<F: FontFace>(&mut self, font: F, [x, y]: [Px; 2], color: Color, text: &str) {
self.with_surface(|this| {
let (mut x, y) = (x, y + this.fonts().metrics(font).baseline);
// TODO use graphemes and maybe harfbuzz.
for ch in text.chars() {
let glyph = this.fonts().glyph(font, ch).clone();
let texture = &glyph.texture;
let w = texture.get_width();
let h = texture.get_height().unwrap();
let [dx, dy] = glyph.offset;
let xy = BB::rect(x + dx, y + dy, w as Px, h as Px);
let uv = BB::rect(0.0, 1.0, 1.0, -1.0);
this.renderer.textured(&mut this.surface, color, texture, |buffer| {
buffer.rect(xy.zip(uv).map(|(a, b)| XYAndUV(a, b)))
});
x += glyph.advance;
}
});
}
}
|
{
self.surface.finish().unwrap();
}
|
identifier_body
|
draw.rs
|
use std::f32::consts::FRAC_PI_2 as QUARTER_TAU;
use glium::texture::{Texture2d, Texture2dDataSource};
use glium::Display;
use glium::Surface as SurfaceTrait;
use ui::{BB, Px};
use ui::color::Color;
use ui::render::{Renderer, Surface, Buffer, XYAndUV};
use ui::text::{FontFace, FontFaces};
pub use glium::glutin::MouseCursor;
pub trait Draw {
fn draw(&self, _cx: &mut DrawCx) {}
}
impl<A, B> Draw for (A, B) where
A: Draw,
B: Draw {
fn draw(&self, cx: &mut DrawCx) {
self.0.draw(cx);
self.1.draw(cx);
}
}
pub struct DrawCx<'a> {
renderer: &'a mut Renderer,
facade: &'a Display,
surface: Surface,
cursor: MouseCursor,
overlay_requested: bool,
overlay_drawing: bool,
inside_overlay: bool
}
impl<'a> DrawCx<'a> {
pub fn new(renderer: &'a mut Renderer, facade: &'a Display, surface: Surface)
-> DrawCx<'a> {
DrawCx {
renderer: renderer,
facade: facade,
surface: surface,
cursor: MouseCursor::Default,
overlay_requested: false,
overlay_drawing: false,
inside_overlay: false
}
}
pub fn fonts(&mut self) -> &mut FontFaces {
&mut self.renderer.fonts
}
pub fn dimensions(&mut self) -> [Px; 2] {
let (w, h) = self.surface.get_dimensions();
[w as Px, h as Px]
}
pub fn draw<T: Draw>(&mut self, x: &T) {
x.draw(self);
if self.overlay_requested {
self.overlay_drawing = true;
|
self.overlay_drawing = false;
self.overlay_requested = false;
}
}
pub fn draw_overlay<F, T>(&mut self, f: F) -> T where F: FnOnce(&mut DrawCx) -> T {
assert!(!self.inside_overlay);
self.inside_overlay = true;
let r = f(self);
self.inside_overlay = false;
self.overlay_requested = true;
r
}
pub fn with_surface<F, T>(&mut self, f: F) -> Option<T> where F: FnOnce(&mut Self) -> T {
if self.inside_overlay == self.overlay_drawing {
Some(f(self))
} else {
None
}
}
pub fn get_cursor(&self) -> MouseCursor {
self.cursor
}
pub fn cursor(&mut self, cursor: MouseCursor) {
self.with_surface(|this| this.cursor = cursor);
}
pub fn clear(&mut self, color: Color) {
self.renderer.clear(&mut self.surface, color);
}
// TODO make DrawCx linear to ensure this method gets called.
pub fn finish(self) {
self.surface.finish().unwrap();
}
pub fn line(&mut self, from: [Px; 2], to: [Px; 2], width: Px, color: Color) {
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
buffer.line(from, to, width);
}));
}
pub fn fill(&mut self, bb: BB<Px>, color: Color/*, corner_radius: Px*/) {
let corner_radius: Px = 0.0;
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
if corner_radius == 0.0 {
buffer.rect(bb);
} else {
let resolution = (QUARTER_TAU * corner_radius).ceil() as u32;
buffer.rect_round(bb, resolution, corner_radius);
}
}));
}
pub fn border(&mut self, bb: BB<Px>, color: Color, border_size: Px, corner_radius: Px) {
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
if corner_radius == 0.0 {
buffer.rect_border(bb, border_size);
} else {
let resolution = (QUARTER_TAU * corner_radius).ceil() as u32;
buffer.rect_border_round(bb, border_size, resolution, corner_radius);
}
}));
}
pub fn textured<'d, D: Texture2dDataSource<'d>>(&mut self, bb: BB<Px>, image: D) {
self.with_surface(|this| {
let texture = Texture2d::new(this.facade, image).unwrap();
let uv = BB::rect(0.0, 1.0, 1.0, -1.0);
this.renderer.textured(&mut this.surface, [1.0, 1.0, 1.0, 1.0], &texture, |buffer| {
buffer.rect(bb.zip(uv).map(|(a, b)| XYAndUV(a, b)))
});
});
}
pub fn text<F: FontFace>(&mut self, font: F, [x, y]: [Px; 2], color: Color, text: &str) {
self.with_surface(|this| {
let (mut x, y) = (x, y + this.fonts().metrics(font).baseline);
// TODO use graphemes and maybe harfbuzz.
for ch in text.chars() {
let glyph = this.fonts().glyph(font, ch).clone();
let texture = &glyph.texture;
let w = texture.get_width();
let h = texture.get_height().unwrap();
let [dx, dy] = glyph.offset;
let xy = BB::rect(x + dx, y + dy, w as Px, h as Px);
let uv = BB::rect(0.0, 1.0, 1.0, -1.0);
this.renderer.textured(&mut this.surface, color, texture, |buffer| {
buffer.rect(xy.zip(uv).map(|(a, b)| XYAndUV(a, b)))
});
x += glyph.advance;
}
});
}
}
|
x.draw(self);
|
random_line_split
|
draw.rs
|
use std::f32::consts::FRAC_PI_2 as QUARTER_TAU;
use glium::texture::{Texture2d, Texture2dDataSource};
use glium::Display;
use glium::Surface as SurfaceTrait;
use ui::{BB, Px};
use ui::color::Color;
use ui::render::{Renderer, Surface, Buffer, XYAndUV};
use ui::text::{FontFace, FontFaces};
pub use glium::glutin::MouseCursor;
pub trait Draw {
fn draw(&self, _cx: &mut DrawCx) {}
}
impl<A, B> Draw for (A, B) where
A: Draw,
B: Draw {
fn draw(&self, cx: &mut DrawCx) {
self.0.draw(cx);
self.1.draw(cx);
}
}
pub struct DrawCx<'a> {
renderer: &'a mut Renderer,
facade: &'a Display,
surface: Surface,
cursor: MouseCursor,
overlay_requested: bool,
overlay_drawing: bool,
inside_overlay: bool
}
impl<'a> DrawCx<'a> {
pub fn new(renderer: &'a mut Renderer, facade: &'a Display, surface: Surface)
-> DrawCx<'a> {
DrawCx {
renderer: renderer,
facade: facade,
surface: surface,
cursor: MouseCursor::Default,
overlay_requested: false,
overlay_drawing: false,
inside_overlay: false
}
}
pub fn fonts(&mut self) -> &mut FontFaces {
&mut self.renderer.fonts
}
pub fn dimensions(&mut self) -> [Px; 2] {
let (w, h) = self.surface.get_dimensions();
[w as Px, h as Px]
}
pub fn draw<T: Draw>(&mut self, x: &T) {
x.draw(self);
if self.overlay_requested {
self.overlay_drawing = true;
x.draw(self);
self.overlay_drawing = false;
self.overlay_requested = false;
}
}
pub fn draw_overlay<F, T>(&mut self, f: F) -> T where F: FnOnce(&mut DrawCx) -> T {
assert!(!self.inside_overlay);
self.inside_overlay = true;
let r = f(self);
self.inside_overlay = false;
self.overlay_requested = true;
r
}
pub fn with_surface<F, T>(&mut self, f: F) -> Option<T> where F: FnOnce(&mut Self) -> T {
if self.inside_overlay == self.overlay_drawing {
Some(f(self))
} else {
None
}
}
pub fn get_cursor(&self) -> MouseCursor {
self.cursor
}
pub fn
|
(&mut self, cursor: MouseCursor) {
self.with_surface(|this| this.cursor = cursor);
}
pub fn clear(&mut self, color: Color) {
self.renderer.clear(&mut self.surface, color);
}
// TODO make DrawCx linear to ensure this method gets called.
pub fn finish(self) {
self.surface.finish().unwrap();
}
pub fn line(&mut self, from: [Px; 2], to: [Px; 2], width: Px, color: Color) {
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
buffer.line(from, to, width);
}));
}
pub fn fill(&mut self, bb: BB<Px>, color: Color/*, corner_radius: Px*/) {
let corner_radius: Px = 0.0;
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
if corner_radius == 0.0 {
buffer.rect(bb);
} else {
let resolution = (QUARTER_TAU * corner_radius).ceil() as u32;
buffer.rect_round(bb, resolution, corner_radius);
}
}));
}
pub fn border(&mut self, bb: BB<Px>, color: Color, border_size: Px, corner_radius: Px) {
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
if corner_radius == 0.0 {
buffer.rect_border(bb, border_size);
} else {
let resolution = (QUARTER_TAU * corner_radius).ceil() as u32;
buffer.rect_border_round(bb, border_size, resolution, corner_radius);
}
}));
}
pub fn textured<'d, D: Texture2dDataSource<'d>>(&mut self, bb: BB<Px>, image: D) {
self.with_surface(|this| {
let texture = Texture2d::new(this.facade, image).unwrap();
let uv = BB::rect(0.0, 1.0, 1.0, -1.0);
this.renderer.textured(&mut this.surface, [1.0, 1.0, 1.0, 1.0], &texture, |buffer| {
buffer.rect(bb.zip(uv).map(|(a, b)| XYAndUV(a, b)))
});
});
}
pub fn text<F: FontFace>(&mut self, font: F, [x, y]: [Px; 2], color: Color, text: &str) {
self.with_surface(|this| {
let (mut x, y) = (x, y + this.fonts().metrics(font).baseline);
// TODO use graphemes and maybe harfbuzz.
for ch in text.chars() {
let glyph = this.fonts().glyph(font, ch).clone();
let texture = &glyph.texture;
let w = texture.get_width();
let h = texture.get_height().unwrap();
let [dx, dy] = glyph.offset;
let xy = BB::rect(x + dx, y + dy, w as Px, h as Px);
let uv = BB::rect(0.0, 1.0, 1.0, -1.0);
this.renderer.textured(&mut this.surface, color, texture, |buffer| {
buffer.rect(xy.zip(uv).map(|(a, b)| XYAndUV(a, b)))
});
x += glyph.advance;
}
});
}
}
|
cursor
|
identifier_name
|
draw.rs
|
use std::f32::consts::FRAC_PI_2 as QUARTER_TAU;
use glium::texture::{Texture2d, Texture2dDataSource};
use glium::Display;
use glium::Surface as SurfaceTrait;
use ui::{BB, Px};
use ui::color::Color;
use ui::render::{Renderer, Surface, Buffer, XYAndUV};
use ui::text::{FontFace, FontFaces};
pub use glium::glutin::MouseCursor;
pub trait Draw {
fn draw(&self, _cx: &mut DrawCx) {}
}
impl<A, B> Draw for (A, B) where
A: Draw,
B: Draw {
fn draw(&self, cx: &mut DrawCx) {
self.0.draw(cx);
self.1.draw(cx);
}
}
pub struct DrawCx<'a> {
renderer: &'a mut Renderer,
facade: &'a Display,
surface: Surface,
cursor: MouseCursor,
overlay_requested: bool,
overlay_drawing: bool,
inside_overlay: bool
}
impl<'a> DrawCx<'a> {
pub fn new(renderer: &'a mut Renderer, facade: &'a Display, surface: Surface)
-> DrawCx<'a> {
DrawCx {
renderer: renderer,
facade: facade,
surface: surface,
cursor: MouseCursor::Default,
overlay_requested: false,
overlay_drawing: false,
inside_overlay: false
}
}
pub fn fonts(&mut self) -> &mut FontFaces {
&mut self.renderer.fonts
}
pub fn dimensions(&mut self) -> [Px; 2] {
let (w, h) = self.surface.get_dimensions();
[w as Px, h as Px]
}
pub fn draw<T: Draw>(&mut self, x: &T) {
x.draw(self);
if self.overlay_requested {
self.overlay_drawing = true;
x.draw(self);
self.overlay_drawing = false;
self.overlay_requested = false;
}
}
pub fn draw_overlay<F, T>(&mut self, f: F) -> T where F: FnOnce(&mut DrawCx) -> T {
assert!(!self.inside_overlay);
self.inside_overlay = true;
let r = f(self);
self.inside_overlay = false;
self.overlay_requested = true;
r
}
pub fn with_surface<F, T>(&mut self, f: F) -> Option<T> where F: FnOnce(&mut Self) -> T {
if self.inside_overlay == self.overlay_drawing {
Some(f(self))
} else {
None
}
}
pub fn get_cursor(&self) -> MouseCursor {
self.cursor
}
pub fn cursor(&mut self, cursor: MouseCursor) {
self.with_surface(|this| this.cursor = cursor);
}
pub fn clear(&mut self, color: Color) {
self.renderer.clear(&mut self.surface, color);
}
// TODO make DrawCx linear to ensure this method gets called.
pub fn finish(self) {
self.surface.finish().unwrap();
}
pub fn line(&mut self, from: [Px; 2], to: [Px; 2], width: Px, color: Color) {
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
buffer.line(from, to, width);
}));
}
pub fn fill(&mut self, bb: BB<Px>, color: Color/*, corner_radius: Px*/) {
let corner_radius: Px = 0.0;
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
if corner_radius == 0.0 {
buffer.rect(bb);
} else
|
}));
}
pub fn border(&mut self, bb: BB<Px>, color: Color, border_size: Px, corner_radius: Px) {
self.with_surface(|this| this.renderer.colored(&mut this.surface, color, |buffer| {
if corner_radius == 0.0 {
buffer.rect_border(bb, border_size);
} else {
let resolution = (QUARTER_TAU * corner_radius).ceil() as u32;
buffer.rect_border_round(bb, border_size, resolution, corner_radius);
}
}));
}
pub fn textured<'d, D: Texture2dDataSource<'d>>(&mut self, bb: BB<Px>, image: D) {
self.with_surface(|this| {
let texture = Texture2d::new(this.facade, image).unwrap();
let uv = BB::rect(0.0, 1.0, 1.0, -1.0);
this.renderer.textured(&mut this.surface, [1.0, 1.0, 1.0, 1.0], &texture, |buffer| {
buffer.rect(bb.zip(uv).map(|(a, b)| XYAndUV(a, b)))
});
});
}
pub fn text<F: FontFace>(&mut self, font: F, [x, y]: [Px; 2], color: Color, text: &str) {
self.with_surface(|this| {
let (mut x, y) = (x, y + this.fonts().metrics(font).baseline);
// TODO use graphemes and maybe harfbuzz.
for ch in text.chars() {
let glyph = this.fonts().glyph(font, ch).clone();
let texture = &glyph.texture;
let w = texture.get_width();
let h = texture.get_height().unwrap();
let [dx, dy] = glyph.offset;
let xy = BB::rect(x + dx, y + dy, w as Px, h as Px);
let uv = BB::rect(0.0, 1.0, 1.0, -1.0);
this.renderer.textured(&mut this.surface, color, texture, |buffer| {
buffer.rect(xy.zip(uv).map(|(a, b)| XYAndUV(a, b)))
});
x += glyph.advance;
}
});
}
}
|
{
let resolution = (QUARTER_TAU * corner_radius).ceil() as u32;
buffer.rect_round(bb, resolution, corner_radius);
}
|
conditional_block
|
lib.rs
|
// This file is part of Grust, GObject introspection bindings for Rust
//
// Copyright (C) 2013-2015 Mikhail Zabaluev <[email protected]>
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
|
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#![crate_name = "grust"]
#![crate_type = "lib"]
#![allow(unstable_features)]
#![feature(unsafe_no_drop_flag)]
extern crate libc;
extern crate gtypes;
extern crate glib_2_0_sys as glib;
extern crate gobject_2_0_sys as gobject;
#[macro_use]
mod macros;
pub mod boxed;
pub mod enumeration;
pub mod error;
pub mod flags;
pub mod gstr;
pub mod gtype;
pub mod mainloop;
pub mod object;
pub mod quark;
pub mod refcount;
pub mod types;
pub mod util;
pub mod value;
pub mod wrap;
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
random_line_split
|
lib.rs
|
//
// Copyright 2015 Richard W. Branson
// Copyright 2015 The Rust Project Developers.
//
// See LICENSE file at top level directory.
//
extern crate libc;
use std::error::Error;
use std::io;
use std::fmt;
use libc::{c_void, c_int};
use std::ops::Drop;
use std::ptr;
use self::MemoryMapKind::*;
use self::MapOption::*;
use self::MapError::*;
#[cfg(windows)]
use std::mem;
fn errno() -> i32 {
io::Error::last_os_error().raw_os_error().unwrap_or(-1)
}
#[cfg(unix)]
fn page_size() -> usize {
unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
}
#[cfg(windows)]
fn page_size() -> usize {
unsafe {
let mut info = mem::zeroed();
libc::GetSystemInfo(&mut info);
return info.dwPageSize as usize;
}
}
/// A memory mapped file or chunk of memory. This is a very system-specific
/// interface to the OS's memory mapping facilities (`mmap` on POSIX,
/// `VirtualAlloc`/`CreateFileMapping` on Windows). It makes no attempt at
/// abstracting platform differences, besides in error values returned. Consider
/// yourself warned.
///
/// The memory map is released (unmapped) when the destructor is run, so don't
/// let it leave scope by accident if you want it to stick around.
pub struct MemoryMap {
data: *mut u8,
len: usize,
kind: MemoryMapKind,
}
/// Type of memory map
#[allow(raw_pointer_derive)]
#[derive(Copy, Clone)]
pub enum MemoryMapKind {
/// Virtual memory map. Usually used to change the permissions of a given
/// chunk of memory. Corresponds to `VirtualAlloc` on Windows.
MapFile(*const u8),
/// Virtual memory map. Usually used to change the permissions of a given
/// chunk of memory, or for allocation. Corresponds to `VirtualAlloc` on
/// Windows.
MapVirtual
}
/// Options the memory map is created with
#[allow(raw_pointer_derive)]
#[derive(Copy, Clone)]
pub enum MapOption {
/// The memory should be readable
MapReadable,
/// The memory should be writable
MapWritable,
/// The memory should be executable
MapExecutable,
/// Create a map for a specific address range. Corresponds to `MAP_FIXED` on
/// POSIX.
MapAddr(*const u8),
/// Create a memory mapping for a file with a given HANDLE.
#[cfg(windows)]
MapFd(libc::HANDLE),
/// Create a memory mapping for a file with a given fd.
#[cfg(not(windows))]
MapFd(c_int),
/// When using `MapFd`, the start of the map is `usize` bytes from the start
/// of the file.
MapOffset(usize),
/// On POSIX, this can be used to specify the default flags passed to
/// `mmap`. By default it uses `MAP_PRIVATE` and, if not using `MapFd`,
/// `MAP_ANON`. This will override both of those. This is platform-specific
/// (the exact values used) and ignored on Windows.
MapNonStandardFlags(c_int),
}
/// Possible errors when creating a map.
#[derive(Copy, Clone, Debug)]
pub enum MapError {
/// # The following are POSIX-specific
///
/// fd was not open for reading or, if using `MapWritable`, was not open for
/// writing.
ErrFdNotAvail,
/// fd was not valid
ErrInvalidFd,
/// Either the address given by `MapAddr` or offset given by `MapOffset` was
/// not a multiple of `MemoryMap::granularity` (unaligned to page size).
ErrUnaligned,
/// With `MapFd`, the fd does not support mapping.
ErrNoMapSupport,
/// If using `MapAddr`, the address + `min_len` was outside of the process's
/// address space. If using `MapFd`, the target of the fd didn't have enough
/// resources to fulfill the request.
ErrNoMem,
/// A zero-length map was requested. This is invalid according to
/// [POSIX](http://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html).
/// Not all platforms obey this, but this wrapper does.
ErrZeroLength,
/// Unrecognized error. The inner value is the unrecognized errno.
ErrUnknown(isize),
/// # The following are Windows-specific
///
/// Unsupported combination of protection flags
/// (`MapReadable`/`MapWritable`/`MapExecutable`).
ErrUnsupProt,
/// When using `MapFd`, `MapOffset` was given (Windows does not support this
/// at all)
ErrUnsupOffset,
/// When using `MapFd`, there was already a mapping to the file.
ErrAlreadyExists,
/// Unrecognized error from `VirtualAlloc`. The inner value is the return
/// value of GetLastError.
ErrVirtualAlloc(i32),
/// Unrecognized error from `CreateFileMapping`. The inner value is the
/// return value of `GetLastError`.
ErrCreateFileMappingW(i32),
/// Unrecognized error from `MapViewOfFile`. The inner value is the return
/// value of `GetLastError`.
ErrMapViewOfFile(i32)
}
impl fmt::Display for MapError {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
let str = match *self {
ErrFdNotAvail => "fd not available for reading or writing",
ErrInvalidFd => "Invalid fd",
ErrUnaligned => {
"Unaligned address, invalid flags, negative length or \
unaligned offset"
}
ErrNoMapSupport => "File doesn't support mapping",
ErrNoMem => "Invalid address, or not enough available memory",
ErrUnsupProt => "Protection mode unsupported",
ErrUnsupOffset => "Offset in virtual memory mode is unsupported",
ErrAlreadyExists => "File mapping for specified file already exists",
ErrZeroLength => "Zero-length mapping not allowed",
ErrUnknown(code) => {
return write!(out, "Unknown error = {}", code)
},
ErrVirtualAlloc(code) => {
return write!(out, "VirtualAlloc failure = {}", code)
},
ErrCreateFileMappingW(code) => {
return write!(out, "CreateFileMappingW failure = {}", code)
},
ErrMapViewOfFile(code) => {
return write!(out, "MapViewOfFile failure = {}", code)
}
};
write!(out, "{}", str)
}
}
impl Error for MapError {
fn description(&self) -> &str { "memory map error" }
}
// Round up `from` to be divisible by `to`
fn round_up(from: usize, to: usize) -> usize {
let r = if from % to == 0 {
from
} else {
from + to - (from % to)
};
if r == 0 {
to
} else {
r
}
}
#[cfg(unix)]
impl MemoryMap {
/// Create a new mapping with the given `options`, at least `min_len` bytes
/// long. `min_len` must be greater than zero; see the note on
/// `ErrZeroLength`.
pub fn new(min_len: usize, options: &[MapOption]) -> Result<MemoryMap, MapError> {
use libc::off_t;
if min_len == 0 {
return Err(ErrZeroLength)
}
let mut addr: *const u8 = ptr::null();
let mut prot = 0;
let mut flags = libc::MAP_PRIVATE;
let mut fd = -1;
let mut offset = 0;
let mut custom_flags = false;
let len = round_up(min_len, page_size());
for &o in options {
match o {
MapReadable => { prot |= libc::PROT_READ; },
MapWritable => { prot |= libc::PROT_WRITE; },
MapExecutable => { prot |= libc::PROT_EXEC; },
MapAddr(addr_) => {
flags |= libc::MAP_FIXED;
addr = addr_;
},
MapFd(fd_) => {
flags |= libc::MAP_FILE;
fd = fd_;
},
MapOffset(offset_) => { offset = offset_ as off_t; },
MapNonStandardFlags(f) => { custom_flags = true; flags = f },
}
}
if fd == -1 && !custom_flags { flags |= libc::MAP_ANON; }
let r = unsafe {
libc::mmap(addr as *mut c_void, len as libc::size_t, prot, flags,
fd, offset)
};
if r == libc::MAP_FAILED {
Err(match errno() {
libc::EACCES => ErrFdNotAvail,
libc::EBADF => ErrInvalidFd,
libc::EINVAL => ErrUnaligned,
libc::ENODEV => ErrNoMapSupport,
libc::ENOMEM => ErrNoMem,
code => ErrUnknown(code as isize)
})
} else {
Ok(MemoryMap {
data: r as *mut u8,
len: len,
kind: if fd == -1 {
MapVirtual
} else {
MapFile(ptr::null())
}
})
}
}
/// Granularity that the offset or address must be for `MapOffset` and
/// `MapAddr` respectively.
pub fn granularity() -> usize {
page_size()
}
}
#[cfg(unix)]
impl Drop for MemoryMap {
/// Unmap the mapping. Panics the task if `munmap` panics.
fn drop(&mut self) {
if self.len == 0 { /* workaround for dummy_stack */ return; }
unsafe {
// `munmap` only panics due to logic errors
libc::munmap(self.data as *mut c_void, self.len as libc::size_t);
}
}
}
#[cfg(windows)]
impl MemoryMap {
/// Create a new mapping with the given `options`, at least `min_len` bytes long.
#[allow(non_snake_case)]
pub fn new(min_len: usize, options: &[MapOption]) -> Result<MemoryMap, MapError> {
use libc::types::os::arch::extra::{LPVOID, DWORD, SIZE_T};
let mut lpAddress: LPVOID = ptr::null_mut();
let mut readable = false;
let mut writable = false;
let mut executable = false;
let mut handle = None;
let mut offset: usize = 0;
let len = round_up(min_len, page_size());
for &o in options {
match o {
MapReadable => { readable = true; },
MapWritable => { writable = true; },
MapExecutable => { executable = true; }
MapAddr(addr_) => { lpAddress = addr_ as LPVOID; },
MapFd(handle_) => { handle = Some(handle_); },
MapOffset(offset_) =>
|
,
MapNonStandardFlags(..) => {}
}
}
let flProtect = match (executable, readable, writable) {
(false, false, false) if handle.is_none() => libc::PAGE_NOACCESS,
(false, true, false) => libc::PAGE_READONLY,
(false, true, true) => libc::PAGE_READWRITE,
(true, false, false) if handle.is_none() => libc::PAGE_EXECUTE,
(true, true, false) => libc::PAGE_EXECUTE_READ,
(true, true, true) => libc::PAGE_EXECUTE_READWRITE,
_ => return Err(ErrUnsupProt)
};
if let Some(handle) = handle {
let dwDesiredAccess = match (executable, readable, writable) {
(false, true, false) => libc::FILE_MAP_READ,
(false, true, true) => libc::FILE_MAP_WRITE,
(true, true, false) => libc::FILE_MAP_READ | libc::FILE_MAP_EXECUTE,
(true, true, true) => libc::FILE_MAP_WRITE | libc::FILE_MAP_EXECUTE,
_ => return Err(ErrUnsupProt) // Actually, because of the check above,
// we should never get here.
};
unsafe {
let hFile = handle;
let mapping = libc::CreateFileMappingW(hFile,
ptr::null_mut(),
flProtect,
0,
0,
ptr::null());
if mapping == ptr::null_mut() {
return Err(ErrCreateFileMappingW(errno()));
}
if errno() as c_int == libc::ERROR_ALREADY_EXISTS {
return Err(ErrAlreadyExists);
}
let r = libc::MapViewOfFile(mapping,
dwDesiredAccess,
((offset as u64) >> 32) as DWORD,
(offset & 0xffff_ffff) as DWORD,
0);
match r as usize {
0 => Err(ErrMapViewOfFile(errno())),
_ => Ok(MemoryMap {
data: r as *mut u8,
len: len,
kind: MapFile(mapping as *const u8)
})
}
}
} else {
if offset != 0 {
return Err(ErrUnsupOffset);
}
let r = unsafe {
libc::VirtualAlloc(lpAddress,
len as SIZE_T,
libc::MEM_COMMIT | libc::MEM_RESERVE,
flProtect)
};
match r as usize {
0 => Err(ErrVirtualAlloc(errno())),
_ => Ok(MemoryMap {
data: r as *mut u8,
len: len,
kind: MapVirtual
})
}
}
}
/// Granularity of MapAddr() and MapOffset() parameter values.
/// This may be greater than the value returned by page_size().
pub fn granularity() -> usize {
use std::mem;
unsafe {
let mut info = mem::zeroed();
libc::GetSystemInfo(&mut info);
return info.dwAllocationGranularity as usize;
}
}
}
#[cfg(windows)]
impl Drop for MemoryMap {
/// Unmap the mapping. Panics the task if any of `VirtualFree`,
/// `UnmapViewOfFile`, or `CloseHandle` fail.
fn drop(&mut self) {
use libc::types::os::arch::extra::{LPCVOID, HANDLE};
use libc::consts::os::extra::FALSE;
if self.len == 0 { return }
unsafe {
match self.kind {
MapVirtual => {
if libc::VirtualFree(self.data as *mut c_void, 0,
libc::MEM_RELEASE) == 0 {
println!("VirtualFree failed: {}", errno());
}
},
MapFile(mapping) => {
if libc::UnmapViewOfFile(self.data as LPCVOID) == FALSE {
println!("UnmapViewOfFile failed: {}", errno());
}
if libc::CloseHandle(mapping as HANDLE) == FALSE {
println!("CloseHandle failed: {}", errno());
}
}
}
}
}
}
impl MemoryMap {
/// Returns the pointer to the memory created or modified by this map.
#[inline(always)]
pub fn data(&self) -> *mut u8 { self.data }
/// Returns the number of bytes this map applies to.
#[inline(always)]
pub fn len(&self) -> usize { self.len }
/// Returns the type of mapping this represents.
pub fn kind(&self) -> MemoryMapKind { self.kind }
}
#[cfg(test)]
mod tests {
extern crate libc;
extern crate tempdir;
use super::{MemoryMap, MapOption};
#[test]
fn memory_map_rw() {
let chunk = match MemoryMap::new(16, &[
MapOption::MapReadable,
MapOption::MapWritable
]) {
Ok(chunk) => chunk,
Err(msg) => panic!("{:?}", msg)
};
assert!(chunk.len >= 16);
unsafe {
*chunk.data = 0xBE;
assert!(*chunk.data == 0xBE);
}
}
#[test]
fn memory_map_file() {
use std::fs;
use std::io::{Seek, SeekFrom, Write};
#[cfg(unix)]
fn get_fd(file: &fs::File) -> libc::c_int {
use std::os::unix::io::AsRawFd;
file.as_raw_fd()
}
#[cfg(windows)]
fn get_fd(file: &fs::File) -> libc::HANDLE {
use std::os::windows::io::AsRawHandle;
file.as_raw_handle() as libc::HANDLE
}
let tmpdir = tempdir::TempDir::new("").unwrap();
let mut path = tmpdir.path().to_path_buf();
path.push("mmap_file.tmp");
let size = MemoryMap::granularity() * 2;
let mut file = fs::OpenOptions::new()
.create(true)
.read(true)
.write(true)
.open(&path)
.unwrap();
file.seek(SeekFrom::Start(size as u64)).unwrap();
file.write(b"\0").unwrap();
let fd = get_fd(&file);
let chunk = MemoryMap::new(size / 2, &[
MapOption::MapReadable,
MapOption::MapWritable,
MapOption::MapFd(fd),
MapOption::MapOffset(size / 2)
]).unwrap();
assert!(chunk.len > 0);
unsafe {
*chunk.data = 0xbe;
assert!(*chunk.data == 0xbe);
}
drop(chunk);
fs::remove_file(&path).unwrap();
}
}
|
{ offset = offset_; }
|
conditional_block
|
lib.rs
|
//
// Copyright 2015 Richard W. Branson
// Copyright 2015 The Rust Project Developers.
//
// See LICENSE file at top level directory.
//
extern crate libc;
use std::error::Error;
use std::io;
use std::fmt;
use libc::{c_void, c_int};
use std::ops::Drop;
use std::ptr;
use self::MemoryMapKind::*;
use self::MapOption::*;
use self::MapError::*;
#[cfg(windows)]
use std::mem;
fn errno() -> i32 {
io::Error::last_os_error().raw_os_error().unwrap_or(-1)
}
#[cfg(unix)]
fn
|
() -> usize {
unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
}
#[cfg(windows)]
fn page_size() -> usize {
unsafe {
let mut info = mem::zeroed();
libc::GetSystemInfo(&mut info);
return info.dwPageSize as usize;
}
}
/// A memory mapped file or chunk of memory. This is a very system-specific
/// interface to the OS's memory mapping facilities (`mmap` on POSIX,
/// `VirtualAlloc`/`CreateFileMapping` on Windows). It makes no attempt at
/// abstracting platform differences, besides in error values returned. Consider
/// yourself warned.
///
/// The memory map is released (unmapped) when the destructor is run, so don't
/// let it leave scope by accident if you want it to stick around.
pub struct MemoryMap {
data: *mut u8,
len: usize,
kind: MemoryMapKind,
}
/// Type of memory map
#[allow(raw_pointer_derive)]
#[derive(Copy, Clone)]
pub enum MemoryMapKind {
/// Virtual memory map. Usually used to change the permissions of a given
/// chunk of memory. Corresponds to `VirtualAlloc` on Windows.
MapFile(*const u8),
/// Virtual memory map. Usually used to change the permissions of a given
/// chunk of memory, or for allocation. Corresponds to `VirtualAlloc` on
/// Windows.
MapVirtual
}
/// Options the memory map is created with
#[allow(raw_pointer_derive)]
#[derive(Copy, Clone)]
pub enum MapOption {
/// The memory should be readable
MapReadable,
/// The memory should be writable
MapWritable,
/// The memory should be executable
MapExecutable,
/// Create a map for a specific address range. Corresponds to `MAP_FIXED` on
/// POSIX.
MapAddr(*const u8),
/// Create a memory mapping for a file with a given HANDLE.
#[cfg(windows)]
MapFd(libc::HANDLE),
/// Create a memory mapping for a file with a given fd.
#[cfg(not(windows))]
MapFd(c_int),
/// When using `MapFd`, the start of the map is `usize` bytes from the start
/// of the file.
MapOffset(usize),
/// On POSIX, this can be used to specify the default flags passed to
/// `mmap`. By default it uses `MAP_PRIVATE` and, if not using `MapFd`,
/// `MAP_ANON`. This will override both of those. This is platform-specific
/// (the exact values used) and ignored on Windows.
MapNonStandardFlags(c_int),
}
/// Possible errors when creating a map.
#[derive(Copy, Clone, Debug)]
pub enum MapError {
/// # The following are POSIX-specific
///
/// fd was not open for reading or, if using `MapWritable`, was not open for
/// writing.
ErrFdNotAvail,
/// fd was not valid
ErrInvalidFd,
/// Either the address given by `MapAddr` or offset given by `MapOffset` was
/// not a multiple of `MemoryMap::granularity` (unaligned to page size).
ErrUnaligned,
/// With `MapFd`, the fd does not support mapping.
ErrNoMapSupport,
/// If using `MapAddr`, the address + `min_len` was outside of the process's
/// address space. If using `MapFd`, the target of the fd didn't have enough
/// resources to fulfill the request.
ErrNoMem,
/// A zero-length map was requested. This is invalid according to
/// [POSIX](http://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html).
/// Not all platforms obey this, but this wrapper does.
ErrZeroLength,
/// Unrecognized error. The inner value is the unrecognized errno.
ErrUnknown(isize),
/// # The following are Windows-specific
///
/// Unsupported combination of protection flags
/// (`MapReadable`/`MapWritable`/`MapExecutable`).
ErrUnsupProt,
/// When using `MapFd`, `MapOffset` was given (Windows does not support this
/// at all)
ErrUnsupOffset,
/// When using `MapFd`, there was already a mapping to the file.
ErrAlreadyExists,
/// Unrecognized error from `VirtualAlloc`. The inner value is the return
/// value of GetLastError.
ErrVirtualAlloc(i32),
/// Unrecognized error from `CreateFileMapping`. The inner value is the
/// return value of `GetLastError`.
ErrCreateFileMappingW(i32),
/// Unrecognized error from `MapViewOfFile`. The inner value is the return
/// value of `GetLastError`.
ErrMapViewOfFile(i32)
}
impl fmt::Display for MapError {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
let str = match *self {
ErrFdNotAvail => "fd not available for reading or writing",
ErrInvalidFd => "Invalid fd",
ErrUnaligned => {
"Unaligned address, invalid flags, negative length or \
unaligned offset"
}
ErrNoMapSupport => "File doesn't support mapping",
ErrNoMem => "Invalid address, or not enough available memory",
ErrUnsupProt => "Protection mode unsupported",
ErrUnsupOffset => "Offset in virtual memory mode is unsupported",
ErrAlreadyExists => "File mapping for specified file already exists",
ErrZeroLength => "Zero-length mapping not allowed",
ErrUnknown(code) => {
return write!(out, "Unknown error = {}", code)
},
ErrVirtualAlloc(code) => {
return write!(out, "VirtualAlloc failure = {}", code)
},
ErrCreateFileMappingW(code) => {
return write!(out, "CreateFileMappingW failure = {}", code)
},
ErrMapViewOfFile(code) => {
return write!(out, "MapViewOfFile failure = {}", code)
}
};
write!(out, "{}", str)
}
}
impl Error for MapError {
fn description(&self) -> &str { "memory map error" }
}
// Round up `from` to be divisible by `to`
fn round_up(from: usize, to: usize) -> usize {
let r = if from % to == 0 {
from
} else {
from + to - (from % to)
};
if r == 0 {
to
} else {
r
}
}
#[cfg(unix)]
impl MemoryMap {
/// Create a new mapping with the given `options`, at least `min_len` bytes
/// long. `min_len` must be greater than zero; see the note on
/// `ErrZeroLength`.
pub fn new(min_len: usize, options: &[MapOption]) -> Result<MemoryMap, MapError> {
use libc::off_t;
if min_len == 0 {
return Err(ErrZeroLength)
}
let mut addr: *const u8 = ptr::null();
let mut prot = 0;
let mut flags = libc::MAP_PRIVATE;
let mut fd = -1;
let mut offset = 0;
let mut custom_flags = false;
let len = round_up(min_len, page_size());
for &o in options {
match o {
MapReadable => { prot |= libc::PROT_READ; },
MapWritable => { prot |= libc::PROT_WRITE; },
MapExecutable => { prot |= libc::PROT_EXEC; },
MapAddr(addr_) => {
flags |= libc::MAP_FIXED;
addr = addr_;
},
MapFd(fd_) => {
flags |= libc::MAP_FILE;
fd = fd_;
},
MapOffset(offset_) => { offset = offset_ as off_t; },
MapNonStandardFlags(f) => { custom_flags = true; flags = f },
}
}
        if fd == -1 && !custom_flags { flags |= libc::MAP_ANON; }
let r = unsafe {
libc::mmap(addr as *mut c_void, len as libc::size_t, prot, flags,
fd, offset)
};
if r == libc::MAP_FAILED {
Err(match errno() {
libc::EACCES => ErrFdNotAvail,
libc::EBADF => ErrInvalidFd,
libc::EINVAL => ErrUnaligned,
libc::ENODEV => ErrNoMapSupport,
libc::ENOMEM => ErrNoMem,
code => ErrUnknown(code as isize)
})
} else {
Ok(MemoryMap {
data: r as *mut u8,
len: len,
kind: if fd == -1 {
MapVirtual
} else {
MapFile(ptr::null())
}
})
}
}
/// Granularity that the offset or address must be for `MapOffset` and
/// `MapAddr` respectively.
pub fn granularity() -> usize {
page_size()
}
}
#[cfg(unix)]
impl Drop for MemoryMap {
    /// Unmap the mapping. Panics the task if `munmap` fails.
fn drop(&mut self) {
if self.len == 0 { /* workaround for dummy_stack */ return; }
unsafe {
            // `munmap` only fails due to logic errors
libc::munmap(self.data as *mut c_void, self.len as libc::size_t);
}
}
}
#[cfg(windows)]
impl MemoryMap {
/// Create a new mapping with the given `options`, at least `min_len` bytes long.
#[allow(non_snake_case)]
pub fn new(min_len: usize, options: &[MapOption]) -> Result<MemoryMap, MapError> {
use libc::types::os::arch::extra::{LPVOID, DWORD, SIZE_T};
let mut lpAddress: LPVOID = ptr::null_mut();
let mut readable = false;
let mut writable = false;
let mut executable = false;
let mut handle = None;
let mut offset: usize = 0;
let len = round_up(min_len, page_size());
for &o in options {
match o {
MapReadable => { readable = true; },
MapWritable => { writable = true; },
MapExecutable => { executable = true; }
MapAddr(addr_) => { lpAddress = addr_ as LPVOID; },
MapFd(handle_) => { handle = Some(handle_); },
MapOffset(offset_) => { offset = offset_; },
MapNonStandardFlags(..) => {}
}
}
let flProtect = match (executable, readable, writable) {
(false, false, false) if handle.is_none() => libc::PAGE_NOACCESS,
(false, true, false) => libc::PAGE_READONLY,
(false, true, true) => libc::PAGE_READWRITE,
(true, false, false) if handle.is_none() => libc::PAGE_EXECUTE,
(true, true, false) => libc::PAGE_EXECUTE_READ,
(true, true, true) => libc::PAGE_EXECUTE_READWRITE,
_ => return Err(ErrUnsupProt)
};
if let Some(handle) = handle {
let dwDesiredAccess = match (executable, readable, writable) {
(false, true, false) => libc::FILE_MAP_READ,
(false, true, true) => libc::FILE_MAP_WRITE,
(true, true, false) => libc::FILE_MAP_READ | libc::FILE_MAP_EXECUTE,
(true, true, true) => libc::FILE_MAP_WRITE | libc::FILE_MAP_EXECUTE,
_ => return Err(ErrUnsupProt) // Actually, because of the check above,
// we should never get here.
};
unsafe {
let hFile = handle;
let mapping = libc::CreateFileMappingW(hFile,
ptr::null_mut(),
flProtect,
0,
0,
ptr::null());
if mapping == ptr::null_mut() {
return Err(ErrCreateFileMappingW(errno()));
}
if errno() as c_int == libc::ERROR_ALREADY_EXISTS {
return Err(ErrAlreadyExists);
}
let r = libc::MapViewOfFile(mapping,
dwDesiredAccess,
                                            ((offset as u64) >> 32) as DWORD,
(offset & 0xffff_ffff) as DWORD,
0);
match r as usize {
0 => Err(ErrMapViewOfFile(errno())),
_ => Ok(MemoryMap {
data: r as *mut u8,
len: len,
kind: MapFile(mapping as *const u8)
})
}
}
} else {
            if offset != 0 {
return Err(ErrUnsupOffset);
}
let r = unsafe {
libc::VirtualAlloc(lpAddress,
len as SIZE_T,
libc::MEM_COMMIT | libc::MEM_RESERVE,
flProtect)
};
match r as usize {
0 => Err(ErrVirtualAlloc(errno())),
_ => Ok(MemoryMap {
data: r as *mut u8,
len: len,
kind: MapVirtual
})
}
}
}
/// Granularity of MapAddr() and MapOffset() parameter values.
/// This may be greater than the value returned by page_size().
pub fn granularity() -> usize {
use std::mem;
unsafe {
let mut info = mem::zeroed();
libc::GetSystemInfo(&mut info);
return info.dwAllocationGranularity as usize;
}
}
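    // Added note (not in the original source): on stock Windows the allocation
    // granularity reported here is typically 64 KiB even though `page_size()`
    // reports 4 KiB, so `MapAddr`/`MapOffset` values need coarser alignment
    // than a single page.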
}
#[cfg(windows)]
impl Drop for MemoryMap {
/// Unmap the mapping. Panics the task if any of `VirtualFree`,
/// `UnmapViewOfFile`, or `CloseHandle` fail.
fn drop(&mut self) {
use libc::types::os::arch::extra::{LPCVOID, HANDLE};
use libc::consts::os::extra::FALSE;
if self.len == 0 { return }
unsafe {
match self.kind {
MapVirtual => {
if libc::VirtualFree(self.data as *mut c_void, 0,
libc::MEM_RELEASE) == 0 {
println!("VirtualFree failed: {}", errno());
}
},
MapFile(mapping) => {
if libc::UnmapViewOfFile(self.data as LPCVOID) == FALSE {
println!("UnmapViewOfFile failed: {}", errno());
}
if libc::CloseHandle(mapping as HANDLE) == FALSE {
println!("CloseHandle failed: {}", errno());
}
}
}
}
}
}
impl MemoryMap {
/// Returns the pointer to the memory created or modified by this map.
#[inline(always)]
pub fn data(&self) -> *mut u8 { self.data }
/// Returns the number of bytes this map applies to.
#[inline(always)]
pub fn len(&self) -> usize { self.len }
/// Returns the type of mapping this represents.
pub fn kind(&self) -> MemoryMapKind { self.kind }
}
#[cfg(test)]
mod tests {
extern crate libc;
extern crate tempdir;
use super::{MemoryMap, MapOption};
#[test]
fn memory_map_rw() {
let chunk = match MemoryMap::new(16, &[
MapOption::MapReadable,
MapOption::MapWritable
]) {
Ok(chunk) => chunk,
Err(msg) => panic!("{:?}", msg)
};
assert!(chunk.len >= 16);
unsafe {
*chunk.data = 0xBE;
assert!(*chunk.data == 0xBE);
}
}
#[test]
fn memory_map_file() {
use std::fs;
use std::io::{Seek, SeekFrom, Write};
#[cfg(unix)]
fn get_fd(file: &fs::File) -> libc::c_int {
use std::os::unix::io::AsRawFd;
file.as_raw_fd()
}
#[cfg(windows)]
fn get_fd(file: &fs::File) -> libc::HANDLE {
use std::os::windows::io::AsRawHandle;
file.as_raw_handle() as libc::HANDLE
}
let tmpdir = tempdir::TempDir::new("").unwrap();
let mut path = tmpdir.path().to_path_buf();
path.push("mmap_file.tmp");
let size = MemoryMap::granularity() * 2;
let mut file = fs::OpenOptions::new()
.create(true)
.read(true)
.write(true)
.open(&path)
.unwrap();
file.seek(SeekFrom::Start(size as u64)).unwrap();
file.write(b"\0").unwrap();
let fd = get_fd(&file);
let chunk = MemoryMap::new(size / 2, &[
MapOption::MapReadable,
MapOption::MapWritable,
MapOption::MapFd(fd),
MapOption::MapOffset(size / 2)
]).unwrap();
assert!(chunk.len > 0);
unsafe {
*chunk.data = 0xbe;
assert!(*chunk.data == 0xbe);
}
drop(chunk);
fs::remove_file(&path).unwrap();
}
}
|
page_size
|
identifier_name
|
lib.rs
|
//
// Copyright 2015 Richard W. Branson
// Copyright 2015 The Rust Project Developers.
//
// See LICENSE file at top level directory.
//
extern crate libc;
use std::error::Error;
use std::io;
use std::fmt;
use libc::{c_void, c_int};
use std::ops::Drop;
use std::ptr;
use self::MemoryMapKind::*;
use self::MapOption::*;
use self::MapError::*;
#[cfg(windows)]
use std::mem;
fn errno() -> i32 {
io::Error::last_os_error().raw_os_error().unwrap_or(-1)
}
#[cfg(unix)]
fn page_size() -> usize {
unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
}
#[cfg(windows)]
fn page_size() -> usize {
unsafe {
let mut info = mem::zeroed();
libc::GetSystemInfo(&mut info);
return info.dwPageSize as usize;
}
}
/// A memory mapped file or chunk of memory. This is a very system-specific
/// interface to the OS's memory mapping facilities (`mmap` on POSIX,
/// `VirtualAlloc`/`CreateFileMapping` on Windows). It makes no attempt at
/// abstracting platform differences, besides in error values returned. Consider
/// yourself warned.
///
/// The memory map is released (unmapped) when the destructor is run, so don't
/// let it leave scope by accident if you want it to stick around.
pub struct MemoryMap {
data: *mut u8,
len: usize,
kind: MemoryMapKind,
}
/// Type of memory map
#[allow(raw_pointer_derive)]
#[derive(Copy, Clone)]
pub enum MemoryMapKind {
    /// Memory map backed by a file. The inner pointer holds the Windows
    /// file-mapping handle (it is null on POSIX). Corresponds to
    /// `CreateFileMapping`/`MapViewOfFile` on Windows.
MapFile(*const u8),
/// Virtual memory map. Usually used to change the permissions of a given
/// chunk of memory, or for allocation. Corresponds to `VirtualAlloc` on
/// Windows.
MapVirtual
}
/// Options the memory map is created with
#[allow(raw_pointer_derive)]
#[derive(Copy, Clone)]
pub enum MapOption {
/// The memory should be readable
MapReadable,
/// The memory should be writable
MapWritable,
/// The memory should be executable
MapExecutable,
/// Create a map for a specific address range. Corresponds to `MAP_FIXED` on
/// POSIX.
MapAddr(*const u8),
/// Create a memory mapping for a file with a given HANDLE.
#[cfg(windows)]
MapFd(libc::HANDLE),
/// Create a memory mapping for a file with a given fd.
#[cfg(not(windows))]
MapFd(c_int),
/// When using `MapFd`, the start of the map is `usize` bytes from the start
/// of the file.
MapOffset(usize),
/// On POSIX, this can be used to specify the default flags passed to
/// `mmap`. By default it uses `MAP_PRIVATE` and, if not using `MapFd`,
/// `MAP_ANON`. This will override both of those. This is platform-specific
/// (the exact values used) and ignored on Windows.
MapNonStandardFlags(c_int),
}
/// Possible errors when creating a map.
#[derive(Copy, Clone, Debug)]
pub enum MapError {
/// # The following are POSIX-specific
///
/// fd was not open for reading or, if using `MapWritable`, was not open for
/// writing.
ErrFdNotAvail,
/// fd was not valid
ErrInvalidFd,
/// Either the address given by `MapAddr` or offset given by `MapOffset` was
/// not a multiple of `MemoryMap::granularity` (unaligned to page size).
ErrUnaligned,
/// With `MapFd`, the fd does not support mapping.
ErrNoMapSupport,
/// If using `MapAddr`, the address + `min_len` was outside of the process's
/// address space. If using `MapFd`, the target of the fd didn't have enough
/// resources to fulfill the request.
ErrNoMem,
/// A zero-length map was requested. This is invalid according to
/// [POSIX](http://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html).
/// Not all platforms obey this, but this wrapper does.
ErrZeroLength,
/// Unrecognized error. The inner value is the unrecognized errno.
ErrUnknown(isize),
/// # The following are Windows-specific
///
/// Unsupported combination of protection flags
/// (`MapReadable`/`MapWritable`/`MapExecutable`).
ErrUnsupProt,
/// When using `MapFd`, `MapOffset` was given (Windows does not support this
/// at all)
ErrUnsupOffset,
/// When using `MapFd`, there was already a mapping to the file.
ErrAlreadyExists,
/// Unrecognized error from `VirtualAlloc`. The inner value is the return
/// value of GetLastError.
ErrVirtualAlloc(i32),
/// Unrecognized error from `CreateFileMapping`. The inner value is the
/// return value of `GetLastError`.
ErrCreateFileMappingW(i32),
/// Unrecognized error from `MapViewOfFile`. The inner value is the return
/// value of `GetLastError`.
ErrMapViewOfFile(i32)
}
impl fmt::Display for MapError {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
let str = match *self {
ErrFdNotAvail => "fd not available for reading or writing",
ErrInvalidFd => "Invalid fd",
ErrUnaligned => {
"Unaligned address, invalid flags, negative length or \
unaligned offset"
}
            ErrNoMapSupport => "File doesn't support mapping",
ErrNoMem => "Invalid address, or not enough available memory",
ErrUnsupProt => "Protection mode unsupported",
ErrUnsupOffset => "Offset in virtual memory mode is unsupported",
ErrAlreadyExists => "File mapping for specified file already exists",
ErrZeroLength => "Zero-length mapping not allowed",
ErrUnknown(code) => {
return write!(out, "Unknown error = {}", code)
},
ErrVirtualAlloc(code) => {
return write!(out, "VirtualAlloc failure = {}", code)
},
ErrCreateFileMappingW(code) => {
return write!(out, "CreateFileMappingW failure = {}", code)
},
ErrMapViewOfFile(code) => {
return write!(out, "MapViewOfFile failure = {}", code)
}
};
write!(out, "{}", str)
}
}
impl Error for MapError {
fn description(&self) -> &str { "memory map error" }
}
// Round up `from` to be divisible by `to`
fn round_up(from: usize, to: usize) -> usize {
let r = if from % to == 0 {
from
} else {
from + to - (from % to)
};
if r == 0 {
to
} else {
r
}
}
#[cfg(unix)]
impl MemoryMap {
/// Create a new mapping with the given `options`, at least `min_len` bytes
/// long. `min_len` must be greater than zero; see the note on
/// `ErrZeroLength`.
pub fn new(min_len: usize, options: &[MapOption]) -> Result<MemoryMap, MapError> {
use libc::off_t;
if min_len == 0 {
return Err(ErrZeroLength)
}
let mut addr: *const u8 = ptr::null();
let mut prot = 0;
let mut flags = libc::MAP_PRIVATE;
let mut fd = -1;
let mut offset = 0;
let mut custom_flags = false;
let len = round_up(min_len, page_size());
for &o in options {
match o {
MapReadable => { prot |= libc::PROT_READ; },
MapWritable => { prot |= libc::PROT_WRITE; },
MapExecutable => { prot |= libc::PROT_EXEC; },
MapAddr(addr_) => {
flags |= libc::MAP_FIXED;
addr = addr_;
},
|
},
MapOffset(offset_) => { offset = offset_ as off_t; },
MapNonStandardFlags(f) => { custom_flags = true; flags = f },
}
}
        if fd == -1 && !custom_flags { flags |= libc::MAP_ANON; }
let r = unsafe {
libc::mmap(addr as *mut c_void, len as libc::size_t, prot, flags,
fd, offset)
};
if r == libc::MAP_FAILED {
Err(match errno() {
libc::EACCES => ErrFdNotAvail,
libc::EBADF => ErrInvalidFd,
libc::EINVAL => ErrUnaligned,
libc::ENODEV => ErrNoMapSupport,
libc::ENOMEM => ErrNoMem,
code => ErrUnknown(code as isize)
})
} else {
Ok(MemoryMap {
data: r as *mut u8,
len: len,
kind: if fd == -1 {
MapVirtual
} else {
MapFile(ptr::null())
}
})
}
}
/// Granularity that the offset or address must be for `MapOffset` and
/// `MapAddr` respectively.
pub fn granularity() -> usize {
page_size()
}
}
#[cfg(unix)]
impl Drop for MemoryMap {
    /// Unmap the mapping. Panics the task if `munmap` fails.
fn drop(&mut self) {
if self.len == 0 { /* workaround for dummy_stack */ return; }
unsafe {
            // `munmap` only fails due to logic errors
libc::munmap(self.data as *mut c_void, self.len as libc::size_t);
}
}
}
#[cfg(windows)]
impl MemoryMap {
/// Create a new mapping with the given `options`, at least `min_len` bytes long.
#[allow(non_snake_case)]
pub fn new(min_len: usize, options: &[MapOption]) -> Result<MemoryMap, MapError> {
use libc::types::os::arch::extra::{LPVOID, DWORD, SIZE_T};
let mut lpAddress: LPVOID = ptr::null_mut();
let mut readable = false;
let mut writable = false;
let mut executable = false;
let mut handle = None;
let mut offset: usize = 0;
let len = round_up(min_len, page_size());
for &o in options {
match o {
MapReadable => { readable = true; },
MapWritable => { writable = true; },
MapExecutable => { executable = true; }
MapAddr(addr_) => { lpAddress = addr_ as LPVOID; },
MapFd(handle_) => { handle = Some(handle_); },
MapOffset(offset_) => { offset = offset_; },
MapNonStandardFlags(..) => {}
}
}
let flProtect = match (executable, readable, writable) {
(false, false, false) if handle.is_none() => libc::PAGE_NOACCESS,
(false, true, false) => libc::PAGE_READONLY,
(false, true, true) => libc::PAGE_READWRITE,
(true, false, false) if handle.is_none() => libc::PAGE_EXECUTE,
(true, true, false) => libc::PAGE_EXECUTE_READ,
(true, true, true) => libc::PAGE_EXECUTE_READWRITE,
_ => return Err(ErrUnsupProt)
};
if let Some(handle) = handle {
let dwDesiredAccess = match (executable, readable, writable) {
(false, true, false) => libc::FILE_MAP_READ,
(false, true, true) => libc::FILE_MAP_WRITE,
(true, true, false) => libc::FILE_MAP_READ | libc::FILE_MAP_EXECUTE,
(true, true, true) => libc::FILE_MAP_WRITE | libc::FILE_MAP_EXECUTE,
_ => return Err(ErrUnsupProt) // Actually, because of the check above,
// we should never get here.
};
unsafe {
let hFile = handle;
let mapping = libc::CreateFileMappingW(hFile,
ptr::null_mut(),
flProtect,
0,
0,
ptr::null());
if mapping == ptr::null_mut() {
return Err(ErrCreateFileMappingW(errno()));
}
if errno() as c_int == libc::ERROR_ALREADY_EXISTS {
return Err(ErrAlreadyExists);
}
let r = libc::MapViewOfFile(mapping,
dwDesiredAccess,
                                            ((offset as u64) >> 32) as DWORD,
(offset & 0xffff_ffff) as DWORD,
0);
match r as usize {
0 => Err(ErrMapViewOfFile(errno())),
_ => Ok(MemoryMap {
data: r as *mut u8,
len: len,
kind: MapFile(mapping as *const u8)
})
}
}
} else {
            if offset != 0 {
return Err(ErrUnsupOffset);
}
let r = unsafe {
libc::VirtualAlloc(lpAddress,
len as SIZE_T,
libc::MEM_COMMIT | libc::MEM_RESERVE,
flProtect)
};
match r as usize {
0 => Err(ErrVirtualAlloc(errno())),
_ => Ok(MemoryMap {
data: r as *mut u8,
len: len,
kind: MapVirtual
})
}
}
}
/// Granularity of MapAddr() and MapOffset() parameter values.
/// This may be greater than the value returned by page_size().
pub fn granularity() -> usize {
use std::mem;
unsafe {
let mut info = mem::zeroed();
libc::GetSystemInfo(&mut info);
return info.dwAllocationGranularity as usize;
}
}
}
#[cfg(windows)]
impl Drop for MemoryMap {
/// Unmap the mapping. Panics the task if any of `VirtualFree`,
/// `UnmapViewOfFile`, or `CloseHandle` fail.
fn drop(&mut self) {
use libc::types::os::arch::extra::{LPCVOID, HANDLE};
use libc::consts::os::extra::FALSE;
if self.len == 0 { return }
unsafe {
match self.kind {
MapVirtual => {
if libc::VirtualFree(self.data as *mut c_void, 0,
libc::MEM_RELEASE) == 0 {
println!("VirtualFree failed: {}", errno());
}
},
MapFile(mapping) => {
if libc::UnmapViewOfFile(self.data as LPCVOID) == FALSE {
println!("UnmapViewOfFile failed: {}", errno());
}
if libc::CloseHandle(mapping as HANDLE) == FALSE {
println!("CloseHandle failed: {}", errno());
}
}
}
}
}
}
impl MemoryMap {
/// Returns the pointer to the memory created or modified by this map.
#[inline(always)]
pub fn data(&self) -> *mut u8 { self.data }
/// Returns the number of bytes this map applies to.
#[inline(always)]
pub fn len(&self) -> usize { self.len }
/// Returns the type of mapping this represents.
pub fn kind(&self) -> MemoryMapKind { self.kind }
}
#[cfg(test)]
mod tests {
extern crate libc;
extern crate tempdir;
use super::{MemoryMap, MapOption};
#[test]
fn memory_map_rw() {
let chunk = match MemoryMap::new(16, &[
MapOption::MapReadable,
MapOption::MapWritable
]) {
Ok(chunk) => chunk,
Err(msg) => panic!("{:?}", msg)
};
assert!(chunk.len >= 16);
unsafe {
*chunk.data = 0xBE;
assert!(*chunk.data == 0xBE);
}
}
#[test]
fn memory_map_file() {
use std::fs;
use std::io::{Seek, SeekFrom, Write};
#[cfg(unix)]
fn get_fd(file: &fs::File) -> libc::c_int {
use std::os::unix::io::AsRawFd;
file.as_raw_fd()
}
#[cfg(windows)]
fn get_fd(file: &fs::File) -> libc::HANDLE {
use std::os::windows::io::AsRawHandle;
file.as_raw_handle() as libc::HANDLE
}
let tmpdir = tempdir::TempDir::new("").unwrap();
let mut path = tmpdir.path().to_path_buf();
path.push("mmap_file.tmp");
let size = MemoryMap::granularity() * 2;
let mut file = fs::OpenOptions::new()
.create(true)
.read(true)
.write(true)
.open(&path)
.unwrap();
file.seek(SeekFrom::Start(size as u64)).unwrap();
file.write(b"\0").unwrap();
let fd = get_fd(&file);
let chunk = MemoryMap::new(size / 2, &[
MapOption::MapReadable,
MapOption::MapWritable,
MapOption::MapFd(fd),
MapOption::MapOffset(size / 2)
]).unwrap();
assert!(chunk.len > 0);
unsafe {
*chunk.data = 0xbe;
assert!(*chunk.data == 0xbe);
}
drop(chunk);
fs::remove_file(&path).unwrap();
}
}
|
MapFd(fd_) => {
flags |= libc::MAP_FILE;
fd = fd_;
|
random_line_split
|
time_zone.rs
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use glib_sys;
use translate::*;
use GString;
use TimeType;
glib_wrapper! {
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct TimeZone(Shared<glib_sys::GTimeZone>);
match fn {
ref => |ptr| glib_sys::g_time_zone_ref(ptr),
unref => |ptr| glib_sys::g_time_zone_unref(ptr),
get_type => || glib_sys::g_time_zone_get_type(),
}
}
impl TimeZone {
pub fn new(identifier: Option<&str>) -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new(identifier.to_glib_none().0)) }
}
pub fn new_local() -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new_local()) }
}
#[cfg(any(feature = "v2_58", feature = "dox"))]
pub fn
|
(seconds: i32) -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new_offset(seconds)) }
}
pub fn new_utc() -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new_utc()) }
}
pub fn find_interval(&self, type_: TimeType, time_: i64) -> i32 {
unsafe {
glib_sys::g_time_zone_find_interval(self.to_glib_none().0, type_.to_glib(), time_)
}
}
pub fn get_abbreviation(&self, interval: i32) -> GString {
unsafe {
from_glib_none(glib_sys::g_time_zone_get_abbreviation(
self.to_glib_none().0,
interval,
))
}
}
#[cfg(any(feature = "v2_58", feature = "dox"))]
pub fn get_identifier(&self) -> GString {
unsafe { from_glib_none(glib_sys::g_time_zone_get_identifier(self.to_glib_none().0)) }
}
pub fn get_offset(&self, interval: i32) -> i32 {
unsafe { glib_sys::g_time_zone_get_offset(self.to_glib_none().0, interval) }
}
pub fn is_dst(&self, interval: i32) -> bool {
unsafe {
from_glib(glib_sys::g_time_zone_is_dst(
self.to_glib_none().0,
interval,
))
}
}
}
unsafe impl Send for TimeZone {}
unsafe impl Sync for TimeZone {}
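// Illustrative usage sketch (added commentary, not part of the generated
// bindings); it assumes a host whose GLib timezone database knows
// "Europe/London":
//
//     let tz = TimeZone::new(Some("Europe/London"));
//     let interval = tz.find_interval(TimeType::Universal, 0); // 1970-01-01T00:00:00Z
//     let abbr = tz.get_abbreviation(interval);                // e.g. "GMT"
//     let offset = tz.get_offset(interval);                    // seconds east of UTC
//     let dst = tz.is_dst(interval);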
|
new_offset
|
identifier_name
|
time_zone.rs
|
use glib_sys;
use translate::*;
use GString;
use TimeType;
glib_wrapper! {
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct TimeZone(Shared<glib_sys::GTimeZone>);
match fn {
ref => |ptr| glib_sys::g_time_zone_ref(ptr),
unref => |ptr| glib_sys::g_time_zone_unref(ptr),
get_type => || glib_sys::g_time_zone_get_type(),
}
}
impl TimeZone {
pub fn new(identifier: Option<&str>) -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new(identifier.to_glib_none().0)) }
}
pub fn new_local() -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new_local()) }
}
#[cfg(any(feature = "v2_58", feature = "dox"))]
pub fn new_offset(seconds: i32) -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new_offset(seconds)) }
}
pub fn new_utc() -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new_utc()) }
}
pub fn find_interval(&self, type_: TimeType, time_: i64) -> i32 {
unsafe {
glib_sys::g_time_zone_find_interval(self.to_glib_none().0, type_.to_glib(), time_)
}
}
pub fn get_abbreviation(&self, interval: i32) -> GString {
unsafe {
from_glib_none(glib_sys::g_time_zone_get_abbreviation(
self.to_glib_none().0,
interval,
))
}
}
#[cfg(any(feature = "v2_58", feature = "dox"))]
pub fn get_identifier(&self) -> GString {
unsafe { from_glib_none(glib_sys::g_time_zone_get_identifier(self.to_glib_none().0)) }
}
pub fn get_offset(&self, interval: i32) -> i32 {
unsafe { glib_sys::g_time_zone_get_offset(self.to_glib_none().0, interval) }
}
pub fn is_dst(&self, interval: i32) -> bool {
unsafe {
from_glib(glib_sys::g_time_zone_is_dst(
self.to_glib_none().0,
interval,
))
}
}
}
unsafe impl Send for TimeZone {}
unsafe impl Sync for TimeZone {}
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
|
random_line_split
|
|
time_zone.rs
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use glib_sys;
use translate::*;
use GString;
use TimeType;
glib_wrapper! {
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct TimeZone(Shared<glib_sys::GTimeZone>);
match fn {
ref => |ptr| glib_sys::g_time_zone_ref(ptr),
unref => |ptr| glib_sys::g_time_zone_unref(ptr),
get_type => || glib_sys::g_time_zone_get_type(),
}
}
impl TimeZone {
pub fn new(identifier: Option<&str>) -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new(identifier.to_glib_none().0)) }
}
pub fn new_local() -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new_local()) }
}
#[cfg(any(feature = "v2_58", feature = "dox"))]
pub fn new_offset(seconds: i32) -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new_offset(seconds)) }
}
pub fn new_utc() -> TimeZone {
unsafe { from_glib_full(glib_sys::g_time_zone_new_utc()) }
}
pub fn find_interval(&self, type_: TimeType, time_: i64) -> i32 {
unsafe {
glib_sys::g_time_zone_find_interval(self.to_glib_none().0, type_.to_glib(), time_)
}
}
pub fn get_abbreviation(&self, interval: i32) -> GString
|
#[cfg(any(feature = "v2_58", feature = "dox"))]
pub fn get_identifier(&self) -> GString {
unsafe { from_glib_none(glib_sys::g_time_zone_get_identifier(self.to_glib_none().0)) }
}
pub fn get_offset(&self, interval: i32) -> i32 {
unsafe { glib_sys::g_time_zone_get_offset(self.to_glib_none().0, interval) }
}
pub fn is_dst(&self, interval: i32) -> bool {
unsafe {
from_glib(glib_sys::g_time_zone_is_dst(
self.to_glib_none().0,
interval,
))
}
}
}
unsafe impl Send for TimeZone {}
unsafe impl Sync for TimeZone {}
|
{
unsafe {
from_glib_none(glib_sys::g_time_zone_get_abbreviation(
self.to_glib_none().0,
interval,
))
}
}
|
identifier_body
|
kl25z_map.rs
|
// Copyright 2014 Martin Kojtal (0xc0170)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use io::VolatileRW;
const BASE_SIM : u32 = 0x40047000;
pub struct Sim {
pub sopt1 : VolatileRW<u32>,
pub sopt1cfg : VolatileRW<u32>,
pub reserved_0 : [u8; 4092],
pub sopt2 : VolatileRW<u32>,
pub reserved_1 : [u8; 4],
pub sopt4 : VolatileRW<u32>,
pub sopt5 : VolatileRW<u32>,
pub reserved_2 : [u8; 4],
pub sopt7 : VolatileRW<u32>,
pub reserved_3 : [u8; 8],
pub sdid : VolatileRW<u32>,
pub reserved_4 : [u8; 12],
pub scgc4 : VolatileRW<u32>,
pub scgc5 : VolatileRW<u32>,
pub scgc6 : VolatileRW<u32>,
pub scgc7 : VolatileRW<u32>,
pub clkdiv1 : VolatileRW<u32>,
pub reserved_5 : [u8; 4],
pub fcfg1 : VolatileRW<u32>,
pub fcfg2 : VolatileRW<u32>,
pub reserved_6 : VolatileRW<u32>,
pub uidmh : VolatileRW<u32>,
pub uidml : VolatileRW<u32>,
pub uidl : VolatileRW<u32>,
pub reserved_7 : [u8; 156],
pub copc : VolatileRW<u32>,
pub srvcop : VolatileRW<u32>,
}
impl Sim {
pub fn get() -> &'static Sim {
unsafe {
&*(BASE_SIM as *const Sim)
}
}
}
const BASE_PORTA : u32 = 0x40049000;
pub struct Port {
pub pcr : [VolatileRW<u32>; 32],
pub gpclr : VolatileRW<u32>,
pub gpchr : VolatileRW<u32>,
pub reserved_0 : [u8; 24],
pub isfr : VolatileRW<u32>,
}
impl Port {
pub fn
|
(port: u32) -> &'static Port {
unsafe {
&*((BASE_PORTA + (port*0x1000)) as *const Port)
}
}
}
const BASE_MCG : u32 = 0x40064000;
pub struct Mcg {
pub c1 : VolatileRW<u8>,
pub c2 : VolatileRW<u8>,
pub c3 : VolatileRW<u8>,
pub c4 : VolatileRW<u8>,
pub c5 : VolatileRW<u8>,
pub c6 : VolatileRW<u8>,
pub s : VolatileRW<u8>,
pub reserved_0 : [u8; 1],
pub sc : VolatileRW<u8>,
pub reserved_1 : [u8; 1],
pub atcvh : VolatileRW<u8>,
pub atcvl : VolatileRW<u8>,
pub c7 : VolatileRW<u8>,
pub c8 : VolatileRW<u8>,
pub c9 : VolatileRW<u8>,
pub c10 : VolatileRW<u8>,
}
impl Mcg {
pub fn get() -> &'static Mcg {
unsafe {
&*(BASE_MCG as *const Mcg)
}
}
}
const BASE_OSC0 : u32 = 0x40065000;
pub struct Osc0 {
pub cr : VolatileRW<u8>,
}
impl Osc0 {
pub fn get() -> &'static Osc0 {
unsafe {
&*(BASE_OSC0 as *const Osc0)
}
}
}
const BASE_PTA : u32 = 0x400FF000;
pub struct Gpio {
pub pdor : VolatileRW<u32>,
pub psor : VolatileRW<u32>,
pub pcor : VolatileRW<u32>,
pub ptor : VolatileRW<u32>,
pub pdir : VolatileRW<u32>,
pub pddr : VolatileRW<u32>,
}
impl Gpio {
pub fn get(port : u32) -> &'static Gpio {
unsafe {
&*((BASE_PTA + (port*0x40)) as *const Gpio)
}
}
}
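// Illustrative address-layout check (added commentary, not in the original
// file): `Port::get(n)` steps 0x1000 bytes per port block from PORTA and
// `Gpio::get(n)` steps 0x40 bytes per GPIO block from PTA, so PORTC's
// registers start at 0x4004B000 and PTC's at 0x400FF080.
#[cfg(test)]
mod address_layout {
    #[test]
    fn port_and_gpio_strides() {
        assert_eq!(super::BASE_PORTA + 2 * 0x1000, 0x4004B000);
        assert_eq!(super::BASE_PTA + 2 * 0x40, 0x400FF080);
    }
}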
|
get
|
identifier_name
|
kl25z_map.rs
|
// Copyright 2014 Martin Kojtal (0xc0170)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use io::VolatileRW;
const BASE_SIM : u32 = 0x40047000;
pub struct Sim {
pub sopt1 : VolatileRW<u32>,
pub sopt1cfg : VolatileRW<u32>,
pub reserved_0 : [u8; 4092],
pub sopt2 : VolatileRW<u32>,
pub reserved_1 : [u8; 4],
pub sopt4 : VolatileRW<u32>,
pub sopt5 : VolatileRW<u32>,
pub reserved_2 : [u8; 4],
pub sopt7 : VolatileRW<u32>,
pub reserved_3 : [u8; 8],
pub sdid : VolatileRW<u32>,
pub reserved_4 : [u8; 12],
pub scgc4 : VolatileRW<u32>,
pub scgc5 : VolatileRW<u32>,
pub scgc6 : VolatileRW<u32>,
pub scgc7 : VolatileRW<u32>,
pub clkdiv1 : VolatileRW<u32>,
pub reserved_5 : [u8; 4],
pub fcfg1 : VolatileRW<u32>,
pub fcfg2 : VolatileRW<u32>,
pub reserved_6 : VolatileRW<u32>,
pub uidmh : VolatileRW<u32>,
pub uidml : VolatileRW<u32>,
pub uidl : VolatileRW<u32>,
pub reserved_7 : [u8; 156],
pub copc : VolatileRW<u32>,
pub srvcop : VolatileRW<u32>,
}
impl Sim {
pub fn get() -> &'static Sim {
unsafe {
&*(BASE_SIM as *const Sim)
}
}
}
const BASE_PORTA : u32 = 0x40049000;
pub struct Port {
pub pcr : [VolatileRW<u32>; 32],
pub gpclr : VolatileRW<u32>,
pub gpchr : VolatileRW<u32>,
pub reserved_0 : [u8; 24],
pub isfr : VolatileRW<u32>,
}
impl Port {
pub fn get(port: u32) -> &'static Port {
unsafe {
&*((BASE_PORTA + (port*0x1000)) as *const Port)
}
}
}
const BASE_MCG : u32 = 0x40064000;
pub struct Mcg {
pub c1 : VolatileRW<u8>,
pub c2 : VolatileRW<u8>,
pub c3 : VolatileRW<u8>,
pub c4 : VolatileRW<u8>,
pub c5 : VolatileRW<u8>,
pub c6 : VolatileRW<u8>,
pub s : VolatileRW<u8>,
pub reserved_0 : [u8; 1],
pub sc : VolatileRW<u8>,
pub reserved_1 : [u8; 1],
pub atcvh : VolatileRW<u8>,
pub atcvl : VolatileRW<u8>,
pub c7 : VolatileRW<u8>,
pub c8 : VolatileRW<u8>,
pub c9 : VolatileRW<u8>,
pub c10 : VolatileRW<u8>,
}
impl Mcg {
pub fn get() -> &'static Mcg {
unsafe {
&*(BASE_MCG as *const Mcg)
}
}
}
|
pub cr : VolatileRW<u8>,
}
impl Osc0 {
pub fn get() -> &'static Osc0 {
unsafe {
&*(BASE_OSC0 as *const Osc0)
}
}
}
const BASE_PTA : u32 = 0x400FF000;
pub struct Gpio {
pub pdor : VolatileRW<u32>,
pub psor : VolatileRW<u32>,
pub pcor : VolatileRW<u32>,
pub ptor : VolatileRW<u32>,
pub pdir : VolatileRW<u32>,
pub pddr : VolatileRW<u32>,
}
impl Gpio {
pub fn get(port : u32) -> &'static Gpio {
unsafe {
&*((BASE_PTA + (port*0x40)) as *const Gpio)
}
}
}
|
const BASE_OSC0 : u32 = 0x40065000;
pub struct Osc0 {
|
random_line_split
|
kl25z_map.rs
|
// Copyright 2014 Martin Kojtal (0xc0170)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use io::VolatileRW;
const BASE_SIM : u32 = 0x40047000;
pub struct Sim {
pub sopt1 : VolatileRW<u32>,
pub sopt1cfg : VolatileRW<u32>,
pub reserved_0 : [u8; 4092],
pub sopt2 : VolatileRW<u32>,
pub reserved_1 : [u8; 4],
pub sopt4 : VolatileRW<u32>,
pub sopt5 : VolatileRW<u32>,
pub reserved_2 : [u8; 4],
pub sopt7 : VolatileRW<u32>,
pub reserved_3 : [u8; 8],
pub sdid : VolatileRW<u32>,
pub reserved_4 : [u8; 12],
pub scgc4 : VolatileRW<u32>,
pub scgc5 : VolatileRW<u32>,
pub scgc6 : VolatileRW<u32>,
pub scgc7 : VolatileRW<u32>,
pub clkdiv1 : VolatileRW<u32>,
pub reserved_5 : [u8; 4],
pub fcfg1 : VolatileRW<u32>,
pub fcfg2 : VolatileRW<u32>,
pub reserved_6 : VolatileRW<u32>,
pub uidmh : VolatileRW<u32>,
pub uidml : VolatileRW<u32>,
pub uidl : VolatileRW<u32>,
pub reserved_7 : [u8; 156],
pub copc : VolatileRW<u32>,
pub srvcop : VolatileRW<u32>,
}
impl Sim {
pub fn get() -> &'static Sim
|
}
const BASE_PORTA : u32 = 0x40049000;
pub struct Port {
pub pcr : [VolatileRW<u32>; 32],
pub gpclr : VolatileRW<u32>,
pub gpchr : VolatileRW<u32>,
pub reserved_0 : [u8; 24],
pub isfr : VolatileRW<u32>,
}
impl Port {
pub fn get(port: u32) -> &'static Port {
unsafe {
&*((BASE_PORTA + (port*0x1000)) as *const Port)
}
}
}
const BASE_MCG : u32 = 0x40064000;
pub struct Mcg {
pub c1 : VolatileRW<u8>,
pub c2 : VolatileRW<u8>,
pub c3 : VolatileRW<u8>,
pub c4 : VolatileRW<u8>,
pub c5 : VolatileRW<u8>,
pub c6 : VolatileRW<u8>,
pub s : VolatileRW<u8>,
pub reserved_0 : [u8; 1],
pub sc : VolatileRW<u8>,
pub reserved_1 : [u8; 1],
pub atcvh : VolatileRW<u8>,
pub atcvl : VolatileRW<u8>,
pub c7 : VolatileRW<u8>,
pub c8 : VolatileRW<u8>,
pub c9 : VolatileRW<u8>,
pub c10 : VolatileRW<u8>,
}
impl Mcg {
pub fn get() -> &'static Mcg {
unsafe {
&*(BASE_MCG as *const Mcg)
}
}
}
const BASE_OSC0 : u32 = 0x40065000;
pub struct Osc0 {
pub cr : VolatileRW<u8>,
}
impl Osc0 {
pub fn get() -> &'static Osc0 {
unsafe {
&*(BASE_OSC0 as *const Osc0)
}
}
}
const BASE_PTA : u32 = 0x400FF000;
pub struct Gpio {
pub pdor : VolatileRW<u32>,
pub psor : VolatileRW<u32>,
pub pcor : VolatileRW<u32>,
pub ptor : VolatileRW<u32>,
pub pdir : VolatileRW<u32>,
pub pddr : VolatileRW<u32>,
}
impl Gpio {
pub fn get(port : u32) -> &'static Gpio {
unsafe {
&*((BASE_PTA + (port*0x40)) as *const Gpio)
}
}
}
|
{
unsafe {
&*(BASE_SIM as *const Sim)
}
}
|
identifier_body
|
utils.rs
|
//! Shared mathematical utility functions.
/// Cut value to be inside given range
///
/// ```
/// use image::math::utils;
///
/// assert_eq!(utils::clamp(-5, 0, 10), 0);
/// assert_eq!(utils::clamp( 6, 0, 10), 6);
/// assert_eq!(utils::clamp(15, 0, 10), 10);
/// ```
#[inline]
#[deprecated]
pub fn clamp<N>(a: N, min: N, max: N) -> N
where
N: PartialOrd,
|
/// Calculates the width and height an image should be resized to.
/// This preserves aspect ratio, and based on the `fill` parameter
/// will either fill the dimensions to fit inside the smaller constraint
/// (will overflow the specified bounds on one axis to preserve
/// aspect ratio), or will shrink so that both dimensions are
/// completely contained with in the given `width` and `height`,
/// with empty space on one axis.
pub(crate) fn resize_dimensions(width: u32, height: u32, nwidth: u32, nheight: u32, fill: bool) -> (u32, u32) {
let ratio = u64::from(width) * u64::from(nheight);
let nratio = u64::from(nwidth) * u64::from(height);
let use_width = if fill {
nratio > ratio
} else {
nratio <= ratio
};
let intermediate = if use_width {
u64::from(height) * u64::from(nwidth) / u64::from(width)
} else {
u64::from(width) * u64::from(nheight) / u64::from(height)
};
let intermediate = std::cmp::max(1, intermediate);
if use_width {
if intermediate <= u64::from(::std::u32::MAX) {
(nwidth, intermediate as u32)
} else {
(
(u64::from(nwidth) * u64::from(::std::u32::MAX) / intermediate) as u32,
::std::u32::MAX,
)
}
} else if intermediate <= u64::from(::std::u32::MAX) {
(intermediate as u32, nheight)
} else {
(
::std::u32::MAX,
(u64::from(nheight) * u64::from(::std::u32::MAX) / intermediate) as u32,
)
}
}
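// Worked example (added commentary, not in the original module), matching the
// `resize_handles_fill` test below: resize_dimensions(100, 200, 200, 500, true)
// gives ratio = 100 * 500 = 50_000 and nratio = 200 * 200 = 40_000; with
// fill = true, use_width = (nratio > ratio) = false, so the height is pinned
// at nheight = 500 and the width becomes 100 * 500 / 200 = 250, i.e. (250, 500).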
#[cfg(test)]
mod test {
quickcheck! {
fn resize_bounds_correctly_width(old_w: u32, new_w: u32) -> bool {
if old_w == 0 || new_w == 0 { return true; }
let result = super::resize_dimensions(old_w, 400, new_w, ::std::u32::MAX, false);
result.0 == new_w && result.1 == (400 as f64 * new_w as f64 / old_w as f64) as u32
}
}
quickcheck! {
fn resize_bounds_correctly_height(old_h: u32, new_h: u32) -> bool {
if old_h == 0 || new_h == 0 { return true; }
let result = super::resize_dimensions(400, old_h, ::std::u32::MAX, new_h, false);
result.1 == new_h && result.0 == (400 as f64 * new_h as f64 / old_h as f64) as u32
}
}
#[test]
fn resize_handles_fill() {
let result = super::resize_dimensions(100, 200, 200, 500, true);
assert!(result.0 == 250);
assert!(result.1 == 500);
let result = super::resize_dimensions(200, 100, 500, 200, true);
assert!(result.0 == 500);
assert!(result.1 == 250);
}
#[test]
fn resize_never_rounds_to_zero() {
let result = super::resize_dimensions(1, 150, 128, 128, false);
assert!(result.0 > 0);
assert!(result.1 > 0);
}
#[test]
fn resize_handles_overflow() {
let result = super::resize_dimensions(100, ::std::u32::MAX, 200, ::std::u32::MAX, true);
assert!(result.0 == 100);
assert!(result.1 == ::std::u32::MAX);
let result = super::resize_dimensions(::std::u32::MAX, 100, ::std::u32::MAX, 200, true);
assert!(result.0 == ::std::u32::MAX);
assert!(result.1 == 100);
}
}
|
{
if a < min {
return min;
}
if a > max {
return max;
}
a
}
|
identifier_body
|
utils.rs
|
//! Shared mathematical utility functions.
/// Cut value to be inside given range
///
/// ```
/// use image::math::utils;
///
/// assert_eq!(utils::clamp(-5, 0, 10), 0);
/// assert_eq!(utils::clamp( 6, 0, 10), 6);
/// assert_eq!(utils::clamp(15, 0, 10), 10);
/// ```
#[inline]
#[deprecated]
pub fn clamp<N>(a: N, min: N, max: N) -> N
where
N: PartialOrd,
{
if a < min {
return min;
}
if a > max {
return max;
}
a
}
/// Calculates the width and height an image should be resized to.
/// This preserves aspect ratio, and based on the `fill` parameter
/// will either fill the dimensions to fit inside the smaller constraint
/// (will overflow the specified bounds on one axis to preserve
/// aspect ratio), or will shrink so that both dimensions are
/// completely contained with in the given `width` and `height`,
/// with empty space on one axis.
pub(crate) fn resize_dimensions(width: u32, height: u32, nwidth: u32, nheight: u32, fill: bool) -> (u32, u32) {
let ratio = u64::from(width) * u64::from(nheight);
let nratio = u64::from(nwidth) * u64::from(height);
let use_width = if fill {
nratio > ratio
} else {
nratio <= ratio
};
let intermediate = if use_width
|
else {
u64::from(width) * u64::from(nheight) / u64::from(height)
};
let intermediate = std::cmp::max(1, intermediate);
if use_width {
if intermediate <= u64::from(::std::u32::MAX) {
(nwidth, intermediate as u32)
} else {
(
(u64::from(nwidth) * u64::from(::std::u32::MAX) / intermediate) as u32,
::std::u32::MAX,
)
}
} else if intermediate <= u64::from(::std::u32::MAX) {
(intermediate as u32, nheight)
} else {
(
::std::u32::MAX,
(u64::from(nheight) * u64::from(::std::u32::MAX) / intermediate) as u32,
)
}
}
#[cfg(test)]
mod test {
quickcheck! {
fn resize_bounds_correctly_width(old_w: u32, new_w: u32) -> bool {
if old_w == 0 || new_w == 0 { return true; }
let result = super::resize_dimensions(old_w, 400, new_w, ::std::u32::MAX, false);
result.0 == new_w && result.1 == (400 as f64 * new_w as f64 / old_w as f64) as u32
}
}
quickcheck! {
fn resize_bounds_correctly_height(old_h: u32, new_h: u32) -> bool {
if old_h == 0 || new_h == 0 { return true; }
let result = super::resize_dimensions(400, old_h, ::std::u32::MAX, new_h, false);
result.1 == new_h && result.0 == (400 as f64 * new_h as f64 / old_h as f64) as u32
}
}
#[test]
fn resize_handles_fill() {
let result = super::resize_dimensions(100, 200, 200, 500, true);
assert!(result.0 == 250);
assert!(result.1 == 500);
let result = super::resize_dimensions(200, 100, 500, 200, true);
assert!(result.0 == 500);
assert!(result.1 == 250);
}
#[test]
fn resize_never_rounds_to_zero() {
let result = super::resize_dimensions(1, 150, 128, 128, false);
assert!(result.0 > 0);
assert!(result.1 > 0);
}
#[test]
fn resize_handles_overflow() {
let result = super::resize_dimensions(100, ::std::u32::MAX, 200, ::std::u32::MAX, true);
assert!(result.0 == 100);
assert!(result.1 == ::std::u32::MAX);
let result = super::resize_dimensions(::std::u32::MAX, 100, ::std::u32::MAX, 200, true);
assert!(result.0 == ::std::u32::MAX);
assert!(result.1 == 100);
}
}
|
{
u64::from(height) * u64::from(nwidth) / u64::from(width)
}
|
conditional_block
|
utils.rs
|
//! Shared mathematical utility functions.
/// Cut value to be inside given range
///
/// ```
/// use image::math::utils;
///
/// assert_eq!(utils::clamp(-5, 0, 10), 0);
/// assert_eq!(utils::clamp( 6, 0, 10), 6);
/// assert_eq!(utils::clamp(15, 0, 10), 10);
/// ```
#[inline]
#[deprecated]
pub fn clamp<N>(a: N, min: N, max: N) -> N
where
N: PartialOrd,
{
if a < min {
return min;
}
if a > max {
return max;
}
a
}
/// Calculates the width and height an image should be resized to.
/// This preserves aspect ratio, and based on the `fill` parameter
/// will either fill the dimensions to fit inside the smaller constraint
/// (will overflow the specified bounds on one axis to preserve
/// aspect ratio), or will shrink so that both dimensions are
/// completely contained within the given `width` and `height`,
/// with empty space on one axis.
pub(crate) fn resize_dimensions(width: u32, height: u32, nwidth: u32, nheight: u32, fill: bool) -> (u32, u32) {
let ratio = u64::from(width) * u64::from(nheight);
let nratio = u64::from(nwidth) * u64::from(height);
let use_width = if fill {
nratio > ratio
} else {
nratio <= ratio
};
let intermediate = if use_width {
u64::from(height) * u64::from(nwidth) / u64::from(width)
} else {
u64::from(width) * u64::from(nheight) / u64::from(height)
};
let intermediate = std::cmp::max(1, intermediate);
if use_width {
if intermediate <= u64::from(::std::u32::MAX) {
(nwidth, intermediate as u32)
} else {
(
(u64::from(nwidth) * u64::from(::std::u32::MAX) / intermediate) as u32,
::std::u32::MAX,
)
}
} else if intermediate <= u64::from(::std::u32::MAX) {
(intermediate as u32, nheight)
} else {
(
::std::u32::MAX,
(u64::from(nheight) * u64::from(::std::u32::MAX) / intermediate) as u32,
)
}
}
#[cfg(test)]
mod test {
quickcheck! {
fn resize_bounds_correctly_width(old_w: u32, new_w: u32) -> bool {
if old_w == 0 || new_w == 0 { return true; }
let result = super::resize_dimensions(old_w, 400, new_w, ::std::u32::MAX, false);
result.0 == new_w && result.1 == (400 as f64 * new_w as f64 / old_w as f64) as u32
}
}
quickcheck! {
fn resize_bounds_correctly_height(old_h: u32, new_h: u32) -> bool {
if old_h == 0 || new_h == 0 { return true; }
let result = super::resize_dimensions(400, old_h, ::std::u32::MAX, new_h, false);
result.1 == new_h && result.0 == (400 as f64 * new_h as f64 / old_h as f64) as u32
}
}
#[test]
fn resize_handles_fill() {
let result = super::resize_dimensions(100, 200, 200, 500, true);
assert!(result.0 == 250);
assert!(result.1 == 500);
let result = super::resize_dimensions(200, 100, 500, 200, true);
assert!(result.0 == 500);
assert!(result.1 == 250);
}
#[test]
fn
|
() {
let result = super::resize_dimensions(1, 150, 128, 128, false);
assert!(result.0 > 0);
assert!(result.1 > 0);
}
#[test]
fn resize_handles_overflow() {
let result = super::resize_dimensions(100, ::std::u32::MAX, 200, ::std::u32::MAX, true);
assert!(result.0 == 100);
assert!(result.1 == ::std::u32::MAX);
let result = super::resize_dimensions(::std::u32::MAX, 100, ::std::u32::MAX, 200, true);
assert!(result.0 == ::std::u32::MAX);
assert!(result.1 == 100);
}
}
|
resize_never_rounds_to_zero
|
identifier_name
|
utils.rs
|
//! Shared mathematical utility functions.
/// Cut value to be inside given range
///
/// ```
/// use image::math::utils;
///
/// assert_eq!(utils::clamp(-5, 0, 10), 0);
/// assert_eq!(utils::clamp( 6, 0, 10), 6);
/// assert_eq!(utils::clamp(15, 0, 10), 10);
/// ```
#[inline]
#[deprecated]
pub fn clamp<N>(a: N, min: N, max: N) -> N
where
N: PartialOrd,
{
if a < min {
return min;
}
if a > max {
return max;
}
a
}
|
/// (will overflow the specified bounds on one axis to preserve
/// aspect ratio), or will shrink so that both dimensions are
/// completely contained within the given `width` and `height`,
/// with empty space on one axis.
pub(crate) fn resize_dimensions(width: u32, height: u32, nwidth: u32, nheight: u32, fill: bool) -> (u32, u32) {
let ratio = u64::from(width) * u64::from(nheight);
let nratio = u64::from(nwidth) * u64::from(height);
let use_width = if fill {
nratio > ratio
} else {
nratio <= ratio
};
let intermediate = if use_width {
u64::from(height) * u64::from(nwidth) / u64::from(width)
} else {
u64::from(width) * u64::from(nheight) / u64::from(height)
};
let intermediate = std::cmp::max(1, intermediate);
if use_width {
if intermediate <= u64::from(::std::u32::MAX) {
(nwidth, intermediate as u32)
} else {
(
(u64::from(nwidth) * u64::from(::std::u32::MAX) / intermediate) as u32,
::std::u32::MAX,
)
}
} else if intermediate <= u64::from(::std::u32::MAX) {
(intermediate as u32, nheight)
} else {
(
::std::u32::MAX,
(u64::from(nheight) * u64::from(::std::u32::MAX) / intermediate) as u32,
)
}
}
#[cfg(test)]
mod test {
quickcheck! {
fn resize_bounds_correctly_width(old_w: u32, new_w: u32) -> bool {
if old_w == 0 || new_w == 0 { return true; }
let result = super::resize_dimensions(old_w, 400, new_w, ::std::u32::MAX, false);
result.0 == new_w && result.1 == (400 as f64 * new_w as f64 / old_w as f64) as u32
}
}
quickcheck! {
fn resize_bounds_correctly_height(old_h: u32, new_h: u32) -> bool {
if old_h == 0 || new_h == 0 { return true; }
let result = super::resize_dimensions(400, old_h, ::std::u32::MAX, new_h, false);
result.1 == new_h && result.0 == (400 as f64 * new_h as f64 / old_h as f64) as u32
}
}
#[test]
fn resize_handles_fill() {
let result = super::resize_dimensions(100, 200, 200, 500, true);
assert!(result.0 == 250);
assert!(result.1 == 500);
let result = super::resize_dimensions(200, 100, 500, 200, true);
assert!(result.0 == 500);
assert!(result.1 == 250);
}
#[test]
fn resize_never_rounds_to_zero() {
let result = super::resize_dimensions(1, 150, 128, 128, false);
assert!(result.0 > 0);
assert!(result.1 > 0);
}
#[test]
fn resize_handles_overflow() {
let result = super::resize_dimensions(100, ::std::u32::MAX, 200, ::std::u32::MAX, true);
assert!(result.0 == 100);
assert!(result.1 == ::std::u32::MAX);
let result = super::resize_dimensions(::std::u32::MAX, 100, ::std::u32::MAX, 200, true);
assert!(result.0 == ::std::u32::MAX);
assert!(result.1 == 100);
}
}
|
/// Calculates the width and height an image should be resized to.
/// This preserves aspect ratio, and based on the `fill` parameter
/// will either fill the dimensions to fit inside the smaller constraint
|
random_line_split
|
sendmoremoney.rs
|
//! Send More Money.
//!
//! https://en.wikipedia.org/wiki/Verbal_arithmetic
extern crate puzzle_solver;
use puzzle_solver::{Puzzle,Solution,VarToken};
fn make_send_more_money() -> (Puzzle, Vec<VarToken>) {
let mut sys = Puzzle::new();
let vars = sys.new_vars_with_candidates_1d(8, &[0,1,2,3,4,5,6,7,8,9]);
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
sys.remove_candidates(s, &[0]);
sys.remove_candidates(m, &[0]);
sys.all_different(&vars);
let send = 1000 * s + 100 * e + 10 * n + d;
let more = 1000 * m + 100 * o + 10 * r + e;
let money = 10000 * m + 1000 * o + 100 * n + 10 * e + y;
sys.equals(send + more, money);
(sys, vars)
}
fn print_send_more_money(dict: &Solution, vars: &Vec<VarToken>) {
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
println!(" {} {} {} {}", dict[s], dict[e], dict[n], dict[d]);
println!(" + {} {} {} {}", dict[m], dict[o], dict[r], dict[e]);
println!("----------");
println!(" {} {} {} {} {}", dict[m], dict[o], dict[n], dict[e], dict[y]);
}
fn verify_send_more_money(dict: &Solution, vars: &Vec<VarToken>) {
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
assert_eq!(dict[o], 0);
assert_eq!(dict[m], 1);
assert_eq!(dict[y], 2);
assert_eq!(dict[e], 5);
assert_eq!(dict[n], 6);
assert_eq!(dict[d], 7);
assert_eq!(dict[r], 8);
assert_eq!(dict[s], 9);
}
#[test]
fn sendmoremoney_carry()
|
}
#[test]
fn sendmoremoney_naive() {
let (mut sys, vars) = make_send_more_money();
let dict = sys.solve_unique().expect("solution");
print_send_more_money(&dict, &vars);
verify_send_more_money(&dict, &vars);
println!("sendmoremoney_naive: {} guesses", sys.num_guesses());
}
|
{
let carry = [0,1];
let (mut sys, vars) = make_send_more_money();
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
let c1 = sys.new_var_with_candidates(&carry);
let c2 = sys.new_var_with_candidates(&carry);
let c3 = sys.new_var_with_candidates(&carry);
sys.intersect_candidates(m, &carry); // c4 == m.
sys.equals( d + e, 10 * c1 + y);
sys.equals(c1 + n + r, 10 * c2 + e);
sys.equals(c2 + e + o, 10 * c3 + n);
sys.equals(c3 + s + m, 10 * m + o);
let dict = sys.solve_unique().expect("solution");
print_send_more_money(&dict, &vars);
verify_send_more_money(&dict, &vars);
println!("sendmoremoney_carry: {} guesses", sys.num_guesses());
|
identifier_body
|
sendmoremoney.rs
|
//! Send More Money.
//!
//! https://en.wikipedia.org/wiki/Verbal_arithmetic
extern crate puzzle_solver;
use puzzle_solver::{Puzzle,Solution,VarToken};
fn make_send_more_money() -> (Puzzle, Vec<VarToken>) {
let mut sys = Puzzle::new();
let vars = sys.new_vars_with_candidates_1d(8, &[0,1,2,3,4,5,6,7,8,9]);
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
sys.remove_candidates(s, &[0]);
sys.remove_candidates(m, &[0]);
sys.all_different(&vars);
let send = 1000 * s + 100 * e + 10 * n + d;
let more = 1000 * m + 100 * o + 10 * r + e;
let money = 10000 * m + 1000 * o + 100 * n + 10 * e + y;
sys.equals(send + more, money);
(sys, vars)
}
fn print_send_more_money(dict: &Solution, vars: &Vec<VarToken>) {
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
println!(" {} {} {} {}", dict[s], dict[e], dict[n], dict[d]);
println!(" + {} {} {} {}", dict[m], dict[o], dict[r], dict[e]);
println!("----------");
println!(" {} {} {} {} {}", dict[m], dict[o], dict[n], dict[e], dict[y]);
}
fn verify_send_more_money(dict: &Solution, vars: &Vec<VarToken>) {
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
assert_eq!(dict[o], 0);
assert_eq!(dict[m], 1);
assert_eq!(dict[y], 2);
assert_eq!(dict[e], 5);
assert_eq!(dict[n], 6);
assert_eq!(dict[d], 7);
assert_eq!(dict[r], 8);
assert_eq!(dict[s], 9);
}
#[test]
fn sendmoremoney_carry() {
let carry = [0,1];
let (mut sys, vars) = make_send_more_money();
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
let c1 = sys.new_var_with_candidates(&carry);
let c2 = sys.new_var_with_candidates(&carry);
let c3 = sys.new_var_with_candidates(&carry);
sys.intersect_candidates(m, &carry); // c4 == m.
sys.equals( d + e, 10 * c1 + y);
sys.equals(c1 + n + r, 10 * c2 + e);
sys.equals(c2 + e + o, 10 * c3 + n);
sys.equals(c3 + s + m, 10 * m + o);
let dict = sys.solve_unique().expect("solution");
print_send_more_money(&dict, &vars);
verify_send_more_money(&dict, &vars);
|
}
#[test]
fn sendmoremoney_naive() {
let (mut sys, vars) = make_send_more_money();
let dict = sys.solve_unique().expect("solution");
print_send_more_money(&dict, &vars);
verify_send_more_money(&dict, &vars);
println!("sendmoremoney_naive: {} guesses", sys.num_guesses());
}
|
println!("sendmoremoney_carry: {} guesses", sys.num_guesses());
|
random_line_split
|
sendmoremoney.rs
|
//! Send More Money.
//!
//! https://en.wikipedia.org/wiki/Verbal_arithmetic
extern crate puzzle_solver;
use puzzle_solver::{Puzzle,Solution,VarToken};
fn
|
() -> (Puzzle, Vec<VarToken>) {
let mut sys = Puzzle::new();
let vars = sys.new_vars_with_candidates_1d(8, &[0,1,2,3,4,5,6,7,8,9]);
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
sys.remove_candidates(s, &[0]);
sys.remove_candidates(m, &[0]);
sys.all_different(&vars);
let send = 1000 * s + 100 * e + 10 * n + d;
let more = 1000 * m + 100 * o + 10 * r + e;
let money = 10000 * m + 1000 * o + 100 * n + 10 * e + y;
sys.equals(send + more, money);
(sys, vars)
}
fn print_send_more_money(dict: &Solution, vars: &Vec<VarToken>) {
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
println!(" {} {} {} {}", dict[s], dict[e], dict[n], dict[d]);
println!(" + {} {} {} {}", dict[m], dict[o], dict[r], dict[e]);
println!("----------");
println!(" {} {} {} {} {}", dict[m], dict[o], dict[n], dict[e], dict[y]);
}
fn verify_send_more_money(dict: &Solution, vars: &Vec<VarToken>) {
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
assert_eq!(dict[o], 0);
assert_eq!(dict[m], 1);
assert_eq!(dict[y], 2);
assert_eq!(dict[e], 5);
assert_eq!(dict[n], 6);
assert_eq!(dict[d], 7);
assert_eq!(dict[r], 8);
assert_eq!(dict[s], 9);
}
#[test]
fn sendmoremoney_carry() {
let carry = [0,1];
let (mut sys, vars) = make_send_more_money();
let (s, e, n, d) = (vars[0], vars[1], vars[2], vars[3]);
let (m, o, r, y) = (vars[4], vars[5], vars[6], vars[7]);
let c1 = sys.new_var_with_candidates(&carry);
let c2 = sys.new_var_with_candidates(&carry);
let c3 = sys.new_var_with_candidates(&carry);
sys.intersect_candidates(m, &carry); // c4 == m.
sys.equals( d + e, 10 * c1 + y);
sys.equals(c1 + n + r, 10 * c2 + e);
sys.equals(c2 + e + o, 10 * c3 + n);
sys.equals(c3 + s + m, 10 * m + o);
let dict = sys.solve_unique().expect("solution");
print_send_more_money(&dict, &vars);
verify_send_more_money(&dict, &vars);
println!("sendmoremoney_carry: {} guesses", sys.num_guesses());
}
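// Worked check of the column model above (added commentary, not part of the
// original test): the unique solution is SEND = 9567, MORE = 1085,
// MONEY = 10652, and the carries line up as
//   D + E      = 7 + 5     = 12 -> Y = 2, c1 = 1
//   c1 + N + R = 1 + 6 + 8 = 15 -> E = 5, c2 = 1
//   c2 + E + O = 1 + 5 + 0 =  6 -> N = 6, c3 = 0
//   c3 + S + M = 0 + 9 + 1 = 10 -> O = 0, with the final carry giving M = 1.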
#[test]
fn sendmoremoney_naive() {
let (mut sys, vars) = make_send_more_money();
let dict = sys.solve_unique().expect("solution");
print_send_more_money(&dict, &vars);
verify_send_more_money(&dict, &vars);
println!("sendmoremoney_naive: {} guesses", sys.num_guesses());
}
|
make_send_more_money
|
identifier_name
|
cargo.rs
|
#![allow(unstable)]
extern crate "rustc-serialize" as rustc_serialize;
extern crate cargo;
#[macro_use] extern crate log;
use std::collections::BTreeSet;
use std::os;
use std::io;
use std::io::fs::{self, PathExtensions};
use std::io::process::{Command,InheritFd,ExitStatus,ExitSignal};
use cargo::{execute_main_without_stdin, handle_error, shell};
use cargo::core::MultiShell;
use cargo::util::{CliError, CliResult, lev_distance};
#[derive(RustcDecodable)]
struct Flags {
flag_list: bool,
flag_verbose: bool,
arg_command: String,
arg_args: Vec<String>,
}
const USAGE: &'static str = "
Rust's package manager
Usage:
cargo <command> [<args>...]
cargo [options]
Options:
-h, --help Display this message
-V, --version Print version info and exit
--list List installed commands
-v, --verbose Use verbose output
Some common cargo commands are:
build Compile the current project
clean Remove the target directory
doc Build this project's and its dependencies' documentation
new Create a new cargo project
run Build and execute src/main.rs
test Run the tests
bench Run the benchmarks
update Update dependencies listed in Cargo.lock
See 'cargo help <command>' for more information on a specific command.
";
fn main() {
execute_main_without_stdin(execute, true, USAGE)
}
macro_rules! each_subcommand{ ($mac:ident) => ({
$mac!(bench);
$mac!(build);
$mac!(clean);
$mac!(config_for_key);
$mac!(config_list);
$mac!(doc);
$mac!(fetch);
$mac!(generate_lockfile);
$mac!(git_checkout);
$mac!(help);
$mac!(locate_project);
$mac!(login);
$mac!(new);
$mac!(owner);
$mac!(package);
$mac!(pkgid);
$mac!(publish);
$mac!(read_manifest);
$mac!(run);
$mac!(search);
$mac!(test);
$mac!(update);
$mac!(verify_project);
$mac!(version);
$mac!(yank);
}) }
/**
The top-level `cargo` command handles configuration and project location
because they are fundamental (and intertwined). Other commands can rely
on this top-level information.
*/
fn execute(flags: Flags, shell: &mut MultiShell) -> CliResult<Option<()>> {
debug!("executing; cmd=cargo; args={:?}", os::args());
shell.set_verbose(flags.flag_verbose);
if flags.flag_list {
println!("Installed Commands:");
|
println!(" {}", command);
};
return Ok(None)
}
let (mut args, command) = match flags.arg_command.as_slice() {
"" | "help" if flags.arg_args.len() == 0 => {
shell.set_verbose(true);
let args = &[os::args()[0].clone(), "-h".to_string()];
let r = cargo::call_main_without_stdin(execute, shell, USAGE, args,
false);
cargo::process_executed(r, shell);
return Ok(None)
}
"help" if flags.arg_args[0].as_slice() == "-h" ||
flags.arg_args[0].as_slice() == "--help" =>
(flags.arg_args, "help"),
"help" => (vec!["-h".to_string()], flags.arg_args[0].as_slice()),
s => (flags.arg_args.clone(), s),
};
args.insert(0, command.to_string());
args.insert(0, os::args()[0].clone());
macro_rules! cmd{ ($name:ident) => (
if command.as_slice() == stringify!($name).replace("_", "-").as_slice() {
mod $name;
shell.set_verbose(true);
let r = cargo::call_main_without_stdin($name::execute, shell,
$name::USAGE,
args.as_slice(),
false);
cargo::process_executed(r, shell);
return Ok(None)
}
) }
each_subcommand!(cmd);
execute_subcommand(command.as_slice(), args.as_slice(), shell);
Ok(None)
}
fn find_closest(cmd: &str) -> Option<String> {
match list_commands().iter()
// doing it this way (instead of just.min_by(|c| c.lev_distance(cmd)))
// allows us to only make suggestions that have an edit distance of
// 3 or less
.map(|c| (lev_distance(c.as_slice(), cmd), c))
.filter(|&(d, _): &(usize, &String)| d < 4)
.min_by(|&(d, _)| d) {
Some((_, c)) => {
Some(c.to_string())
},
None => None
}
}
fn execute_subcommand(cmd: &str, args: &[String], shell: &mut MultiShell) {
let command = match find_command(cmd) {
Some(command) => command,
None => {
let msg = match find_closest(cmd) {
Some(closest) => format!("No such subcommand\n\n\tDid you mean `{}`?\n", closest),
None => "No such subcommand".to_string()
};
return handle_error(CliError::new(msg, 127), shell)
}
};
let status = Command::new(command)
.args(args)
.stdin(InheritFd(0))
.stdout(InheritFd(1))
.stderr(InheritFd(2))
.status();
match status {
Ok(ExitStatus(0)) => (),
Ok(ExitStatus(i)) => {
handle_error(CliError::new("", i as u32), shell)
}
Ok(ExitSignal(i)) => {
let msg = format!("subcommand failed with signal: {}", i);
handle_error(CliError::new(msg, i as u32), shell)
}
Err(io::IoError{kind,..}) if kind == io::FileNotFound =>
handle_error(CliError::new("No such subcommand", 127), shell),
Err(err) => handle_error(
CliError::new(
format!("Subcommand failed to run: {}", err), 127),
shell)
}
}
/// List all runnable commands. find_command should always succeed
/// if given one of returned command.
fn list_commands() -> BTreeSet<String> {
let command_prefix = "cargo-";
let mut commands = BTreeSet::new();
for dir in list_command_directory().iter() {
let entries = match fs::readdir(dir) {
Ok(entries) => entries,
_ => continue
};
for entry in entries.iter() {
let filename = match entry.filename_str() {
Some(filename) => filename,
_ => continue
};
if filename.starts_with(command_prefix) &&
filename.ends_with(os::consts::EXE_SUFFIX) &&
is_executable(entry) {
let command = filename.slice(
command_prefix.len(),
filename.len() - os::consts::EXE_SUFFIX.len());
commands.insert(String::from_str(command));
}
}
}
macro_rules! add_cmd{ ($cmd:ident) => ({
commands.insert(stringify!($cmd).replace("_", "-"));
}) }
each_subcommand!(add_cmd);
commands
}
fn is_executable(path: &Path) -> bool {
match fs::stat(path) {
Ok(io::FileStat{ kind: io::FileType::RegularFile, perm,..}) =>
perm.contains(io::OTHER_EXECUTE),
_ => false
}
}
/// Get `Command` to run given command.
fn find_command(cmd: &str) -> Option<Path> {
let command_exe = format!("cargo-{}{}", cmd, os::consts::EXE_SUFFIX);
let dirs = list_command_directory();
let mut command_paths = dirs.iter().map(|dir| dir.join(command_exe.as_slice()));
command_paths.find(|path| path.exists())
}
/// List candidate locations where subcommands might be installed.
fn list_command_directory() -> Vec<Path> {
let mut dirs = vec![];
match os::self_exe_path() {
Some(path) => {
dirs.push(path.join("../lib/cargo"));
dirs.push(path);
},
None => {}
};
match std::os::getenv("PATH") {
Some(val) => {
for dir in os::split_paths(val).iter() {
dirs.push(Path::new(dir))
}
},
None => {}
};
dirs
}
|
for command in list_commands().into_iter() {
|
random_line_split
|
cargo.rs
|
#![allow(unstable)]
extern crate "rustc-serialize" as rustc_serialize;
extern crate cargo;
#[macro_use] extern crate log;
use std::collections::BTreeSet;
use std::os;
use std::io;
use std::io::fs::{self, PathExtensions};
use std::io::process::{Command,InheritFd,ExitStatus,ExitSignal};
use cargo::{execute_main_without_stdin, handle_error, shell};
use cargo::core::MultiShell;
use cargo::util::{CliError, CliResult, lev_distance};
#[derive(RustcDecodable)]
struct Flags {
flag_list: bool,
flag_verbose: bool,
arg_command: String,
arg_args: Vec<String>,
}
const USAGE: &'static str = "
Rust's package manager
Usage:
cargo <command> [<args>...]
cargo [options]
Options:
-h, --help Display this message
-V, --version Print version info and exit
--list List installed commands
-v, --verbose Use verbose output
Some common cargo commands are:
build Compile the current project
clean Remove the target directory
doc Build this project's and its dependencies' documentation
new Create a new cargo project
run Build and execute src/main.rs
test Run the tests
bench Run the benchmarks
update Update dependencies listed in Cargo.lock
See 'cargo help <command>' for more information on a specific command.
";
fn main() {
execute_main_without_stdin(execute, true, USAGE)
}
macro_rules! each_subcommand{ ($mac:ident) => ({
$mac!(bench);
$mac!(build);
$mac!(clean);
$mac!(config_for_key);
$mac!(config_list);
$mac!(doc);
$mac!(fetch);
$mac!(generate_lockfile);
$mac!(git_checkout);
$mac!(help);
$mac!(locate_project);
$mac!(login);
$mac!(new);
$mac!(owner);
$mac!(package);
$mac!(pkgid);
$mac!(publish);
$mac!(read_manifest);
$mac!(run);
$mac!(search);
$mac!(test);
$mac!(update);
$mac!(verify_project);
$mac!(version);
$mac!(yank);
}) }
/**
The top-level `cargo` command handles configuration and project location
because they are fundamental (and intertwined). Other commands can rely
on this top-level information.
*/
fn execute(flags: Flags, shell: &mut MultiShell) -> CliResult<Option<()>> {
debug!("executing; cmd=cargo; args={:?}", os::args());
shell.set_verbose(flags.flag_verbose);
if flags.flag_list {
println!("Installed Commands:");
for command in list_commands().into_iter() {
println!(" {}", command);
};
return Ok(None)
}
let (mut args, command) = match flags.arg_command.as_slice() {
"" | "help" if flags.arg_args.len() == 0 => {
shell.set_verbose(true);
let args = &[os::args()[0].clone(), "-h".to_string()];
let r = cargo::call_main_without_stdin(execute, shell, USAGE, args,
false);
cargo::process_executed(r, shell);
return Ok(None)
}
"help" if flags.arg_args[0].as_slice() == "-h" ||
flags.arg_args[0].as_slice() == "--help" =>
(flags.arg_args, "help"),
"help" => (vec!["-h".to_string()], flags.arg_args[0].as_slice()),
s => (flags.arg_args.clone(), s),
};
args.insert(0, command.to_string());
args.insert(0, os::args()[0].clone());
macro_rules! cmd{ ($name:ident) => (
if command.as_slice() == stringify!($name).replace("_", "-").as_slice() {
mod $name;
shell.set_verbose(true);
let r = cargo::call_main_without_stdin($name::execute, shell,
$name::USAGE,
args.as_slice(),
false);
cargo::process_executed(r, shell);
return Ok(None)
}
) }
each_subcommand!(cmd);
execute_subcommand(command.as_slice(), args.as_slice(), shell);
Ok(None)
}
fn find_closest(cmd: &str) -> Option<String> {
match list_commands().iter()
// doing it this way (instead of just.min_by(|c| c.lev_distance(cmd)))
// allows us to only make suggestions that have an edit distance of
// 3 or less
.map(|c| (lev_distance(c.as_slice(), cmd), c))
.filter(|&(d, _): &(usize, &String)| d < 4)
.min_by(|&(d, _)| d) {
Some((_, c)) => {
Some(c.to_string())
},
None => None
}
}
fn execute_subcommand(cmd: &str, args: &[String], shell: &mut MultiShell) {
let command = match find_command(cmd) {
Some(command) => command,
None => {
let msg = match find_closest(cmd) {
Some(closest) => format!("No such subcommand\n\n\tDid you mean `{}`?\n", closest),
None => "No such subcommand".to_string()
};
return handle_error(CliError::new(msg, 127), shell)
}
};
let status = Command::new(command)
.args(args)
.stdin(InheritFd(0))
.stdout(InheritFd(1))
.stderr(InheritFd(2))
.status();
match status {
Ok(ExitStatus(0)) => (),
Ok(ExitStatus(i)) => {
handle_error(CliError::new("", i as u32), shell)
}
Ok(ExitSignal(i)) => {
let msg = format!("subcommand failed with signal: {}", i);
handle_error(CliError::new(msg, i as u32), shell)
}
Err(io::IoError{kind,..}) if kind == io::FileNotFound =>
handle_error(CliError::new("No such subcommand", 127), shell),
Err(err) => handle_error(
CliError::new(
format!("Subcommand failed to run: {}", err), 127),
shell)
}
}
/// List all runnable commands. find_command should always succeed
/// if given one of returned command.
fn list_commands() -> BTreeSet<String> {
let command_prefix = "cargo-";
let mut commands = BTreeSet::new();
for dir in list_command_directory().iter() {
let entries = match fs::readdir(dir) {
Ok(entries) => entries,
_ => continue
};
for entry in entries.iter() {
let filename = match entry.filename_str() {
Some(filename) => filename,
_ => continue
};
if filename.starts_with(command_prefix) &&
filename.ends_with(os::consts::EXE_SUFFIX) &&
is_executable(entry) {
let command = filename.slice(
command_prefix.len(),
filename.len() - os::consts::EXE_SUFFIX.len());
commands.insert(String::from_str(command));
}
}
}
macro_rules! add_cmd{ ($cmd:ident) => ({
commands.insert(stringify!($cmd).replace("_", "-"));
}) }
each_subcommand!(add_cmd);
commands
}
fn is_executable(path: &Path) -> bool {
match fs::stat(path) {
Ok(io::FileStat{ kind: io::FileType::RegularFile, perm,..}) =>
perm.contains(io::OTHER_EXECUTE),
_ => false
}
}
/// Get `Command` to run given command.
fn find_command(cmd: &str) -> Option<Path> {
let command_exe = format!("cargo-{}{}", cmd, os::consts::EXE_SUFFIX);
let dirs = list_command_directory();
let mut command_paths = dirs.iter().map(|dir| dir.join(command_exe.as_slice()));
command_paths.find(|path| path.exists())
}
/// List candidate locations where subcommands might be installed.
fn list_command_directory() -> Vec<Path>
|
{
let mut dirs = vec![];
match os::self_exe_path() {
Some(path) => {
dirs.push(path.join("../lib/cargo"));
dirs.push(path);
},
None => {}
};
match std::os::getenv("PATH") {
Some(val) => {
for dir in os::split_paths(val).iter() {
dirs.push(Path::new(dir))
}
},
None => {}
};
dirs
}
|
identifier_body
|
|
cargo.rs
|
#![allow(unstable)]
extern crate "rustc-serialize" as rustc_serialize;
extern crate cargo;
#[macro_use] extern crate log;
use std::collections::BTreeSet;
use std::os;
use std::io;
use std::io::fs::{self, PathExtensions};
use std::io::process::{Command,InheritFd,ExitStatus,ExitSignal};
use cargo::{execute_main_without_stdin, handle_error, shell};
use cargo::core::MultiShell;
use cargo::util::{CliError, CliResult, lev_distance};
#[derive(RustcDecodable)]
struct Flags {
flag_list: bool,
flag_verbose: bool,
arg_command: String,
arg_args: Vec<String>,
}
const USAGE: &'static str = "
Rust's package manager
Usage:
cargo <command> [<args>...]
cargo [options]
Options:
-h, --help Display this message
-V, --version Print version info and exit
--list List installed commands
-v, --verbose Use verbose output
Some common cargo commands are:
build Compile the current project
clean Remove the target directory
doc Build this project's and its dependencies' documentation
new Create a new cargo project
run Build and execute src/main.rs
test Run the tests
bench Run the benchmarks
update Update dependencies listed in Cargo.lock
See 'cargo help <command>' for more information on a specific command.
";
fn main() {
execute_main_without_stdin(execute, true, USAGE)
}
macro_rules! each_subcommand{ ($mac:ident) => ({
$mac!(bench);
$mac!(build);
$mac!(clean);
$mac!(config_for_key);
$mac!(config_list);
$mac!(doc);
$mac!(fetch);
$mac!(generate_lockfile);
$mac!(git_checkout);
$mac!(help);
$mac!(locate_project);
$mac!(login);
$mac!(new);
$mac!(owner);
$mac!(package);
$mac!(pkgid);
$mac!(publish);
$mac!(read_manifest);
$mac!(run);
$mac!(search);
$mac!(test);
$mac!(update);
$mac!(verify_project);
$mac!(version);
$mac!(yank);
}) }
/**
The top-level `cargo` command handles configuration and project location
because they are fundamental (and intertwined). Other commands can rely
on this top-level information.
*/
fn execute(flags: Flags, shell: &mut MultiShell) -> CliResult<Option<()>> {
debug!("executing; cmd=cargo; args={:?}", os::args());
shell.set_verbose(flags.flag_verbose);
if flags.flag_list {
println!("Installed Commands:");
for command in list_commands().into_iter() {
println!(" {}", command);
};
return Ok(None)
}
let (mut args, command) = match flags.arg_command.as_slice() {
"" | "help" if flags.arg_args.len() == 0 => {
shell.set_verbose(true);
let args = &[os::args()[0].clone(), "-h".to_string()];
let r = cargo::call_main_without_stdin(execute, shell, USAGE, args,
false);
cargo::process_executed(r, shell);
return Ok(None)
}
"help" if flags.arg_args[0].as_slice() == "-h" ||
flags.arg_args[0].as_slice() == "--help" =>
(flags.arg_args, "help"),
"help" => (vec!["-h".to_string()], flags.arg_args[0].as_slice()),
s => (flags.arg_args.clone(), s),
};
args.insert(0, command.to_string());
args.insert(0, os::args()[0].clone());
macro_rules! cmd{ ($name:ident) => (
if command.as_slice() == stringify!($name).replace("_", "-").as_slice() {
mod $name;
shell.set_verbose(true);
let r = cargo::call_main_without_stdin($name::execute, shell,
$name::USAGE,
args.as_slice(),
false);
cargo::process_executed(r, shell);
return Ok(None)
}
) }
each_subcommand!(cmd);
execute_subcommand(command.as_slice(), args.as_slice(), shell);
Ok(None)
}
fn find_closest(cmd: &str) -> Option<String> {
match list_commands().iter()
// doing it this way (instead of just.min_by(|c| c.lev_distance(cmd)))
// allows us to only make suggestions that have an edit distance of
// 3 or less
.map(|c| (lev_distance(c.as_slice(), cmd), c))
.filter(|&(d, _): &(usize, &String)| d < 4)
.min_by(|&(d, _)| d) {
Some((_, c)) => {
Some(c.to_string())
},
None => None
}
}
fn
|
(cmd: &str, args: &[String], shell: &mut MultiShell) {
let command = match find_command(cmd) {
Some(command) => command,
None => {
let msg = match find_closest(cmd) {
Some(closest) => format!("No such subcommand\n\n\tDid you mean `{}`?\n", closest),
None => "No such subcommand".to_string()
};
return handle_error(CliError::new(msg, 127), shell)
}
};
let status = Command::new(command)
.args(args)
.stdin(InheritFd(0))
.stdout(InheritFd(1))
.stderr(InheritFd(2))
.status();
match status {
Ok(ExitStatus(0)) => (),
Ok(ExitStatus(i)) => {
handle_error(CliError::new("", i as u32), shell)
}
Ok(ExitSignal(i)) => {
let msg = format!("subcommand failed with signal: {}", i);
handle_error(CliError::new(msg, i as u32), shell)
}
Err(io::IoError{kind,..}) if kind == io::FileNotFound =>
handle_error(CliError::new("No such subcommand", 127), shell),
Err(err) => handle_error(
CliError::new(
format!("Subcommand failed to run: {}", err), 127),
shell)
}
}
/// List all runnable commands. find_command should always succeed
/// if given one of returned command.
fn list_commands() -> BTreeSet<String> {
let command_prefix = "cargo-";
let mut commands = BTreeSet::new();
for dir in list_command_directory().iter() {
let entries = match fs::readdir(dir) {
Ok(entries) => entries,
_ => continue
};
for entry in entries.iter() {
let filename = match entry.filename_str() {
Some(filename) => filename,
_ => continue
};
if filename.starts_with(command_prefix) &&
filename.ends_with(os::consts::EXE_SUFFIX) &&
is_executable(entry) {
let command = filename.slice(
command_prefix.len(),
filename.len() - os::consts::EXE_SUFFIX.len());
commands.insert(String::from_str(command));
}
}
}
macro_rules! add_cmd{ ($cmd:ident) => ({
commands.insert(stringify!($cmd).replace("_", "-"));
}) }
each_subcommand!(add_cmd);
commands
}
fn is_executable(path: &Path) -> bool {
match fs::stat(path) {
Ok(io::FileStat{ kind: io::FileType::RegularFile, perm,..}) =>
perm.contains(io::OTHER_EXECUTE),
_ => false
}
}
/// Get `Command` to run given command.
fn find_command(cmd: &str) -> Option<Path> {
let command_exe = format!("cargo-{}{}", cmd, os::consts::EXE_SUFFIX);
let dirs = list_command_directory();
let mut command_paths = dirs.iter().map(|dir| dir.join(command_exe.as_slice()));
command_paths.find(|path| path.exists())
}
/// List candidate locations where subcommands might be installed.
fn list_command_directory() -> Vec<Path> {
let mut dirs = vec![];
match os::self_exe_path() {
Some(path) => {
dirs.push(path.join("../lib/cargo"));
dirs.push(path);
},
None => {}
};
match std::os::getenv("PATH") {
Some(val) => {
for dir in os::split_paths(val).iter() {
dirs.push(Path::new(dir))
}
},
None => {}
};
dirs
}
|
execute_subcommand
|
identifier_name
|
setup.rs
|
// This file is part of rust-web/twig
//
// For the copyright and license information, please view the LICENSE
// file that was distributed with this source code.
//! Stores the Twig configuration.
use std::path::Path;
use std::rc::Rc;
use extension;
use extension::api::Extension;
use engine::{Engine, options, Options, extension_registry, ExtensionRegistry};
use engine::error::TwigError;
use api::error::Traced;
#[allow(dead_code)]
pub const VERSION: &'static str = "1.18.1";
#[derive(Debug)]
pub struct Setup {
opt: Options,
ext: ExtensionRegistry,
}
impl Default for Setup {
fn default() -> Setup {
let mut ext = ExtensionRegistry::default();
ext.push(extension::Core::new()).unwrap(); // core extension
Setup {
opt: Options::default(),
ext: ext,
}
}
}
/// Builds an instance of the Twig Engine, according to supplied options and engine extensions.
///
/// The following extensions will be registered by default:
/// * core
/// * escaper
/// * optimizer
///
/// # Examples
///
/// ```
/// use twig::{Setup, Engine};
/// use twig::extension::Debug;
///
/// let mut setup = Setup::default()
/// .set_strict_variables(true)
/// .add_extension(Debug::new()).unwrap();
/// let engine = Engine::new(setup).unwrap();
/// ```
#[allow(dead_code)]
impl Setup {
/// Create engine from setup.
///
/// # Examples
///
/// ```
/// use twig::Setup;
///
/// let twig = Setup::default().engine().unwrap();
/// ```
pub fn engine(mut self) -> Result<Engine, Traced<TwigError>> {
let mut c = Engine::default();
let o = self.opt;
// add default extensions
try_traced!(self.ext.push(extension::Escaper::new(o.autoescape)));
try_traced!(self.ext.push(extension::Optimizer::new(o.optimizations)));
// init extensions
try_traced!(self.ext.init(&mut c));
c.ext = Some(Rc::new(self.ext));
// TODO: register staging extension (!)
// // init staging extension
// let staging = ext::Staging::new();
// try_traced!(c.init_extension(&*staging));
// c.ext_staging = Some(staging);
return Ok(c);
}
/// Registers an extension
pub fn add_extension(mut self, extension: Box<Extension>) -> Result<Self, Traced<TwigError>> {
try_traced!(self.ext.push(extension));
Ok(self)
}
/// When set to true, it automatically set "auto_reload" to true as well
/// (default to false)
pub fn set_debug(mut self, debug: bool) -> Self {
self.opt.debug = debug;
self
}
/// The charset used by the templates (default to UTF-8)
pub fn set_charset(mut self, set_charset: options::Charset) -> Self {
self.opt.charset = set_charset;
self
}
/// Whether to ignore invalid variables in templates
/// (default to false).
pub fn set_strict_variables(mut self, strict_variables: bool) -> Self {
self.opt.strict_variables = strict_variables;
self
}
/// Whether to enable auto-escaping (default to html):
/// * false: disable auto-escaping
/// * true: equivalent to html
/// * html, js: set the autoescaping to one of the supported strategies
/// * filename: set the autoescaping strategy based on the template filename extension
/// * PHP callback: a PHP callback that returns an escaping strategy based on the template "filename"
pub fn set_autoescape(mut self, autoescape: options::Autoescape) -> Self
|
/// An absolute path where to store the compiled templates (optional)
pub fn set_cache(mut self, cache: Option<&Path>) -> Self {
self.opt.cache = cache.map(|reference| reference.to_owned());
self
}
/// Whether to reload the template if the original source changed (optional).
/// If you don't provide the auto_reload option, it will be
/// determined automatically based on the debug value.
pub fn set_auto_reload(mut self, auto_reload: Option<bool>) -> Self {
self.opt.auto_reload = auto_reload;
self
}
/// A flag that indicates whether optimizations are applied
pub fn set_optimizations(mut self, optimizations: options::Optimizations) -> Self {
self.opt.optimizations = optimizations;
self
}
/// Get all options
pub fn options(&self) -> &Options {
&self.opt
}
/// Get all registered extensions
pub fn extensions(&self) -> extension_registry::Iter {
self.ext.iter()
}
}
#[allow(dead_code)]
#[cfg(test)]
mod test {
// use super::*;
// #[test]
// pub fn get_unary_operators() {
// let mut e = Environment;
// e.get_unary_operators();
// }
// #[test]
// pub fn get_binary_operators() {
// let mut e = Environment;
// e.get_binary_operators();
// }
}
|
{
self.opt.autoescape = autoescape;
self
}
|
identifier_body
|
setup.rs
|
// This file is part of rust-web/twig
//
// For the copyright and license information, please view the LICENSE
// file that was distributed with this source code.
//! Stores the Twig configuration.
use std::path::Path;
use std::rc::Rc;
use extension;
use extension::api::Extension;
use engine::{Engine, options, Options, extension_registry, ExtensionRegistry};
use engine::error::TwigError;
use api::error::Traced;
#[allow(dead_code)]
pub const VERSION: &'static str = "1.18.1";
#[derive(Debug)]
pub struct Setup {
opt: Options,
ext: ExtensionRegistry,
}
impl Default for Setup {
fn default() -> Setup {
let mut ext = ExtensionRegistry::default();
ext.push(extension::Core::new()).unwrap(); // core extension
Setup {
opt: Options::default(),
ext: ext,
}
}
}
/// Builds an instance of the Twig Engine, according to supplied options and engine extensions.
///
/// The following extensions will be registered by default:
/// * core
/// * escaper
/// * optimizer
///
/// # Examples
///
/// ```
/// use twig::{Setup, Engine};
/// use twig::extension::Debug;
///
/// let mut setup = Setup::default()
/// .set_strict_variables(true)
/// .add_extension(Debug::new()).unwrap();
/// let engine = Engine::new(setup).unwrap();
/// ```
#[allow(dead_code)]
impl Setup {
/// Create engine from setup.
///
/// # Examples
///
/// ```
/// use twig::Setup;
///
/// let twig = Setup::default().engine().unwrap();
/// ```
pub fn engine(mut self) -> Result<Engine, Traced<TwigError>> {
let mut c = Engine::default();
let o = self.opt;
// add default extensions
try_traced!(self.ext.push(extension::Escaper::new(o.autoescape)));
try_traced!(self.ext.push(extension::Optimizer::new(o.optimizations)));
// init extensions
try_traced!(self.ext.init(&mut c));
c.ext = Some(Rc::new(self.ext));
// TODO: register staging extension (!)
// // init staging extension
// let staging = ext::Staging::new();
// try_traced!(c.init_extension(&*staging));
// c.ext_staging = Some(staging);
return Ok(c);
}
/// Registers an extension
pub fn add_extension(mut self, extension: Box<Extension>) -> Result<Self, Traced<TwigError>> {
try_traced!(self.ext.push(extension));
Ok(self)
}
/// When set to true, it automatically set "auto_reload" to true as well
/// (default to false)
pub fn set_debug(mut self, debug: bool) -> Self {
self.opt.debug = debug;
self
}
/// The charset used by the templates (default to UTF-8)
pub fn set_charset(mut self, set_charset: options::Charset) -> Self {
self.opt.charset = set_charset;
self
}
/// Whether to ignore invalid variables in templates
/// (default to false).
pub fn set_strict_variables(mut self, strict_variables: bool) -> Self {
self.opt.strict_variables = strict_variables;
self
}
/// Whether to enable auto-escaping (default to html):
/// * false: disable auto-escaping
/// * true: equivalent to html
/// * html, js: set the autoescaping to one of the supported strategies
/// * filename: set the autoescaping strategy based on the template filename extension
/// * PHP callback: a PHP callback that returns an escaping strategy based on the template "filename"
pub fn set_autoescape(mut self, autoescape: options::Autoescape) -> Self {
self.opt.autoescape = autoescape;
self
}
/// An absolute path where to store the compiled templates (optional)
pub fn set_cache(mut self, cache: Option<&Path>) -> Self {
self.opt.cache = cache.map(|reference| reference.to_owned());
self
}
/// Whether to reload the template if the original source changed (optional).
/// If you don't provide the auto_reload option, it will be
/// determined automatically based on the debug value.
pub fn
|
(mut self, auto_reload: Option<bool>) -> Self {
self.opt.auto_reload = auto_reload;
self
}
/// A flag that indicates whether optimizations are applied
pub fn set_optimizations(mut self, optimizations: options::Optimizations) -> Self {
self.opt.optimizations = optimizations;
self
}
/// Get all options
pub fn options(&self) -> &Options {
&self.opt
}
/// Get all registered extensions
pub fn extensions(&self) -> extension_registry::Iter {
self.ext.iter()
}
}
#[allow(dead_code)]
#[cfg(test)]
mod test {
// use super::*;
// #[test]
// pub fn get_unary_operators() {
// let mut e = Environment;
// e.get_unary_operators();
// }
// #[test]
// pub fn get_binary_operators() {
// let mut e = Environment;
// e.get_binary_operators();
// }
}
|
set_auto_reload
|
identifier_name
|
setup.rs
|
// This file is part of rust-web/twig
//
// For the copyright and license information, please view the LICENSE
// file that was distributed with this source code.
//! Stores the Twig configuration.
use std::path::Path;
use std::rc::Rc;
use extension;
use extension::api::Extension;
use engine::{Engine, options, Options, extension_registry, ExtensionRegistry};
use engine::error::TwigError;
use api::error::Traced;
#[allow(dead_code)]
pub const VERSION: &'static str = "1.18.1";
#[derive(Debug)]
pub struct Setup {
opt: Options,
ext: ExtensionRegistry,
}
impl Default for Setup {
fn default() -> Setup {
let mut ext = ExtensionRegistry::default();
ext.push(extension::Core::new()).unwrap(); // core extension
Setup {
opt: Options::default(),
ext: ext,
}
}
}
/// Builds an instance of the Twig Engine, according to supplied options and engine extensions.
///
/// The following extensions will be registered by default:
/// * core
/// * escaper
/// * optimizer
///
/// # Examples
///
/// ```
/// use twig::{Setup, Engine};
/// use twig::extension::Debug;
///
/// let mut setup = Setup::default()
/// .set_strict_variables(true)
/// .add_extension(Debug::new()).unwrap();
/// let engine = Engine::new(setup).unwrap();
/// ```
#[allow(dead_code)]
impl Setup {
/// Create engine from setup.
///
/// # Examples
///
/// ```
/// use twig::Setup;
///
/// let twig = Setup::default().engine().unwrap();
/// ```
pub fn engine(mut self) -> Result<Engine, Traced<TwigError>> {
let mut c = Engine::default();
let o = self.opt;
// add default extensions
try_traced!(self.ext.push(extension::Escaper::new(o.autoescape)));
try_traced!(self.ext.push(extension::Optimizer::new(o.optimizations)));
// init extensions
try_traced!(self.ext.init(&mut c));
c.ext = Some(Rc::new(self.ext));
// TODO: register staging extension (!)
// // init staging extension
// let staging = ext::Staging::new();
// try_traced!(c.init_extension(&*staging));
// c.ext_staging = Some(staging);
return Ok(c);
}
/// Registers an extension
pub fn add_extension(mut self, extension: Box<Extension>) -> Result<Self, Traced<TwigError>> {
try_traced!(self.ext.push(extension));
Ok(self)
}
/// When set to true, it automatically set "auto_reload" to true as well
/// (default to false)
pub fn set_debug(mut self, debug: bool) -> Self {
self.opt.debug = debug;
self
}
/// The charset used by the templates (default to UTF-8)
pub fn set_charset(mut self, set_charset: options::Charset) -> Self {
self.opt.charset = set_charset;
self
}
/// Whether to ignore invalid variables in templates
/// (default to false).
pub fn set_strict_variables(mut self, strict_variables: bool) -> Self {
self.opt.strict_variables = strict_variables;
self
}
/// Whether to enable auto-escaping (default to html):
/// * false: disable auto-escaping
/// * true: equivalent to html
/// * html, js: set the autoescaping to one of the supported strategies
/// * filename: set the autoescaping strategy based on the template filename extension
/// * PHP callback: a PHP callback that returns an escaping strategy based on the template "filename"
pub fn set_autoescape(mut self, autoescape: options::Autoescape) -> Self {
self.opt.autoescape = autoescape;
self
}
/// An absolute path where to store the compiled templates (optional)
pub fn set_cache(mut self, cache: Option<&Path>) -> Self {
self.opt.cache = cache.map(|reference| reference.to_owned());
self
}
/// Whether to reload the template if the original source changed (optional).
/// If you don't provide the auto_reload option, it will be
/// determined automatically based on the debug value.
pub fn set_auto_reload(mut self, auto_reload: Option<bool>) -> Self {
self.opt.auto_reload = auto_reload;
self
}
/// A flag that indicates whether optimizations are applied
pub fn set_optimizations(mut self, optimizations: options::Optimizations) -> Self {
self.opt.optimizations = optimizations;
self
}
/// Get all options
pub fn options(&self) -> &Options {
&self.opt
}
/// Get all registered extensions
pub fn extensions(&self) -> extension_registry::Iter {
self.ext.iter()
}
}
#[allow(dead_code)]
#[cfg(test)]
mod test {
// use super::*;
// #[test]
// pub fn get_unary_operators() {
// let mut e = Environment;
// e.get_unary_operators();
// }
// #[test]
// pub fn get_binary_operators() {
// let mut e = Environment;
// e.get_binary_operators();
|
// }
}
|
random_line_split
|
|
bbox.rs
|
//! Bounding boxes that know their coordinate space.
use crate::coord_units::CoordUnits;
use crate::rect::Rect;
use crate::transform::Transform;
#[derive(Debug, Default, Copy, Clone)]
pub struct BoundingBox {
transform: Transform,
pub rect: Option<Rect>, // without stroke
pub ink_rect: Option<Rect>, // with stroke
}
impl BoundingBox {
pub fn new() -> BoundingBox {
Default::default()
}
pub fn with_transform(self, transform: Transform) -> BoundingBox {
BoundingBox { transform,..self }
}
pub fn with_rect(self, rect: Rect) -> BoundingBox {
BoundingBox {
rect: Some(rect),
..self
}
}
pub fn with_ink_rect(self, ink_rect: Rect) -> BoundingBox {
BoundingBox {
ink_rect: Some(ink_rect),
..self
}
}
pub fn clear(mut self) {
self.rect = None;
self.ink_rect = None;
}
fn combine(&mut self, src: &BoundingBox, clip: bool) {
if src.rect.is_none() && src.ink_rect.is_none() {
return;
}
// this will panic!() if it's not invertible... should we check on our own?
let transform = self
.transform
.invert()
.unwrap()
.pre_transform(&src.transform);
self.rect = combine_rects(self.rect, src.rect, &transform, clip);
self.ink_rect = combine_rects(self.ink_rect, src.ink_rect, &transform, clip);
}
pub fn insert(&mut self, src: &BoundingBox) {
self.combine(src, false);
}
pub fn clip(&mut self, src: &BoundingBox) {
self.combine(src, true);
}
/// Creates a transform to map to the `self.rect`.
///
/// This depends on a `CoordUnits` parameter. When this is
/// `CoordUnits::ObjectBoundingBox`, the bounding box must not be
/// empty, since the calling code would then not have a usable
/// size to work with. In that case, if the bbox is empty, this
/// function returns `Err(())`.
///
/// Usually calling code can simply ignore the action it was about
/// to take if this function returns an error.
pub fn rect_to_transform(&self, units: CoordUnits) -> Result<Transform, ()> {
match units {
CoordUnits::UserSpaceOnUse => Ok(Transform::identity()),
CoordUnits::ObjectBoundingBox => {
if self.rect.as_ref().map_or(true, |r| r.is_empty()) {
Err(())
} else {
let r = self.rect.as_ref().unwrap();
let t = Transform::new_unchecked(r.width(), 0.0, 0.0, r.height(), r.x0, r.y0);
if t.is_invertible() {
Ok(t)
} else {
Err(())
}
}
}
}
}
|
r1: Option<Rect>,
r2: Option<Rect>,
transform: &Transform,
clip: bool,
) -> Option<Rect> {
match (r1, r2, clip) {
(r1, None, _) => r1,
(None, Some(r2), _) => Some(transform.transform_rect(&r2)),
(Some(r1), Some(r2), true) => transform
.transform_rect(&r2)
.intersection(&r1)
.or_else(|| Some(Rect::default())),
(Some(r1), Some(r2), false) => Some(transform.transform_rect(&r2).union(&r1)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn combine() {
let r1 = Rect::new(1.0, 2.0, 3.0, 4.0);
let r2 = Rect::new(1.5, 2.5, 3.5, 4.5);
let r3 = Rect::new(10.0, 11.0, 12.0, 13.0);
let t = Transform::new_unchecked(1.0, 0.0, 0.0, 1.0, 0.5, 0.5);
let res = combine_rects(None, None, &t, true);
assert_eq!(res, None);
let res = combine_rects(None, None, &t, false);
assert_eq!(res, None);
let res = combine_rects(Some(r1), None, &t, true);
assert_eq!(res, Some(r1));
let res = combine_rects(Some(r1), None, &t, false);
assert_eq!(res, Some(r1));
let res = combine_rects(None, Some(r2), &t, true);
assert_eq!(res, Some(Rect::new(2.0, 3.0, 4.0, 5.0)));
let res = combine_rects(None, Some(r2), &t, false);
assert_eq!(res, Some(Rect::new(2.0, 3.0, 4.0, 5.0)));
let res = combine_rects(Some(r1), Some(r2), &t, true);
assert_eq!(res, Some(Rect::new(2.0, 3.0, 3.0, 4.0)));
let res = combine_rects(Some(r1), Some(r3), &t, true);
assert_eq!(res, Some(Rect::default()));
let res = combine_rects(Some(r1), Some(r2), &t, false);
assert_eq!(res, Some(Rect::new(1.0, 2.0, 4.0, 5.0)));
}
}
|
}
fn combine_rects(
|
random_line_split
|
bbox.rs
|
//! Bounding boxes that know their coordinate space.
use crate::coord_units::CoordUnits;
use crate::rect::Rect;
use crate::transform::Transform;
#[derive(Debug, Default, Copy, Clone)]
pub struct BoundingBox {
transform: Transform,
pub rect: Option<Rect>, // without stroke
pub ink_rect: Option<Rect>, // with stroke
}
impl BoundingBox {
pub fn new() -> BoundingBox {
Default::default()
}
pub fn with_transform(self, transform: Transform) -> BoundingBox {
BoundingBox { transform,..self }
}
pub fn with_rect(self, rect: Rect) -> BoundingBox {
BoundingBox {
rect: Some(rect),
..self
}
}
pub fn with_ink_rect(self, ink_rect: Rect) -> BoundingBox {
BoundingBox {
ink_rect: Some(ink_rect),
..self
}
}
pub fn
|
(mut self) {
self.rect = None;
self.ink_rect = None;
}
fn combine(&mut self, src: &BoundingBox, clip: bool) {
if src.rect.is_none() && src.ink_rect.is_none() {
return;
}
// this will panic!() if it's not invertible... should we check on our own?
let transform = self
.transform
.invert()
.unwrap()
.pre_transform(&src.transform);
self.rect = combine_rects(self.rect, src.rect, &transform, clip);
self.ink_rect = combine_rects(self.ink_rect, src.ink_rect, &transform, clip);
}
pub fn insert(&mut self, src: &BoundingBox) {
self.combine(src, false);
}
pub fn clip(&mut self, src: &BoundingBox) {
self.combine(src, true);
}
/// Creates a transform to map to the `self.rect`.
///
/// This depends on a `CoordUnits` parameter. When this is
/// `CoordUnits::ObjectBoundingBox`, the bounding box must not be
/// empty, since the calling code would then not have a usable
/// size to work with. In that case, if the bbox is empty, this
/// function returns `Err(())`.
///
/// Usually calling code can simply ignore the action it was about
/// to take if this function returns an error.
pub fn rect_to_transform(&self, units: CoordUnits) -> Result<Transform, ()> {
match units {
CoordUnits::UserSpaceOnUse => Ok(Transform::identity()),
CoordUnits::ObjectBoundingBox => {
if self.rect.as_ref().map_or(true, |r| r.is_empty()) {
Err(())
} else {
let r = self.rect.as_ref().unwrap();
let t = Transform::new_unchecked(r.width(), 0.0, 0.0, r.height(), r.x0, r.y0);
if t.is_invertible() {
Ok(t)
} else {
Err(())
}
}
}
}
}
}
fn combine_rects(
r1: Option<Rect>,
r2: Option<Rect>,
transform: &Transform,
clip: bool,
) -> Option<Rect> {
match (r1, r2, clip) {
(r1, None, _) => r1,
(None, Some(r2), _) => Some(transform.transform_rect(&r2)),
(Some(r1), Some(r2), true) => transform
.transform_rect(&r2)
.intersection(&r1)
.or_else(|| Some(Rect::default())),
(Some(r1), Some(r2), false) => Some(transform.transform_rect(&r2).union(&r1)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn combine() {
let r1 = Rect::new(1.0, 2.0, 3.0, 4.0);
let r2 = Rect::new(1.5, 2.5, 3.5, 4.5);
let r3 = Rect::new(10.0, 11.0, 12.0, 13.0);
let t = Transform::new_unchecked(1.0, 0.0, 0.0, 1.0, 0.5, 0.5);
let res = combine_rects(None, None, &t, true);
assert_eq!(res, None);
let res = combine_rects(None, None, &t, false);
assert_eq!(res, None);
let res = combine_rects(Some(r1), None, &t, true);
assert_eq!(res, Some(r1));
let res = combine_rects(Some(r1), None, &t, false);
assert_eq!(res, Some(r1));
let res = combine_rects(None, Some(r2), &t, true);
assert_eq!(res, Some(Rect::new(2.0, 3.0, 4.0, 5.0)));
let res = combine_rects(None, Some(r2), &t, false);
assert_eq!(res, Some(Rect::new(2.0, 3.0, 4.0, 5.0)));
let res = combine_rects(Some(r1), Some(r2), &t, true);
assert_eq!(res, Some(Rect::new(2.0, 3.0, 3.0, 4.0)));
let res = combine_rects(Some(r1), Some(r3), &t, true);
assert_eq!(res, Some(Rect::default()));
let res = combine_rects(Some(r1), Some(r2), &t, false);
assert_eq!(res, Some(Rect::new(1.0, 2.0, 4.0, 5.0)));
}
}
|
clear
|
identifier_name
|
bbox.rs
|
//! Bounding boxes that know their coordinate space.
use crate::coord_units::CoordUnits;
use crate::rect::Rect;
use crate::transform::Transform;
#[derive(Debug, Default, Copy, Clone)]
pub struct BoundingBox {
transform: Transform,
pub rect: Option<Rect>, // without stroke
pub ink_rect: Option<Rect>, // with stroke
}
impl BoundingBox {
pub fn new() -> BoundingBox {
Default::default()
}
pub fn with_transform(self, transform: Transform) -> BoundingBox {
BoundingBox { transform,..self }
}
pub fn with_rect(self, rect: Rect) -> BoundingBox
|
pub fn with_ink_rect(self, ink_rect: Rect) -> BoundingBox {
BoundingBox {
ink_rect: Some(ink_rect),
..self
}
}
pub fn clear(mut self) {
self.rect = None;
self.ink_rect = None;
}
fn combine(&mut self, src: &BoundingBox, clip: bool) {
if src.rect.is_none() && src.ink_rect.is_none() {
return;
}
// this will panic!() if it's not invertible... should we check on our own?
let transform = self
.transform
.invert()
.unwrap()
.pre_transform(&src.transform);
self.rect = combine_rects(self.rect, src.rect, &transform, clip);
self.ink_rect = combine_rects(self.ink_rect, src.ink_rect, &transform, clip);
}
pub fn insert(&mut self, src: &BoundingBox) {
self.combine(src, false);
}
pub fn clip(&mut self, src: &BoundingBox) {
self.combine(src, true);
}
/// Creates a transform to map to the `self.rect`.
///
/// This depends on a `CoordUnits` parameter. When this is
/// `CoordUnits::ObjectBoundingBox`, the bounding box must not be
/// empty, since the calling code would then not have a usable
/// size to work with. In that case, if the bbox is empty, this
/// function returns `Err(())`.
///
/// Usually calling code can simply ignore the action it was about
/// to take if this function returns an error.
pub fn rect_to_transform(&self, units: CoordUnits) -> Result<Transform, ()> {
match units {
CoordUnits::UserSpaceOnUse => Ok(Transform::identity()),
CoordUnits::ObjectBoundingBox => {
if self.rect.as_ref().map_or(true, |r| r.is_empty()) {
Err(())
} else {
let r = self.rect.as_ref().unwrap();
let t = Transform::new_unchecked(r.width(), 0.0, 0.0, r.height(), r.x0, r.y0);
if t.is_invertible() {
Ok(t)
} else {
Err(())
}
}
}
}
}
}
fn combine_rects(
r1: Option<Rect>,
r2: Option<Rect>,
transform: &Transform,
clip: bool,
) -> Option<Rect> {
match (r1, r2, clip) {
(r1, None, _) => r1,
(None, Some(r2), _) => Some(transform.transform_rect(&r2)),
(Some(r1), Some(r2), true) => transform
.transform_rect(&r2)
.intersection(&r1)
.or_else(|| Some(Rect::default())),
(Some(r1), Some(r2), false) => Some(transform.transform_rect(&r2).union(&r1)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn combine() {
let r1 = Rect::new(1.0, 2.0, 3.0, 4.0);
let r2 = Rect::new(1.5, 2.5, 3.5, 4.5);
let r3 = Rect::new(10.0, 11.0, 12.0, 13.0);
let t = Transform::new_unchecked(1.0, 0.0, 0.0, 1.0, 0.5, 0.5);
let res = combine_rects(None, None, &t, true);
assert_eq!(res, None);
let res = combine_rects(None, None, &t, false);
assert_eq!(res, None);
let res = combine_rects(Some(r1), None, &t, true);
assert_eq!(res, Some(r1));
let res = combine_rects(Some(r1), None, &t, false);
assert_eq!(res, Some(r1));
let res = combine_rects(None, Some(r2), &t, true);
assert_eq!(res, Some(Rect::new(2.0, 3.0, 4.0, 5.0)));
let res = combine_rects(None, Some(r2), &t, false);
assert_eq!(res, Some(Rect::new(2.0, 3.0, 4.0, 5.0)));
let res = combine_rects(Some(r1), Some(r2), &t, true);
assert_eq!(res, Some(Rect::new(2.0, 3.0, 3.0, 4.0)));
let res = combine_rects(Some(r1), Some(r3), &t, true);
assert_eq!(res, Some(Rect::default()));
let res = combine_rects(Some(r1), Some(r2), &t, false);
assert_eq!(res, Some(Rect::new(1.0, 2.0, 4.0, 5.0)));
}
}
|
{
BoundingBox {
rect: Some(rect),
..self
}
}
|
identifier_body
|
links.rs
|
use std::collections::HashMap;
use core::PackageSet;
use util::{CargoResult, human};
// Validate that there are no duplicated native libraries among packages and
// that all packages with `links` also have a build script.
pub fn
|
(deps: &PackageSet) -> CargoResult<()> {
let mut map = HashMap::new();
for dep in deps.iter() {
let lib = match dep.manifest().links() {
Some(lib) => lib,
None => continue,
};
match map.get(&lib) {
Some(previous) => {
return Err(human(format!("native library `{}` is being linked \
to by more than one package, and \
can only be linked to by one \
package\n\n {}\n {}",
lib, previous, dep.package_id())))
}
None => {}
}
if !dep.manifest().targets().iter().any(|t| t.is_custom_build()) {
return Err(human(format!("package `{}` specifies that it links to \
`{}` but does not have a custom build \
script", dep.package_id(), lib)))
}
map.insert(lib, dep.package_id());
}
Ok(())
}
|
validate
|
identifier_name
|
links.rs
|
use std::collections::HashMap;
use core::PackageSet;
use util::{CargoResult, human};
// Validate that there are no duplicated native libraries among packages and
// that all packages with `links` also have a build script.
pub fn validate(deps: &PackageSet) -> CargoResult<()> {
let mut map = HashMap::new();
for dep in deps.iter() {
let lib = match dep.manifest().links() {
Some(lib) => lib,
None => continue,
};
match map.get(&lib) {
Some(previous) => {
return Err(human(format!("native library `{}` is being linked \
to by more than one package, and \
can only be linked to by one \
package\n\n {}\n {}",
lib, previous, dep.package_id())))
}
None => {}
}
if !dep.manifest().targets().iter().any(|t| t.is_custom_build()) {
return Err(human(format!("package `{}` specifies that it links to \
`{}` but does not have a custom build \
script", dep.package_id(), lib)))
}
map.insert(lib, dep.package_id());
}
Ok(())
|
}
|
random_line_split
|
|
links.rs
|
use std::collections::HashMap;
use core::PackageSet;
use util::{CargoResult, human};
// Validate that there are no duplicated native libraries among packages and
// that all packages with `links` also have a build script.
pub fn validate(deps: &PackageSet) -> CargoResult<()>
|
`{}` but does not have a custom build \
script", dep.package_id(), lib)))
}
map.insert(lib, dep.package_id());
}
Ok(())
}
|
{
let mut map = HashMap::new();
for dep in deps.iter() {
let lib = match dep.manifest().links() {
Some(lib) => lib,
None => continue,
};
match map.get(&lib) {
Some(previous) => {
return Err(human(format!("native library `{}` is being linked \
to by more than one package, and \
can only be linked to by one \
package\n\n {}\n {}",
lib, previous, dep.package_id())))
}
None => {}
}
if !dep.manifest().targets().iter().any(|t| t.is_custom_build()) {
return Err(human(format!("package `{}` specifies that it links to \
|
identifier_body
|
writebatch.rs
|
use utils::{tmpdir};
use leveldb::database::{Database};
use leveldb::options::{Options,ReadOptions,WriteOptions};
use leveldb::database::kv::{KV};
use leveldb::database::batch::{Batch,Writebatch,WritebatchIterator};
#[test]
fn test_writebatch() {
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("writebatch");
let database = &mut Database::open(tmp.path(), opts).unwrap();
let batch = &mut Writebatch::new();
batch.put(1, &[1]);
batch.put(2, &[2]);
batch.delete(1);
let wopts = WriteOptions::new();
let ack = database.write(wopts, batch);
assert!(ack.is_ok());
let read_opts = ReadOptions::new();
let res = database.get(read_opts, 2);
match res {
Ok(data) => {
assert!(data.is_some());
let data = data.unwrap();
assert_eq!(data, vec!(2));
},
Err(_) => { panic!("failed reading data") }
}
let read_opts2 = ReadOptions::new();
let res2 = database.get(read_opts2, 1);
match res2 {
Ok(data) => { assert!(data.is_none()) },
Err(_) => { panic!("failed reading data") }
}
}
struct
|
{
put: i32,
deleted: i32,
}
impl WritebatchIterator for Iter {
type K = i32;
fn put(&mut self,
_key: i32,
_value: &[u8]) {
self.put = self.put + 1;
}
fn deleted(&mut self,
_key: i32) {
self.deleted = self.deleted + 1;
}
}
#[test]
fn test_writebatchiter() {
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("writebatch");
let database = &mut Database::open(tmp.path(), opts).unwrap();
let batch = &mut Writebatch::new();
batch.put(1, &[1]);
batch.put(2, &[2]);
batch.delete(1);
let wopts = WriteOptions::new();
let ack = database.write(wopts, batch);
assert!(ack.is_ok());
let iter = Box::new(Iter { put: 0, deleted: 0 });
let iter2 = batch.iterate(iter);
assert_eq!(iter2.put, 2);
assert_eq!(iter2.deleted, 1);
}
|
Iter
|
identifier_name
|
writebatch.rs
|
use utils::{tmpdir};
use leveldb::database::{Database};
use leveldb::options::{Options,ReadOptions,WriteOptions};
use leveldb::database::kv::{KV};
use leveldb::database::batch::{Batch,Writebatch,WritebatchIterator};
#[test]
fn test_writebatch() {
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("writebatch");
let database = &mut Database::open(tmp.path(), opts).unwrap();
let batch = &mut Writebatch::new();
batch.put(1, &[1]);
batch.put(2, &[2]);
batch.delete(1);
let wopts = WriteOptions::new();
let ack = database.write(wopts, batch);
assert!(ack.is_ok());
let read_opts = ReadOptions::new();
let res = database.get(read_opts, 2);
match res {
Ok(data) => {
assert!(data.is_some());
let data = data.unwrap();
assert_eq!(data, vec!(2));
},
Err(_) =>
|
}
let read_opts2 = ReadOptions::new();
let res2 = database.get(read_opts2, 1);
match res2 {
Ok(data) => { assert!(data.is_none()) },
Err(_) => { panic!("failed reading data") }
}
}
struct Iter {
put: i32,
deleted: i32,
}
impl WritebatchIterator for Iter {
type K = i32;
fn put(&mut self,
_key: i32,
_value: &[u8]) {
self.put = self.put + 1;
}
fn deleted(&mut self,
_key: i32) {
self.deleted = self.deleted + 1;
}
}
#[test]
fn test_writebatchiter() {
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("writebatch");
let database = &mut Database::open(tmp.path(), opts).unwrap();
let batch = &mut Writebatch::new();
batch.put(1, &[1]);
batch.put(2, &[2]);
batch.delete(1);
let wopts = WriteOptions::new();
let ack = database.write(wopts, batch);
assert!(ack.is_ok());
let iter = Box::new(Iter { put: 0, deleted: 0 });
let iter2 = batch.iterate(iter);
assert_eq!(iter2.put, 2);
assert_eq!(iter2.deleted, 1);
}
|
{ panic!("failed reading data") }
|
conditional_block
|
writebatch.rs
|
use utils::{tmpdir};
use leveldb::database::{Database};
use leveldb::options::{Options,ReadOptions,WriteOptions};
use leveldb::database::kv::{KV};
use leveldb::database::batch::{Batch,Writebatch,WritebatchIterator};
#[test]
fn test_writebatch() {
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("writebatch");
let database = &mut Database::open(tmp.path(), opts).unwrap();
let batch = &mut Writebatch::new();
batch.put(1, &[1]);
batch.put(2, &[2]);
batch.delete(1);
let wopts = WriteOptions::new();
let ack = database.write(wopts, batch);
assert!(ack.is_ok());
let read_opts = ReadOptions::new();
let res = database.get(read_opts, 2);
match res {
|
let data = data.unwrap();
assert_eq!(data, vec!(2));
},
Err(_) => { panic!("failed reading data") }
}
let read_opts2 = ReadOptions::new();
let res2 = database.get(read_opts2, 1);
match res2 {
Ok(data) => { assert!(data.is_none()) },
Err(_) => { panic!("failed reading data") }
}
}
struct Iter {
put: i32,
deleted: i32,
}
impl WritebatchIterator for Iter {
type K = i32;
fn put(&mut self,
_key: i32,
_value: &[u8]) {
self.put = self.put + 1;
}
fn deleted(&mut self,
_key: i32) {
self.deleted = self.deleted + 1;
}
}
#[test]
fn test_writebatchiter() {
let mut opts = Options::new();
opts.create_if_missing = true;
let tmp = tmpdir("writebatch");
let database = &mut Database::open(tmp.path(), opts).unwrap();
let batch = &mut Writebatch::new();
batch.put(1, &[1]);
batch.put(2, &[2]);
batch.delete(1);
let wopts = WriteOptions::new();
let ack = database.write(wopts, batch);
assert!(ack.is_ok());
let iter = Box::new(Iter { put: 0, deleted: 0 });
let iter2 = batch.iterate(iter);
assert_eq!(iter2.put, 2);
assert_eq!(iter2.deleted, 1);
}
|
Ok(data) => {
assert!(data.is_some());
|
random_line_split
|