file_name (large_string, length 4–69) | prefix (large_string, length 0–26.7k) | suffix (large_string, length 0–24.8k) | middle (large_string, length 0–2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
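Each row below is one fill-in-the-middle (FIM) training example, flattened so the cells appear in column order separated by bare `|` lines: `middle` is the span masked out of the source file, `prefix` and `suffix` are the code on either side, and `fim_type` records how the span was cut (this excerpt shows three of the four classes: `identifier_name`, `identifier_body`, and `random_line_split`). A minimal sketch of how a row maps back onto its source file; the `FimExample` struct and its names are illustrative assumptions, not part of any published loader API:

```rust
/// Illustrative row layout; fields mirror the table columns above.
struct FimExample {
    file_name: String,
    prefix: String,   // code before the masked span
    suffix: String,   // code after the masked span
    middle: String,   // the masked span itself (the completion target)
    fim_type: String, // identifier_name | identifier_body | random_line_split
}

impl FimExample {
    /// The original file is always prefix + middle + suffix.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    let row = FimExample {
        file_name: "example.rs".into(),
        prefix: "fn add(a: i32, b: i32) -> i32 ".into(),
        suffix: "\n".into(),
        middle: "{ a + b }".into(),
        fim_type: "identifier_body".into(),
    };
    println!("{} ({}): {}", row.file_name, row.fim_type, row.reconstruct());
}
```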
worker.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::WorkerBinding;
use dom::bindings::codegen::Bindings::WorkerBinding::WorkerMethods;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::InheritTypes::{EventCast, EventTargetCast};
use dom::bindings::error::{Fallible, ErrorResult};
use dom::bindings::error::Error::Syntax;
use dom::bindings::global::{GlobalRef, GlobalField};
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::refcounted::Trusted;
use dom::bindings::structuredclone::StructuredCloneData;
use dom::bindings::trace::JSTraceable;
use dom::bindings::utils::{Reflectable, reflect_dom_object};
use dom::dedicatedworkerglobalscope::DedicatedWorkerGlobalScope;
use dom::errorevent::ErrorEvent;
use dom::event::{Event, EventBubbles, EventCancelable, EventHelpers};
use dom::eventtarget::{EventTarget, EventTargetHelpers, EventTargetTypeId};
use dom::messageevent::MessageEvent;
use script_task::{ScriptChan, ScriptMsg, Runnable};
use util::str::DOMString;
use js::jsapi::JSContext;
use js::jsval::{JSVal, UndefinedValue};
use url::UrlParser;
use std::borrow::ToOwned;
use std::cell::Cell;
use std::sync::mpsc::{channel, Sender};
pub type TrustedWorkerAddress = Trusted<Worker>;
#[dom_struct]
pub struct Worker {
eventtarget: EventTarget,
refcount: Cell<uint>,
global: GlobalField,
/// Sender to the Receiver associated with the DedicatedWorkerGlobalScope
/// this Worker created.
sender: Sender<(TrustedWorkerAddress, ScriptMsg)>,
}
impl Worker {
fn new_inherited(global: GlobalRef, sender: Sender<(TrustedWorkerAddress, ScriptMsg)>) -> Worker
|
pub fn new(global: GlobalRef, sender: Sender<(TrustedWorkerAddress, ScriptMsg)>) -> Temporary<Worker> {
reflect_dom_object(box Worker::new_inherited(global, sender),
global,
WorkerBinding::Wrap)
}
// http://www.whatwg.org/html/#dom-worker
pub fn Constructor(global: GlobalRef, script_url: DOMString) -> Fallible<Temporary<Worker>> {
// Step 2-4.
let worker_url = match UrlParser::new().base_url(&global.get_url())
.parse(script_url.as_slice()) {
Ok(url) => url,
Err(_) => return Err(Syntax),
};
let resource_task = global.resource_task();
let (sender, receiver) = channel();
let worker = Worker::new(global, sender.clone()).root();
let worker_ref = Trusted::new(global.get_cx(), worker.r(), global.script_chan());
DedicatedWorkerGlobalScope::run_worker_scope(
worker_url, worker_ref, resource_task, global.script_chan(),
sender, receiver);
Ok(Temporary::from_rooted(worker.r()))
}
pub fn handle_message(address: TrustedWorkerAddress,
data: StructuredCloneData) {
let worker = address.to_temporary().root();
let global = worker.r().global.root();
let target: JSRef<EventTarget> = EventTargetCast::from_ref(worker.r());
let message = data.read(global.r());
MessageEvent::dispatch_jsval(target, global.r(), message);
}
pub fn handle_error_message(address: TrustedWorkerAddress, message: DOMString,
filename: DOMString, lineno: u32, colno: u32) {
let worker = address.to_temporary().root();
let global = worker.r().global.root();
let error = UndefinedValue();
let target: JSRef<EventTarget> = EventTargetCast::from_ref(worker.r());
let errorevent = ErrorEvent::new(global.r(), "error".to_owned(),
EventBubbles::Bubbles, EventCancelable::Cancelable,
message, filename, lineno, colno, error).root();
let event: JSRef<Event> = EventCast::from_ref(errorevent.r());
event.fire(target);
}
}
impl<'a> WorkerMethods for JSRef<'a, Worker> {
fn PostMessage(self, cx: *mut JSContext, message: JSVal) -> ErrorResult {
let data = try!(StructuredCloneData::write(cx, message));
let address = Trusted::new(cx, self, self.global.root().r().script_chan().clone());
self.sender.send((address, ScriptMsg::DOMMessage(data))).unwrap();
Ok(())
}
event_handler!(message, GetOnmessage, SetOnmessage);
}
pub struct WorkerMessageHandler {
addr: TrustedWorkerAddress,
data: StructuredCloneData,
}
impl WorkerMessageHandler {
pub fn new(addr: TrustedWorkerAddress, data: StructuredCloneData) -> WorkerMessageHandler {
WorkerMessageHandler {
addr: addr,
data: data,
}
}
}
impl Runnable for WorkerMessageHandler {
fn handler(self: Box<WorkerMessageHandler>) {
let this = *self;
Worker::handle_message(this.addr, this.data);
}
}
|
{
Worker {
eventtarget: EventTarget::new_inherited(EventTargetTypeId::Worker),
refcount: Cell::new(0),
global: GlobalField::from_rooted(&global),
sender: sender,
}
}
|
identifier_body
|
issue-6128.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax, collections)]
extern crate collections;
use std::collections::HashMap;
trait Graph<Node, Edge> {
fn f(&self, Edge);
fn g(&self, Node);
}
impl<E> Graph<isize, E> for HashMap<isize, isize> {
fn f(&self, _e: E)
|
fn g(&self, _e: isize) {
panic!();
}
}
pub fn main() {
let g : Box<HashMap<isize,isize>> = box HashMap::new();
let _g2 : Box<Graph<isize,isize>> = g as Box<Graph<isize,isize>>;
}
|
{
panic!();
}
|
identifier_body
|
issue-6128.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
|
extern crate collections;
use std::collections::HashMap;
trait Graph<Node, Edge> {
fn f(&self, Edge);
fn g(&self, Node);
}
impl<E> Graph<isize, E> for HashMap<isize, isize> {
fn f(&self, _e: E) {
panic!();
}
fn g(&self, _e: isize) {
panic!();
}
}
pub fn main() {
let g : Box<HashMap<isize,isize>> = box HashMap::new();
let _g2 : Box<Graph<isize,isize>> = g as Box<Graph<isize,isize>>;
}
|
#![allow(unknown_features)]
#![feature(box_syntax, collections)]
|
random_line_split
|
issue-6128.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax, collections)]
extern crate collections;
use std::collections::HashMap;
trait Graph<Node, Edge> {
fn f(&self, Edge);
fn g(&self, Node);
}
impl<E> Graph<isize, E> for HashMap<isize, isize> {
fn
|
(&self, _e: E) {
panic!();
}
fn g(&self, _e: isize) {
panic!();
}
}
pub fn main() {
let g : Box<HashMap<isize,isize>> = box HashMap::new();
let _g2 : Box<Graph<isize,isize>> = g as Box<Graph<isize,isize>>;
}
|
f
|
identifier_name
|
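All three cuts above come from the same regression test for Rust issue #6128, which checks that a `Box<HashMap<isize, isize>>` coerces to a boxed trait object whose trait takes two type parameters. The test predates Rust 1.0: `box` syntax and anonymous trait-method parameters are gone, and trait objects need `dyn`. A sketch of the same check on current Rust:

```rust
use std::collections::HashMap;

trait Graph<Node, Edge> {
    fn f(&self, edge: Edge);
    fn g(&self, node: Node);
}

impl<E> Graph<isize, E> for HashMap<isize, isize> {
    fn f(&self, _e: E) {}
    fn g(&self, _n: isize) {}
}

fn main() {
    let g: Box<HashMap<isize, isize>> = Box::new(HashMap::new());
    // The unsizing coercion the test exercises: a concrete box becomes
    // a trait object with multiple type parameters.
    let g2: Box<dyn Graph<isize, isize>> = g;
    g2.f(1); // Edge is fixed to isize by the trait object's type
    g2.g(2);
}
```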
const-eval-overflow2c.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused_imports)]
// Note: the relevant lint pass here runs before some of the constant
// evaluation below (e.g., that performed by codegen and llvm), so if you
// change this warn to a deny, then the compiler will exit before
// those errors are detected.
#![deny(const_err)]
use std::fmt;
use std::{i8, i16, i32, i64, isize};
use std::{u8, u16, u32, u64, usize};
const VALS_I8: (i8,) = //~ ERROR any use of this value will cause an error
(
i8::MIN * 2,
);
const VALS_I16: (i16,) = //~ ERROR any use of this value will cause an error
(
i16::MIN * 2,
);
const VALS_I32: (i32,) = //~ ERROR any use of this value will cause an error
(
i32::MIN * 2,
);
const VALS_I64: (i64,) = //~ ERROR any use of this value will cause an error
(
i64::MIN * 2,
);
const VALS_U8: (u8,) = //~ ERROR any use of this value will cause an error
(
u8::MAX * 2,
);
const VALS_U16: (u16,) = ( //~ ERROR any use of this value will cause an error
u16::MAX * 2,
);
const VALS_U32: (u32,) = ( //~ ERROR any use of this value will cause an error
u32::MAX * 2,
);
const VALS_U64: (u64,) = //~ ERROR any use of this value will cause an error
(
u64::MAX * 2,
);
fn main() {
foo(VALS_I8);
foo(VALS_I16);
foo(VALS_I32);
foo(VALS_I64);
foo(VALS_U8);
foo(VALS_U16);
foo(VALS_U32);
foo(VALS_U64);
}
fn
|
<T>(_: T) {
}
|
foo
|
identifier_name
|
const-eval-overflow2c.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused_imports)]
// Note: the relevant lint pass here runs before some of the constant
// evaluation below (e.g., that performed by codegen and llvm), so if you
// change this warn to a deny, then the compiler will exit before
// those errors are detected.
#![deny(const_err)]
use std::fmt;
use std::{i8, i16, i32, i64, isize};
use std::{u8, u16, u32, u64, usize};
const VALS_I8: (i8,) = //~ ERROR any use of this value will cause an error
(
i8::MIN * 2,
);
const VALS_I16: (i16,) = //~ ERROR any use of this value will cause an error
(
i16::MIN * 2,
);
const VALS_I32: (i32,) = //~ ERROR any use of this value will cause an error
(
i32::MIN * 2,
);
const VALS_I64: (i64,) = //~ ERROR any use of this value will cause an error
(
i64::MIN * 2,
);
const VALS_U8: (u8,) = //~ ERROR any use of this value will cause an error
(
u8::MAX * 2,
);
const VALS_U16: (u16,) = ( //~ ERROR any use of this value will cause an error
u16::MAX * 2,
);
const VALS_U32: (u32,) = ( //~ ERROR any use of this value will cause an error
u32::MAX * 2,
);
const VALS_U64: (u64,) = //~ ERROR any use of this value will cause an error
(
|
);
fn main() {
foo(VALS_I8);
foo(VALS_I16);
foo(VALS_I32);
foo(VALS_I64);
foo(VALS_U8);
foo(VALS_U16);
foo(VALS_U32);
foo(VALS_U64);
}
fn foo<T>(_: T) {
}
|
u64::MAX * 2,
|
random_line_split
|
const-eval-overflow2c.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused_imports)]
// Note: the relevant lint pass here runs before some of the constant
// evaluation below (e.g., that performed by codegen and llvm), so if you
// change this warn to a deny, then the compiler will exit before
// those errors are detected.
#![deny(const_err)]
use std::fmt;
use std::{i8, i16, i32, i64, isize};
use std::{u8, u16, u32, u64, usize};
const VALS_I8: (i8,) = //~ ERROR any use of this value will cause an error
(
i8::MIN * 2,
);
const VALS_I16: (i16,) = //~ ERROR any use of this value will cause an error
(
i16::MIN * 2,
);
const VALS_I32: (i32,) = //~ ERROR any use of this value will cause an error
(
i32::MIN * 2,
);
const VALS_I64: (i64,) = //~ ERROR any use of this value will cause an error
(
i64::MIN * 2,
);
const VALS_U8: (u8,) = //~ ERROR any use of this value will cause an error
(
u8::MAX * 2,
);
const VALS_U16: (u16,) = ( //~ ERROR any use of this value will cause an error
u16::MAX * 2,
);
const VALS_U32: (u32,) = ( //~ ERROR any use of this value will cause an error
u32::MAX * 2,
);
const VALS_U64: (u64,) = //~ ERROR any use of this value will cause an error
(
u64::MAX * 2,
);
fn main() {
foo(VALS_I8);
foo(VALS_I16);
foo(VALS_I32);
foo(VALS_I64);
foo(VALS_U8);
foo(VALS_U16);
foo(VALS_U32);
foo(VALS_U64);
}
fn foo<T>(_: T)
|
{
}
|
identifier_body
|
|
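The three rows above slice the same overflow test. What it exercises, arithmetic in a `const` initializer that overflows, is a hard compile-time error on current Rust, which is why every initializer carries an `//~ ERROR` annotation. For runtime values the overflow-aware alternatives are the `checked_*`, `wrapping_*`, and `saturating_*` method families; a brief sketch:

```rust
fn main() {
    // A const equivalent of the test's `u8::MAX * 2` no longer compiles,
    // so runtime code spells out how the overflow should be handled.
    assert_eq!(u8::MAX.checked_mul(2), None);       // overflow reported
    assert_eq!(u8::MAX.wrapping_mul(2), 254);       // 510 mod 256
    assert_eq!(u8::MAX.saturating_mul(2), u8::MAX); // clamped at 255
}
```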
stream.rs
|
//! Convenience wrapper for streams to switch between plain TCP and TLS at runtime.
//!
//! There is no dependency on actual TLS implementations. Everything like
//! `native_tls` or `openssl` will work as long as there is a TLS stream supporting standard
//! `Read + Write` traits.
use std::{
pin::Pin,
task::{Context, Poll},
};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
/// A stream that might be protected with TLS.
#[non_exhaustive]
#[derive(Debug)]
pub enum MaybeTlsStream<S> {
/// Unencrypted socket stream.
Plain(S),
/// Encrypted socket stream using `native-tls`.
#[cfg(feature = "native-tls")]
NativeTls(tokio_native_tls::TlsStream<S>),
/// Encrypted socket stream using `rustls`.
#[cfg(feature = "__rustls-tls")]
Rustls(tokio_rustls::client::TlsStream<S>),
}
impl<S: AsyncRead + AsyncWrite + Unpin> AsyncRead for MaybeTlsStream<S> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>>
|
}
impl<S: AsyncRead + AsyncWrite + Unpin> AsyncWrite for MaybeTlsStream<S> {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, std::io::Error>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_write(cx, buf),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_write(cx, buf),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_write(cx, buf),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_flush(cx),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_flush(cx),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_flush(cx),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_shutdown(cx),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_shutdown(cx),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_shutdown(cx),
}
}
}
|
{
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_read(cx, buf),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_read(cx, buf),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_read(cx, buf),
}
}
|
identifier_body
|
stream.rs
|
//! Convenience wrapper for streams to switch between plain TCP and TLS at runtime.
//!
//! There is no dependency on actual TLS implementations. Everything like
//! `native_tls` or `openssl` will work as long as there is a TLS stream supporting standard
//! `Read + Write` traits.
use std::{
pin::Pin,
task::{Context, Poll},
};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
/// A stream that might be protected with TLS.
#[non_exhaustive]
#[derive(Debug)]
pub enum MaybeTlsStream<S> {
/// Unencrypted socket stream.
Plain(S),
/// Encrypted socket stream using `native-tls`.
#[cfg(feature = "native-tls")]
NativeTls(tokio_native_tls::TlsStream<S>),
/// Encrypted socket stream using `rustls`.
#[cfg(feature = "__rustls-tls")]
Rustls(tokio_rustls::client::TlsStream<S>),
}
impl<S: AsyncRead + AsyncWrite + Unpin> AsyncRead for MaybeTlsStream<S> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_read(cx, buf),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_read(cx, buf),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_read(cx, buf),
}
}
}
impl<S: AsyncRead + AsyncWrite + Unpin> AsyncWrite for MaybeTlsStream<S> {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, std::io::Error>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_write(cx, buf),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_write(cx, buf),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_write(cx, buf),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_flush(cx),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_flush(cx),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_flush(cx),
}
}
|
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_shutdown(cx),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_shutdown(cx),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_shutdown(cx),
}
}
}
|
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
|
random_line_split
|
stream.rs
|
//! Convenience wrapper for streams to switch between plain TCP and TLS at runtime.
//!
//! There is no dependency on actual TLS implementations. Everything like
//! `native_tls` or `openssl` will work as long as there is a TLS stream supporting standard
//! `Read + Write` traits.
use std::{
pin::Pin,
task::{Context, Poll},
};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
/// A stream that might be protected with TLS.
#[non_exhaustive]
#[derive(Debug)]
pub enum MaybeTlsStream<S> {
/// Unencrypted socket stream.
Plain(S),
/// Encrypted socket stream using `native-tls`.
#[cfg(feature = "native-tls")]
NativeTls(tokio_native_tls::TlsStream<S>),
/// Encrypted socket stream using `rustls`.
#[cfg(feature = "__rustls-tls")]
Rustls(tokio_rustls::client::TlsStream<S>),
}
impl<S: AsyncRead + AsyncWrite + Unpin> AsyncRead for MaybeTlsStream<S> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_read(cx, buf),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_read(cx, buf),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_read(cx, buf),
}
}
}
impl<S: AsyncRead + AsyncWrite + Unpin> AsyncWrite for MaybeTlsStream<S> {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, std::io::Error>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_write(cx, buf),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_write(cx, buf),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_write(cx, buf),
}
}
fn
|
(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_flush(cx),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_flush(cx),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_flush(cx),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
match self.get_mut() {
MaybeTlsStream::Plain(ref mut s) => Pin::new(s).poll_shutdown(cx),
#[cfg(feature = "native-tls")]
MaybeTlsStream::NativeTls(s) => Pin::new(s).poll_shutdown(cx),
#[cfg(feature = "__rustls-tls")]
MaybeTlsStream::Rustls(s) => Pin::new(s).poll_shutdown(cx),
}
}
}
|
poll_flush
|
identifier_name
|
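The `stream.rs` rows all show the same enum-dispatch pattern: every `AsyncRead`/`AsyncWrite` method matches on the active variant, pins the inner stream, and forwards the call, with `#[cfg(feature = ...)]` gating the TLS arms. The pattern is not tokio-specific; a self-contained synchronous sketch of the same delegation over `std::io::Read`, with `Cursor` standing in for both transports:

```rust
use std::io::{self, Cursor, Read};

// Illustrative stand-in for MaybeTlsStream: one variant per transport.
enum MaybeTls<S> {
    Plain(S),
    Tls(Cursor<Vec<u8>>), // pretend this wraps S in a TLS session
}

impl<S: Read> Read for MaybeTls<S> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Same shape as MaybeTlsStream::poll_read: match on the variant
        // and delegate to whichever inner stream is active.
        match self {
            MaybeTls::Plain(s) => s.read(buf),
            MaybeTls::Tls(s) => s.read(buf),
        }
    }
}

fn main() -> io::Result<()> {
    for mut stream in [
        MaybeTls::Plain(Cursor::new(b"plain".to_vec())),
        MaybeTls::Tls(Cursor::new(b"tls".to_vec())),
    ] {
        let mut out = String::new();
        stream.read_to_string(&mut out)?;
        println!("{out}");
    }
    Ok(())
}
```

The enum avoids a `Box<dyn ...>` trait object, so the per-variant calls stay statically dispatched and the TLS variants disappear entirely when their features are disabled.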
lib.rs
|
#![cfg_attr(feature = "unstable", feature(plugin))]
#![cfg_attr(feature = "unstable", plugin(clippy))]
//! The purpose of this crate is to provide an easy way to query the runtime type
//! information (such as field names, offsets and types) for POD (*plain old data*) types,
//! and to allow creating such types without the need for much boilerplate. This information
//! is extremely useful when communicating with foreign low-level libraries, and, more
//! generally, for any kind of serialization/deserialization work.
//!
//! The core functionality is accessible through the
//! [`type_info`](trait.TypeInfo.html#tymethod.type_info) static
//! method of the [`TypeInfo`](trait.TypeInfo.html) trait which
//! comes implemented for all built-in scalar types and fixed-size arrays, and which can
//! be easily implemented for user types by using the [`def!`](macro.def!.html) macro.
//!
//! # Examples
//!
//! Defining reflectable struct types only requires wrapping the struct definition in
//! [`def!`](macro.def!.html):
//!
//! ```ignore
//! #[macro_use]
//! extern crate typeinfo;
//! use typeinfo::TypeInfo;
//!
//! def! {
//! #[derive(Debug)]
//! pub struct Color { r: u16, g: u16, b: u16, }
//!
//! #[derive(Debug)]
//! #[repr(packed)]
//! pub struct Palette {
//! monochrome: bool,
//! colors: [Color; 16]
//! }
//! }
//!
//! fn main() {
//! println!("{:#?}", Palette::type_info());
//! }
//! ```
//!
//! Output (whitespace formatted):
//!
//! ```ignore
//! Compound([
//! Field { ty: Bool, name: "monochrome", offset: 0 },
//! Field {
//! ty: Array(
//! Compound([
//! Field { ty: UInt16, name: "r", offset: 0 },
//! Field { ty: UInt16, name: "g", offset: 2 },
//! Field { ty: UInt16, name: "b", offset: 4 }
//! ], 6),
//! 16),
//! name: "colors",
//! offset: 1
//! }
//! ], 97)
//! ```
/// Represents a POD type: scalar, fixed-size array or compound (struct).
/// May be arbitrarily nested.
#[derive(Clone, PartialEq, Debug)]
pub enum Type {
/// 1-byte signed integer
Int8,
/// 2-byte signed integer
Int16,
/// 4-byte signed integer
Int32,
/// 8-byte signed integer
Int64,
/// 1-byte unsigned integer
UInt8,
/// 2-byte unsigned integer
UInt16,
/// 4-byte unsigned integer
UInt32,
/// 8-byte unsigned integer
UInt64,
/// 4-byte floating-point number
Float32,
/// 8-byte floating-point number
Float64,
/// 4-byte unicode character type
Char,
/// 1-byte boolean type
Bool,
/// fixed-size array with POD elements
Array(Box<Type>, usize),
/// compound type whose fields are POD
Compound(Vec<Field>, usize),
}
impl Type {
/// Returns the total size of a type value in bytes.
pub fn
|
(&self) -> usize {
match *self {
Type::Int8 | Type::UInt8 | Type::Bool => 1,
Type::Int16 | Type::UInt16 => 2,
Type::Int32 | Type::UInt32 | Type::Float32 | Type::Char => 4,
Type::Int64 | Type::UInt64 | Type::Float64 => 8,
Type::Array(ref ty, num) => ty.size() * num,
Type::Compound(_, size) => size,
}
}
/// Returns true if the underlying type is a scalar.
pub fn is_scalar(&self) -> bool {
!self.is_array() && !self.is_compound()
}
/// Returns true if the underlying type is a fixed-size array.
pub fn is_array(&self) -> bool {
if let Type::Array(_, _) = *self { true } else { false }
}
/// Returns true if the underlying type is compound.
pub fn is_compound(&self) -> bool {
if let Type::Compound(_, _) = *self { true } else { false }
}
}
/// Field of a compound type: contains type, name and offset from the beginning of the struct.
#[derive(Clone, PartialEq, Debug)]
pub struct Field {
/// field value type
pub ty: Type,
/// field name
pub name: String,
/// offset to the beginning of the struct
pub offset: usize,
}
impl Field {
pub fn new<S: Into<String>>(ty: &Type, name: S, offset: usize) -> Field {
Field {
ty: ty.clone(),
name: name.into(),
offset: offset
}
}
}
/// Trait implemented by copyable POD data types with fixed size, enables
/// runtime reflection.
///
/// This trait is implemented by default for all built-in scalar types (integer,
/// floating-point, boolean and character), and there's a generic implementation
/// for fixed-size arrays. Note that pointer-sized integer types `isize` /
/// `usize` map to either `Int32` / `UInt32` or `Int64` / `UInt64` respectively,
/// depending on the host platform.
///
/// The easiest way to generate an implementation for a compound type is to use
/// the provided [`def!`](macro.def!.html) macro.
pub trait TypeInfo: Copy {
/// Returns the runtime type information for the implementing type.
fn type_info() -> Type;
}
macro_rules! impl_scalar {
($t:ty, $i:ident) => (
impl $crate::TypeInfo for $t {
#[inline(always)]
fn type_info() -> $crate::Type {
$crate::Type::$i
}
}
)
}
// implement TypeInfo for built-in scalar types
impl_scalar!(i8, Int8);
impl_scalar!(i16, Int16);
impl_scalar!(i32, Int32);
impl_scalar!(i64, Int64);
impl_scalar!(u8, UInt8);
impl_scalar!(u16, UInt16);
impl_scalar!(u32, UInt32);
impl_scalar!(u64, UInt64);
impl_scalar!(f32, Float32);
impl_scalar!(f64, Float64);
impl_scalar!(char, Char);
impl_scalar!(bool, Bool);
#[cfg(target_pointer_width = "32")] impl_scalar!(isize, Int32);
#[cfg(target_pointer_width = "64")] impl_scalar!(isize, Int64);
#[cfg(target_pointer_width = "32")] impl_scalar!(usize, UInt32);
#[cfg(target_pointer_width = "64")] impl_scalar!(usize, UInt64);
macro_rules! impl_array {
($($n:expr),*$(,)*) => {
$(
impl<T: $crate::TypeInfo> $crate::TypeInfo for [T; $n] {
#[inline(always)]
fn type_info() -> $crate::Type {
$crate::Type::Array(
Box::new(<T as $crate::TypeInfo>::type_info()),
$n
)
}
}
)*
};
}
// implement TypeInfo for fixed-size arrays of lengths 0..=63
impl_array!(
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
);
/// Compound type constructor that implements [`TypeInfo`](trait.TypeInfo.html)
/// trait automatically.
///
/// This macro can be used anywhere a normal struct definition can be placed, supports
/// visibility qualifiers, struct attributes, nested datatypes and multiple struct
/// definitions inside one invocation.
///
/// `def!` defines the type as given, derives `Clone` and `Copy`, and implements the
/// [`TypeInfo`](trait.TypeInfo.html) trait so the type information is readily accessible
/// at runtime.
///
/// *Note:* due to certain limitations of the macro system, a single macro invocation may
/// only contain definitions where both fields and structs have the same visibility qualifier.
///
/// # Examples
/// ```ignore
/// def! {
/// #[derive(Debug)]
/// pub struct Color {
/// r: u8,
/// g: u8,
/// b: u8,
/// }
///
/// pub struct Palette {
/// colors: [Color; 16]
/// }
/// }
/// ```
#[macro_export]
macro_rules! def {
// private struct, private fields
($($(#[$attr:meta])* struct $s:ident { $($i:ident: $t:ty),+$(,)* })*) => (
$(
#[allow(dead_code)]
#[derive(Clone, Copy)]
$(#[$attr])*
struct $s { $($i: $t),+ }
def!(@impl $s { $($i: $t),+ } );
)*
);
// public struct, private fields
($($(#[$attr:meta])* pub struct $s:ident { $($i:ident: $t:ty),+$(,)* })*) => (
$(
#[allow(dead_code)]
#[derive(Clone, Copy)]
$(#[$attr])*
pub struct $s { $($i: $t),+ }
def!(@impl $s { $($i: $t),+ } );
)*
);
// public struct, public fields
($($(#[$attr:meta])* pub struct $s:ident { $(pub $i:ident: $t:ty),+$(,)* })*) => (
$(
#[allow(dead_code)]
#[derive(Clone, Copy)]
$(#[$attr])*
pub struct $s { $(pub $i: $t),+ }
def!(@impl $s { $($i: $t),+ } );
)*
);
// implement TypeInfo trait
(@impl $s:ident { $($i:ident: $t:ty),+ }) => (
impl $crate::TypeInfo for $s {
fn type_info() -> $crate::Type {
let base = 0usize as *const $s;
$crate::Type::Compound(vec![$(
$crate::Field::new(
&<$t as $crate::TypeInfo>::type_info(),
stringify!($i),
unsafe { &((*base).$i) as *const $t as usize}
)
),+], ::std::mem::size_of::<$s>())
}
}
);
}
|
size
|
identifier_name
|
lib.rs
|
#![cfg_attr(feature = "unstable", feature(plugin))]
#![cfg_attr(feature = "unstable", plugin(clippy))]
//! The purpose of this crate is to provide an easy way to query the runtime type
//! information (such as field names, offsets and types) for POD (*plain old data*) types,
//! and to allow creating such types without the need for much boilerplate. This information
//! is extremely useful when communicating with foreign low-level libraries, and, more
//! generally, for any kind of serialization/deserialization work.
//!
//! The core functionality is accessible through the
//! [`type_info`](trait.TypeInfo.html#tymethod.type_info) static
//! method of the [`TypeInfo`](trait.TypeInfo.html) trait which
//! comes implemented for all built-in scalar types and fixed-size arrays, and which can
//! be easily implemented for user types by using the [`def!`](macro.def!.html) macro.
//!
//! # Examples
//!
//! Defining reflectable struct types only requires wrapping the struct definition in
//! [`def!`](macro.def!.html):
//!
//! ```ignore
//! #[macro_use]
//! extern crate typeinfo;
//! use typeinfo::TypeInfo;
//!
//! def! {
//! #[derive(Debug)]
//! pub struct Color { r: u16, g: u16, b: u16, }
//!
//! #[derive(Debug)]
//! #[repr(packed)]
//! pub struct Palette {
//! monochrome: bool,
//! colors: [Color; 16]
//! }
//! }
//!
//! fn main() {
//! println!("{:#?}", Palette::type_info());
//! }
//! ```
//!
//! Output (whitespace formatted):
//!
//! ```ignore
//! Compound([
//! Field { ty: Bool, name: "monochrome", offset: 0 },
//! Field {
//! ty: Array(
//! Compound([
//! Field { ty: UInt16, name: "r", offset: 0 },
//! Field { ty: UInt16, name: "g", offset: 2 },
//! Field { ty: UInt16, name: "b", offset: 4 }
//! ], 6),
//! 16),
//! name: "colors",
//! offset: 1
//! }
//! ], 97)
//! ```
/// Represents a POD type: scalar, fixed-size array or compound (struct).
/// May be arbitrarily nested.
#[derive(Clone, PartialEq, Debug)]
pub enum Type {
/// 1-byte signed integer
Int8,
/// 2-byte signed integer
Int16,
/// 4-byte signed integer
Int32,
/// 8-byte signed integer
Int64,
/// 1-byte unsigned integer
UInt8,
/// 2-byte unsigned integer
UInt16,
/// 4-byte unsigned integer
UInt32,
/// 8-byte unsigned integer
UInt64,
/// 4-byte floating-point number
Float32,
/// 8-byte floating-point number
Float64,
/// 4-byte unicode character type
Char,
/// 1-byte boolean type
Bool,
|
/// fixed-size array with POD elements
Array(Box<Type>, usize),
/// compound type whose fields are POD
Compound(Vec<Field>, usize),
}
impl Type {
/// Returns the total size of a type value in bytes.
pub fn size(&self) -> usize {
match *self {
Type::Int8 | Type::UInt8 | Type::Bool => 1,
Type::Int16 | Type::UInt16 => 2,
Type::Int32 | Type::UInt32 | Type::Float32 | Type::Char => 4,
Type::Int64 | Type::UInt64 | Type::Float64 => 8,
Type::Array(ref ty, num) => ty.size() * num,
Type::Compound(_, size) => size,
}
}
/// Returns true if the underlying type is a scalar.
pub fn is_scalar(&self) -> bool {
!self.is_array() && !self.is_compound()
}
/// Returns true if the underlying type is a fixed-size array.
pub fn is_array(&self) -> bool {
if let Type::Array(_, _) = *self { true } else { false }
}
/// Returns true if the underlying type is compound.
pub fn is_compound(&self) -> bool {
if let Type::Compound(_, _) = *self { true } else { false }
}
}
/// Field of a compound type: contains type, name and offset from the beginning of the struct.
#[derive(Clone, PartialEq, Debug)]
pub struct Field {
/// field value type
pub ty: Type,
/// field name
pub name: String,
/// offset to the beginning of the struct
pub offset: usize,
}
impl Field {
pub fn new<S: Into<String>>(ty: &Type, name: S, offset: usize) -> Field {
Field {
ty: ty.clone(),
name: name.into(),
offset: offset
}
}
}
/// Trait implemented by copyable POD data types with fixed size, enables
/// runtime reflection.
///
/// This trait is implemented by default for all built-in scalar types (integer,
/// floating-point, boolean and character), and there's a generic implementation
/// for fixed-size arrays. Note that pointer-sized integer types `isize` /
/// `usize` map to either `Int32` / `UInt32` or `Int64` / `UInt64` respectively,
/// depending on the host platform.
///
/// The easiest way to generate an implementation for a compound type is to use
/// the provided [`def!`](macro.def!.html) macro.
pub trait TypeInfo: Copy {
/// Returns the runtime type information for the implementing type.
fn type_info() -> Type;
}
macro_rules! impl_scalar {
($t:ty, $i:ident) => (
impl $crate::TypeInfo for $t {
#[inline(always)]
fn type_info() -> $crate::Type {
$crate::Type::$i
}
}
)
}
// implement TypeInfo for built-in scalar types
impl_scalar!(i8, Int8);
impl_scalar!(i16, Int16);
impl_scalar!(i32, Int32);
impl_scalar!(i64, Int64);
impl_scalar!(u8, UInt8);
impl_scalar!(u16, UInt16);
impl_scalar!(u32, UInt32);
impl_scalar!(u64, UInt64);
impl_scalar!(f32, Float32);
impl_scalar!(f64, Float64);
impl_scalar!(char, Char);
impl_scalar!(bool, Bool);
#[cfg(target_pointer_width = "32")] impl_scalar!(isize, Int32);
#[cfg(target_pointer_width = "64")] impl_scalar!(isize, Int64);
#[cfg(target_pointer_width = "32")] impl_scalar!(usize, UInt32);
#[cfg(target_pointer_width = "64")] impl_scalar!(usize, UInt64);
macro_rules! impl_array {
($($n:expr),*$(,)*) => {
$(
impl<T: $crate::TypeInfo> $crate::TypeInfo for [T; $n] {
#[inline(always)]
fn type_info() -> $crate::Type {
$crate::Type::Array(
Box::new(<T as $crate::TypeInfo>::type_info()),
$n
)
}
}
)*
};
}
// implement TypeInfo for fixed-size arrays of lengths 0..=63
impl_array!(
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
);
/// Compound type constructor that implements [`TypeInfo`](trait.TypeInfo.html)
/// trait automatically.
///
/// This macro can be used anywhere a normal struct definition can be placed, supports
/// visibility qualifiers, struct attributes, nested datatypes and multiple struct
/// definitions inside one invocation.
///
/// `def!` defines the type as given, derives `Clone` and `Copy`, and implements the
/// [`TypeInfo`](trait.TypeInfo.html) trait so the type information is readily accessible
/// at runtime.
///
/// *Note:* due to certain limitations of the macro system, a single macro invocation may
/// only contain definitions where both fields and structs have the same visibility qualifier.
///
/// # Examples
/// ```ignore
/// def! {
/// #[derive(Debug)]
/// pub struct Color {
/// r: u8,
/// g: u8,
/// b: u8,
/// }
///
/// pub struct Palette {
/// colors: [Color; 16]
/// }
/// }
/// ```
#[macro_export]
macro_rules! def {
// private struct, private fields
($($(#[$attr:meta])* struct $s:ident { $($i:ident: $t:ty),+$(,)* })*) => (
$(
#[allow(dead_code)]
#[derive(Clone, Copy)]
$(#[$attr])*
struct $s { $($i: $t),+ }
def!(@impl $s { $($i: $t),+ } );
)*
);
// public struct, private fields
($($(#[$attr:meta])* pub struct $s:ident { $($i:ident: $t:ty),+$(,)* })*) => (
$(
#[allow(dead_code)]
#[derive(Clone, Copy)]
$(#[$attr])*
pub struct $s { $($i: $t),+ }
def!(@impl $s { $($i: $t),+ } );
)*
);
// public struct, public fields
($($(#[$attr:meta])* pub struct $s:ident { $(pub $i:ident: $t:ty),+$(,)* })*) => (
$(
#[allow(dead_code)]
#[derive(Clone, Copy)]
$(#[$attr])*
pub struct $s { $(pub $i: $t),+ }
def!(@impl $s { $($i: $t),+ } );
)*
);
// implement TypeInfo trait
(@impl $s:ident { $($i:ident: $t:ty),+ }) => (
impl $crate::TypeInfo for $s {
fn type_info() -> $crate::Type {
let base = 0usize as *const $s;
$crate::Type::Compound(vec![$(
$crate::Field::new(
&<$t as $crate::TypeInfo>::type_info(),
stringify!($i),
unsafe { &((*base).$i) as *const $t as usize}
)
),+], ::std::mem::size_of::<$s>())
}
}
);
}
|
random_line_split
|
|
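Both `lib.rs` rows hinge on the `def!` macro's offset computation, which fakes a struct at address 0 (`0usize as *const $s`) and reads field addresses from it, a common idiom of that era but undefined behavior by today's rules. Modern Rust exposes the same information as `std::mem::offset_of!`, stable since 1.77; a sketch of the equivalent query for the doc example's `Color`:

```rust
use std::mem::offset_of;

#[allow(dead_code)]
#[repr(C)]
struct Color {
    r: u16,
    g: u16,
    b: u16,
}

fn main() {
    // The same numbers the crate's doc output reports for Color,
    // without dereferencing a null pointer.
    assert_eq!(offset_of!(Color, r), 0);
    assert_eq!(offset_of!(Color, g), 2);
    assert_eq!(offset_of!(Color, b), 4);
    assert_eq!(std::mem::size_of::<Color>(), 6);
}
```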
fmt.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Deprecated fmt! syntax extension
use ast;
use codemap::Span;
use ext::base;
use ext::build::AstBuilder;
pub fn expand_syntax_ext(ecx: &mut base::ExtCtxt, sp: Span,
_tts: &[ast::TokenTree]) -> base::MacResult
|
{
ecx.span_err(sp, "`fmt!` is deprecated, use `format!` instead");
ecx.parse_sess.span_diagnostic.span_note(sp,
"see http://static.rust-lang.org/doc/master/std/fmt/index.html \
for documentation");
base::MRExpr(ecx.expr_uint(sp, 2))
}
|
identifier_body
|
|
fmt.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Deprecated fmt! syntax extension
use ast;
use codemap::Span;
use ext::base;
use ext::build::AstBuilder;
pub fn expand_syntax_ext(ecx: &mut base::ExtCtxt, sp: Span,
_tts: &[ast::TokenTree]) -> base::MacResult {
ecx.span_err(sp, "`fmt!` is deprecated, use `format!` instead");
ecx.parse_sess.span_diagnostic.span_note(sp,
"see http://static.rust-lang.org/doc/master/std/fmt/index.html \
for documentation");
|
}
|
base::MRExpr(ecx.expr_uint(sp, 2))
|
random_line_split
|
fmt.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Deprecated fmt! syntax extension
use ast;
use codemap::Span;
use ext::base;
use ext::build::AstBuilder;
pub fn
|
(ecx: &mut base::ExtCtxt, sp: Span,
_tts: &[ast::TokenTree]) -> base::MacResult {
ecx.span_err(sp, "`fmt!` is deprecated, use `format!` instead");
ecx.parse_sess.span_diagnostic.span_note(sp,
"see http://static.rust-lang.org/doc/master/std/fmt/index.html \
for documentation");
base::MRExpr(ecx.expr_uint(sp, 2))
}
|
expand_syntax_ext
|
identifier_name
|
util.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::fmt::Write;
use toml::Value;
use anyhow::Result;
#[cfg(feature = "early-panic")]
#[macro_export]
macro_rules! if_cfg_panic {
() => { panic!() };
($msg:expr) => { panic!($msg) };
($fmt:expr, $($arg:tt)+) => { panic!($fmt, $($arg),+) };
}
#[cfg(not(feature = "early-panic"))]
#[macro_export]
macro_rules! if_cfg_panic {
() => { };
($msg:expr) => { };
($fmt:expr, $($arg:tt)+) => { };
}
pub fn entry_buffer_to_header_content(buf: &str) -> Result<(Value, String)> {
debug!("Building entry from string");
let mut header = String::new();
let mut content = String::new();
let mut header_consumed = false;
let mut iter = buf.split('\n').skip(1).peekable(); // the first line is "---"
while let Some(line) = iter.next() {
if line == "---" &&!header_consumed {
header_consumed = true;
// do not further process the line
} else if !header_consumed {
writeln!(header, "{}", line)?;
} else if iter.peek().is_some() {
writeln!(content, "{}", line)?;
} else {
write!(content, "{}", line)?;
}
}
let h = ::toml::de::from_str(&header)?;
Ok((h, content))
}
#[cfg(test)]
mod test {
extern crate env_logger;
use super::entry_buffer_to_header_content;
fn setup_logging() {
let _ = env_logger::try_init();
}
fn mkfile(content: &str) -> String {
format!(r#"---
[imag]
version = '{version}'
---
{content}"#, version = env!("CARGO_PKG_VERSION"), content = content)
}
#[test]
fn test_entry_buffer_to_header_content_1() {
let content = "Hai";
let file = format!(r#"---
[imag]
version = '{version}'
---
{content}"#, version = env!("CARGO_PKG_VERSION"), content = content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_2() {
setup_logging();
let content = r#"Hai
"#;
let file = mkfile(&content);
debug!("FILE: <<<{}>>>", file);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
debug!("CONTENT: <<<{}>>>", res_content);
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_3() {
let content = r#"Hai
barbar
"#;
let file = mkfile(&content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
#[test]
fn
|
() {
let content = r#"Hai
---
barbar
---
"#;
let file = mkfile(&content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_5() {
let content = r#"Hai
---
barbar
---
"#;
let file = mkfile(&content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
}
|
test_entry_buffer_to_header_content_4
|
identifier_name
|
util.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::fmt::Write;
use toml::Value;
use anyhow::Result;
#[cfg(feature = "early-panic")]
#[macro_export]
macro_rules! if_cfg_panic {
() => { panic!() };
($msg:expr) => { panic!($msg) };
($fmt:expr, $($arg:tt)+) => { panic!($fmt, $($arg),+) };
}
#[cfg(not(feature = "early-panic"))]
#[macro_export]
macro_rules! if_cfg_panic {
() => { };
($msg:expr) => { };
($fmt:expr, $($arg:tt)+) => { };
}
pub fn entry_buffer_to_header_content(buf: &str) -> Result<(Value, String)> {
debug!("Building entry from string");
let mut header = String::new();
let mut content = String::new();
let mut header_consumed = false;
let mut iter = buf.split('\n').skip(1).peekable(); // the first line is "---"
while let Some(line) = iter.next() {
if line == "---" &&!header_consumed {
header_consumed = true;
// do not further process the line
} else if !header_consumed {
writeln!(header, "{}", line)?;
} else if iter.peek().is_some() {
writeln!(content, "{}", line)?;
} else {
write!(content, "{}", line)?;
}
}
let h = ::toml::de::from_str(&header)?;
Ok((h, content))
}
#[cfg(test)]
mod test {
extern crate env_logger;
use super::entry_buffer_to_header_content;
fn setup_logging() {
let _ = env_logger::try_init();
}
fn mkfile(content: &str) -> String {
format!(r#"---
[imag]
version = '{version}'
---
{content}"#, version = env!("CARGO_PKG_VERSION"), content = content)
}
#[test]
fn test_entry_buffer_to_header_content_1() {
let content = "Hai";
let file = format!(r#"---
[imag]
version = '{version}'
---
{content}"#, version = env!("CARGO_PKG_VERSION"), content = content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_2()
|
#[test]
fn test_entry_buffer_to_header_content_3() {
let content = r#"Hai
barbar
"#;
let file = mkfile(&content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_4() {
let content = r#"Hai
---
barbar
---
"#;
let file = mkfile(&content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_5() {
let content = r#"Hai
---
barbar
---
"#;
let file = mkfile(&content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
}
|
{
setup_logging();
let content = r#"Hai
"#;
let file = mkfile(&content);
debug!("FILE: <<<{}>>>", file);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
debug!("CONTENT: <<<{}>>>", res_content);
assert_eq!(res_content, content)
}
|
identifier_body
|
util.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::fmt::Write;
use toml::Value;
use anyhow::Result;
#[cfg(feature = "early-panic")]
#[macro_export]
macro_rules! if_cfg_panic {
() => { panic!() };
($msg:expr) => { panic!($msg) };
($fmt:expr, $($arg:tt)+) => { panic!($fmt, $($arg),+) };
}
#[cfg(not(feature = "early-panic"))]
#[macro_export]
macro_rules! if_cfg_panic {
() => { };
($msg:expr) => { };
($fmt:expr, $($arg:tt)+) => { };
}
pub fn entry_buffer_to_header_content(buf: &str) -> Result<(Value, String)> {
debug!("Building entry from string");
let mut header = String::new();
let mut content = String::new();
let mut header_consumed = false;
let mut iter = buf.split('\n').skip(1).peekable(); // the first line is "---"
while let Some(line) = iter.next() {
if line == "---" &&!header_consumed {
header_consumed = true;
// do not further process the line
} else if !header_consumed {
writeln!(header, "{}", line)?;
} else if iter.peek().is_some() {
writeln!(content, "{}", line)?;
} else {
write!(content, "{}", line)?;
}
}
let h = ::toml::de::from_str(&header)?;
Ok((h, content))
}
|
fn setup_logging() {
let _ = env_logger::try_init();
}
fn mkfile(content: &str) -> String {
format!(r#"---
[imag]
version = '{version}'
---
{content}"#, version = env!("CARGO_PKG_VERSION"), content = content)
}
#[test]
fn test_entry_buffer_to_header_content_1() {
let content = "Hai";
let file = format!(r#"---
[imag]
version = '{version}'
---
{content}"#, version = env!("CARGO_PKG_VERSION"), content = content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_2() {
setup_logging();
let content = r#"Hai
"#;
let file = mkfile(&content);
debug!("FILE: <<<{}>>>", file);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
debug!("CONTENT: <<<{}>>>", res_content);
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_3() {
let content = r#"Hai
barbar
"#;
let file = mkfile(&content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_4() {
let content = r#"Hai
---
barbar
---
"#;
let file = mkfile(&content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
#[test]
fn test_entry_buffer_to_header_content_5() {
let content = r#"Hai
---
barbar
---
"#;
let file = mkfile(&content);
let res = entry_buffer_to_header_content(&file);
assert!(res.is_ok());
let (_, res_content) = res.unwrap();
assert_eq!(res_content, content)
}
}
|
#[cfg(test)]
mod test {
extern crate env_logger;
use super::entry_buffer_to_header_content;
|
random_line_split
|
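All three `util.rs` (imag) rows revolve around `entry_buffer_to_header_content`, which splits a document at the first `---` fence after the opening one: everything before it is parsed as a TOML header, everything after it is content, and later `---` lines stay in the content (exactly what tests 4 and 5 pin down). A dependency-free sketch of that split, with a hypothetical `split_front_matter` that skips the TOML parsing and error context:

```rust
/// Split "---\n<header>\n---\n<body>" into (header, body).
/// Simplified stand-in for entry_buffer_to_header_content: no TOML
/// parsing, and Option instead of a Result with context.
fn split_front_matter(buf: &str) -> Option<(String, String)> {
    let rest = buf.strip_prefix("---\n")?;            // opening fence
    let (header, body) = rest.split_once("\n---\n")?; // first closing fence
    Some((header.to_string(), body.to_string()))
}

fn main() {
    let buf = "---\n[imag]\nversion = '0.10.0'\n---\nHai\n---\nstill content\n";
    let (header, body) = split_front_matter(buf).unwrap();
    assert_eq!(header, "[imag]\nversion = '0.10.0'");
    assert_eq!(body, "Hai\n---\nstill content\n"); // later fences survive
}
```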
util.rs
|
// This file is made up largely of utility methods which are invoked by the
// session in its interpret method. They are separate because they don't rely
// on the session (or take what they do need as arguments) and/or they are
// called by the session in multiple places.
use std::env::current_dir;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use regex::Regex;
use walkdir::WalkDir;
use folder::Folder;
#[macro_export]
macro_rules! path_filename_to_str(
($p:ident) => ({
use std::ffi::OsStr;
$p.file_name().unwrap_or_else(|| OsStr::new("")).to_str().unwrap_or_else(|| "")
});
);
fn
|
(dir: &Path) -> String {
match current_dir() {
Err(_) => dir.display().to_string(),
Ok(absp) => {
let mut abs_path = absp.clone();
abs_path.push(dir);
abs_path.display().to_string()
}
}
}
pub fn perform_select(maildir: &str, select_args: &[&str], examine: bool,
tag: &str) -> (Option<Folder>, String) {
let err_res = (None, "".to_string());
if select_args.len() < 1 { return err_res; }
let mbox_name = select_args[0].trim_matches('"').replace("INBOX", ".");
let mut maildir_path = PathBuf::new();
maildir_path.push(maildir);
maildir_path.push(mbox_name);
let folder = match Folder::new(maildir_path, examine) {
None => { return err_res; }
Some(folder) => folder.clone()
};
let ok_res = folder.select_response(tag);
(Some(folder), ok_res)
}
/// For the given dir, make sure it is a valid mail folder and, if it is,
/// generate the LIST response for it.
fn list_dir(dir: &Path, regex: &Regex, maildir_path: &Path) -> Option<String> {
let dir_string = dir.display().to_string();
let dir_name = path_filename_to_str!(dir);
// These folder names are used to hold mail. Every other folder is
// valid.
if dir_name == "cur" || dir_name == "new" || dir_name == "tmp" {
return None;
}
let abs_dir = make_absolute(dir);
// If it doesn't have any mail, then it isn't selectable as a mail
// folder but it may contain subfolders which hold mail.
let mut flags = match fs::read_dir(&dir.join("cur")) {
Err(_) => "\\Noselect".to_string(),
_ => {
match fs::read_dir(&dir.join("new")) {
Err(_) => "\\Noselect".to_string(),
// If there is new mail in the folder, we should inform the
// client. We do this only because we have to perform the
// check in order to determine selectability. The RFC says
// not to perform the check if it would slow down the
// response time.
Ok(newlisting) => {
if newlisting.count() == 0 {
"\\Unmarked".to_string()
} else {
"\\Marked".to_string()
}
}
}
}
};
// Changing folders in mutt doesn't work properly if we don't indicate
// whether or not a given folder has subfolders. Mutt has issues
// selecting folders with subfolders for reading mail, unfortunately.
match fs::read_dir(&dir) {
Err(_) => { return None; }
Ok(dir_listing) => {
let mut children = false;
for subdir_entry in dir_listing {
if let Ok(subdir) = subdir_entry {
if *dir == *maildir_path {
break;
}
let subdir_path = subdir.path();
let subdir_str = path_filename_to_str!(subdir_path);
if subdir_str!= "cur" &&
subdir_str!= "new" &&
subdir_str!= "tmp" {
if fs::read_dir(&subdir.path().join("cur")).is_err() {
continue;
}
if fs::read_dir(&subdir.path().join("new")).is_err() {
continue;
}
children = true;
break;
}
}
}
if children {
flags.push_str(" \\HasChildren");
} else {
flags.push_str(" \\HasNoChildren");
}
}
}
let re_path = make_absolute(maildir_path);
match fs::metadata(dir) {
Err(_) => return None,
Ok(md) =>
if !md.is_dir() {
return None;
}
};
if !regex.is_match(&dir_string[..]) {
return None;
}
let mut list_str = "* LIST (".to_string();
list_str.push_str(&flags[..]);
list_str.push_str(") \"/\" ");
let list_dir_string = if abs_dir.starts_with(&re_path[..]) {
abs_dir.replacen(&re_path[..], "", 1)
} else {
abs_dir
};
list_str.push_str(&(list_dir_string.replace("INBOX", ""))[..]);
Some(list_str)
}
/// Go through the logged in user's maildir and list every folder matching
/// the given regular expression. Returns a list of LIST responses.
pub fn list(maildir: &str, regex: &Regex) -> Vec<String> {
let maildir_path = Path::new(maildir);
let mut responses = Vec::new();
if let Some(list_response) = list_dir(maildir_path, regex, maildir_path) {
responses.push(list_response);
}
for dir_res in WalkDir::new(&maildir_path) {
if let Ok(dir) = dir_res {
if let Some(list_response) = list_dir(dir.path(), regex, maildir_path) {
responses.push(list_response);
}
}
}
responses
}
|
make_absolute
|
identifier_name
|
util.rs
|
// This file is made up largely of utility methods which are invoked by the
// session in its interpret method. They are separate because they don't rely
// on the session (or take what they do need as arguments) and/or they are
// called by the session in multiple places.
use std::env::current_dir;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use regex::Regex;
use walkdir::WalkDir;
use folder::Folder;
#[macro_export]
macro_rules! path_filename_to_str(
($p:ident) => ({
use std::ffi::OsStr;
$p.file_name().unwrap_or_else(|| OsStr::new("")).to_str().unwrap_or_else(|| "")
});
);
fn make_absolute(dir: &Path) -> String {
match current_dir() {
Err(_) => dir.display().to_string(),
Ok(absp) => {
let mut abs_path = absp.clone();
abs_path.push(dir);
abs_path.display().to_string()
}
}
}
pub fn perform_select(maildir: &str, select_args: &[&str], examine: bool,
tag: &str) -> (Option<Folder>, String) {
let err_res = (None, "".to_string());
if select_args.len() < 1 { return err_res; }
let mbox_name = select_args[0].trim_matches('"').replace("INBOX", ".");
let mut maildir_path = PathBuf::new();
maildir_path.push(maildir);
maildir_path.push(mbox_name);
let folder = match Folder::new(maildir_path, examine) {
None => { return err_res; }
Some(folder) => folder.clone()
};
let ok_res = folder.select_response(tag);
(Some(folder), ok_res)
}
/// For the given dir, make sure it is a valid mail folder and, if it is,
/// generate the LIST response for it.
fn list_dir(dir: &Path, regex: &Regex, maildir_path: &Path) -> Option<String> {
let dir_string = dir.display().to_string();
let dir_name = path_filename_to_str!(dir);
// These folder names are used to hold mail. Every other folder is
// valid.
if dir_name == "cur" || dir_name == "new" || dir_name == "tmp" {
return None;
}
let abs_dir = make_absolute(dir);
// If it doesn't have any mail, then it isn't selectable as a mail
// folder but it may contain subfolders which hold mail.
let mut flags = match fs::read_dir(&dir.join("cur")) {
Err(_) => "\\Noselect".to_string(),
_ => {
match fs::read_dir(&dir.join("new")) {
Err(_) => "\\Noselect".to_string(),
// If there is new mail in the folder, we should inform the
// client. We do this only because we have to perform the
// check in order to determine selectability. The RFC says
// not to perform the check if it would slow down the
// response time.
Ok(newlisting) => {
if newlisting.count() == 0 {
"\\Unmarked".to_string()
} else {
"\\Marked".to_string()
}
}
}
}
};
// Changing folders in mutt doesn't work properly if we don't indicate
// whether or not a given folder has subfolders. Mutt has issues
// selecting folders with subfolders for reading mail, unfortunately.
match fs::read_dir(&dir) {
Err(_) => { return None; }
Ok(dir_listing) => {
let mut children = false;
for subdir_entry in dir_listing {
if let Ok(subdir) = subdir_entry {
if *dir == *maildir_path {
break;
}
let subdir_path = subdir.path();
let subdir_str = path_filename_to_str!(subdir_path);
                    if subdir_str != "cur" &&
                       subdir_str != "new" &&
                       subdir_str != "tmp" {
if fs::read_dir(&subdir.path().join("cur")).is_err() {
continue;
}
if fs::read_dir(&subdir.path().join("new")).is_err() {
continue;
}
children = true;
break;
}
}
}
if children {
flags.push_str(" \\HasChildren");
} else {
flags.push_str(" \\HasNoChildren");
}
}
}
let re_path = make_absolute(maildir_path);
match fs::metadata(dir) {
Err(_) => return None,
Ok(md) =>
        if !md.is_dir() {
return None;
}
};
    if !regex.is_match(&dir_string[..]) {
return None;
}
let mut list_str = "* LIST (".to_string();
list_str.push_str(&flags[..]);
list_str.push_str(") \"/\" ");
let list_dir_string = if abs_dir.starts_with(&re_path[..]) {
abs_dir.replacen(&re_path[..], "", 1)
} else {
abs_dir
};
list_str.push_str(&(list_dir_string.replace("INBOX", ""))[..]);
Some(list_str)
}
/// Go through the logged in user's maildir and list every folder matching
/// the given regular expression. Returns a list of LIST responses.
pub fn list(maildir: &str, regex: &Regex) -> Vec<String>
|
{
let maildir_path = Path::new(maildir);
let mut responses = Vec::new();
if let Some(list_response) = list_dir(maildir_path, regex, maildir_path) {
responses.push(list_response);
}
for dir_res in WalkDir::new(&maildir_path) {
if let Ok(dir) = dir_res {
if let Some(list_response) = list_dir(dir.path(), regex, maildir_path) {
responses.push(list_response);
}
}
}
responses
}
|
identifier_body
|
|
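The tail of `list_dir` relativizes the folder path against the maildir root and then maps the literal "INBOX" away. A small sketch of just that arithmetic, with hypothetical paths:

// Sketch of the path relativization at the end of `list_dir`: strip the
// maildir prefix once, then drop the literal "INBOX". Paths are hypothetical.
fn main() {
    let re_path = "/home/user/Maildir";
    let abs_dir = "/home/user/Maildir/INBOX".to_string();
    let rel = if abs_dir.starts_with(re_path) {
        abs_dir.replacen(re_path, "", 1)
    } else {
        abs_dir
    };
    // The top-level INBOX folder ends up listed as "/".
    assert_eq!(rel.replace("INBOX", ""), "/");
}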
util.rs
|
// This file is made up largely of utility methods which are invoked by the
// session in its interpret method. They are separate because they don't rely
// on the session (or take what they do need as arguments) and/or they are
// called by the session in multiple places.
use std::env::current_dir;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use regex::Regex;
use walkdir::WalkDir;
use folder::Folder;
#[macro_export]
macro_rules! path_filename_to_str(
($p:ident) => ({
use std::ffi::OsStr;
$p.file_name().unwrap_or_else(|| OsStr::new("")).to_str().unwrap_or_else(|| "")
});
);
fn make_absolute(dir: &Path) -> String {
match current_dir() {
Err(_) => dir.display().to_string(),
Ok(absp) => {
let mut abs_path = absp.clone();
abs_path.push(dir);
abs_path.display().to_string()
}
}
}
pub fn perform_select(maildir: &str, select_args: &[&str], examine: bool,
tag: &str) -> (Option<Folder>, String) {
let err_res = (None, "".to_string());
if select_args.len() < 1 { return err_res; }
let mbox_name = select_args[0].trim_matches('"').replace("INBOX", ".");
let mut maildir_path = PathBuf::new();
maildir_path.push(maildir);
maildir_path.push(mbox_name);
let folder = match Folder::new(maildir_path, examine) {
None => { return err_res; }
Some(folder) => folder.clone()
};
let ok_res = folder.select_response(tag);
(Some(folder), ok_res)
}
/// For the given dir, make sure it is a valid mail folder and, if it is,
/// generate the LIST response for it.
fn list_dir(dir: &Path, regex: &Regex, maildir_path: &Path) -> Option<String> {
let dir_string = dir.display().to_string();
let dir_name = path_filename_to_str!(dir);
// These folder names are used to hold mail. Every other folder is
// valid.
if dir_name == "cur" || dir_name == "new" || dir_name == "tmp" {
return None;
}
let abs_dir = make_absolute(dir);
// If it doesn't have any mail, then it isn't selectable as a mail
// folder but it may contain subfolders which hold mail.
let mut flags = match fs::read_dir(&dir.join("cur")) {
Err(_) => "\\Noselect".to_string(),
_ => {
match fs::read_dir(&dir.join("new")) {
Err(_) => "\\Noselect".to_string(),
// If there is new mail in the folder, we should inform the
// client. We do this only because we have to perform the
// check in order to determine selectability. The RFC says
// not to perform the check if it would slow down the
// response time.
Ok(newlisting) => {
if newlisting.count() == 0 {
"\\Unmarked".to_string()
} else {
"\\Marked".to_string()
}
}
}
}
};
// Changing folders in mutt doesn't work properly if we don't indicate
// whether or not a given folder has subfolders. Mutt has issues
// selecting folders with subfolders for reading mail, unfortunately.
match fs::read_dir(&dir) {
Err(_) => { return None; }
Ok(dir_listing) => {
let mut children = false;
for subdir_entry in dir_listing {
if let Ok(subdir) = subdir_entry {
if *dir == *maildir_path {
break;
}
let subdir_path = subdir.path();
let subdir_str = path_filename_to_str!(subdir_path);
                    if subdir_str != "cur" &&
                       subdir_str != "new" &&
                       subdir_str != "tmp" {
if fs::read_dir(&subdir.path().join("cur")).is_err() {
continue;
}
if fs::read_dir(&subdir.path().join("new")).is_err() {
continue;
}
children = true;
break;
}
}
}
if children {
flags.push_str(" \\HasChildren");
} else {
flags.push_str(" \\HasNoChildren");
}
}
}
let re_path = make_absolute(maildir_path);
match fs::metadata(dir) {
Err(_) => return None,
Ok(md) =>
        if !md.is_dir() {
return None;
}
};
    if !regex.is_match(&dir_string[..]) {
return None;
}
let mut list_str = "* LIST (".to_string();
list_str.push_str(&flags[..]);
list_str.push_str(") \"/\" ");
let list_dir_string = if abs_dir.starts_with(&re_path[..]) {
abs_dir.replacen(&re_path[..], "", 1)
} else {
abs_dir
};
list_str.push_str(&(list_dir_string.replace("INBOX", ""))[..]);
Some(list_str)
}
/// Go through the logged in user's maildir and list every folder matching
/// the given regular expression. Returns a list of LIST responses.
pub fn list(maildir: &str, regex: &Regex) -> Vec<String> {
let maildir_path = Path::new(maildir);
let mut responses = Vec::new();
if let Some(list_response) = list_dir(maildir_path, regex, maildir_path) {
responses.push(list_response);
}
for dir_res in WalkDir::new(&maildir_path) {
if let Ok(dir) = dir_res
|
}
responses
}
|
{
if let Some(list_response) = list_dir(dir.path(), regex, maildir_path) {
responses.push(list_response);
}
}
|
conditional_block
|
util.rs
|
// This file is made up largely of utility methods which are invoked by the
// session in its interpret method. They are separate because they don't rely
// on the session (or take what they do need as arguments) and/or they are
// called by the session in multiple places.
use std::env::current_dir;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use regex::Regex;
use walkdir::WalkDir;
use folder::Folder;
#[macro_export]
macro_rules! path_filename_to_str(
($p:ident) => ({
use std::ffi::OsStr;
$p.file_name().unwrap_or_else(|| OsStr::new("")).to_str().unwrap_or_else(|| "")
});
);
fn make_absolute(dir: &Path) -> String {
match current_dir() {
Err(_) => dir.display().to_string(),
Ok(absp) => {
let mut abs_path = absp.clone();
abs_path.push(dir);
abs_path.display().to_string()
}
}
}
pub fn perform_select(maildir: &str, select_args: &[&str], examine: bool,
tag: &str) -> (Option<Folder>, String) {
let err_res = (None, "".to_string());
if select_args.len() < 1 { return err_res; }
let mbox_name = select_args[0].trim_matches('"').replace("INBOX", ".");
let mut maildir_path = PathBuf::new();
maildir_path.push(maildir);
maildir_path.push(mbox_name);
let folder = match Folder::new(maildir_path, examine) {
None => { return err_res; }
Some(folder) => folder.clone()
};
let ok_res = folder.select_response(tag);
(Some(folder), ok_res)
}
/// For the given dir, make sure it is a valid mail folder and, if it is,
/// generate the LIST response for it.
fn list_dir(dir: &Path, regex: &Regex, maildir_path: &Path) -> Option<String> {
let dir_string = dir.display().to_string();
let dir_name = path_filename_to_str!(dir);
// These folder names are used to hold mail. Every other folder is
// valid.
if dir_name == "cur" || dir_name == "new" || dir_name == "tmp" {
return None;
}
let abs_dir = make_absolute(dir);
// If it doesn't have any mail, then it isn't selectable as a mail
// folder but it may contain subfolders which hold mail.
let mut flags = match fs::read_dir(&dir.join("cur")) {
Err(_) => "\\Noselect".to_string(),
_ => {
match fs::read_dir(&dir.join("new")) {
Err(_) => "\\Noselect".to_string(),
// If there is new mail in the folder, we should inform the
// client. We do this only because we have to perform the
// check in order to determine selectability. The RFC says
// not to perform the check if it would slow down the
// response time.
Ok(newlisting) => {
if newlisting.count() == 0 {
"\\Unmarked".to_string()
} else {
"\\Marked".to_string()
}
}
}
}
};
// Changing folders in mutt doesn't work properly if we don't indicate
// whether or not a given folder has subfolders. Mutt has issues
// selecting folders with subfolders for reading mail, unfortunately.
match fs::read_dir(&dir) {
Err(_) => { return None; }
Ok(dir_listing) => {
let mut children = false;
for subdir_entry in dir_listing {
if let Ok(subdir) = subdir_entry {
if *dir == *maildir_path {
break;
}
let subdir_path = subdir.path();
|
continue;
}
if fs::read_dir(&subdir.path().join("new")).is_err() {
continue;
}
children = true;
break;
}
}
}
if children {
flags.push_str(" \\HasChildren");
} else {
flags.push_str(" \\HasNoChildren");
}
}
}
let re_path = make_absolute(maildir_path);
match fs::metadata(dir) {
Err(_) => return None,
Ok(md) =>
        if !md.is_dir() {
return None;
}
};
    if !regex.is_match(&dir_string[..]) {
return None;
}
let mut list_str = "* LIST (".to_string();
list_str.push_str(&flags[..]);
list_str.push_str(") \"/\" ");
let list_dir_string = if abs_dir.starts_with(&re_path[..]) {
abs_dir.replacen(&re_path[..], "", 1)
} else {
abs_dir
};
list_str.push_str(&(list_dir_string.replace("INBOX", ""))[..]);
Some(list_str)
}
/// Go through the logged in user's maildir and list every folder matching
/// the given regular expression. Returns a list of LIST responses.
pub fn list(maildir: &str, regex: &Regex) -> Vec<String> {
let maildir_path = Path::new(maildir);
let mut responses = Vec::new();
if let Some(list_response) = list_dir(maildir_path, regex, maildir_path) {
responses.push(list_response);
}
for dir_res in WalkDir::new(&maildir_path) {
if let Ok(dir) = dir_res {
if let Some(list_response) = list_dir(dir.path(), regex, maildir_path) {
responses.push(list_response);
}
}
}
responses
}
|
let subdir_str = path_filename_to_str!(subdir_path);
if subdir_str != "cur" &&
subdir_str != "new" &&
subdir_str != "tmp" {
if fs::read_dir(&subdir.path().join("cur")).is_err() {
|
random_line_split
|
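The `path_filename_to_str!` macro in these util.rs rows collapses a path's final component to a `&str`, defaulting to the empty string. An equivalent function form for reference (the function name is my own):

// Function form of the path_filename_to_str! macro: the final path component
// as &str, falling back to "" when there is no file name or it is not UTF-8.
use std::ffi::OsStr;
use std::path::Path;

fn file_name_or_empty(p: &Path) -> &str {
    p.file_name().unwrap_or_else(|| OsStr::new("")).to_str().unwrap_or("")
}

fn main() {
    assert_eq!(file_name_or_empty(Path::new("/mail/INBOX/cur")), "cur");
    assert_eq!(file_name_or_empty(Path::new("/")), "");
}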
liquid.rs
|
use std::io::Write;
use std::sync::{Arc, RwLock};
use world::{self, block};
use shared::Direction;
use model::BlockVertex;
use render;
pub fn render_liquid<W: Write>(textures: Arc<RwLock<render::TextureManager>>, lava: bool, snapshot: &world::Snapshot, x: i32, y: i32, z: i32, buf: &mut W) -> usize {
let get_liquid = if lava {
get_lava_level
} else {
get_water_level
};
let mut count = 0;
let (tl, tr, bl, br) = if get_liquid(snapshot, x, y + 1, z).is_some() {
(8, 8, 8, 8)
} else {
(
average_liquid_level(get_liquid, snapshot, x, y, z),
average_liquid_level(get_liquid, snapshot, x+1, y, z),
average_liquid_level(get_liquid, snapshot, x, y, z+1),
average_liquid_level(get_liquid, snapshot, x+1, y, z+1)
)
};
let tex = match snapshot.get_block(x, y, z) {
block::Block::Water{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/water_still"),
block::Block::FlowingWater{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/water_flow"),
block::Block::Lava{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/lava_still"),
block::Block::FlowingLava{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/lava_flow"),
_ => unreachable!(),
};
let ux1 = 0i16;
let ux2 = 16i16 * tex.get_width() as i16;
let uy1 = 0i16;
let uy2 = 16i16 * tex.get_height() as i16;
for dir in Direction::all() {
let (ox, oy, oz) = dir.get_offset();
let special = dir == Direction::Up && (tl < 8 || tr < 8 || bl < 8 || br < 8);
let block = snapshot.get_block(x+ox, y+oy, z+oz);
if special || (!block.get_material().should_cull_against && get_liquid(snapshot, x+ox, y+oy, z+oz).is_none()) {
let verts = BlockVertex::face_by_direction(dir);
for vert in verts {
let mut vert = vert.clone();
vert.tx = tex.get_x() as u16;
vert.ty = tex.get_y() as u16;
vert.tw = tex.get_width() as u16;
vert.th = tex.get_height() as u16;
vert.tatlas = tex.atlas as i16;
vert.r = 255;
vert.g = 255;
vert.b = 255;
if vert.y == 0.0 {
vert.y = y as f32;
} else {
let height = match (vert.x, vert.z) {
(0.0, 0.0) => ((16.0 / 8.0) * (tl as f32)) as i32,
(_, 0.0) => ((16.0 / 8.0) * (tr as f32)) as i32,
(0.0, _) => ((16.0 / 8.0) * (bl as f32)) as i32,
(_, _) => ((16.0 / 8.0) * (br as f32)) as i32,
};
vert.y = (height as f32)/16.0 + (y as f32);
}
vert.x += x as f32;
vert.z += z as f32;
let (bl, sl) = super::calculate_light(
snapshot,
x, y, z,
vert.x as f64,
vert.y as f64,
vert.z as f64,
dir,
!lava,
false
);
vert.block_light = bl;
vert.sky_light = sl;
if vert.toffsetx == 0 {
vert.toffsetx = ux1;
} else {
vert.toffsetx = ux2;
}
if vert.toffsety == 0 {
vert.toffsety = uy1;
} else {
vert.toffsety = uy2;
}
vert.write(buf);
}
count += 6;
}
}
count
}
fn average_liquid_level(
get: fn(&world::Snapshot, i32, i32, i32) -> Option<i32>,
snapshot: &world::Snapshot, x: i32, y: i32, z: i32
) -> i32 {
let mut level = 0;
    for xx in -1 .. 1 {
        for zz in -1 .. 1 {
if get(snapshot, x+xx, y+1, z+zz).is_some() {
return 8;
}
if let Some(l) = get(snapshot, x+xx, y, z+zz) {
let nl = 7 - (l & 0x7);
if nl > level {
level = nl;
}
}
}
}
level
}
fn
|
(snapshot: &world::Snapshot, x: i32, y: i32, z: i32) -> Option<i32> {
match snapshot.get_block(x, y, z) {
block::Block::Water{level} | block::Block::FlowingWater{level} => Some(level as i32),
_ => None,
}
}
fn get_lava_level(snapshot: &world::Snapshot, x: i32, y: i32, z: i32) -> Option<i32> {
match snapshot.get_block(x, y, z) {
block::Block::Lava{level} | block::Block::FlowingLava{level} => Some(level as i32),
_ => None,
}
}
|
get_water_level
|
identifier_name
|
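On the liquid geometry in these rows: stored liquid levels appear to run 0 (source/full) through 7 (thinnest), and `render_liquid` converts a corner level into sixteenths of a block. A sketch of just that arithmetic (function name is mine):

// Level-to-height conversion as used by render_liquid: 7 - (level & 0x7)
// gives a 0..=7 corner height, scaled by 16/8 into sixteenths of a block.
fn corner_height_sixteenths(stored_level: i32) -> i32 {
    let h = 7 - (stored_level & 0x7);
    ((16.0 / 8.0) * (h as f32)) as i32
}

fn main() {
    assert_eq!(corner_height_sixteenths(0), 14); // near-full source block
    assert_eq!(corner_height_sixteenths(7), 0);  // thinnest flowing edge
}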
liquid.rs
|
use std::io::Write;
use std::sync::{Arc, RwLock};
use world::{self, block};
use shared::Direction;
use model::BlockVertex;
use render;
pub fn render_liquid<W: Write>(textures: Arc<RwLock<render::TextureManager>>, lava: bool, snapshot: &world::Snapshot, x: i32, y: i32, z: i32, buf: &mut W) -> usize {
let get_liquid = if lava {
get_lava_level
} else {
get_water_level
};
let mut count = 0;
let (tl, tr, bl, br) = if get_liquid(snapshot, x, y + 1, z).is_some() {
(8, 8, 8, 8)
} else {
(
average_liquid_level(get_liquid, snapshot, x, y, z),
average_liquid_level(get_liquid, snapshot, x+1, y, z),
average_liquid_level(get_liquid, snapshot, x, y, z+1),
average_liquid_level(get_liquid, snapshot, x+1, y, z+1)
)
};
let tex = match snapshot.get_block(x, y, z) {
block::Block::Water{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/water_still"),
block::Block::FlowingWater{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/water_flow"),
block::Block::Lava{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/lava_still"),
block::Block::FlowingLava{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/lava_flow"),
_ => unreachable!(),
};
let ux1 = 0i16;
let ux2 = 16i16 * tex.get_width() as i16;
let uy1 = 0i16;
let uy2 = 16i16 * tex.get_height() as i16;
for dir in Direction::all() {
let (ox, oy, oz) = dir.get_offset();
let special = dir == Direction::Up && (tl < 8 || tr < 8 || bl < 8 || br < 8);
let block = snapshot.get_block(x+ox, y+oy, z+oz);
if special || (!block.get_material().should_cull_against && get_liquid(snapshot, x+ox, y+oy, z+oz).is_none())
|
(_, _) => ((16.0 / 8.0) * (br as f32)) as i32,
};
vert.y = (height as f32)/16.0 + (y as f32);
}
vert.x += x as f32;
vert.z += z as f32;
let (bl, sl) = super::calculate_light(
snapshot,
x, y, z,
vert.x as f64,
vert.y as f64,
vert.z as f64,
dir,
!lava,
false
);
vert.block_light = bl;
vert.sky_light = sl;
if vert.toffsetx == 0 {
vert.toffsetx = ux1;
} else {
vert.toffsetx = ux2;
}
if vert.toffsety == 0 {
vert.toffsety = uy1;
} else {
vert.toffsety = uy2;
}
vert.write(buf);
}
count += 6;
}
}
count
}
fn average_liquid_level(
get: fn(&world::Snapshot, i32, i32, i32) -> Option<i32>,
snapshot: &world::Snapshot, x: i32, y: i32, z: i32
) -> i32 {
let mut level = 0;
    for xx in -1 .. 1 {
        for zz in -1 .. 1 {
if get(snapshot, x+xx, y+1, z+zz).is_some() {
return 8;
}
if let Some(l) = get(snapshot, x+xx, y, z+zz) {
let nl = 7 - (l & 0x7);
if nl > level {
level = nl;
}
}
}
}
level
}
fn get_water_level(snapshot: &world::Snapshot, x: i32, y: i32, z: i32) -> Option<i32> {
match snapshot.get_block(x, y, z) {
block::Block::Water{level} | block::Block::FlowingWater{level} => Some(level as i32),
_ => None,
}
}
fn get_lava_level(snapshot: &world::Snapshot, x: i32, y: i32, z: i32) -> Option<i32> {
match snapshot.get_block(x, y, z) {
block::Block::Lava{level} | block::Block::FlowingLava{level} => Some(level as i32),
_ => None,
}
}
|
{
let verts = BlockVertex::face_by_direction(dir);
for vert in verts {
let mut vert = vert.clone();
vert.tx = tex.get_x() as u16;
vert.ty = tex.get_y() as u16;
vert.tw = tex.get_width() as u16;
vert.th = tex.get_height() as u16;
vert.tatlas = tex.atlas as i16;
vert.r = 255;
vert.g = 255;
vert.b = 255;
if vert.y == 0.0 {
vert.y = y as f32;
} else {
let height = match (vert.x, vert.z) {
(0.0, 0.0) => ((16.0 / 8.0) * (tl as f32)) as i32,
(_, 0.0) => ((16.0 / 8.0) * (tr as f32)) as i32,
(0.0, _) => ((16.0 / 8.0) * (bl as f32)) as i32,
|
conditional_block
|
liquid.rs
|
use std::io::Write;
use std::sync::{Arc, RwLock};
use world::{self, block};
use shared::Direction;
use model::BlockVertex;
use render;
|
let get_liquid = if lava {
get_lava_level
} else {
get_water_level
};
let mut count = 0;
let (tl, tr, bl, br) = if get_liquid(snapshot, x, y + 1, z).is_some() {
(8, 8, 8, 8)
} else {
(
average_liquid_level(get_liquid, snapshot, x, y, z),
average_liquid_level(get_liquid, snapshot, x+1, y, z),
average_liquid_level(get_liquid, snapshot, x, y, z+1),
average_liquid_level(get_liquid, snapshot, x+1, y, z+1)
)
};
let tex = match snapshot.get_block(x, y, z) {
block::Block::Water{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/water_still"),
block::Block::FlowingWater{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/water_flow"),
block::Block::Lava{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/lava_still"),
block::Block::FlowingLava{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/lava_flow"),
_ => unreachable!(),
};
let ux1 = 0i16;
let ux2 = 16i16 * tex.get_width() as i16;
let uy1 = 0i16;
let uy2 = 16i16 * tex.get_height() as i16;
for dir in Direction::all() {
let (ox, oy, oz) = dir.get_offset();
let special = dir == Direction::Up && (tl < 8 || tr < 8 || bl < 8 || br < 8);
let block = snapshot.get_block(x+ox, y+oy, z+oz);
if special || (!block.get_material().should_cull_against && get_liquid(snapshot, x+ox, y+oy, z+oz).is_none()) {
let verts = BlockVertex::face_by_direction(dir);
for vert in verts {
let mut vert = vert.clone();
vert.tx = tex.get_x() as u16;
vert.ty = tex.get_y() as u16;
vert.tw = tex.get_width() as u16;
vert.th = tex.get_height() as u16;
vert.tatlas = tex.atlas as i16;
vert.r = 255;
vert.g = 255;
vert.b = 255;
if vert.y == 0.0 {
vert.y = y as f32;
} else {
let height = match (vert.x, vert.z) {
(0.0, 0.0) => ((16.0 / 8.0) * (tl as f32)) as i32,
(_, 0.0) => ((16.0 / 8.0) * (tr as f32)) as i32,
(0.0, _) => ((16.0 / 8.0) * (bl as f32)) as i32,
(_, _) => ((16.0 / 8.0) * (br as f32)) as i32,
};
vert.y = (height as f32)/16.0 + (y as f32);
}
vert.x += x as f32;
vert.z += z as f32;
let (bl, sl) = super::calculate_light(
snapshot,
x, y, z,
vert.x as f64,
vert.y as f64,
vert.z as f64,
dir,
!lava,
false
);
vert.block_light = bl;
vert.sky_light = sl;
if vert.toffsetx == 0 {
vert.toffsetx = ux1;
} else {
vert.toffsetx = ux2;
}
if vert.toffsety == 0 {
vert.toffsety = uy1;
} else {
vert.toffsety = uy2;
}
vert.write(buf);
}
count += 6;
}
}
count
}
fn average_liquid_level(
get: fn(&world::Snapshot, i32, i32, i32) -> Option<i32>,
snapshot: &world::Snapshot, x: i32, y: i32, z: i32
) -> i32 {
let mut level = 0;
    for xx in -1 .. 1 {
        for zz in -1 .. 1 {
if get(snapshot, x+xx, y+1, z+zz).is_some() {
return 8;
}
if let Some(l) = get(snapshot, x+xx, y, z+zz) {
let nl = 7 - (l & 0x7);
if nl > level {
level = nl;
}
}
}
}
level
}
fn get_water_level(snapshot: &world::Snapshot, x: i32, y: i32, z: i32) -> Option<i32> {
match snapshot.get_block(x, y, z) {
block::Block::Water{level} | block::Block::FlowingWater{level} => Some(level as i32),
_ => None,
}
}
fn get_lava_level(snapshot: &world::Snapshot, x: i32, y: i32, z: i32) -> Option<i32> {
match snapshot.get_block(x, y, z) {
block::Block::Lava{level} | block::Block::FlowingLava{level} => Some(level as i32),
_ => None,
}
}
|
pub fn render_liquid<W: Write>(textures: Arc<RwLock<render::TextureManager>>, lava: bool, snapshot: &world::Snapshot, x: i32, y: i32, z: i32, buf: &mut W) -> usize {
|
random_line_split
|
liquid.rs
|
use std::io::Write;
use std::sync::{Arc, RwLock};
use world::{self, block};
use shared::Direction;
use model::BlockVertex;
use render;
pub fn render_liquid<W: Write>(textures: Arc<RwLock<render::TextureManager>>, lava: bool, snapshot: &world::Snapshot, x: i32, y: i32, z: i32, buf: &mut W) -> usize {
let get_liquid = if lava {
get_lava_level
} else {
get_water_level
};
let mut count = 0;
let (tl, tr, bl, br) = if get_liquid(snapshot, x, y + 1, z).is_some() {
(8, 8, 8, 8)
} else {
(
average_liquid_level(get_liquid, snapshot, x, y, z),
average_liquid_level(get_liquid, snapshot, x+1, y, z),
average_liquid_level(get_liquid, snapshot, x, y, z+1),
average_liquid_level(get_liquid, snapshot, x+1, y, z+1)
)
};
let tex = match snapshot.get_block(x, y, z) {
block::Block::Water{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/water_still"),
block::Block::FlowingWater{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/water_flow"),
block::Block::Lava{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/lava_still"),
block::Block::FlowingLava{..} => render::Renderer::get_texture(&textures, "minecraft:blocks/lava_flow"),
_ => unreachable!(),
};
let ux1 = 0i16;
let ux2 = 16i16 * tex.get_width() as i16;
let uy1 = 0i16;
let uy2 = 16i16 * tex.get_height() as i16;
for dir in Direction::all() {
let (ox, oy, oz) = dir.get_offset();
let special = dir == Direction::Up && (tl < 8 || tr < 8 || bl < 8 || br < 8);
let block = snapshot.get_block(x+ox, y+oy, z+oz);
if special || (!block.get_material().should_cull_against && get_liquid(snapshot, x+ox, y+oy, z+oz).is_none()) {
let verts = BlockVertex::face_by_direction(dir);
for vert in verts {
let mut vert = vert.clone();
vert.tx = tex.get_x() as u16;
vert.ty = tex.get_y() as u16;
vert.tw = tex.get_width() as u16;
vert.th = tex.get_height() as u16;
vert.tatlas = tex.atlas as i16;
vert.r = 255;
vert.g = 255;
vert.b = 255;
if vert.y == 0.0 {
vert.y = y as f32;
} else {
let height = match (vert.x, vert.z) {
(0.0, 0.0) => ((16.0 / 8.0) * (tl as f32)) as i32,
(_, 0.0) => ((16.0 / 8.0) * (tr as f32)) as i32,
(0.0, _) => ((16.0 / 8.0) * (bl as f32)) as i32,
(_, _) => ((16.0 / 8.0) * (br as f32)) as i32,
};
vert.y = (height as f32)/16.0 + (y as f32);
}
vert.x += x as f32;
vert.z += z as f32;
let (bl, sl) = super::calculate_light(
snapshot,
x, y, z,
vert.x as f64,
vert.y as f64,
vert.z as f64,
dir,
!lava,
false
);
vert.block_light = bl;
vert.sky_light = sl;
if vert.toffsetx == 0 {
vert.toffsetx = ux1;
} else {
vert.toffsetx = ux2;
}
if vert.toffsety == 0 {
vert.toffsety = uy1;
} else {
vert.toffsety = uy2;
}
vert.write(buf);
}
count += 6;
}
}
count
}
fn average_liquid_level(
get: fn(&world::Snapshot, i32, i32, i32) -> Option<i32>,
snapshot: &world::Snapshot, x: i32, y: i32, z: i32
) -> i32
|
fn get_water_level(snapshot: &world::Snapshot, x: i32, y: i32, z: i32) -> Option<i32> {
match snapshot.get_block(x, y, z) {
block::Block::Water{level} | block::Block::FlowingWater{level} => Some(level as i32),
_ => None,
}
}
fn get_lava_level(snapshot: &world::Snapshot, x: i32, y: i32, z: i32) -> Option<i32> {
match snapshot.get_block(x, y, z) {
block::Block::Lava{level} | block::Block::FlowingLava{level} => Some(level as i32),
_ => None,
}
}
|
{
let mut level = 0;
for xx in -1 .. 1 {
for zz in -1 .. 1 {
if get(snapshot, x+xx, y+1, z+zz).is_some() {
return 8;
}
if let Some(l) = get(snapshot, x+xx, y, z+zz) {
let nl = 7 - (l & 0x7);
if nl > level {
level = nl;
}
}
}
}
level
}
|
identifier_body
|
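Worth noting about `average_liquid_level`: despite the name it takes a maximum, and `-1 .. 1` iterates only the offsets -1 and 0, i.e. the 2x2 block neighbourhood sharing a vertex corner. A standalone sketch under those assumptions (the closure-based signature is my simplification):

// Sketch of the 2x2 corner sampling in average_liquid_level: each vertex
// corner inspects the four blocks sharing that corner and keeps the highest
// converted level found.
fn max_corner_level(levels: impl Fn(i32, i32) -> Option<i32>) -> i32 {
    let mut level = 0;
    for xx in -1..1 {
        for zz in -1..1 {
            if let Some(l) = levels(xx, zz) {
                let nl = 7 - (l & 0x7);
                if nl > level {
                    level = nl;
                }
            }
        }
    }
    level
}

fn main() {
    // Hypothetical neighbourhood: liquid only in the block at offset (0, 0).
    let lvl = max_corner_level(|x, z| if (x, z) == (0, 0) { Some(2) } else { None });
    assert_eq!(lvl, 5);
}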
sub.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{BuiltinBounds};
use middle::ty;
use middle::ty::TyVar;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::{cres, CresCompare};
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::InferCtxt;
use middle::typeck::infer::lattice::CombineFieldsLatticeMethods;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::then;
use middle::typeck::infer::to_str::InferStr;
use middle::typeck::infer::{TypeTrace, Subtype};
use util::common::{indenter};
use util::ppaux::bound_region_to_str;
use syntax::ast::{Onceness, Purity};
pub struct Sub<'f>(CombineFields<'f>); // "subtype", "subregion" etc
impl<'f> Sub<'f> {
pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f> { let Sub(ref v) = *self; v }
}
impl<'f> Combine for Sub<'f> {
fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.get_ref().infcx }
fn tag(&self) -> ~str { ~"sub" }
fn a_is_expected(&self) -> bool { self.get_ref().a_is_expected }
fn trace(&self) -> TypeTrace { self.get_ref().trace }
fn sub<'a>(&'a self) -> Sub<'a> { Sub(*self.get_ref()) }
fn lub<'a>(&'a self) -> Lub<'a> { Lub(*self.get_ref()) }
fn glb<'a>(&'a self) -> Glb<'a> { Glb(*self.get_ref()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
let opp = CombineFields {
            a_is_expected: !self.get_ref().a_is_expected, .. *self.get_ref()
};
Sub(opp).tys(b, a)
}
fn contraregions(&self, a: ty::Region, b: ty::Region)
-> cres<ty::Region> {
let opp = CombineFields {
            a_is_expected: !self.get_ref().a_is_expected, .. *self.get_ref()
};
Sub(opp).regions(b, a)
}
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.inf_str(self.get_ref().infcx),
b.inf_str(self.get_ref().infcx));
self.get_ref().infcx.region_vars.make_subregion(Subtype(self.get_ref().trace), a, b);
Ok(a)
}
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
debug!("mts({} <: {})", a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
        if a.mutbl != b.mutbl {
return Err(ty::terr_mutability);
}
match b.mutbl {
MutMutable => {
// If supertype is mut, subtype must match exactly
// (i.e., invariant if mut):
eq_tys(self, a.ty, b.ty).then(|| Ok(*a))
}
MutImmutable => {
// Otherwise we can be covariant:
self.tys(a.ty, b.ty).and_then(|_t| Ok(*a) )
}
}
}
fn purities(&self, a: Purity, b: Purity) -> cres<Purity> {
self.lub().purities(a, b).compare(b, || {
ty::terr_purity_mismatch(expected_found(self, a, b))
})
}
fn oncenesses(&self, a: Onceness, b: Onceness) -> cres<Onceness> {
self.lub().oncenesses(a, b).compare(b, || {
ty::terr_onceness_mismatch(expected_found(self, a, b))
})
}
fn
|
(&self, a: BuiltinBounds, b: BuiltinBounds) -> cres<BuiltinBounds> {
// More bounds is a subtype of fewer bounds.
//
// e.g., fn:Copy() <: fn(), because the former is a function
// that only closes over copyable things, but the latter is
// any function at all.
if a.contains(b) {
Ok(a)
} else {
Err(ty::terr_builtin_bounds(expected_found(self, a, b)))
}
}
fn tys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
debug!("{}.tys({}, {})", self.tag(),
a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
if a == b { return Ok(a); }
let _indenter = indenter();
match (&ty::get(a).sty, &ty::get(b).sty) {
(&ty::ty_bot, _) => {
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
if_ok!(self.get_ref().var_sub_var(a_id, b_id));
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), _) => {
if_ok!(self.get_ref().var_sub_t(a_id, b));
Ok(a)
}
(_, &ty::ty_infer(TyVar(b_id))) => {
if_ok!(self.get_ref().t_sub_var(a, b_id));
Ok(a)
}
(_, &ty::ty_bot) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
_ => {
super_tys(self, a, b)
}
}
}
fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
debug!("fn_sigs(a={}, b={})",
a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
let _indenter = indenter();
// Rather than checking the subtype relationship between `a` and `b`
// as-is, we need to do some extra work here in order to make sure
// that function subtyping works correctly with respect to regions
//
// Note: this is a subtle algorithm. For a full explanation,
// please see the large comment in `region_inference.rs`.
// Take a snapshot. We'll never roll this back, but in later
// phases we do want to be able to examine "all bindings that
// were created as part of this type comparison", and making a
// snapshot is a convenient way to do that.
let snapshot = self.get_ref().infcx.region_vars.start_snapshot();
// First, we instantiate each bound region in the subtype with a fresh
// region variable.
let (a_sig, _) =
self.get_ref().infcx.replace_late_bound_regions_with_fresh_regions(
self.get_ref().trace, a);
// Second, we instantiate each bound region in the supertype with a
// fresh concrete region.
let (skol_map, b_sig) = {
replace_late_bound_regions_in_fn_sig(self.get_ref().infcx.tcx, b, |br| {
let skol = self.get_ref().infcx.region_vars.new_skolemized(br);
debug!("Bound region {} skolemized to {:?}",
bound_region_to_str(self.get_ref().infcx.tcx, "", false, br),
skol);
skol
})
};
debug!("a_sig={}", a_sig.inf_str(self.get_ref().infcx));
debug!("b_sig={}", b_sig.inf_str(self.get_ref().infcx));
// Compare types now that bound regions have been replaced.
let sig = if_ok!(super_fn_sigs(self, &a_sig, &b_sig));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
let new_vars =
self.get_ref().infcx.region_vars.vars_created_since_snapshot(snapshot);
for (&skol_br, &skol) in skol_map.iter() {
let tainted = self.get_ref().infcx.region_vars.tainted(snapshot, skol);
for tainted_region in tainted.iter() {
// Each skolemized should only be relatable to itself
// or new variables:
match *tainted_region {
ty::ReInfer(ty::ReVar(ref vid)) => {
if new_vars.iter().any(|x| x == vid) { continue; }
}
_ => {
if *tainted_region == skol { continue; }
}
};
// A is not as polymorphic as B:
if self.a_is_expected() {
return Err(ty::terr_regions_insufficiently_polymorphic(
skol_br, *tainted_region));
} else {
return Err(ty::terr_regions_overly_polymorphic(
skol_br, *tainted_region));
}
}
}
return Ok(sig);
}
}
|
bounds
|
identifier_name
|
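The `bounds` comment above states the rule "more bounds is a subtype of fewer bounds". A toy illustration using plain bitmasks; the bit values are hypothetical stand-ins for the compiler's BuiltinBounds set:

// Sketch of the bounds rule: a closure type carrying MORE builtin bounds is
// a subtype of one carrying fewer, so the check is simply containment.
const SEND: u8 = 0b01;
const COPY: u8 = 0b10;

fn is_subtype(a: u8, b: u8) -> bool {
    a & b == b // a contains every bound of b
}

fn main() {
    assert!(is_subtype(SEND | COPY, COPY)); // analogue of fn:Copy() <: fn()
    assert!(!is_subtype(SEND, SEND | COPY));
}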
sub.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{BuiltinBounds};
use middle::ty;
use middle::ty::TyVar;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::{cres, CresCompare};
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::InferCtxt;
use middle::typeck::infer::lattice::CombineFieldsLatticeMethods;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::then;
use middle::typeck::infer::to_str::InferStr;
use middle::typeck::infer::{TypeTrace, Subtype};
use util::common::{indenter};
use util::ppaux::bound_region_to_str;
use syntax::ast::{Onceness, Purity};
pub struct Sub<'f>(CombineFields<'f>); // "subtype", "subregion" etc
impl<'f> Sub<'f> {
pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f> { let Sub(ref v) = *self; v }
}
impl<'f> Combine for Sub<'f> {
fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.get_ref().infcx }
fn tag(&self) -> ~str { ~"sub" }
fn a_is_expected(&self) -> bool { self.get_ref().a_is_expected }
fn trace(&self) -> TypeTrace { self.get_ref().trace }
fn sub<'a>(&'a self) -> Sub<'a> { Sub(*self.get_ref()) }
fn lub<'a>(&'a self) -> Lub<'a> { Lub(*self.get_ref()) }
fn glb<'a>(&'a self) -> Glb<'a> { Glb(*self.get_ref()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
let opp = CombineFields {
            a_is_expected: !self.get_ref().a_is_expected, .. *self.get_ref()
};
Sub(opp).tys(b, a)
}
fn contraregions(&self, a: ty::Region, b: ty::Region)
-> cres<ty::Region> {
let opp = CombineFields {
            a_is_expected: !self.get_ref().a_is_expected, .. *self.get_ref()
};
Sub(opp).regions(b, a)
}
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.inf_str(self.get_ref().infcx),
b.inf_str(self.get_ref().infcx));
self.get_ref().infcx.region_vars.make_subregion(Subtype(self.get_ref().trace), a, b);
Ok(a)
}
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
debug!("mts({} <: {})", a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
        if a.mutbl != b.mutbl {
return Err(ty::terr_mutability);
}
match b.mutbl {
MutMutable => {
// If supertype is mut, subtype must match exactly
// (i.e., invariant if mut):
eq_tys(self, a.ty, b.ty).then(|| Ok(*a))
}
MutImmutable => {
// Otherwise we can be covariant:
self.tys(a.ty, b.ty).and_then(|_t| Ok(*a) )
}
}
}
fn purities(&self, a: Purity, b: Purity) -> cres<Purity> {
self.lub().purities(a, b).compare(b, || {
ty::terr_purity_mismatch(expected_found(self, a, b))
})
}
fn oncenesses(&self, a: Onceness, b: Onceness) -> cres<Onceness> {
self.lub().oncenesses(a, b).compare(b, || {
ty::terr_onceness_mismatch(expected_found(self, a, b))
})
}
fn bounds(&self, a: BuiltinBounds, b: BuiltinBounds) -> cres<BuiltinBounds> {
// More bounds is a subtype of fewer bounds.
//
// e.g., fn:Copy() <: fn(), because the former is a function
// that only closes over copyable things, but the latter is
// any function at all.
if a.contains(b) {
Ok(a)
} else {
Err(ty::terr_builtin_bounds(expected_found(self, a, b)))
}
}
fn tys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
debug!("{}.tys({}, {})", self.tag(),
a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
if a == b { return Ok(a); }
let _indenter = indenter();
match (&ty::get(a).sty, &ty::get(b).sty) {
(&ty::ty_bot, _) => {
Ok(a)
}
|
if_ok!(self.get_ref().var_sub_t(a_id, b));
Ok(a)
}
(_, &ty::ty_infer(TyVar(b_id))) => {
if_ok!(self.get_ref().t_sub_var(a, b_id));
Ok(a)
}
(_, &ty::ty_bot) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
_ => {
super_tys(self, a, b)
}
}
}
fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
debug!("fn_sigs(a={}, b={})",
a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
let _indenter = indenter();
// Rather than checking the subtype relationship between `a` and `b`
// as-is, we need to do some extra work here in order to make sure
// that function subtyping works correctly with respect to regions
//
// Note: this is a subtle algorithm. For a full explanation,
// please see the large comment in `region_inference.rs`.
// Take a snapshot. We'll never roll this back, but in later
// phases we do want to be able to examine "all bindings that
// were created as part of this type comparison", and making a
// snapshot is a convenient way to do that.
let snapshot = self.get_ref().infcx.region_vars.start_snapshot();
// First, we instantiate each bound region in the subtype with a fresh
// region variable.
let (a_sig, _) =
self.get_ref().infcx.replace_late_bound_regions_with_fresh_regions(
self.get_ref().trace, a);
// Second, we instantiate each bound region in the supertype with a
// fresh concrete region.
let (skol_map, b_sig) = {
replace_late_bound_regions_in_fn_sig(self.get_ref().infcx.tcx, b, |br| {
let skol = self.get_ref().infcx.region_vars.new_skolemized(br);
debug!("Bound region {} skolemized to {:?}",
bound_region_to_str(self.get_ref().infcx.tcx, "", false, br),
skol);
skol
})
};
debug!("a_sig={}", a_sig.inf_str(self.get_ref().infcx));
debug!("b_sig={}", b_sig.inf_str(self.get_ref().infcx));
// Compare types now that bound regions have been replaced.
let sig = if_ok!(super_fn_sigs(self, &a_sig, &b_sig));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
let new_vars =
self.get_ref().infcx.region_vars.vars_created_since_snapshot(snapshot);
for (&skol_br, &skol) in skol_map.iter() {
let tainted = self.get_ref().infcx.region_vars.tainted(snapshot, skol);
for tainted_region in tainted.iter() {
// Each skolemized should only be relatable to itself
// or new variables:
match *tainted_region {
ty::ReInfer(ty::ReVar(ref vid)) => {
if new_vars.iter().any(|x| x == vid) { continue; }
}
_ => {
if *tainted_region == skol { continue; }
}
};
// A is not as polymorphic as B:
if self.a_is_expected() {
return Err(ty::terr_regions_insufficiently_polymorphic(
skol_br, *tainted_region));
} else {
return Err(ty::terr_regions_overly_polymorphic(
skol_br, *tainted_region));
}
}
}
return Ok(sig);
}
}
|
(&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
if_ok!(self.get_ref().var_sub_var(a_id, b_id));
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), _) => {
|
random_line_split
|
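The long comment in `fn_sigs` describes a leak check on skolemized regions: after comparing signatures, anything related to a skolem must be either the skolem itself or a variable created during this comparison. A toy sketch of just that final check, with `Region` and the variable ids as stand-ins for the compiler's types:

// Toy leak check: returns true if some tainted region is neither the skolem
// itself nor one of the variables created since the snapshot.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Region {
    Var(u32),
    Skolemized(u32),
}

fn leaks(skol: Region, tainted: &[Region], new_vars: &[u32]) -> bool {
    tainted.iter().any(|r| match *r {
        Region::Var(v) => !new_vars.contains(&v),
        _ => *r != skol,
    })
}

fn main() {
    let skol = Region::Skolemized(0);
    // Related only to itself and a fresh variable: fine.
    assert!(!leaks(skol, &[skol, Region::Var(7)], &[7]));
    // Related to a pre-existing variable: the skolem "leaked".
    assert!(leaks(skol, &[Region::Var(3)], &[7]));
}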
sub.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{BuiltinBounds};
use middle::ty;
use middle::ty::TyVar;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::{cres, CresCompare};
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::InferCtxt;
use middle::typeck::infer::lattice::CombineFieldsLatticeMethods;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::then;
use middle::typeck::infer::to_str::InferStr;
use middle::typeck::infer::{TypeTrace, Subtype};
use util::common::{indenter};
use util::ppaux::bound_region_to_str;
use syntax::ast::{Onceness, Purity};
pub struct Sub<'f>(CombineFields<'f>); // "subtype", "subregion" etc
impl<'f> Sub<'f> {
pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f> { let Sub(ref v) = *self; v }
}
impl<'f> Combine for Sub<'f> {
fn infcx<'a>(&'a self) -> &'a InferCtxt<'a>
|
fn tag(&self) -> ~str { ~"sub" }
fn a_is_expected(&self) -> bool { self.get_ref().a_is_expected }
fn trace(&self) -> TypeTrace { self.get_ref().trace }
fn sub<'a>(&'a self) -> Sub<'a> { Sub(*self.get_ref()) }
fn lub<'a>(&'a self) -> Lub<'a> { Lub(*self.get_ref()) }
fn glb<'a>(&'a self) -> Glb<'a> { Glb(*self.get_ref()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
let opp = CombineFields {
            a_is_expected: !self.get_ref().a_is_expected, .. *self.get_ref()
};
Sub(opp).tys(b, a)
}
fn contraregions(&self, a: ty::Region, b: ty::Region)
-> cres<ty::Region> {
let opp = CombineFields {
            a_is_expected: !self.get_ref().a_is_expected, .. *self.get_ref()
};
Sub(opp).regions(b, a)
}
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.inf_str(self.get_ref().infcx),
b.inf_str(self.get_ref().infcx));
self.get_ref().infcx.region_vars.make_subregion(Subtype(self.get_ref().trace), a, b);
Ok(a)
}
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
debug!("mts({} <: {})", a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
        if a.mutbl != b.mutbl {
return Err(ty::terr_mutability);
}
match b.mutbl {
MutMutable => {
// If supertype is mut, subtype must match exactly
// (i.e., invariant if mut):
eq_tys(self, a.ty, b.ty).then(|| Ok(*a))
}
MutImmutable => {
// Otherwise we can be covariant:
self.tys(a.ty, b.ty).and_then(|_t| Ok(*a) )
}
}
}
fn purities(&self, a: Purity, b: Purity) -> cres<Purity> {
self.lub().purities(a, b).compare(b, || {
ty::terr_purity_mismatch(expected_found(self, a, b))
})
}
fn oncenesses(&self, a: Onceness, b: Onceness) -> cres<Onceness> {
self.lub().oncenesses(a, b).compare(b, || {
ty::terr_onceness_mismatch(expected_found(self, a, b))
})
}
fn bounds(&self, a: BuiltinBounds, b: BuiltinBounds) -> cres<BuiltinBounds> {
// More bounds is a subtype of fewer bounds.
//
// e.g., fn:Copy() <: fn(), because the former is a function
// that only closes over copyable things, but the latter is
// any function at all.
if a.contains(b) {
Ok(a)
} else {
Err(ty::terr_builtin_bounds(expected_found(self, a, b)))
}
}
fn tys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
debug!("{}.tys({}, {})", self.tag(),
a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
if a == b { return Ok(a); }
let _indenter = indenter();
match (&ty::get(a).sty, &ty::get(b).sty) {
(&ty::ty_bot, _) => {
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
if_ok!(self.get_ref().var_sub_var(a_id, b_id));
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), _) => {
if_ok!(self.get_ref().var_sub_t(a_id, b));
Ok(a)
}
(_, &ty::ty_infer(TyVar(b_id))) => {
if_ok!(self.get_ref().t_sub_var(a, b_id));
Ok(a)
}
(_, &ty::ty_bot) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
_ => {
super_tys(self, a, b)
}
}
}
fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
debug!("fn_sigs(a={}, b={})",
a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
let _indenter = indenter();
// Rather than checking the subtype relationship between `a` and `b`
// as-is, we need to do some extra work here in order to make sure
// that function subtyping works correctly with respect to regions
//
// Note: this is a subtle algorithm. For a full explanation,
// please see the large comment in `region_inference.rs`.
// Take a snapshot. We'll never roll this back, but in later
// phases we do want to be able to examine "all bindings that
// were created as part of this type comparison", and making a
// snapshot is a convenient way to do that.
let snapshot = self.get_ref().infcx.region_vars.start_snapshot();
// First, we instantiate each bound region in the subtype with a fresh
// region variable.
let (a_sig, _) =
self.get_ref().infcx.replace_late_bound_regions_with_fresh_regions(
self.get_ref().trace, a);
// Second, we instantiate each bound region in the supertype with a
// fresh concrete region.
let (skol_map, b_sig) = {
replace_late_bound_regions_in_fn_sig(self.get_ref().infcx.tcx, b, |br| {
let skol = self.get_ref().infcx.region_vars.new_skolemized(br);
debug!("Bound region {} skolemized to {:?}",
bound_region_to_str(self.get_ref().infcx.tcx, "", false, br),
skol);
skol
})
};
debug!("a_sig={}", a_sig.inf_str(self.get_ref().infcx));
debug!("b_sig={}", b_sig.inf_str(self.get_ref().infcx));
// Compare types now that bound regions have been replaced.
let sig = if_ok!(super_fn_sigs(self, &a_sig, &b_sig));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
let new_vars =
self.get_ref().infcx.region_vars.vars_created_since_snapshot(snapshot);
for (&skol_br, &skol) in skol_map.iter() {
let tainted = self.get_ref().infcx.region_vars.tainted(snapshot, skol);
for tainted_region in tainted.iter() {
// Each skolemized should only be relatable to itself
// or new variables:
match *tainted_region {
ty::ReInfer(ty::ReVar(ref vid)) => {
if new_vars.iter().any(|x| x == vid) { continue; }
}
_ => {
if *tainted_region == skol { continue; }
}
};
// A is not as polymorphic as B:
if self.a_is_expected() {
return Err(ty::terr_regions_insufficiently_polymorphic(
skol_br, *tainted_region));
} else {
return Err(ty::terr_regions_overly_polymorphic(
skol_br, *tainted_region));
}
}
}
return Ok(sig);
}
}
|
{ self.get_ref().infcx }
|
identifier_body
|
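The `mts` method encodes the classic variance rule: types must match exactly through a mutable reference (invariance), while an immutable one only needs the subtype direction. A toy illustration on a two-type lattice; all names here are mine:

// Toy variance check: invariant when the supertype side is mutable,
// covariant otherwise. `Ty::Bot` plays the role of the bottom type.
#[derive(Clone, Copy, PartialEq, Eq)]
enum Ty { Bot, Int }

fn is_subtype(a: Ty, b: Ty) -> bool {
    a == b || a == Ty::Bot
}

fn mt_ok(a: Ty, b: Ty, b_is_mut: bool) -> bool {
    if b_is_mut { a == b } else { is_subtype(a, b) }
}

fn main() {
    assert!(mt_ok(Ty::Bot, Ty::Int, false)); // covariant when immutable
    assert!(!mt_ok(Ty::Bot, Ty::Int, true)); // invariant when mutable
}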
sub.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{BuiltinBounds};
use middle::ty;
use middle::ty::TyVar;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::{cres, CresCompare};
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::InferCtxt;
use middle::typeck::infer::lattice::CombineFieldsLatticeMethods;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::then;
use middle::typeck::infer::to_str::InferStr;
use middle::typeck::infer::{TypeTrace, Subtype};
use util::common::{indenter};
use util::ppaux::bound_region_to_str;
use syntax::ast::{Onceness, Purity};
pub struct Sub<'f>(CombineFields<'f>); // "subtype", "subregion" etc
impl<'f> Sub<'f> {
pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f> { let Sub(ref v) = *self; v }
}
impl<'f> Combine for Sub<'f> {
fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.get_ref().infcx }
fn tag(&self) -> ~str { ~"sub" }
fn a_is_expected(&self) -> bool { self.get_ref().a_is_expected }
fn trace(&self) -> TypeTrace { self.get_ref().trace }
fn sub<'a>(&'a self) -> Sub<'a> { Sub(*self.get_ref()) }
fn lub<'a>(&'a self) -> Lub<'a> { Lub(*self.get_ref()) }
fn glb<'a>(&'a self) -> Glb<'a> { Glb(*self.get_ref()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
let opp = CombineFields {
            a_is_expected: !self.get_ref().a_is_expected, .. *self.get_ref()
};
Sub(opp).tys(b, a)
}
fn contraregions(&self, a: ty::Region, b: ty::Region)
-> cres<ty::Region> {
let opp = CombineFields {
            a_is_expected: !self.get_ref().a_is_expected, .. *self.get_ref()
};
Sub(opp).regions(b, a)
}
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.inf_str(self.get_ref().infcx),
b.inf_str(self.get_ref().infcx));
self.get_ref().infcx.region_vars.make_subregion(Subtype(self.get_ref().trace), a, b);
Ok(a)
}
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
debug!("mts({} <: {})", a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
        if a.mutbl != b.mutbl {
return Err(ty::terr_mutability);
}
match b.mutbl {
MutMutable => {
// If supertype is mut, subtype must match exactly
// (i.e., invariant if mut):
eq_tys(self, a.ty, b.ty).then(|| Ok(*a))
}
MutImmutable => {
// Otherwise we can be covariant:
self.tys(a.ty, b.ty).and_then(|_t| Ok(*a) )
}
}
}
fn purities(&self, a: Purity, b: Purity) -> cres<Purity> {
self.lub().purities(a, b).compare(b, || {
ty::terr_purity_mismatch(expected_found(self, a, b))
})
}
fn oncenesses(&self, a: Onceness, b: Onceness) -> cres<Onceness> {
self.lub().oncenesses(a, b).compare(b, || {
ty::terr_onceness_mismatch(expected_found(self, a, b))
})
}
fn bounds(&self, a: BuiltinBounds, b: BuiltinBounds) -> cres<BuiltinBounds> {
// More bounds is a subtype of fewer bounds.
//
// e.g., fn:Copy() <: fn(), because the former is a function
// that only closes over copyable things, but the latter is
// any function at all.
if a.contains(b) {
Ok(a)
} else {
Err(ty::terr_builtin_bounds(expected_found(self, a, b)))
}
}
fn tys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
debug!("{}.tys({}, {})", self.tag(),
a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
if a == b { return Ok(a); }
let _indenter = indenter();
match (&ty::get(a).sty, &ty::get(b).sty) {
(&ty::ty_bot, _) => {
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
if_ok!(self.get_ref().var_sub_var(a_id, b_id));
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), _) =>
|
(_, &ty::ty_infer(TyVar(b_id))) => {
if_ok!(self.get_ref().t_sub_var(a, b_id));
Ok(a)
}
(_, &ty::ty_bot) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
_ => {
super_tys(self, a, b)
}
}
}
fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
debug!("fn_sigs(a={}, b={})",
a.inf_str(self.get_ref().infcx), b.inf_str(self.get_ref().infcx));
let _indenter = indenter();
// Rather than checking the subtype relationship between `a` and `b`
// as-is, we need to do some extra work here in order to make sure
// that function subtyping works correctly with respect to regions
//
// Note: this is a subtle algorithm. For a full explanation,
// please see the large comment in `region_inference.rs`.
// Take a snapshot. We'll never roll this back, but in later
// phases we do want to be able to examine "all bindings that
// were created as part of this type comparison", and making a
// snapshot is a convenient way to do that.
let snapshot = self.get_ref().infcx.region_vars.start_snapshot();
// First, we instantiate each bound region in the subtype with a fresh
// region variable.
let (a_sig, _) =
self.get_ref().infcx.replace_late_bound_regions_with_fresh_regions(
self.get_ref().trace, a);
// Second, we instantiate each bound region in the supertype with a
// fresh concrete region.
let (skol_map, b_sig) = {
replace_late_bound_regions_in_fn_sig(self.get_ref().infcx.tcx, b, |br| {
let skol = self.get_ref().infcx.region_vars.new_skolemized(br);
debug!("Bound region {} skolemized to {:?}",
bound_region_to_str(self.get_ref().infcx.tcx, "", false, br),
skol);
skol
})
};
debug!("a_sig={}", a_sig.inf_str(self.get_ref().infcx));
debug!("b_sig={}", b_sig.inf_str(self.get_ref().infcx));
// Compare types now that bound regions have been replaced.
let sig = if_ok!(super_fn_sigs(self, &a_sig, &b_sig));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
let new_vars =
self.get_ref().infcx.region_vars.vars_created_since_snapshot(snapshot);
for (&skol_br, &skol) in skol_map.iter() {
let tainted = self.get_ref().infcx.region_vars.tainted(snapshot, skol);
for tainted_region in tainted.iter() {
// Each skolemized should only be relatable to itself
// or new variables:
match *tainted_region {
ty::ReInfer(ty::ReVar(ref vid)) => {
if new_vars.iter().any(|x| x == vid) { continue; }
}
_ => {
if *tainted_region == skol { continue; }
}
};
// A is not as polymorphic as B:
if self.a_is_expected() {
return Err(ty::terr_regions_insufficiently_polymorphic(
skol_br, *tainted_region));
} else {
return Err(ty::terr_regions_overly_polymorphic(
skol_br, *tainted_region));
}
}
}
return Ok(sig);
}
}
|
{
if_ok!(self.get_ref().var_sub_t(a_id, b));
Ok(a)
}
|
conditional_block
|
helpers.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::str;
use std::sync::Arc;
use rustc_serialize::hex::FromHex;
use env_logger::LogBuilder;
use ServerBuilder;
use Server;
use hash_fetch::urlhint::ContractClient;
use util::{Bytes, Address, Mutex, ToPretty};
use devtools::http_client;
const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2";
const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000";
const SIGNER_PORT: u16 = 18180;
pub struct FakeRegistrar {
pub calls: Arc<Mutex<Vec<(String, String)>>>,
pub responses: Mutex<Vec<Result<Bytes, String>>>,
}
impl FakeRegistrar {
fn new() -> Self {
FakeRegistrar {
calls: Arc::new(Mutex::new(Vec::new())),
responses: Mutex::new(
vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok(Vec::new())
]
),
}
}
}
impl ContractClient for FakeRegistrar {
fn registrar(&self) -> Result<Address, String> {
Ok(REGISTRAR.parse().unwrap())
}
fn call(&self, address: Address, data: Bytes) -> Result<Bytes, String> {
self.calls.lock().push((address.to_hex(), data.to_hex()));
self.responses.lock().remove(0)
}
}
fn init_logger() {
// Initialize logger
if let Ok(log) = env::var("RUST_LOG")
|
}
pub fn init_server(hosts: Option<Vec<String>>, is_syncing: bool) -> (Server, Arc<FakeRegistrar>) {
init_logger();
let registrar = Arc::new(FakeRegistrar::new());
let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar.clone());
builder.with_sync_status(Arc::new(move || is_syncing));
builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)));
(
builder.start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), hosts).unwrap(),
registrar,
)
}
pub fn serve_with_auth(user: &str, pass: &str) -> Server {
init_logger();
let registrar = Arc::new(FakeRegistrar::new());
let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar);
builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)));
builder.start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), None, user, pass).unwrap()
}
pub fn serve_hosts(hosts: Option<Vec<String>>) -> Server {
init_server(hosts, false).0
}
pub fn serve_with_registrar() -> (Server, Arc<FakeRegistrar>) {
init_server(None, false)
}
pub fn serve_with_registrar_and_sync() -> (Server, Arc<FakeRegistrar>) {
init_server(None, true)
}
pub fn serve() -> Server {
init_server(None, false).0
}
pub fn request(server: Server, request: &str) -> http_client::Response {
http_client::request(server.addr(), request)
}
pub fn assert_security_headers(headers: &[String]) {
http_client::assert_security_headers_present(headers, None)
}
pub fn assert_security_headers_for_embed(headers: &[String]) {
http_client::assert_security_headers_present(headers, Some(SIGNER_PORT))
}
|
{
let mut builder = LogBuilder::new();
builder.parse(&log);
builder.init().expect("Logger is initialized only once.");
}
|
conditional_block
|
helpers.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::str;
use std::sync::Arc;
use rustc_serialize::hex::FromHex;
use env_logger::LogBuilder;
use ServerBuilder;
use Server;
use hash_fetch::urlhint::ContractClient;
use util::{Bytes, Address, Mutex, ToPretty};
use devtools::http_client;
const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2";
const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000";
const SIGNER_PORT: u16 = 18180;
pub struct FakeRegistrar {
pub calls: Arc<Mutex<Vec<(String, String)>>>,
pub responses: Mutex<Vec<Result<Bytes, String>>>,
}
impl FakeRegistrar {
fn new() -> Self {
FakeRegistrar {
calls: Arc::new(Mutex::new(Vec::new())),
responses: Mutex::new(
vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok(Vec::new())
]
),
}
}
}
impl ContractClient for FakeRegistrar {
fn registrar(&self) -> Result<Address, String> {
Ok(REGISTRAR.parse().unwrap())
}
fn call(&self, address: Address, data: Bytes) -> Result<Bytes, String> {
self.calls.lock().push((address.to_hex(), data.to_hex()));
self.responses.lock().remove(0)
}
}
fn init_logger() {
// Initialize logger
if let Ok(log) = env::var("RUST_LOG") {
let mut builder = LogBuilder::new();
builder.parse(&log);
builder.init().expect("Logger is initialized only once.");
}
}
pub fn init_server(hosts: Option<Vec<String>>, is_syncing: bool) -> (Server, Arc<FakeRegistrar>) {
init_logger();
let registrar = Arc::new(FakeRegistrar::new());
let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar.clone());
builder.with_sync_status(Arc::new(move || is_syncing));
builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)));
(
builder.start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), hosts).unwrap(),
registrar,
)
}
pub fn serve_with_auth(user: &str, pass: &str) -> Server {
init_logger();
let registrar = Arc::new(FakeRegistrar::new());
let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar);
builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)));
builder.start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), None, user, pass).unwrap()
}
pub fn serve_hosts(hosts: Option<Vec<String>>) -> Server {
init_server(hosts, false).0
}
pub fn serve_with_registrar() -> (Server, Arc<FakeRegistrar>) {
init_server(None, false)
}
pub fn serve_with_registrar_and_sync() -> (Server, Arc<FakeRegistrar>) {
init_server(None, true)
}
pub fn serve() -> Server {
init_server(None, false).0
}
pub fn request(server: Server, request: &str) -> http_client::Response
|
pub fn assert_security_headers(headers: &[String]) {
http_client::assert_security_headers_present(headers, None)
}
pub fn assert_security_headers_for_embed(headers: &[String]) {
http_client::assert_security_headers_present(headers, Some(SIGNER_PORT))
}
|
{
http_client::request(server.addr(), request)
}
|
identifier_body
|
helpers.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::str;
use std::sync::Arc;
use rustc_serialize::hex::FromHex;
use env_logger::LogBuilder;
use ServerBuilder;
use Server;
use hash_fetch::urlhint::ContractClient;
use util::{Bytes, Address, Mutex, ToPretty};
use devtools::http_client;
const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2";
const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000";
const SIGNER_PORT: u16 = 18180;
pub struct FakeRegistrar {
pub calls: Arc<Mutex<Vec<(String, String)>>>,
pub responses: Mutex<Vec<Result<Bytes, String>>>,
}
impl FakeRegistrar {
fn new() -> Self {
FakeRegistrar {
calls: Arc::new(Mutex::new(Vec::new())),
responses: Mutex::new(
vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok(Vec::new())
]
),
}
}
}
impl ContractClient for FakeRegistrar {
fn registrar(&self) -> Result<Address, String> {
Ok(REGISTRAR.parse().unwrap())
}
fn call(&self, address: Address, data: Bytes) -> Result<Bytes, String> {
self.calls.lock().push((address.to_hex(), data.to_hex()));
self.responses.lock().remove(0)
}
}
fn init_logger() {
// Initialize logger
if let Ok(log) = env::var("RUST_LOG") {
let mut builder = LogBuilder::new();
builder.parse(&log);
builder.init().expect("Logger is initialized only once.");
|
}
}
pub fn init_server(hosts: Option<Vec<String>>, is_syncing: bool) -> (Server, Arc<FakeRegistrar>) {
init_logger();
let registrar = Arc::new(FakeRegistrar::new());
let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar.clone());
builder.with_sync_status(Arc::new(move || is_syncing));
builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)));
(
builder.start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), hosts).unwrap(),
registrar,
)
}
pub fn serve_with_auth(user: &str, pass: &str) -> Server {
init_logger();
let registrar = Arc::new(FakeRegistrar::new());
let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar);
builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)));
builder.start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), None, user, pass).unwrap()
}
pub fn serve_hosts(hosts: Option<Vec<String>>) -> Server {
init_server(hosts, false).0
}
pub fn serve_with_registrar() -> (Server, Arc<FakeRegistrar>) {
init_server(None, false)
}
pub fn serve_with_registrar_and_sync() -> (Server, Arc<FakeRegistrar>) {
init_server(None, true)
}
pub fn serve() -> Server {
init_server(None, false).0
}
pub fn request(server: Server, request: &str) -> http_client::Response {
http_client::request(server.addr(), request)
}
pub fn assert_security_headers(headers: &[String]) {
http_client::assert_security_headers_present(headers, None)
}
pub fn assert_security_headers_for_embed(headers: &[String]) {
http_client::assert_security_headers_present(headers, Some(SIGNER_PORT))
}
|
random_line_split
|
|
helpers.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::str;
use std::sync::Arc;
use rustc_serialize::hex::FromHex;
use env_logger::LogBuilder;
use ServerBuilder;
use Server;
use hash_fetch::urlhint::ContractClient;
use util::{Bytes, Address, Mutex, ToPretty};
use devtools::http_client;
const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2";
const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000";
const SIGNER_PORT: u16 = 18180;
pub struct FakeRegistrar {
pub calls: Arc<Mutex<Vec<(String, String)>>>,
pub responses: Mutex<Vec<Result<Bytes, String>>>,
}
impl FakeRegistrar {
fn new() -> Self {
FakeRegistrar {
calls: Arc::new(Mutex::new(Vec::new())),
responses: Mutex::new(
vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok(Vec::new())
]
),
}
}
}
impl ContractClient for FakeRegistrar {
fn registrar(&self) -> Result<Address, String> {
Ok(REGISTRAR.parse().unwrap())
}
fn
|
(&self, address: Address, data: Bytes) -> Result<Bytes, String> {
self.calls.lock().push((address.to_hex(), data.to_hex()));
self.responses.lock().remove(0)
}
}
fn init_logger() {
// Initialize logger
if let Ok(log) = env::var("RUST_LOG") {
let mut builder = LogBuilder::new();
builder.parse(&log);
builder.init().expect("Logger is initialized only once.");
}
}
pub fn init_server(hosts: Option<Vec<String>>, is_syncing: bool) -> (Server, Arc<FakeRegistrar>) {
init_logger();
let registrar = Arc::new(FakeRegistrar::new());
let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar.clone());
builder.with_sync_status(Arc::new(move || is_syncing));
builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)));
(
builder.start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), hosts).unwrap(),
registrar,
)
}
pub fn serve_with_auth(user: &str, pass: &str) -> Server {
init_logger();
let registrar = Arc::new(FakeRegistrar::new());
let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar);
builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)));
builder.start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), None, user, pass).unwrap()
}
pub fn serve_hosts(hosts: Option<Vec<String>>) -> Server {
init_server(hosts, false).0
}
pub fn serve_with_registrar() -> (Server, Arc<FakeRegistrar>) {
init_server(None, false)
}
pub fn serve_with_registrar_and_sync() -> (Server, Arc<FakeRegistrar>) {
init_server(None, true)
}
pub fn serve() -> Server {
init_server(None, false).0
}
pub fn request(server: Server, request: &str) -> http_client::Response {
http_client::request(server.addr(), request)
}
pub fn assert_security_headers(headers: &[String]) {
http_client::assert_security_headers_present(headers, None)
}
pub fn assert_security_headers_for_embed(headers: &[String]) {
http_client::assert_security_headers_present(headers, Some(SIGNER_PORT))
}
|
call
|
identifier_name
|
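For context, a minimal sketch of how the test helpers above compose in practice. The raw HTTP request string and the `headers` field on `http_client::Response` are assumptions for illustration, not taken from the real test suite:

// Hypothetical test using the helpers above; assumes `Response` exposes
// a `headers: Vec<String>` field.
#[test]
fn should_serve_and_set_security_headers() {
    let server = serve();
    let response = request(server,
        "GET / HTTP/1.1\r\nHost: 127.0.0.1\r\nConnection: close\r\n\r\n");
    assert_security_headers(&response.headers);
}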
feature_flags.rs
|
use fuse::{FileType, ReplyAttr, ReplyData, ReplyEntry, Request};
use fs::constants;
use fs::GoodDataFS;
use fs::helpers::create_inode_file_attributes;
use fs::item;
use fs::inode;
use helpers;
use object;
use std::path::Path;
use super::project_from_inode;
fn getattr(fs: &mut GoodDataFS, _req: &Request, ino: u64, reply: ReplyAttr) {
let project: &object::Project = &project_from_inode(fs, ino);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
let attr =
create_inode_file_attributes(ino, json.len() as u64, constants::DEFAULT_CREATE_TIME);
reply.attr(&constants::DEFAULT_TTL, &attr);
}
}
fn lookup(fs: &mut GoodDataFS, _req: &Request, parent: u64, _name: &Path, reply: ReplyEntry) {
let inode_parent = inode::Inode::deserialize(parent);
let inode = inode::Inode::create(inode_parent.project,
constants::Category::Internal as u8,
0,
constants::ReservedFile::FeatureFlagsJson as u8);
let project: &object::Project = &project_from_inode(fs, inode_parent);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
let attr =
create_inode_file_attributes(inode, json.len() as u64, constants::DEFAULT_CREATE_TIME);
reply.entry(&constants::DEFAULT_TTL, &attr, 0);
}
}
fn
|
(fs: &mut GoodDataFS, inode: inode::Inode, reply: ReplyData, offset: u64, size: u32) {
debug!("read() - Reading {}",
constants::FEATURE_FLAGS_JSON_FILENAME);
let project: &object::Project = &project_from_inode(fs, inode);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
reply.data(helpers::read_bytes(&json, offset, size));
}
}
pub const ITEM: item::ProjectItem = item::ProjectItem {
category: constants::Category::Internal as u8,
reserved: constants::ReservedFile::FeatureFlagsJson as u8,
item_type: FileType::RegularFile,
path: constants::FEATURE_FLAGS_JSON_FILENAME,
getattr: getattr,
lookup: lookup,
read: read,
};
|
read
|
identifier_name
|
feature_flags.rs
|
use fuse::{FileType, ReplyAttr, ReplyData, ReplyEntry, Request};
use fs::constants;
use fs::GoodDataFS;
use fs::helpers::create_inode_file_attributes;
use fs::item;
use fs::inode;
use helpers;
use object;
use std::path::Path;
use super::project_from_inode;
fn getattr(fs: &mut GoodDataFS, _req: &Request, ino: u64, reply: ReplyAttr)
|
fn lookup(fs: &mut GoodDataFS, _req: &Request, parent: u64, _name: &Path, reply: ReplyEntry) {
let inode_parent = inode::Inode::deserialize(parent);
let inode = inode::Inode::create(inode_parent.project,
constants::Category::Internal as u8,
0,
constants::ReservedFile::FeatureFlagsJson as u8);
let project: &object::Project = &project_from_inode(fs, inode_parent);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
let attr =
create_inode_file_attributes(inode, json.len() as u64, constants::DEFAULT_CREATE_TIME);
reply.entry(&constants::DEFAULT_TTL, &attr, 0);
}
}
fn read(fs: &mut GoodDataFS, inode: inode::Inode, reply: ReplyData, offset: u64, size: u32) {
debug!("read() - Reading {}",
constants::FEATURE_FLAGS_JSON_FILENAME);
let project: &object::Project = &project_from_inode(fs, inode);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
reply.data(helpers::read_bytes(&json, offset, size));
}
}
pub const ITEM: item::ProjectItem = item::ProjectItem {
category: constants::Category::Internal as u8,
reserved: constants::ReservedFile::FeatureFlagsJson as u8,
item_type: FileType::RegularFile,
path: constants::FEATURE_FLAGS_JSON_FILENAME,
getattr: getattr,
lookup: lookup,
read: read,
};
|
{
let project: &object::Project = &project_from_inode(fs, ino);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
let attr =
create_inode_file_attributes(ino, json.len() as u64, constants::DEFAULT_CREATE_TIME);
reply.attr(&constants::DEFAULT_TTL, &attr);
}
}
|
identifier_body
|
feature_flags.rs
|
use fuse::{FileType, ReplyAttr, ReplyData, ReplyEntry, Request};
use fs::constants;
use fs::GoodDataFS;
use fs::helpers::create_inode_file_attributes;
use fs::item;
use fs::inode;
use helpers;
use object;
use std::path::Path;
use super::project_from_inode;
fn getattr(fs: &mut GoodDataFS, _req: &Request, ino: u64, reply: ReplyAttr) {
let project: &object::Project = &project_from_inode(fs, ino);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
let attr =
create_inode_file_attributes(ino, json.len() as u64, constants::DEFAULT_CREATE_TIME);
reply.attr(&constants::DEFAULT_TTL, &attr);
}
}
fn lookup(fs: &mut GoodDataFS, _req: &Request, parent: u64, _name: &Path, reply: ReplyEntry) {
let inode_parent = inode::Inode::deserialize(parent);
let inode = inode::Inode::create(inode_parent.project,
constants::Category::Internal as u8,
0,
constants::ReservedFile::FeatureFlagsJson as u8);
let project: &object::Project = &project_from_inode(fs, inode_parent);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some()
|
}
fn read(fs: &mut GoodDataFS, inode: inode::Inode, reply: ReplyData, offset: u64, size: u32) {
debug!("read() - Reading {}",
constants::FEATURE_FLAGS_JSON_FILENAME);
let project: &object::Project = &project_from_inode(fs, inode);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
reply.data(helpers::read_bytes(&json, offset, size));
}
}
pub const ITEM: item::ProjectItem = item::ProjectItem {
category: constants::Category::Internal as u8,
reserved: constants::ReservedFile::FeatureFlagsJson as u8,
item_type: FileType::RegularFile,
path: constants::FEATURE_FLAGS_JSON_FILENAME,
getattr: getattr,
lookup: lookup,
read: read,
};
|
{
let json: String = feature_flags.unwrap().into();
let attr =
create_inode_file_attributes(inode, json.len() as u64, constants::DEFAULT_CREATE_TIME);
reply.entry(&constants::DEFAULT_TTL, &attr, 0);
}
|
conditional_block
|
feature_flags.rs
|
use fuse::{FileType, ReplyAttr, ReplyData, ReplyEntry, Request};
use fs::constants;
use fs::GoodDataFS;
use fs::helpers::create_inode_file_attributes;
use fs::item;
use fs::inode;
use helpers;
use object;
use std::path::Path;
use super::project_from_inode;
fn getattr(fs: &mut GoodDataFS, _req: &Request, ino: u64, reply: ReplyAttr) {
let project: &object::Project = &project_from_inode(fs, ino);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
let attr =
create_inode_file_attributes(ino, json.len() as u64, constants::DEFAULT_CREATE_TIME);
reply.attr(&constants::DEFAULT_TTL, &attr);
}
}
fn lookup(fs: &mut GoodDataFS, _req: &Request, parent: u64, _name: &Path, reply: ReplyEntry) {
let inode_parent = inode::Inode::deserialize(parent);
let inode = inode::Inode::create(inode_parent.project,
constants::Category::Internal as u8,
0,
constants::ReservedFile::FeatureFlagsJson as u8);
let project: &object::Project = &project_from_inode(fs, inode_parent);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
let json: String = feature_flags.unwrap().into();
let attr =
create_inode_file_attributes(inode, json.len() as u64, constants::DEFAULT_CREATE_TIME);
reply.entry(&constants::DEFAULT_TTL, &attr, 0);
}
}
fn read(fs: &mut GoodDataFS, inode: inode::Inode, reply: ReplyData, offset: u64, size: u32) {
debug!("read() - Reading {}",
constants::FEATURE_FLAGS_JSON_FILENAME);
let project: &object::Project = &project_from_inode(fs, inode);
let feature_flags = project.feature_flags(&mut fs.client.connector);
if feature_flags.is_some() {
|
pub const ITEM: item::ProjectItem = item::ProjectItem {
category: constants::Category::Internal as u8,
reserved: constants::ReservedFile::FeatureFlagsJson as u8,
item_type: FileType::RegularFile,
path: constants::FEATURE_FLAGS_JSON_FILENAME,
getattr: getattr,
lookup: lookup,
read: read,
};
|
let json: String = feature_flags.unwrap().into();
reply.data(helpers::read_bytes(&json, offset, size));
}
}
|
random_line_split
|
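The `read()` handler above delegates range handling to `helpers::read_bytes`. A minimal sketch of what such an offset/size slicing helper could look like (an assumption; the repository's real implementation may differ):

// Clamp the requested (offset, size) window to the data and return that slice.
fn read_bytes(data: &str, offset: u64, size: u32) -> &[u8] {
    let bytes = data.as_bytes();
    let start = (offset as usize).min(bytes.len());
    let end = start.saturating_add(size as usize).min(bytes.len());
    &bytes[start..end]
}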
mod.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! `JournalDB` interface and implementation.
use std::{fmt, str};
use std::sync::Arc;
/// Export the journaldb module.
pub mod traits;
mod archivedb;
mod earlymergedb;
mod overlayrecentdb;
mod refcounteddb;
/// Export the `JournalDB` trait.
pub use self::traits::JournalDB;
/// A journal database algorithm.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Algorithm {
/// Keep all keys forever.
Archive,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// Inserts go into the backing database; the journal retains knowledge of whether a backing
/// DB key is ancient or recent. Non-canon inserts get explicitly reverted and removed from
/// the backing DB.
EarlyMerge,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// Inserts go into a memory overlay, which is tried first for key fetches. The memory
/// overlay gets flushed to the backing DB only at the end of recent history.
OverlayRecent,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// References are counted in the disk-backed DB.
RefCounted,
}
impl Default for Algorithm {
fn default() -> Algorithm {
Algorithm::OverlayRecent
}
}
impl str::FromStr for Algorithm {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"archive" => Ok(Algorithm::Archive),
"light" => Ok(Algorithm::EarlyMerge),
"fast" => Ok(Algorithm::OverlayRecent),
"basic" => Ok(Algorithm::RefCounted),
e => Err(format!("Invalid algorithm: {}", e)),
}
}
}
impl Algorithm {
/// Returns a static str describing the journal database algorithm.
pub fn as_str(&self) -> &'static str {
match *self {
Algorithm::Archive => "archive",
Algorithm::EarlyMerge => "light",
Algorithm::OverlayRecent => "fast",
Algorithm::RefCounted => "basic",
}
}
/// Returns a static str with the internal name of the journal database algorithm.
pub fn as_internal_name_str(&self) -> &'static str {
match *self {
Algorithm::Archive => "archive",
Algorithm::EarlyMerge => "earlymerge",
Algorithm::OverlayRecent => "overlayrecent",
Algorithm::RefCounted => "refcounted",
}
}
|
_ => false,
}
}
/// Returns all algorithm types.
pub fn all_types() -> Vec<Algorithm> {
vec![Algorithm::Archive, Algorithm::EarlyMerge, Algorithm::OverlayRecent, Algorithm::RefCounted]
}
}
impl fmt::Display for Algorithm {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
/// Create a new `JournalDB` trait object over a generic key-value database.
pub fn new(backing: Arc<::kvdb::KeyValueDB>, algorithm: Algorithm, col: Option<u32>) -> Box<JournalDB> {
match algorithm {
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)),
Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)),
Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(backing, col)),
Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(backing, col)),
}
}
// all keys must be at least 12 bytes
const DB_PREFIX_LEN: usize = ::kvdb::PREFIX_LEN;
const LATEST_ERA_KEY: [u8; ::kvdb::PREFIX_LEN] = [b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0];
#[cfg(test)]
mod tests {
use super::Algorithm;
#[test]
fn test_journal_algorithm_parsing() {
assert_eq!(Algorithm::Archive, "archive".parse().unwrap());
assert_eq!(Algorithm::EarlyMerge, "light".parse().unwrap());
assert_eq!(Algorithm::OverlayRecent, "fast".parse().unwrap());
assert_eq!(Algorithm::RefCounted, "basic".parse().unwrap());
}
#[test]
fn test_journal_algorithm_printing() {
assert_eq!(Algorithm::Archive.to_string(), "archive".to_owned());
assert_eq!(Algorithm::EarlyMerge.to_string(), "light".to_owned());
assert_eq!(Algorithm::OverlayRecent.to_string(), "fast".to_owned());
assert_eq!(Algorithm::RefCounted.to_string(), "basic".to_owned());
}
#[test]
fn test_journal_algorithm_is_stable() {
assert!(Algorithm::Archive.is_stable());
assert!(Algorithm::OverlayRecent.is_stable());
assert!(!Algorithm::EarlyMerge.is_stable());
assert!(!Algorithm::RefCounted.is_stable());
}
#[test]
fn test_journal_algorithm_default() {
assert_eq!(Algorithm::default(), Algorithm::OverlayRecent);
}
#[test]
fn test_journal_algorithm_all_types() {
// compilation should fail if some cases are not covered
let mut archive = 0;
let mut earlymerge = 0;
let mut overlayrecent = 0;
let mut refcounted = 0;
for a in &Algorithm::all_types() {
match *a {
Algorithm::Archive => archive += 1,
Algorithm::EarlyMerge => earlymerge += 1,
Algorithm::OverlayRecent => overlayrecent += 1,
Algorithm::RefCounted => refcounted += 1,
}
}
assert_eq!(archive, 1);
assert_eq!(earlymerge, 1);
assert_eq!(overlayrecent, 1);
assert_eq!(refcounted, 1);
}
}
|
/// Returns true if the pruning strategy is stable.
pub fn is_stable(&self) -> bool {
match *self {
Algorithm::Archive | Algorithm::OverlayRecent => true,
|
random_line_split
|
mod.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! `JournalDB` interface and implementation.
use std::{fmt, str};
use std::sync::Arc;
/// Export the journaldb module.
pub mod traits;
mod archivedb;
mod earlymergedb;
mod overlayrecentdb;
mod refcounteddb;
/// Export the `JournalDB` trait.
pub use self::traits::JournalDB;
/// A journal database algorithm.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Algorithm {
/// Keep all keys forever.
Archive,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// Inserts go into the backing database; the journal retains knowledge of whether a backing
/// DB key is ancient or recent. Non-canon inserts get explicitly reverted and removed from
/// the backing DB.
EarlyMerge,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// Inserts go into a memory overlay, which is tried first for key fetches. The memory
/// overlay gets flushed to the backing DB only at the end of recent history.
OverlayRecent,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// References are counted in the disk-backed DB.
RefCounted,
}
impl Default for Algorithm {
fn default() -> Algorithm {
Algorithm::OverlayRecent
}
}
impl str::FromStr for Algorithm {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"archive" => Ok(Algorithm::Archive),
"light" => Ok(Algorithm::EarlyMerge),
"fast" => Ok(Algorithm::OverlayRecent),
"basic" => Ok(Algorithm::RefCounted),
e => Err(format!("Invalid algorithm: {}", e)),
}
}
}
impl Algorithm {
/// Returns a static str describing the journal database algorithm.
pub fn as_str(&self) -> &'static str {
match *self {
Algorithm::Archive => "archive",
Algorithm::EarlyMerge => "light",
Algorithm::OverlayRecent => "fast",
Algorithm::RefCounted => "basic",
}
}
/// Returns a static str with the internal name of the journal database algorithm.
pub fn
|
(&self) -> &'static str {
match *self {
Algorithm::Archive => "archive",
Algorithm::EarlyMerge => "earlymerge",
Algorithm::OverlayRecent => "overlayrecent",
Algorithm::RefCounted => "refcounted",
}
}
/// Returns true if the pruning strategy is stable.
pub fn is_stable(&self) -> bool {
match *self {
Algorithm::Archive | Algorithm::OverlayRecent => true,
_ => false,
}
}
/// Returns all algorithm types.
pub fn all_types() -> Vec<Algorithm> {
vec![Algorithm::Archive, Algorithm::EarlyMerge, Algorithm::OverlayRecent, Algorithm::RefCounted]
}
}
impl fmt::Display for Algorithm {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
/// Create a new `JournalDB` trait object over a generic key-value database.
pub fn new(backing: Arc<::kvdb::KeyValueDB>, algorithm: Algorithm, col: Option<u32>) -> Box<JournalDB> {
match algorithm {
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)),
Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)),
Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(backing, col)),
Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(backing, col)),
}
}
// all keys must be at least 12 bytes
const DB_PREFIX_LEN: usize = ::kvdb::PREFIX_LEN;
const LATEST_ERA_KEY: [u8; ::kvdb::PREFIX_LEN] = [b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0];
#[cfg(test)]
mod tests {
use super::Algorithm;
#[test]
fn test_journal_algorithm_parsing() {
assert_eq!(Algorithm::Archive, "archive".parse().unwrap());
assert_eq!(Algorithm::EarlyMerge, "light".parse().unwrap());
assert_eq!(Algorithm::OverlayRecent, "fast".parse().unwrap());
assert_eq!(Algorithm::RefCounted, "basic".parse().unwrap());
}
#[test]
fn test_journal_algorithm_printing() {
assert_eq!(Algorithm::Archive.to_string(), "archive".to_owned());
assert_eq!(Algorithm::EarlyMerge.to_string(), "light".to_owned());
assert_eq!(Algorithm::OverlayRecent.to_string(), "fast".to_owned());
assert_eq!(Algorithm::RefCounted.to_string(), "basic".to_owned());
}
#[test]
fn test_journal_algorithm_is_stable() {
assert!(Algorithm::Archive.is_stable());
assert!(Algorithm::OverlayRecent.is_stable());
assert!(!Algorithm::EarlyMerge.is_stable());
assert!(!Algorithm::RefCounted.is_stable());
}
#[test]
fn test_journal_algorithm_default() {
assert_eq!(Algorithm::default(), Algorithm::OverlayRecent);
}
#[test]
fn test_journal_algorithm_all_types() {
// compilation should fail if some cases are not covered
let mut archive = 0;
let mut earlymerge = 0;
let mut overlayrecent = 0;
let mut refcounted = 0;
for a in &Algorithm::all_types() {
match *a {
Algorithm::Archive => archive += 1,
Algorithm::EarlyMerge => earlymerge += 1,
Algorithm::OverlayRecent => overlayrecent += 1,
Algorithm::RefCounted => refcounted += 1,
}
}
assert_eq!(archive, 1);
assert_eq!(earlymerge, 1);
assert_eq!(overlayrecent, 1);
assert_eq!(refcounted, 1);
}
}
|
as_internal_name_str
|
identifier_name
|
mod.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! `JournalDB` interface and implementation.
use std::{fmt, str};
use std::sync::Arc;
/// Export the journaldb module.
pub mod traits;
mod archivedb;
mod earlymergedb;
mod overlayrecentdb;
mod refcounteddb;
/// Export the `JournalDB` trait.
pub use self::traits::JournalDB;
/// A journal database algorithm.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Algorithm {
/// Keep all keys forever.
Archive,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// Inserts go into the backing database; the journal retains knowledge of whether a backing
/// DB key is ancient or recent. Non-canon inserts get explicitly reverted and removed from
/// the backing DB.
EarlyMerge,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// Inserts go into a memory overlay, which is tried first for key fetches. The memory
/// overlay gets flushed to the backing DB only at the end of recent history.
OverlayRecent,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// References are counted in the disk-backed DB.
RefCounted,
}
impl Default for Algorithm {
fn default() -> Algorithm {
Algorithm::OverlayRecent
}
}
impl str::FromStr for Algorithm {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"archive" => Ok(Algorithm::Archive),
"light" => Ok(Algorithm::EarlyMerge),
"fast" => Ok(Algorithm::OverlayRecent),
"basic" => Ok(Algorithm::RefCounted),
e => Err(format!("Invalid algorithm: {}", e)),
}
}
}
impl Algorithm {
/// Returns a static str describing the journal database algorithm.
pub fn as_str(&self) -> &'static str {
match *self {
Algorithm::Archive => "archive",
Algorithm::EarlyMerge => "light",
Algorithm::OverlayRecent => "fast",
Algorithm::RefCounted => "basic",
}
}
/// Returns a static str with the internal name of the journal database algorithm.
pub fn as_internal_name_str(&self) -> &'static str {
match *self {
Algorithm::Archive => "archive",
Algorithm::EarlyMerge => "earlymerge",
Algorithm::OverlayRecent => "overlayrecent",
Algorithm::RefCounted => "refcounted",
}
}
/// Returns true if the pruning strategy is stable.
pub fn is_stable(&self) -> bool {
match *self {
Algorithm::Archive | Algorithm::OverlayRecent => true,
_ => false,
}
}
/// Returns all algorithm types.
pub fn all_types() -> Vec<Algorithm> {
vec![Algorithm::Archive, Algorithm::EarlyMerge, Algorithm::OverlayRecent, Algorithm::RefCounted]
}
}
impl fmt::Display for Algorithm {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
/// Create a new `JournalDB` trait object over a generic key-value database.
pub fn new(backing: Arc<::kvdb::KeyValueDB>, algorithm: Algorithm, col: Option<u32>) -> Box<JournalDB> {
match algorithm {
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)),
Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)),
Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(backing, col)),
Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(backing, col)),
}
}
// all keys must be at least 12 bytes
const DB_PREFIX_LEN: usize = ::kvdb::PREFIX_LEN;
const LATEST_ERA_KEY: [u8; ::kvdb::PREFIX_LEN] = [b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0];
#[cfg(test)]
mod tests {
use super::Algorithm;
#[test]
fn test_journal_algorithm_parsing() {
assert_eq!(Algorithm::Archive, "archive".parse().unwrap());
assert_eq!(Algorithm::EarlyMerge, "light".parse().unwrap());
assert_eq!(Algorithm::OverlayRecent, "fast".parse().unwrap());
assert_eq!(Algorithm::RefCounted, "basic".parse().unwrap());
}
#[test]
fn test_journal_algorithm_printing()
|
#[test]
fn test_journal_algorithm_is_stable() {
assert!(Algorithm::Archive.is_stable());
assert!(Algorithm::OverlayRecent.is_stable());
assert!(!Algorithm::EarlyMerge.is_stable());
assert!(!Algorithm::RefCounted.is_stable());
}
#[test]
fn test_journal_algorithm_default() {
assert_eq!(Algorithm::default(), Algorithm::OverlayRecent);
}
#[test]
fn test_journal_algorithm_all_types() {
// compilation should fail if some cases are not covered
let mut archive = 0;
let mut earlymerge = 0;
let mut overlayrecent = 0;
let mut refcounted = 0;
for a in &Algorithm::all_types() {
match *a {
Algorithm::Archive => archive += 1,
Algorithm::EarlyMerge => earlymerge += 1,
Algorithm::OverlayRecent => overlayrecent += 1,
Algorithm::RefCounted => refcounted += 1,
}
}
assert_eq!(archive, 1);
assert_eq!(earlymerge, 1);
assert_eq!(overlayrecent, 1);
assert_eq!(refcounted, 1);
}
}
|
{
assert_eq!(Algorithm::Archive.to_string(), "archive".to_owned());
assert_eq!(Algorithm::EarlyMerge.to_string(), "light".to_owned());
assert_eq!(Algorithm::OverlayRecent.to_string(), "fast".to_owned());
assert_eq!(Algorithm::RefCounted.to_string(), "basic".to_owned());
}
|
identifier_body
|
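A hedged caller-side sketch of the module above: pick a pruning algorithm by its user-facing name, then open a `JournalDB` over an existing key-value store. The `backing` handle is assumed to be opened elsewhere:

// "fast" is the user-facing name for Algorithm::OverlayRecent.
let algorithm: Algorithm = "fast".parse().expect("known algorithm name");
assert_eq!(algorithm.as_internal_name_str(), "overlayrecent");
// `backing: Arc<KeyValueDB>` is assumed to come from the caller's setup.
let db = journaldb::new(backing, algorithm, None);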
rm_elems.rs
|
// svgcleaner could help you to clean up your SVG files
// from unnecessary data.
// Copyright (C) 2012-2018 Evgeniy Reizner
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
use task::short::EId;
// TODO: move to mod::utils
pub fn remove_element(doc: &mut Document, id: EId) {
doc.drain(|n| n.is_tag_name(id));
}
#[cfg(test)]
mod tests {
use super::*;
use svgdom::{Document, ToStringWithOptions, ElementId};
macro_rules! test {
($name:ident, $id:expr, $in_text:expr, $out_text:expr) => (
#[test]
fn $name() {
let mut doc = Document::from_str($in_text).unwrap();
remove_element(&mut doc, $id);
assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text);
}
)
}
// TODO: these tests should be in svgdom
test!(rm_1, ElementId::Title,
"<svg>
<title/>
</svg>",
"<svg/>
");
test!(rm_2, ElementId::Title,
"<svg>
<title/>
<title/>
<rect/>
<title/>
</svg>",
"<svg>
<rect/>
</svg>
");
test!(rm_3, ElementId::Title,
"<svg>
<title>
<title/>
<rect/>
</title>
<rect/>
</svg>",
"<svg>
<rect/>
</svg>
");
}
|
use svgdom::Document;
|
random_line_split
|
rm_elems.rs
|
// svgcleaner could help you to clean up your SVG files
// from unnecessary data.
// Copyright (C) 2012-2018 Evgeniy Reizner
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
use svgdom::Document;
use task::short::EId;
// TODO: move to mod::utils
pub fn remove_element(doc: &mut Document, id: EId)
|
#[cfg(test)]
mod tests {
use super::*;
use svgdom::{Document, ToStringWithOptions, ElementId};
macro_rules! test {
($name:ident, $id:expr, $in_text:expr, $out_text:expr) => (
#[test]
fn $name() {
let mut doc = Document::from_str($in_text).unwrap();
remove_element(&mut doc, $id);
assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text);
}
)
}
// TODO: these tests should be in svgdom
test!(rm_1, ElementId::Title,
"<svg>
<title/>
</svg>",
"<svg/>
");
test!(rm_2, ElementId::Title,
"<svg>
<title/>
<title/>
<rect/>
<title/>
</svg>",
"<svg>
<rect/>
</svg>
");
test!(rm_3, ElementId::Title,
"<svg>
<title>
<title/>
<rect/>
</title>
<rect/>
</svg>",
"<svg>
<rect/>
</svg>
");
}
|
{
doc.drain(|n| n.is_tag_name(id));
}
|
identifier_body
|
rm_elems.rs
|
// svgcleaner could help you to clean up your SVG files
// from unnecessary data.
// Copyright (C) 2012-2018 Evgeniy Reizner
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
use svgdom::Document;
use task::short::EId;
// TODO: move to mod::utils
pub fn
|
(doc: &mut Document, id: EId) {
doc.drain(|n| n.is_tag_name(id));
}
#[cfg(test)]
mod tests {
use super::*;
use svgdom::{Document, ToStringWithOptions, ElementId};
macro_rules! test {
($name:ident, $id:expr, $in_text:expr, $out_text:expr) => (
#[test]
fn $name() {
let mut doc = Document::from_str($in_text).unwrap();
remove_element(&mut doc, $id);
assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text);
}
)
}
// TODO: these tests should be in svgdom
test!(rm_1, ElementId::Title,
"<svg>
<title/>
</svg>",
"<svg/>
");
test!(rm_2, ElementId::Title,
"<svg>
<title/>
<title/>
<rect/>
<title/>
</svg>",
"<svg>
<rect/>
</svg>
");
test!(rm_3, ElementId::Title,
"<svg>
<title>
<title/>
<rect/>
</title>
<rect/>
</svg>",
"<svg>
<rect/>
</svg>
");
}
|
remove_element
|
identifier_name
|
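For clarity, here is what the first `test!` invocation above expands to once the macro is applied, written out by hand from the macro definition:

#[test]
fn rm_1() {
    let mut doc = Document::from_str("<svg>
    <title/>
</svg>").unwrap();
    remove_element(&mut doc, ElementId::Title);
    assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), "<svg/>
");
}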
main.rs
|
use std::fs::File;
extern crate rustc_serialize;
extern crate docopt;
|
mod vm;
static USAGE: &'static str = "
Usage: norn_rust <file>
norn_rust (--help | --version)
Options:
-h, --help Show this message.
-v, --version Show version of norn_rust.
";
#[derive(RustcDecodable, Debug)]
struct Args {
arg_file: String,
flag_help: bool,
flag_version: bool
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
if args.flag_help {
println!("{}", USAGE);
return;
}
if args.flag_version {
println!("norn_rust 0.0.3");
return;
}
File::open(args.arg_file)
.map_err(|err| err.to_string())
.and_then(|file| {
vm::ir::programs::Program::parse_textual_bytecode(file)
.map_err(|err| err.to_string())
})
.and_then(|program| Ok(vm::execute(&program)))
.unwrap();
}
|
use docopt::Docopt;
|
random_line_split
|
main.rs
|
use std::fs::File;
extern crate rustc_serialize;
extern crate docopt;
use docopt::Docopt;
mod vm;
static USAGE: &'static str = "
Usage: norn_rust <file>
norn_rust (--help | --version)
Options:
-h, --help Show this message.
-v, --version Show version of norn_rust.
";
#[derive(RustcDecodable, Debug)]
struct Args {
arg_file: String,
flag_help: bool,
flag_version: bool
}
fn main()
|
})
.and_then(|program| Ok(vm::execute(&program)))
.unwrap();
}
|
{
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
if args.flag_help {
println!("{}", USAGE);
return;
}
if args.flag_version {
println!("norn_rust 0.0.3");
return;
}
File::open(args.arg_file)
.map_err(|err| err.to_string())
.and_then(|file| {
vm::ir::programs::Program::parse_textual_bytecode(file)
.map_err(|err| err.to_string())
|
identifier_body
|
main.rs
|
use std::fs::File;
extern crate rustc_serialize;
extern crate docopt;
use docopt::Docopt;
mod vm;
static USAGE: &'static str = "
Usage: norn_rust <file>
norn_rust (--help | --version)
Options:
-h, --help Show this message.
-v, --version Show version of norn_rust.
";
#[derive(RustcDecodable, Debug)]
struct
|
{
arg_file: String,
flag_help: bool,
flag_version: bool
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
if args.flag_help {
println!("{}", USAGE);
return;
}
if args.flag_version {
println!("norn_rust 0.0.3");
return;
}
File::open(args.arg_file)
.map_err(|err| err.to_string())
.and_then(|file| {
vm::ir::programs::Program::parse_textual_bytecode(file)
.map_err(|err| err.to_string())
})
.and_then(|program| Ok(vm::execute(&program)))
.unwrap();
}
|
Args
|
identifier_name
|
main.rs
|
use std::fs::File;
extern crate rustc_serialize;
extern crate docopt;
use docopt::Docopt;
mod vm;
static USAGE: &'static str = "
Usage: norn_rust <file>
norn_rust (--help | --version)
Options:
-h, --help Show this message.
-v, --version Show version of norn_rust.
";
#[derive(RustcDecodable, Debug)]
struct Args {
arg_file: String,
flag_help: bool,
flag_version: bool
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
if args.flag_help
|
if args.flag_version {
println!("norn_rust 0.0.3");
return;
}
File::open(args.arg_file)
.map_err(|err| err.to_string())
.and_then(|file| {
vm::ir::programs::Program::parse_textual_bytecode(file)
.map_err(|err| err.to_string())
})
.and_then(|program| Ok(vm::execute(&program)))
.unwrap();
}
|
{
println!("{}", USAGE);
return;
}
|
conditional_block
|
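Docopt derives the `Args` field names from the usage string by convention: the positional `<file>` becomes `arg_file`, and the `--help` / `--version` flags become `flag_help` / `flag_version`. A sketch of decoding a fixed argv (the file name is made up for illustration):

let argv = vec!["norn_rust", "program.nbc"];
let args: Args = Docopt::new(USAGE)
    .and_then(|d| d.argv(argv.into_iter()).decode())
    .unwrap_or_else(|e| e.exit());
assert_eq!(args.arg_file, "program.nbc");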
timestamp.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use collections::HashSet;
use std::fmt;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct TimeStamp(u64);
const TSO_PHYSICAL_SHIFT_BITS: u64 = 18;
impl TimeStamp {
/// Create a time stamp from physical and logical components.
pub fn compose(physical: u64, logical: u64) -> TimeStamp {
TimeStamp((physical << TSO_PHYSICAL_SHIFT_BITS) + logical)
}
pub const fn zero() -> TimeStamp
|
pub const fn max() -> TimeStamp {
TimeStamp(std::u64::MAX)
}
pub const fn new(ts: u64) -> TimeStamp {
TimeStamp(ts)
}
/// Extracts physical part of a timestamp, in milliseconds.
pub fn physical(self) -> u64 {
self.0 >> TSO_PHYSICAL_SHIFT_BITS
}
pub fn next(self) -> TimeStamp {
assert!(self.0 < u64::MAX);
TimeStamp(self.0 + 1)
}
pub fn prev(self) -> TimeStamp {
assert!(self.0 > 0);
TimeStamp(self.0 - 1)
}
pub fn incr(&mut self) -> &mut TimeStamp {
assert!(self.0 < u64::MAX);
self.0 += 1;
self
}
pub fn decr(&mut self) -> &mut TimeStamp {
assert!(self.0 > 0);
self.0 -= 1;
self
}
pub fn is_zero(self) -> bool {
self.0 == 0
}
pub fn is_max(self) -> bool {
self.0 == u64::MAX
}
pub fn into_inner(self) -> u64 {
self.0
}
pub fn physical_now() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_millis() as u64
}
}
impl From<u64> for TimeStamp {
fn from(ts: u64) -> TimeStamp {
TimeStamp(ts)
}
}
impl From<&u64> for TimeStamp {
fn from(ts: &u64) -> TimeStamp {
TimeStamp(*ts)
}
}
impl fmt::Display for TimeStamp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl slog::Value for TimeStamp {
fn serialize(
&self,
record: &slog::Record,
key: slog::Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
slog::Value::serialize(&self.0, record, key, serializer)
}
}
const TS_SET_USE_VEC_LIMIT: usize = 8;
/// A hybrid immutable set for timestamps.
#[derive(Debug, Clone, PartialEq)]
pub enum TsSet {
/// When the set is empty, avoid the useless cloning of Arc.
Empty,
/// `Vec` is suitable when the set is small or barely used, and it isn't worth
/// converting a `Vec` into a `HashSet`.
Vec(Arc<[TimeStamp]>),
/// `Set` is suitable when there are many timestamps **and** it will be queried multiple times.
Set(Arc<HashSet<TimeStamp>>),
}
impl Default for TsSet {
#[inline]
fn default() -> TsSet {
TsSet::Empty
}
}
impl TsSet {
/// Create a `TsSet` from the given vec of timestamps. It will select the proper internal
/// collection type according to the size.
#[inline]
pub fn new(ts: Vec<TimeStamp>) -> Self {
if ts.is_empty() {
TsSet::Empty
} else if ts.len() <= TS_SET_USE_VEC_LIMIT {
// If there are too few elements in `ts`, use Vec directly instead of making a Set.
TsSet::Vec(ts.into())
} else {
TsSet::Set(Arc::new(ts.into_iter().collect()))
}
}
pub fn from_u64s(ts: Vec<u64>) -> Self {
// This conversion is safe because TimeStamp is a transparent wrapper over u64.
let ts = unsafe { ::std::mem::transmute::<Vec<u64>, Vec<TimeStamp>>(ts) };
Self::new(ts)
}
pub fn vec_from_u64s(ts: Vec<u64>) -> Self {
// This conversion is safe because TimeStamp is a transparent wrapper over u64.
let ts = unsafe { ::std::mem::transmute::<Vec<u64>, Vec<TimeStamp>>(ts) };
Self::vec(ts)
}
/// Create a `TsSet` from the given vec of timestamps, forcing `Vec` as the internal
/// collection type. When the set is certain to be queried at most once, this is better
/// than `TsSet::new`, since both querying a `Vec` and converting a `Vec` into a
/// `HashSet` are O(N).
#[inline]
pub fn vec(ts: Vec<TimeStamp>) -> Self {
if ts.is_empty() {
TsSet::Empty
} else {
TsSet::Vec(ts.into())
}
}
/// Query whether the given timestamp is contained in the set.
#[inline]
pub fn contains(&self, ts: TimeStamp) -> bool {
match self {
TsSet::Empty => false,
TsSet::Vec(vec) => vec.contains(&ts),
TsSet::Set(set) => set.contains(&ts),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::types::Key;
#[test]
fn test_ts() {
let physical = 1568700549751;
let logical = 108;
let ts = TimeStamp::compose(physical, logical);
assert_eq!(ts, 411225436913926252.into());
let extracted_physical = ts.physical();
assert_eq!(extracted_physical, physical);
}
#[test]
fn test_split_ts() {
let k = b"k";
let ts = TimeStamp(123);
assert!(Key::split_on_ts_for(k).is_err());
let enc = Key::from_encoded_slice(k).append_ts(ts);
let res = Key::split_on_ts_for(enc.as_encoded()).unwrap();
assert_eq!(res, (k.as_ref(), ts));
}
#[test]
fn test_ts_set() {
let s = TsSet::new(vec![]);
assert_eq!(s, TsSet::Empty);
assert!(!s.contains(1.into()));
let s = TsSet::vec(vec![]);
assert_eq!(s, TsSet::Empty);
let s = TsSet::from_u64s(vec![1, 2]);
assert_eq!(s, TsSet::Vec(vec![1.into(), 2.into()].into()));
assert!(s.contains(1.into()));
assert!(s.contains(2.into()));
assert!(!s.contains(3.into()));
let s2 = TsSet::vec(vec![1.into(), 2.into()]);
assert_eq!(s2, s);
let big_ts_list: Vec<TimeStamp> =
(0..=TS_SET_USE_VEC_LIMIT as u64).map(Into::into).collect();
let s = TsSet::new(big_ts_list.clone());
assert_eq!(
s,
TsSet::Set(Arc::new(big_ts_list.clone().into_iter().collect()))
);
assert!(s.contains(1.into()));
assert!(s.contains((TS_SET_USE_VEC_LIMIT as u64).into()));
assert!(!s.contains((TS_SET_USE_VEC_LIMIT as u64 + 1).into()));
let s = TsSet::vec(big_ts_list.clone());
assert_eq!(s, TsSet::Vec(big_ts_list.into()));
}
}
|
{
TimeStamp(0)
}
|
identifier_body
|
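A worked example of the TSO bit layout implemented above: the physical part (a millisecond clock) occupies the high bits, and the logical counter the low TSO_PHYSICAL_SHIFT_BITS (18) bits, matching the values in `test_ts`:

let ts = TimeStamp::compose(1568700549751, 108);
// 1568700549751 << 18 = 411225436913926144; adding the logical part 108
// gives 411225436913926252.
assert_eq!(ts.into_inner(), 411225436913926252);
assert_eq!(ts.physical(), 1568700549751);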
timestamp.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use collections::HashSet;
use std::fmt;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct TimeStamp(u64);
const TSO_PHYSICAL_SHIFT_BITS: u64 = 18;
impl TimeStamp {
/// Create a time stamp from physical and logical components.
pub fn compose(physical: u64, logical: u64) -> TimeStamp {
TimeStamp((physical << TSO_PHYSICAL_SHIFT_BITS) + logical)
}
pub const fn zero() -> TimeStamp {
TimeStamp(0)
}
pub const fn max() -> TimeStamp {
TimeStamp(std::u64::MAX)
}
pub const fn new(ts: u64) -> TimeStamp {
TimeStamp(ts)
}
/// Extracts physical part of a timestamp, in milliseconds.
pub fn physical(self) -> u64 {
self.0 >> TSO_PHYSICAL_SHIFT_BITS
}
pub fn next(self) -> TimeStamp {
assert!(self.0 < u64::MAX);
TimeStamp(self.0 + 1)
}
pub fn prev(self) -> TimeStamp {
assert!(self.0 > 0);
TimeStamp(self.0 - 1)
}
pub fn incr(&mut self) -> &mut TimeStamp {
assert!(self.0 < u64::MAX);
self.0 += 1;
self
}
pub fn decr(&mut self) -> &mut TimeStamp {
assert!(self.0 > 0);
self.0 -= 1;
self
}
pub fn is_zero(self) -> bool {
self.0 == 0
}
pub fn is_max(self) -> bool {
self.0 == u64::MAX
}
pub fn into_inner(self) -> u64 {
self.0
}
pub fn physical_now() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_millis() as u64
}
}
impl From<u64> for TimeStamp {
fn from(ts: u64) -> TimeStamp {
TimeStamp(ts)
}
}
impl From<&u64> for TimeStamp {
fn from(ts: &u64) -> TimeStamp {
TimeStamp(*ts)
}
}
impl fmt::Display for TimeStamp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl slog::Value for TimeStamp {
fn serialize(
&self,
record: &slog::Record,
key: slog::Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
slog::Value::serialize(&self.0, record, key, serializer)
}
}
const TS_SET_USE_VEC_LIMIT: usize = 8;
/// A hybrid immutable set for timestamps.
#[derive(Debug, Clone, PartialEq)]
pub enum TsSet {
/// When the set is empty, avoid the useless cloning of Arc.
Empty,
/// `Vec` is suitable when the set is small or barely used, and it isn't worth
/// converting a `Vec` into a `HashSet`.
Vec(Arc<[TimeStamp]>),
/// `Set` is suitable when there are many timestamps **and** it will be queried multiple times.
Set(Arc<HashSet<TimeStamp>>),
}
impl Default for TsSet {
#[inline]
fn default() -> TsSet {
TsSet::Empty
}
}
impl TsSet {
/// Create a `TsSet` from the given vec of timestamps. It will select the proper internal
/// collection type according to the size.
#[inline]
pub fn new(ts: Vec<TimeStamp>) -> Self {
if ts.is_empty() {
TsSet::Empty
} else if ts.len() <= TS_SET_USE_VEC_LIMIT {
// If there are too few elements in `ts`, use Vec directly instead of making a Set.
TsSet::Vec(ts.into())
} else {
TsSet::Set(Arc::new(ts.into_iter().collect()))
}
}
pub fn from_u64s(ts: Vec<u64>) -> Self {
// This conversion is safe because TimeStamp is a transparent wrapper over u64.
let ts = unsafe { ::std::mem::transmute::<Vec<u64>, Vec<TimeStamp>>(ts) };
Self::new(ts)
}
pub fn vec_from_u64s(ts: Vec<u64>) -> Self {
// This conversion is safe because TimeStamp is a transparent wrapper over u64.
let ts = unsafe { ::std::mem::transmute::<Vec<u64>, Vec<TimeStamp>>(ts) };
Self::vec(ts)
}
/// Create a `TsSet` from the given vec of timestamps, forcing `Vec` as the internal
/// collection type. When the set is certain to be queried at most once, this is better
/// than `TsSet::new`, since both querying a `Vec` and converting a `Vec` into a
/// `HashSet` are O(N).
#[inline]
pub fn vec(ts: Vec<TimeStamp>) -> Self {
if ts.is_empty() {
TsSet::Empty
} else {
TsSet::Vec(ts.into())
}
}
/// Query whether the given timestamp is contained in the set.
#[inline]
pub fn contains(&self, ts: TimeStamp) -> bool {
match self {
TsSet::Empty => false,
TsSet::Vec(vec) => vec.contains(&ts),
TsSet::Set(set) => set.contains(&ts),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::types::Key;
#[test]
fn test_ts() {
let physical = 1568700549751;
let logical = 108;
let ts = TimeStamp::compose(physical, logical);
assert_eq!(ts, 411225436913926252.into());
let extracted_physical = ts.physical();
assert_eq!(extracted_physical, physical);
}
#[test]
fn test_split_ts() {
let k = b"k";
let ts = TimeStamp(123);
assert!(Key::split_on_ts_for(k).is_err());
let enc = Key::from_encoded_slice(k).append_ts(ts);
let res = Key::split_on_ts_for(enc.as_encoded()).unwrap();
assert_eq!(res, (k.as_ref(), ts));
}
#[test]
fn test_ts_set() {
let s = TsSet::new(vec![]);
assert_eq!(s, TsSet::Empty);
assert!(!s.contains(1.into()));
let s = TsSet::vec(vec![]);
assert_eq!(s, TsSet::Empty);
let s = TsSet::from_u64s(vec![1, 2]);
assert_eq!(s, TsSet::Vec(vec![1.into(), 2.into()].into()));
assert!(s.contains(1.into()));
assert!(s.contains(2.into()));
assert!(!s.contains(3.into()));
let s2 = TsSet::vec(vec![1.into(), 2.into()]);
assert_eq!(s2, s);
|
let big_ts_list: Vec<TimeStamp> =
(0..=TS_SET_USE_VEC_LIMIT as u64).map(Into::into).collect();
let s = TsSet::new(big_ts_list.clone());
assert_eq!(
s,
TsSet::Set(Arc::new(big_ts_list.clone().into_iter().collect()))
);
assert!(s.contains(1.into()));
assert!(s.contains((TS_SET_USE_VEC_LIMIT as u64).into()));
assert!(!s.contains((TS_SET_USE_VEC_LIMIT as u64 + 1).into()));
let s = TsSet::vec(big_ts_list.clone());
assert_eq!(s, TsSet::Vec(big_ts_list.into()));
}
}
|
random_line_split
|
|
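How `TsSet` picks its representation, per the constructors above: up to TS_SET_USE_VEC_LIMIT (8) timestamps stay in a `Vec`, anything larger is promoted to a `HashSet`, and `TsSet::vec` skips the promotion entirely:

let small = TsSet::from_u64s(vec![1, 2, 3]);                  // stored as TsSet::Vec
let large = TsSet::new((0..=8u64).map(Into::into).collect()); // 9 items -> TsSet::Set
assert!(small.contains(2.into()));
assert!(large.contains(8.into()));
assert!(!small.contains(9.into()));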
timestamp.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use collections::HashSet;
use std::fmt;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct TimeStamp(u64);
const TSO_PHYSICAL_SHIFT_BITS: u64 = 18;
impl TimeStamp {
/// Create a time stamp from physical and logical components.
pub fn compose(physical: u64, logical: u64) -> TimeStamp {
TimeStamp((physical << TSO_PHYSICAL_SHIFT_BITS) + logical)
}
pub const fn
|
() -> TimeStamp {
TimeStamp(0)
}
pub const fn max() -> TimeStamp {
TimeStamp(std::u64::MAX)
}
pub const fn new(ts: u64) -> TimeStamp {
TimeStamp(ts)
}
/// Extracts physical part of a timestamp, in milliseconds.
pub fn physical(self) -> u64 {
self.0 >> TSO_PHYSICAL_SHIFT_BITS
}
pub fn next(self) -> TimeStamp {
assert!(self.0 < u64::MAX);
TimeStamp(self.0 + 1)
}
pub fn prev(self) -> TimeStamp {
assert!(self.0 > 0);
TimeStamp(self.0 - 1)
}
pub fn incr(&mut self) -> &mut TimeStamp {
assert!(self.0 < u64::MAX);
self.0 += 1;
self
}
pub fn decr(&mut self) -> &mut TimeStamp {
assert!(self.0 > 0);
self.0 -= 1;
self
}
pub fn is_zero(self) -> bool {
self.0 == 0
}
pub fn is_max(self) -> bool {
self.0 == u64::MAX
}
pub fn into_inner(self) -> u64 {
self.0
}
pub fn physical_now() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_millis() as u64
}
}
impl From<u64> for TimeStamp {
fn from(ts: u64) -> TimeStamp {
TimeStamp(ts)
}
}
impl From<&u64> for TimeStamp {
fn from(ts: &u64) -> TimeStamp {
TimeStamp(*ts)
}
}
impl fmt::Display for TimeStamp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl slog::Value for TimeStamp {
fn serialize(
&self,
record: &slog::Record,
key: slog::Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
slog::Value::serialize(&self.0, record, key, serializer)
}
}
const TS_SET_USE_VEC_LIMIT: usize = 8;
/// A hybrid immutable set for timestamps.
#[derive(Debug, Clone, PartialEq)]
pub enum TsSet {
/// When the set is empty, avoid the useless cloning of Arc.
Empty,
/// `Vec` is suitable when the set is small or barely used, and it isn't worth
/// converting a `Vec` into a `HashSet`.
Vec(Arc<[TimeStamp]>),
/// `Set` is suitable when there are many timestamps **and** it will be queried multiple times.
Set(Arc<HashSet<TimeStamp>>),
}
impl Default for TsSet {
#[inline]
fn default() -> TsSet {
TsSet::Empty
}
}
impl TsSet {
/// Create a `TsSet` from the given vec of timestamps. It will select the proper internal
/// collection type according to the size.
#[inline]
pub fn new(ts: Vec<TimeStamp>) -> Self {
if ts.is_empty() {
TsSet::Empty
} else if ts.len() <= TS_SET_USE_VEC_LIMIT {
// If there are too few elements in `ts`, use Vec directly instead of making a Set.
TsSet::Vec(ts.into())
} else {
TsSet::Set(Arc::new(ts.into_iter().collect()))
}
}
pub fn from_u64s(ts: Vec<u64>) -> Self {
// This conversion is safe because TimeStamp is a transparent wrapper over u64.
let ts = unsafe { ::std::mem::transmute::<Vec<u64>, Vec<TimeStamp>>(ts) };
Self::new(ts)
}
pub fn vec_from_u64s(ts: Vec<u64>) -> Self {
// This conversion is safe because TimeStamp is a transparent wrapper over u64.
let ts = unsafe { ::std::mem::transmute::<Vec<u64>, Vec<TimeStamp>>(ts) };
Self::vec(ts)
}
/// Create a `TsSet` from the given vec of timestamps, but force `Vec` as the
/// internal collection type. When the set will be queried at most once, this is
/// preferable to `TsSet::new`, since both a membership query on a `Vec` and the
/// conversion from `Vec` to `HashSet` are O(N).
#[inline]
pub fn vec(ts: Vec<TimeStamp>) -> Self {
if ts.is_empty() {
TsSet::Empty
} else {
TsSet::Vec(ts.into())
}
}
/// Query whether the given timestamp is contained in the set.
#[inline]
pub fn contains(&self, ts: TimeStamp) -> bool {
match self {
TsSet::Empty => false,
TsSet::Vec(vec) => vec.contains(&ts),
TsSet::Set(set) => set.contains(&ts),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::types::Key;
#[test]
fn test_ts() {
let physical = 1568700549751;
let logical = 108;
let ts = TimeStamp::compose(physical, logical);
assert_eq!(ts, 411225436913926252.into());
let extracted_physical = ts.physical();
assert_eq!(extracted_physical, physical);
}
#[test]
fn test_split_ts() {
let k = b"k";
let ts = TimeStamp(123);
assert!(Key::split_on_ts_for(k).is_err());
let enc = Key::from_encoded_slice(k).append_ts(ts);
let res = Key::split_on_ts_for(enc.as_encoded()).unwrap();
assert_eq!(res, (k.as_ref(), ts));
}
#[test]
fn test_ts_set() {
let s = TsSet::new(vec![]);
assert_eq!(s, TsSet::Empty);
assert!(!s.contains(1.into()));
let s = TsSet::vec(vec![]);
assert_eq!(s, TsSet::Empty);
let s = TsSet::from_u64s(vec![1, 2]);
assert_eq!(s, TsSet::Vec(vec![1.into(), 2.into()].into()));
assert!(s.contains(1.into()));
assert!(s.contains(2.into()));
assert!(!s.contains(3.into()));
let s2 = TsSet::vec(vec![1.into(), 2.into()]);
assert_eq!(s2, s);
let big_ts_list: Vec<TimeStamp> =
(0..=TS_SET_USE_VEC_LIMIT as u64).map(Into::into).collect();
let s = TsSet::new(big_ts_list.clone());
assert_eq!(
s,
TsSet::Set(Arc::new(big_ts_list.clone().into_iter().collect()))
);
assert!(s.contains(1.into()));
assert!(s.contains((TS_SET_USE_VEC_LIMIT as u64).into()));
assert!(!s.contains((TS_SET_USE_VEC_LIMIT as u64 + 1).into()));
let s = TsSet::vec(big_ts_list.clone());
assert_eq!(s, TsSet::Vec(big_ts_list.into()));
}
}
|
zero
|
identifier_name
|
timestamp.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use collections::HashSet;
use std::fmt;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct TimeStamp(u64);
const TSO_PHYSICAL_SHIFT_BITS: u64 = 18;
impl TimeStamp {
/// Create a time stamp from physical and logical components.
pub fn compose(physical: u64, logical: u64) -> TimeStamp {
TimeStamp((physical << TSO_PHYSICAL_SHIFT_BITS) + logical)
}
pub const fn zero() -> TimeStamp {
TimeStamp(0)
}
pub const fn max() -> TimeStamp {
TimeStamp(std::u64::MAX)
}
pub const fn new(ts: u64) -> TimeStamp {
TimeStamp(ts)
}
/// Extracts the physical part of a timestamp, in milliseconds.
pub fn physical(self) -> u64 {
self.0 >> TSO_PHYSICAL_SHIFT_BITS
}
pub fn next(self) -> TimeStamp {
assert!(self.0 < u64::MAX);
TimeStamp(self.0 + 1)
}
pub fn prev(self) -> TimeStamp {
assert!(self.0 > 0);
TimeStamp(self.0 - 1)
}
pub fn incr(&mut self) -> &mut TimeStamp {
assert!(self.0 < u64::MAX);
self.0 += 1;
self
}
pub fn decr(&mut self) -> &mut TimeStamp {
assert!(self.0 > 0);
self.0 -= 1;
self
}
pub fn is_zero(self) -> bool {
self.0 == 0
}
pub fn is_max(self) -> bool {
self.0 == u64::MAX
}
pub fn into_inner(self) -> u64 {
self.0
}
pub fn physical_now() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_millis() as u64
}
}
impl From<u64> for TimeStamp {
fn from(ts: u64) -> TimeStamp {
TimeStamp(ts)
}
}
impl From<&u64> for TimeStamp {
fn from(ts: &u64) -> TimeStamp {
TimeStamp(*ts)
}
}
impl fmt::Display for TimeStamp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl slog::Value for TimeStamp {
fn serialize(
&self,
record: &slog::Record,
key: slog::Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
slog::Value::serialize(&self.0, record, key, serializer)
}
}
const TS_SET_USE_VEC_LIMIT: usize = 8;
/// A hybrid immutable set for timestamps.
#[derive(Debug, Clone, PartialEq)]
pub enum TsSet {
/// When the set is empty, avoid the useless cloning of Arc.
Empty,
/// `Vec` is suitable when the set is small or barely used, and it isn't worth
/// converting a `Vec` into a `HashSet`.
Vec(Arc<[TimeStamp]>),
/// `Set` is suitable when there are many timestamps **and** it will be queried multiple times.
Set(Arc<HashSet<TimeStamp>>),
}
impl Default for TsSet {
#[inline]
fn default() -> TsSet {
TsSet::Empty
}
}
impl TsSet {
/// Create a `TsSet` from the given vec of timestamps. It will select the proper internal
/// collection type according to the size.
#[inline]
pub fn new(ts: Vec<TimeStamp>) -> Self {
if ts.is_empty() {
TsSet::Empty
} else if ts.len() <= TS_SET_USE_VEC_LIMIT {
// If there are too few elements in `ts`, use Vec directly instead of making a Set.
TsSet::Vec(ts.into())
} else {
TsSet::Set(Arc::new(ts.into_iter().collect()))
}
}
pub fn from_u64s(ts: Vec<u64>) -> Self {
// This conversion is safe because TimeStamp is a transparent wrapper over u64.
let ts = unsafe { ::std::mem::transmute::<Vec<u64>, Vec<TimeStamp>>(ts) };
Self::new(ts)
}
pub fn vec_from_u64s(ts: Vec<u64>) -> Self {
// This conversion is safe because TimeStamp is a transparent wrapper over u64.
let ts = unsafe { ::std::mem::transmute::<Vec<u64>, Vec<TimeStamp>>(ts) };
Self::vec(ts)
}
/// Create a `TsSet` from the given vec of timestamps, but force `Vec` as the
/// internal collection type. When the set will be queried at most once, this is
/// preferable to `TsSet::new`, since both a membership query on a `Vec` and the
/// conversion from `Vec` to `HashSet` are O(N).
#[inline]
pub fn vec(ts: Vec<TimeStamp>) -> Self {
if ts.is_empty()
|
else {
TsSet::Vec(ts.into())
}
}
/// Query whether the given timestamp is contained in the set.
#[inline]
pub fn contains(&self, ts: TimeStamp) -> bool {
match self {
TsSet::Empty => false,
TsSet::Vec(vec) => vec.contains(&ts),
TsSet::Set(set) => set.contains(&ts),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::types::Key;
#[test]
fn test_ts() {
let physical = 1568700549751;
let logical = 108;
let ts = TimeStamp::compose(physical, logical);
assert_eq!(ts, 411225436913926252.into());
let extracted_physical = ts.physical();
assert_eq!(extracted_physical, physical);
}
#[test]
fn test_split_ts() {
let k = b"k";
let ts = TimeStamp(123);
assert!(Key::split_on_ts_for(k).is_err());
let enc = Key::from_encoded_slice(k).append_ts(ts);
let res = Key::split_on_ts_for(enc.as_encoded()).unwrap();
assert_eq!(res, (k.as_ref(), ts));
}
#[test]
fn test_ts_set() {
let s = TsSet::new(vec![]);
assert_eq!(s, TsSet::Empty);
assert!(!s.contains(1.into()));
let s = TsSet::vec(vec![]);
assert_eq!(s, TsSet::Empty);
let s = TsSet::from_u64s(vec![1, 2]);
assert_eq!(s, TsSet::Vec(vec![1.into(), 2.into()].into()));
assert!(s.contains(1.into()));
assert!(s.contains(2.into()));
assert!(!s.contains(3.into()));
let s2 = TsSet::vec(vec![1.into(), 2.into()]);
assert_eq!(s2, s);
let big_ts_list: Vec<TimeStamp> =
(0..=TS_SET_USE_VEC_LIMIT as u64).map(Into::into).collect();
let s = TsSet::new(big_ts_list.clone());
assert_eq!(
s,
TsSet::Set(Arc::new(big_ts_list.clone().into_iter().collect()))
);
assert!(s.contains(1.into()));
assert!(s.contains((TS_SET_USE_VEC_LIMIT as u64).into()));
assert!(!s.contains((TS_SET_USE_VEC_LIMIT as u64 + 1).into()));
let s = TsSet::vec(big_ts_list.clone());
assert_eq!(s, TsSet::Vec(big_ts_list.into()));
}
}
|
{
TsSet::Empty
}
|
conditional_block
|
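A short usage sketch of the `TsSet` constructors from the records above, assuming the surrounding `TsSet`/`TimeStamp` API is in scope; it illustrates the intended rule of thumb: `vec` when the set is queried at most once, `new` when the same set is probed repeatedly.

// Sketch only: assumes the `TsSet` and `TimeStamp` definitions from the
// records above are in scope.
fn filter_committed(candidates: &[TimeStamp], committed: Vec<TimeStamp>) -> Vec<TimeStamp> {
    // Probed once per candidate, potentially many times in total, so let
    // `TsSet::new` switch to a HashSet past TS_SET_USE_VEC_LIMIT elements.
    let committed = TsSet::new(committed);
    candidates
        .iter()
        .copied()
        .filter(|ts| committed.contains(*ts))
        .collect()
}

fn is_rolled_back(ts: TimeStamp, rolled_back: Vec<TimeStamp>) -> bool {
    // Queried exactly once, so skip the O(N) Vec -> HashSet conversion.
    TsSet::vec(rolled_back).contains(ts)
}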
font_list.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use collections::hashmap::HashMap;
use font::SpecifiedFontStyle;
use gfx_font::FontHandleMethods;
use platform::font::FontHandle;
use platform::font_context::FontContextHandle;
use platform::font_list::FontListHandle;
use style::computed_values::{font_weight, font_style};
use servo_util::time::{ProfilerChan, profile};
use servo_util::time;
pub type FontFamilyMap = HashMap<~str, FontFamily>;
trait FontListHandleMethods {
fn get_available_families(&self, fctx: &FontContextHandle) -> FontFamilyMap;
fn load_variations_for_family(&self, family: &mut FontFamily);
fn get_last_resort_font_families() -> ~[~str];
}
|
family_map: FontFamilyMap,
handle: FontListHandle,
prof_chan: ProfilerChan,
}
impl FontList {
pub fn new(fctx: &FontContextHandle,
prof_chan: ProfilerChan)
-> FontList {
let handle = FontListHandle::new(fctx);
let mut list = FontList {
handle: handle,
family_map: HashMap::new(),
prof_chan: prof_chan.clone(),
};
list.refresh(fctx);
list
}
fn refresh(&mut self, _: &FontContextHandle) {
// TODO(Issue #186): don't refresh unless something actually
// changed. Does OSX have a notification for this event?
//
// Should font families with entries be invalidated/refreshed too?
profile(time::GfxRegenAvailableFontsCategory, self.prof_chan.clone(), || {
self.family_map = self.handle.get_available_families();
});
}
pub fn find_font_in_family<'a>(&'a mut self,
family_name: &~str,
style: &SpecifiedFontStyle) -> Option<&'a FontEntry> {
// TODO(Issue #188): look up localized font family names if canonical name not found
// look up canonical name
if self.family_map.contains_key(family_name) {
// FIXME: called twice! (ksh8281)
debug!("FontList: Found font family with name={:s}", family_name.to_str());
let s: &'a mut FontFamily = self.family_map.get_mut(family_name);
// TODO(Issue #192): handle generic font families, like 'serif' and 'sans-serif'.
// if such family exists, try to match style to a font
let result = s.find_font_for_style(&mut self.handle, style);
if result.is_some() {
return result;
}
None
} else {
debug!("FontList: Couldn't find font family with name={:s}", family_name.to_str());
None
}
}
pub fn get_last_resort_font_families() -> ~[~str] {
let last_resort = FontListHandle::get_last_resort_font_families();
last_resort
}
}
// Holds a specific font family, and the various font entries (faces) that belong to it.
pub struct FontFamily {
family_name: ~str,
entries: ~[FontEntry],
}
impl FontFamily {
pub fn new(family_name: &str) -> FontFamily {
FontFamily {
family_name: family_name.to_str(),
entries: ~[],
}
}
fn load_family_variations(&mut self, list: &FontListHandle) {
if self.entries.len() > 0 {
return
}
list.load_variations_for_family(self);
assert!(self.entries.len() > 0)
}
pub fn find_font_for_style<'a>(&'a mut self, list: &FontListHandle, style: &SpecifiedFontStyle)
-> Option<&'a FontEntry> {
self.load_family_variations(list);
// TODO(Issue #189): optimize lookup for
// regular/bold/italic/bolditalic with fixed offsets and a
// static decision table for fallback between these values.
// TODO(Issue #190): if not in the fast path above, do
// expensive matching of weights, etc.
for entry in self.entries.iter() {
if (style.weight.is_bold() == entry.is_bold()) &&
((style.style == font_style::italic) == entry.is_italic()) {
return Some(entry);
}
}
None
}
}
/// This struct summarizes an available font's features. In the future, this will include fiddly
/// settings such as special font table handling.
///
/// In the common case, each FontFamily will have a singleton FontEntry, or it will have the
/// standard four faces: Normal, Bold, Italic, BoldItalic.
pub struct FontEntry {
face_name: ~str,
priv weight: font_weight::T,
priv italic: bool,
handle: FontHandle,
// TODO: array of OpenType features, etc.
}
impl FontEntry {
pub fn new(handle: FontHandle) -> FontEntry {
FontEntry {
face_name: handle.face_name(),
weight: handle.boldness(),
italic: handle.is_italic(),
handle: handle
}
}
pub fn is_bold(&self) -> bool {
self.weight.is_bold()
}
pub fn is_italic(&self) -> bool {
self.italic
}
}
|
/// The platform-independent font list abstraction.
pub struct FontList {
|
random_line_split
|
font_list.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use collections::hashmap::HashMap;
use font::SpecifiedFontStyle;
use gfx_font::FontHandleMethods;
use platform::font::FontHandle;
use platform::font_context::FontContextHandle;
use platform::font_list::FontListHandle;
use style::computed_values::{font_weight, font_style};
use servo_util::time::{ProfilerChan, profile};
use servo_util::time;
pub type FontFamilyMap = HashMap<~str, FontFamily>;
trait FontListHandleMethods {
fn get_available_families(&self, fctx: &FontContextHandle) -> FontFamilyMap;
fn load_variations_for_family(&self, family: &mut FontFamily);
fn get_last_resort_font_families() -> ~[~str];
}
/// The platform-independent font list abstraction.
pub struct FontList {
family_map: FontFamilyMap,
handle: FontListHandle,
prof_chan: ProfilerChan,
}
impl FontList {
pub fn new(fctx: &FontContextHandle,
prof_chan: ProfilerChan)
-> FontList {
let handle = FontListHandle::new(fctx);
let mut list = FontList {
handle: handle,
family_map: HashMap::new(),
prof_chan: prof_chan.clone(),
};
list.refresh(fctx);
list
}
fn refresh(&mut self, _: &FontContextHandle) {
// TODO(Issue #186): don't refresh unless something actually
// changed. Does OSX have a notification for this event?
//
// Should font families with entries be invalidated/refreshed too?
profile(time::GfxRegenAvailableFontsCategory, self.prof_chan.clone(), || {
self.family_map = self.handle.get_available_families();
});
}
pub fn find_font_in_family<'a>(&'a mut self,
family_name: &~str,
style: &SpecifiedFontStyle) -> Option<&'a FontEntry> {
// TODO(Issue #188): look up localized font family names if canonical name not found
// look up canonical name
if self.family_map.contains_key(family_name) {
// FIXME: called twice! (ksh8281)
debug!("FontList: Found font family with name={:s}", family_name.to_str());
let s: &'a mut FontFamily = self.family_map.get_mut(family_name);
// TODO(Issue #192): handle generic font families, like 'serif' and 'sans-serif'.
// if such family exists, try to match style to a font
let result = s.find_font_for_style(&mut self.handle, style);
if result.is_some() {
return result;
}
None
} else {
debug!("FontList: Couldn't find font family with name={:s}", family_name.to_str());
None
}
}
pub fn get_last_resort_font_families() -> ~[~str] {
let last_resort = FontListHandle::get_last_resort_font_families();
last_resort
}
}
// Holds a specific font family, and the various font entries (faces) that belong to it.
pub struct
|
{
family_name: ~str,
entries: ~[FontEntry],
}
impl FontFamily {
pub fn new(family_name: &str) -> FontFamily {
FontFamily {
family_name: family_name.to_str(),
entries: ~[],
}
}
fn load_family_variations(&mut self, list: &FontListHandle) {
if self.entries.len() > 0 {
return
}
list.load_variations_for_family(self);
assert!(self.entries.len() > 0)
}
pub fn find_font_for_style<'a>(&'a mut self, list: &FontListHandle, style: &SpecifiedFontStyle)
-> Option<&'a FontEntry> {
self.load_family_variations(list);
// TODO(Issue #189): optimize lookup for
// regular/bold/italic/bolditalic with fixed offsets and a
// static decision table for fallback between these values.
// TODO(Issue #190): if not in the fast path above, do
// expensive matching of weights, etc.
for entry in self.entries.iter() {
if (style.weight.is_bold() == entry.is_bold()) &&
((style.style == font_style::italic) == entry.is_italic()) {
return Some(entry);
}
}
None
}
}
/// This struct summarizes an available font's features. In the future, this will include fiddly
/// settings such as special font table handling.
///
/// In the common case, each FontFamily will have a singleton FontEntry, or it will have the
/// standard four faces: Normal, Bold, Italic, BoldItalic.
pub struct FontEntry {
face_name: ~str,
priv weight: font_weight::T,
priv italic: bool,
handle: FontHandle,
// TODO: array of OpenType features, etc.
}
impl FontEntry {
pub fn new(handle: FontHandle) -> FontEntry {
FontEntry {
face_name: handle.face_name(),
weight: handle.boldness(),
italic: handle.is_italic(),
handle: handle
}
}
pub fn is_bold(&self) -> bool {
self.weight.is_bold()
}
pub fn is_italic(&self) -> bool {
self.italic
}
}
|
FontFamily
|
identifier_name
|
font_list.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use collections::hashmap::HashMap;
use font::SpecifiedFontStyle;
use gfx_font::FontHandleMethods;
use platform::font::FontHandle;
use platform::font_context::FontContextHandle;
use platform::font_list::FontListHandle;
use style::computed_values::{font_weight, font_style};
use servo_util::time::{ProfilerChan, profile};
use servo_util::time;
pub type FontFamilyMap = HashMap<~str, FontFamily>;
trait FontListHandleMethods {
fn get_available_families(&self, fctx: &FontContextHandle) -> FontFamilyMap;
fn load_variations_for_family(&self, family: &mut FontFamily);
fn get_last_resort_font_families() -> ~[~str];
}
/// The platform-independent font list abstraction.
pub struct FontList {
family_map: FontFamilyMap,
handle: FontListHandle,
prof_chan: ProfilerChan,
}
impl FontList {
pub fn new(fctx: &FontContextHandle,
prof_chan: ProfilerChan)
-> FontList {
let handle = FontListHandle::new(fctx);
let mut list = FontList {
handle: handle,
family_map: HashMap::new(),
prof_chan: prof_chan.clone(),
};
list.refresh(fctx);
list
}
fn refresh(&mut self, _: &FontContextHandle) {
// TODO(Issue #186): don't refresh unless something actually
// changed. Does OSX have a notification for this event?
//
// Should font families with entries be invalidated/refreshed too?
profile(time::GfxRegenAvailableFontsCategory, self.prof_chan.clone(), || {
self.family_map = self.handle.get_available_families();
});
}
pub fn find_font_in_family<'a>(&'a mut self,
family_name: &~str,
style: &SpecifiedFontStyle) -> Option<&'a FontEntry> {
// TODO(Issue #188): look up localized font family names if canonical name not found
// look up canonical name
if self.family_map.contains_key(family_name) {
// FIXME: called twice! (ksh8281)
debug!("FontList: Found font family with name={:s}", family_name.to_str());
let s: &'a mut FontFamily = self.family_map.get_mut(family_name);
// TODO(Issue #192): handle generic font families, like 'serif' and 'sans-serif'.
// if such family exists, try to match style to a font
let result = s.find_font_for_style(&mut self.handle, style);
if result.is_some() {
return result;
}
None
} else {
debug!("FontList: Couldn't find font family with name={:s}", family_name.to_str());
None
}
}
pub fn get_last_resort_font_families() -> ~[~str] {
let last_resort = FontListHandle::get_last_resort_font_families();
last_resort
}
}
// Holds a specific font family, and the various font entries (faces) that belong to it.
pub struct FontFamily {
family_name: ~str,
entries: ~[FontEntry],
}
impl FontFamily {
pub fn new(family_name: &str) -> FontFamily {
FontFamily {
family_name: family_name.to_str(),
entries: ~[],
}
}
fn load_family_variations(&mut self, list: &FontListHandle) {
if self.entries.len() > 0 {
return
}
list.load_variations_for_family(self);
assert!(self.entries.len() > 0)
}
pub fn find_font_for_style<'a>(&'a mut self, list: &FontListHandle, style: &SpecifiedFontStyle)
-> Option<&'a FontEntry> {
self.load_family_variations(list);
// TODO(Issue #189): optimize lookup for
// regular/bold/italic/bolditalic with fixed offsets and a
// static decision table for fallback between these values.
// TODO(Issue #190): if not in the fast path above, do
// expensive matching of weights, etc.
for entry in self.entries.iter() {
if (style.weight.is_bold() == entry.is_bold()) &&
((style.style == font_style::italic) == entry.is_italic())
|
}
None
}
}
/// This struct summarizes an available font's features. In the future, this will include fiddly
/// settings such as special font table handling.
///
/// In the common case, each FontFamily will have a singleton FontEntry, or it will have the
/// standard four faces: Normal, Bold, Italic, BoldItalic.
pub struct FontEntry {
face_name: ~str,
priv weight: font_weight::T,
priv italic: bool,
handle: FontHandle,
// TODO: array of OpenType features, etc.
}
impl FontEntry {
pub fn new(handle: FontHandle) -> FontEntry {
FontEntry {
face_name: handle.face_name(),
weight: handle.boldness(),
italic: handle.is_italic(),
handle: handle
}
}
pub fn is_bold(&self) -> bool {
self.weight.is_bold()
}
pub fn is_italic(&self) -> bool {
self.italic
}
}
|
{
return Some(entry);
}
|
conditional_block
|
font_list.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use collections::hashmap::HashMap;
use font::SpecifiedFontStyle;
use gfx_font::FontHandleMethods;
use platform::font::FontHandle;
use platform::font_context::FontContextHandle;
use platform::font_list::FontListHandle;
use style::computed_values::{font_weight, font_style};
use servo_util::time::{ProfilerChan, profile};
use servo_util::time;
pub type FontFamilyMap = HashMap<~str, FontFamily>;
trait FontListHandleMethods {
fn get_available_families(&self, fctx: &FontContextHandle) -> FontFamilyMap;
fn load_variations_for_family(&self, family: &mut FontFamily);
fn get_last_resort_font_families() -> ~[~str];
}
/// The platform-independent font list abstraction.
pub struct FontList {
family_map: FontFamilyMap,
handle: FontListHandle,
prof_chan: ProfilerChan,
}
impl FontList {
pub fn new(fctx: &FontContextHandle,
prof_chan: ProfilerChan)
-> FontList {
let handle = FontListHandle::new(fctx);
let mut list = FontList {
handle: handle,
family_map: HashMap::new(),
prof_chan: prof_chan.clone(),
};
list.refresh(fctx);
list
}
fn refresh(&mut self, _: &FontContextHandle) {
// TODO(Issue #186): don't refresh unless something actually
// changed. Does OSX have a notification for this event?
//
// Should font families with entries be invalidated/refreshed too?
profile(time::GfxRegenAvailableFontsCategory, self.prof_chan.clone(), || {
self.family_map = self.handle.get_available_families();
});
}
pub fn find_font_in_family<'a>(&'a mut self,
family_name: &~str,
style: &SpecifiedFontStyle) -> Option<&'a FontEntry> {
// TODO(Issue #188): look up localized font family names if canonical name not found
// look up canonical name
if self.family_map.contains_key(family_name) {
// FIXME: called twice! (ksh8281)
debug!("FontList: Found font family with name={:s}", family_name.to_str());
let s: &'a mut FontFamily = self.family_map.get_mut(family_name);
// TODO(Issue #192): handle generic font families, like 'serif' and 'sans-serif'.
// if such family exists, try to match style to a font
let result = s.find_font_for_style(&mut self.handle, style);
if result.is_some() {
return result;
}
None
} else {
debug!("FontList: Couldn't find font family with name={:s}", family_name.to_str());
None
}
}
pub fn get_last_resort_font_families() -> ~[~str] {
let last_resort = FontListHandle::get_last_resort_font_families();
last_resort
}
}
// Holds a specific font family, and the various font entries (faces) that belong to it.
pub struct FontFamily {
family_name: ~str,
entries: ~[FontEntry],
}
impl FontFamily {
pub fn new(family_name: &str) -> FontFamily {
FontFamily {
family_name: family_name.to_str(),
entries: ~[],
}
}
fn load_family_variations(&mut self, list: &FontListHandle) {
if self.entries.len() > 0 {
return
}
list.load_variations_for_family(self);
assert!(self.entries.len() > 0)
}
pub fn find_font_for_style<'a>(&'a mut self, list: &FontListHandle, style: &SpecifiedFontStyle)
-> Option<&'a FontEntry> {
self.load_family_variations(list);
// TODO(Issue #189): optimize lookup for
// regular/bold/italic/bolditalic with fixed offsets and a
// static decision table for fallback between these values.
// TODO(Issue #190): if not in the fast path above, do
// expensive matching of weights, etc.
for entry in self.entries.iter() {
if (style.weight.is_bold() == entry.is_bold()) &&
((style.style == font_style::italic) == entry.is_italic()) {
return Some(entry);
}
}
None
}
}
/// This struct summarizes an available font's features. In the future, this will include fiddly
/// settings such as special font table handling.
///
/// In the common case, each FontFamily will have a singleton FontEntry, or it will have the
/// standard four faces: Normal, Bold, Italic, BoldItalic.
pub struct FontEntry {
face_name: ~str,
priv weight: font_weight::T,
priv italic: bool,
handle: FontHandle,
// TODO: array of OpenType features, etc.
}
impl FontEntry {
pub fn new(handle: FontHandle) -> FontEntry {
FontEntry {
face_name: handle.face_name(),
weight: handle.boldness(),
italic: handle.is_italic(),
handle: handle
}
}
pub fn is_bold(&self) -> bool {
self.weight.is_bold()
}
pub fn is_italic(&self) -> bool
|
}
|
{
self.italic
}
|
identifier_body
|
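The `find_font_for_style` loop in the font_list.rs records above reduces face matching to two booleans: the bold flags and the italic flags must both agree. A distilled modern-Rust sketch of that decision; `Face` and `Style` here are hypothetical stand-ins for `FontEntry` and `SpecifiedFontStyle`.

// Distilled sketch of the face-matching rule in `find_font_for_style` above.
#[derive(Clone, Copy)]
struct Face {
    bold: bool,
    italic: bool,
}

#[derive(Clone, Copy)]
struct Style {
    bold: bool,
    italic: bool,
}

fn find_face(faces: &[Face], style: Style) -> Option<Face> {
    // First face whose bold and italic flags both agree with the style wins;
    // there is no fallback to "close" weights yet (see Issue #190 above).
    faces
        .iter()
        .copied()
        .find(|f| f.bold == style.bold && f.italic == style.italic)
}

fn main() {
    let faces = [
        Face { bold: false, italic: false }, // Normal
        Face { bold: true, italic: false },  // Bold
        Face { bold: false, italic: true },  // Italic
        Face { bold: true, italic: true },   // BoldItalic
    ];
    let hit = find_face(&faces, Style { bold: true, italic: false });
    assert!(hit.map_or(false, |f| f.bold && !f.italic));
}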
pattern-in-closure.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct
|
{
x: int,
y: int
}
pub fn main() {
let f = |(x, _): (int, int)| println((x + 1).to_str());
let g = |Foo { x: x, y: _y }: Foo| println((x + 1).to_str());
f((2, 3));
g(Foo { x: 1, y: 2 });
}
|
Foo
|
identifier_name
|
pattern-in-closure.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct Foo {
x: int,
y: int
}
pub fn main() {
let f = |(x, _): (int, int)| println((x + 1).to_str());
let g = |Foo { x: x, y: _y }: Foo| println((x + 1).to_str());
f((2, 3));
g(Foo { x: 1, y: 2 });
}
|
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
random_line_split
|
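The pattern-in-closure.rs records above exercise irrefutable patterns in closure parameter lists, written in pre-1.0 syntax (`int`, `println`). The same technique in modern Rust, as a sketch:

// Modern-Rust version of the pattern-in-closure test above.
struct Foo {
    x: i32,
    y: i32,
}

fn main() {
    // Tuple pattern directly in the closure's parameter list.
    let f = |(x, _): (i32, i32)| println!("{}", x + 1);
    // Struct pattern in the parameter list; `y` is bound but unused.
    let g = |Foo { x, y: _y }: Foo| println!("{}", x + 1);
    f((2, 3));
    g(Foo { x: 1, y: 2 });
}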
dns_query.rs
|
// http://rosettacode.org/wiki/DNS_query
#![feature(lookup_host)]
use std::io;
use std::net::{Ipv4Addr, Ipv6Addr};
#[derive(PartialEq)]
enum Ips {
IpV4(Ipv4Addr),
IpV6(Ipv6Addr),
}
fn get_ips(host: &str) -> io::Result<Vec<Ips>>
|
#[cfg(not(test))]
fn main() {
for ip in &(get_ips("www.kame.net").unwrap()) {
match ip {
&Ips::IpV4(ip) => println!("ip v4: {}", ip),
&Ips::IpV6(ip) => println!("ip v6: {}", ip)
}
}
}
#[cfg(test)]
mod test {
use super::{Ips, get_ips};
use std::net::{Ipv4Addr, Ipv6Addr};
use std::str::FromStr;
#[test]
fn ipv4() {
let ip = Ips::IpV4(Ipv4Addr::from_str("203.178.141.194").unwrap());
assert!(get_ips("www.kame.net").unwrap().contains(&ip));
}
#[test]
#[ignore(cfg(target_os = "win32"))]
fn ipv6() {
let ip = Ips::IpV6(Ipv6Addr::from_str("2001:200:dff:fff1:216:3eff:feb1:44d7").unwrap());
assert!(get_ips("www.kame.net").unwrap().contains(&ip));
}
}
|
{
use std::net::{self, SocketAddr};
use Ips::{IpV4, IpV6};
let hosts = try!(net::lookup_host(host));
let ips: Vec<_> = hosts.filter_map(|h|
match h {
Ok(SocketAddr::V4(s_v4)) => Some(IpV4(s_v4.ip().clone())),
Ok(SocketAddr::V6(s_v6)) => Some(IpV6(s_v6.ip().clone())),
_ => None,
}
).collect();
Ok(ips)
}
|
identifier_body
|
dns_query.rs
|
// http://rosettacode.org/wiki/DNS_query
#![feature(lookup_host)]
use std::io;
use std::net::{Ipv4Addr, Ipv6Addr};
#[derive(PartialEq)]
enum Ips {
IpV4(Ipv4Addr),
IpV6(Ipv6Addr),
}
fn get_ips(host: &str) -> io::Result<Vec<Ips>> {
use std::net::{self, SocketAddr};
use Ips::{IpV4, IpV6};
let hosts = try!(net::lookup_host(host));
let ips: Vec<_> = hosts.filter_map(|h|
match h {
Ok(SocketAddr::V4(s_v4)) => Some(IpV4(s_v4.ip().clone())),
Ok(SocketAddr::V6(s_v6)) => Some(IpV6(s_v6.ip().clone())),
_ => None,
}
).collect();
|
Ok(ips)
}
#[cfg(not(test))]
fn main() {
for ip in &(get_ips("www.kame.net").unwrap()) {
match ip {
&Ips::IpV4(ip) => println!("ip v4: {}", ip),
&Ips::IpV6(ip) => println!("ip v6: {}", ip)
}
}
}
#[cfg(test)]
mod test {
use super::{Ips, get_ips};
use std::net::{Ipv4Addr, Ipv6Addr};
use std::str::FromStr;
#[test]
fn ipv4() {
let ip = Ips::IpV4(Ipv4Addr::from_str("203.178.141.194").unwrap());
assert!(get_ips("www.kame.net").unwrap().contains(&ip));
}
#[test]
#[ignore(cfg(target_os = "win32"))]
fn ipv6() {
let ip = Ips::IpV6(Ipv6Addr::from_str("2001:200:dff:fff1:216:3eff:feb1:44d7").unwrap());
assert!(get_ips("www.kame.net").unwrap().contains(&ip));
}
}
|
random_line_split
|
|
dns_query.rs
|
// http://rosettacode.org/wiki/DNS_query
#![feature(lookup_host)]
use std::io;
use std::net::{Ipv4Addr, Ipv6Addr};
#[derive(PartialEq)]
enum
|
{
IpV4(Ipv4Addr),
IpV6(Ipv6Addr),
}
fn get_ips(host: &str) -> io::Result<Vec<Ips>> {
use std::net::{self, SocketAddr};
use Ips::{IpV4, IpV6};
let hosts = try!(net::lookup_host(host));
let ips: Vec<_> = hosts.filter_map(|h|
match h {
Ok(SocketAddr::V4(s_v4)) => Some(IpV4(s_v4.ip().clone())),
Ok(SocketAddr::V6(s_v6)) => Some(IpV6(s_v6.ip().clone())),
_ => None,
}
).collect();
Ok(ips)
}
#[cfg(not(test))]
fn main() {
for ip in &(get_ips("www.kame.net").unwrap()) {
match ip {
&Ips::IpV4(ip) => println!("ip v4: {}", ip),
&Ips::IpV6(ip) => println!("ip v6: {}", ip)
}
}
}
#[cfg(test)]
mod test {
use super::{Ips, get_ips};
use std::net::{Ipv4Addr, Ipv6Addr};
use std::str::FromStr;
#[test]
fn ipv4() {
let ip = Ips::IpV4(Ipv4Addr::from_str("203.178.141.194").unwrap());
assert!(get_ips("www.kame.net").unwrap().contains(&ip));
}
#[test]
#[ignore(cfg(target_os = "win32"))]
fn ipv6() {
let ip = Ips::IpV6(Ipv6Addr::from_str("2001:200:dff:fff1:216:3eff:feb1:44d7").unwrap());
assert!(get_ips("www.kame.net").unwrap().contains(&ip));
}
}
|
Ips
|
identifier_name
|
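The `#![feature(lookup_host)]` API used in the dns_query.rs records above was unstable and has since been removed. On stable Rust the same query can be written with `ToSocketAddrs`, which resolves a `(host, port)` pair; a sketch, where the port 0 is arbitrary and only the addresses are used:

// Stable-Rust sketch of `get_ips` above, using ToSocketAddrs instead of the
// removed `lookup_host` feature.
use std::io;
use std::net::{IpAddr, ToSocketAddrs};

fn get_ips(host: &str) -> io::Result<Vec<IpAddr>> {
    // ToSocketAddrs wants a port; it is irrelevant for address lookup.
    let addrs = (host, 0).to_socket_addrs()?;
    Ok(addrs.map(|sa| sa.ip()).collect())
}

fn main() -> io::Result<()> {
    for ip in get_ips("www.kame.net")? {
        match ip {
            IpAddr::V4(v4) => println!("ip v4: {}", v4),
            IpAddr::V6(v6) => println!("ip v6: {}", v6),
        }
    }
    Ok(())
}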
token_client.rs
|
extern crate apns2;
extern crate argparse;
use argparse::{ArgumentParser, Store, StoreTrue};
use apns2::client::TokenClient;
use apns2::apns_token::APNSToken;
use apns2::payload::{Payload, APSAlert};
use apns2::notification::{Notification, NotificationOptions};
use std::fs::File;
use std::time::Duration;
// An example client connecting to APNs with a JWT token
fn
|
() {
let mut der_file_location = String::new();
let mut team_id = String::new();
let mut key_id = String::new();
let mut device_token = String::new();
let mut message = String::from("Ch-check it out!");
let mut ca_certs = String::from("/etc/ssl/cert.pem");
let mut sandbox = false;
{
let mut ap = ArgumentParser::new();
ap.set_description("APNs token-based push");
ap.refer(&mut der_file_location).add_option(&["-e", "--der"], Store, "Private key file in DER format");
ap.refer(&mut team_id).add_option(&["-t", "--team_id"], Store, "APNs team ID");
ap.refer(&mut key_id).add_option(&["-k", "--key_id"], Store, "APNs key ID");
ap.refer(&mut device_token).add_option(&["-d", "--device_token"], Store, "APNs device token");
ap.refer(&mut message).add_option(&["-m", "--message"], Store, "Notification message");
ap.refer(&mut sandbox).add_option(&["-s", "--sandbox"], StoreTrue, "Use the development APNs servers");
ap.refer(&mut ca_certs).add_option(&["-c", "--ca_certs"], Store, "The system CA certificates PEM file");
ap.parse_args_or_exit();
}
// Read the private key from disk
let der_file = File::open(der_file_location).unwrap();
// Create a new token struct with the private key, team id and key id
// The token is valid for an hour and needs to be renewed after that
let apns_token = APNSToken::new(der_file, team_id.as_ref(), key_id.as_ref()).unwrap();
// Create a new client to APNs, giving the system CA certs
let client = TokenClient::new(sandbox, &ca_certs).unwrap();
// APNs payload
let payload = Payload::new(APSAlert::Plain(message), "default", Some(1u32), None, None);
let options = NotificationOptions {
..Default::default()
};
// Fire the request; the return value is an mpsc rx channel
let request = client.push(Notification::new(payload, &device_token, options), apns_token.signature());
// Read the response, blocking for at most 2000 milliseconds; a timeout yields an Err
let response = request.recv_timeout(Duration::from_millis(2000));
println!("{:?}", response);
}
|
main
|
identifier_name
|
token_client.rs
|
extern crate apns2;
extern crate argparse;
use argparse::{ArgumentParser, Store, StoreTrue};
use apns2::client::TokenClient;
use apns2::apns_token::APNSToken;
use apns2::payload::{Payload, APSAlert};
use apns2::notification::{Notification, NotificationOptions};
use std::fs::File;
use std::time::Duration;
// An example client connecting to APNs with a JWT token
fn main()
|
}
// Read the private key from disk
let der_file = File::open(der_file_location).unwrap();
// Create a new token struct with the private key, team id and key id
// The token is valid for an hour and needs to be renewed after that
let apns_token = APNSToken::new(der_file, team_id.as_ref(), key_id.as_ref()).unwrap();
// Create a new client to APNs, giving the system CA certs
let client = TokenClient::new(sandbox, &ca_certs).unwrap();
// APNs payload
let payload = Payload::new(APSAlert::Plain(message), "default", Some(1u32), None, None);
let options = NotificationOptions {
..Default::default()
};
// Fire the request; the return value is an mpsc rx channel
let request = client.push(Notification::new(payload, &device_token, options), apns_token.signature());
// Read the response, blocking for at most 2000 milliseconds; a timeout yields an Err
let response = request.recv_timeout(Duration::from_millis(2000));
println!("{:?}", response);
}
|
{
let mut der_file_location = String::new();
let mut team_id = String::new();
let mut key_id = String::new();
let mut device_token = String::new();
let mut message = String::from("Ch-check it out!");
let mut ca_certs = String::from("/etc/ssl/cert.pem");
let mut sandbox = false;
{
let mut ap = ArgumentParser::new();
ap.set_description("APNs token-based push");
ap.refer(&mut der_file_location).add_option(&["-e", "--der"], Store, "Private key file in DER format");
ap.refer(&mut team_id).add_option(&["-t", "--team_id"], Store, "APNs team ID");
ap.refer(&mut key_id).add_option(&["-k", "--key_id"], Store, "APNs key ID");
ap.refer(&mut device_token).add_option(&["-d", "--device_token"], Store, "APNs device token");
ap.refer(&mut message).add_option(&["-m", "--message"], Store, "Notification message");
ap.refer(&mut sandbox).add_option(&["-s", "--sandbox"], StoreTrue, "Use the development APNs servers");
ap.refer(&mut ca_certs).add_option(&["-c", "--ca_certs"], Store, "The system CA certificates PEM file");
ap.parse_args_or_exit();
|
identifier_body
|
token_client.rs
|
extern crate apns2;
extern crate argparse;
use argparse::{ArgumentParser, Store, StoreTrue};
use apns2::client::TokenClient;
use apns2::apns_token::APNSToken;
use apns2::payload::{Payload, APSAlert};
use apns2::notification::{Notification, NotificationOptions};
use std::fs::File;
use std::time::Duration;
|
let mut key_id = String::new();
let mut device_token = String::new();
let mut message = String::from("Ch-check it out!");
let mut ca_certs = String::from("/etc/ssl/cert.pem");
let mut sandbox = false;
{
let mut ap = ArgumentParser::new();
ap.set_description("APNs token-based push");
ap.refer(&mut der_file_location).add_option(&["-e", "--der"], Store, "Private key file in DER format");
ap.refer(&mut team_id).add_option(&["-t", "--team_id"], Store, "APNs team ID");
ap.refer(&mut key_id).add_option(&["-k", "--key_id"], Store, "APNs key ID");
ap.refer(&mut device_token).add_option(&["-d", "--device_token"], Store, "APNs device token");
ap.refer(&mut message).add_option(&["-m", "--message"], Store, "Notification message");
ap.refer(&mut sandbox).add_option(&["-s", "--sandbox"], StoreTrue, "Use the development APNs servers");
ap.refer(&mut ca_certs).add_option(&["-c", "--ca_certs"], Store, "The system CA certificates PEM file");
ap.parse_args_or_exit();
}
// Read the private key from disk
let der_file = File::open(der_file_location).unwrap();
// Create a new token struct with the private key, team id and key id
// The token is valid for an hour and needs to be renewed after that
let apns_token = APNSToken::new(der_file, team_id.as_ref(), key_id.as_ref()).unwrap();
// Create a new client to APNs, giving the system CA certs
let client = TokenClient::new(sandbox, &ca_certs).unwrap();
// APNs payload
let payload = Payload::new(APSAlert::Plain(message), "default", Some(1u32), None, None);
let options = NotificationOptions {
..Default::default()
};
// Fire the request; the return value is an mpsc rx channel
let request = client.push(Notification::new(payload, &device_token, options), apns_token.signature());
// Read the response, blocking for at most 2000 milliseconds; a timeout yields an Err
let response = request.recv_timeout(Duration::from_millis(2000));
println!("{:?}", response);
}
|
// An example client connecting to APNs with a JWT token
fn main() {
let mut der_file_location = String::new();
let mut team_id = String::new();
|
random_line_split
|
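The `request.recv_timeout(...)` call in the token_client.rs records above is the standard `std::sync::mpsc` timeout pattern: the client hands back a receiver and the caller bounds how long it will block. A self-contained sketch of just that pattern, with a thread standing in for the APNs push request:

// Standalone sketch of the recv_timeout pattern used by the APNs example
// above; a spawned thread plays the role of the asynchronous push request.
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = mpsc::channel();
    thread::spawn(move || {
        // Simulate a network round-trip before delivering the "response".
        thread::sleep(Duration::from_millis(100));
        let _ = tx.send("response");
    });
    // Block for at most 2000 ms; a timeout surfaces as Err(RecvTimeoutError).
    match rx.recv_timeout(Duration::from_millis(2000)) {
        Ok(response) => println!("{:?}", response),
        Err(e) => println!("timed out or disconnected: {:?}", e),
    }
}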
invalidation_map.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
use {Atom, LocalName, Namespace};
use context::QuirksMode;
use element_state::ElementState;
use selector_map::{MaybeCaseInsensitiveHashMap, SelectorMap, SelectorMapEntry};
use selector_parser::SelectorImpl;
use selectors::attr::NamespaceConstraint;
use selectors::parser::{Combinator, Component};
use selectors::parser::{Selector, SelectorIter, SelectorMethods};
use selectors::visitor::SelectorVisitor;
use smallvec::SmallVec;
#[cfg(feature = "gecko")]
/// Gets the element state relevant to the given `:dir` pseudo-class selector.
pub fn dir_selector_to_state(s: &[u16]) -> ElementState {
use element_state::{IN_LTR_STATE, IN_RTL_STATE};
// Jump through some hoops to deal with our Box<[u16]> thing.
const LTR: [u16; 4] = [b'l' as u16, b't' as u16, b'r' as u16, 0];
const RTL: [u16; 4] = [b'r' as u16, b't' as u16, b'l' as u16, 0];
if LTR == *s {
IN_LTR_STATE
} else if RTL == *s {
IN_RTL_STATE
} else {
// :dir(something-random) is a valid selector, but shouldn't
// match anything.
ElementState::empty()
}
}
/// Mapping between (partial) CompoundSelectors (and the combinator to their
/// right) and the states and attributes they depend on.
///
/// In general, for all selectors in all applicable stylesheets of the form:
///
/// |a _ b _ c _ d _ e|
///
/// Where:
/// * |b| and |d| are simple selectors that depend on state (like :hover) or
/// attributes (like [attr...], .foo, or #foo).
/// * |a|, |c|, and |e| are arbitrary simple selectors that do not depend on
/// state or attributes.
///
/// We generate a Dependency for both |a _ b:X _| and |a _ b:X _ c _ d:Y _|,
/// even though those selectors may not appear on their own in any stylesheet.
/// This allows us to quickly scan through the dependency sites of all style
/// rules and determine the maximum effect that a given state or attribute
/// change may have on the style of elements in the document.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Dependency {
/// The dependency selector.
#[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")]
pub selector: Selector<SelectorImpl>,
/// The offset into the selector that we should match on.
pub selector_offset: usize,
}
impl Dependency {
/// Returns the combinator to the right of the partial selector this
/// dependency represents.
///
/// TODO(emilio): Consider storing inline if it helps cache locality?
pub fn combinator(&self) -> Option<Combinator> {
if self.selector_offset == 0 {
return None;
}
Some(self.selector.combinator_at(self.selector_offset))
}
/// Whether this dependency affects the style of the element.
///
/// NOTE(emilio): pseudo-elements need to be here to account for eager
/// pseudos, since they just grab the style from the originating element.
///
/// TODO(emilio): We could look at the selector itself to see if it's an
/// eager pseudo, and return false here if not.
pub fn affects_self(&self) -> bool {
matches!(self.combinator(), None | Some(Combinator::PseudoElement))
}
/// Whether this dependency may affect style of any of our descendants.
pub fn affects_descendants(&self) -> bool {
matches!(self.combinator(), Some(Combinator::PseudoElement) |
Some(Combinator::Child) |
Some(Combinator::Descendant))
}
/// Whether this dependency may affect style of any of our later siblings.
pub fn affects_later_siblings(&self) -> bool {
matches!(self.combinator(), Some(Combinator::NextSibling) |
Some(Combinator::LaterSibling))
}
}
impl SelectorMapEntry for Dependency {
fn selector(&self) -> SelectorIter<SelectorImpl> {
self.selector.iter_from(self.selector_offset)
}
}
/// The same, but for state selectors, which can track more precisely which
/// states they depend on.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct StateDependency {
/// The other dependency fields.
pub dep: Dependency,
/// The state this dependency is affected by.
pub state: ElementState,
}
impl SelectorMapEntry for StateDependency {
fn selector(&self) -> SelectorIter<SelectorImpl> {
self.dep.selector()
}
}
/// A map where we store invalidations.
///
/// This is slightly different from a SelectorMap, in the sense that the same
/// selector may appear multiple times.
///
/// In particular, we want to look up as few things as possible (the fewer
/// candidate selectors the better), so this looks up by id or class, or scans
/// the list of state/other-attribute-affecting selectors.
#[derive(Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct InvalidationMap {
/// A map from a given class name to all the selectors with that class
/// selector.
pub class_to_selector: MaybeCaseInsensitiveHashMap<Atom, SelectorMap<Dependency>>,
/// A map from a given id to all the selectors with that ID in the
/// stylesheets currently applying to the document.
pub id_to_selector: MaybeCaseInsensitiveHashMap<Atom, SelectorMap<Dependency>>,
/// A map of all the state dependencies.
pub state_affecting_selectors: SelectorMap<StateDependency>,
/// A map of other attribute affecting selectors.
pub other_attribute_affecting_selectors: SelectorMap<Dependency>,
/// Whether there are attribute rules of the form `[class~="foo"]` that may
/// match. In that case, we need to look at
/// `other_attribute_affecting_selectors` too even if only the `class` has
/// changed.
pub has_class_attribute_selectors: bool,
/// Whether there are attribute rules of the form `[id|="foo"]` that may
/// match. In that case, we need to look at
/// `other_attribute_affecting_selectors` too even if only the `id` has
/// changed.
pub has_id_attribute_selectors: bool,
}
impl InvalidationMap {
/// Creates an empty `InvalidationMap`.
pub fn new() -> Self {
Self {
class_to_selector: MaybeCaseInsensitiveHashMap::new(),
id_to_selector: MaybeCaseInsensitiveHashMap::new(),
state_affecting_selectors: SelectorMap::new(),
other_attribute_affecting_selectors: SelectorMap::new(),
has_class_attribute_selectors: false,
has_id_attribute_selectors: false,
}
}
/// Returns the number of dependencies stored in the invalidation map.
pub fn len(&self) -> usize {
self.state_affecting_selectors.len() +
self.other_attribute_affecting_selectors.len() +
self.id_to_selector.iter().fold(0, |accum, (_, ref v)| {
accum + v.len()
}) +
self.class_to_selector.iter().fold(0, |accum, (_, ref v)| {
accum + v.len()
})
}
/// Adds a selector to this `InvalidationMap`.
pub fn note_selector(
&mut self,
selector: &Selector<SelectorImpl>,
quirks_mode: QuirksMode)
{
self.collect_invalidations_for(selector, quirks_mode)
}
/// Clears this map, leaving it empty.
pub fn clear(&mut self) {
self.class_to_selector.clear();
self.id_to_selector.clear();
self.state_affecting_selectors.clear();
self.other_attribute_affecting_selectors.clear();
self.has_id_attribute_selectors = false;
self.has_class_attribute_selectors = false;
}
fn collect_invalidations_for(
&mut self,
selector: &Selector<SelectorImpl>,
quirks_mode: QuirksMode)
{
debug!("InvalidationMap::collect_invalidations_for({:?})", selector);
let mut iter = selector.iter();
let mut combinator;
let mut index = 0;
loop {
let sequence_start = index;
let mut compound_visitor = CompoundSelectorDependencyCollector {
classes: SmallVec::new(),
ids: SmallVec::new(),
state: ElementState::empty(),
other_attributes: false,
has_id_attribute_selectors: false,
has_class_attribute_selectors: false,
};
// Visit all the simple selectors in this sequence.
//
// Note that this works because we can't have combinators nested
// inside simple selectors (i.e. in :not() or :-moz-any()).
//
// If we ever support that we'll need to visit nested complex
// selectors as well, in order to mark them as affecting descendants
// at least.
for ss in &mut iter {
ss.visit(&mut compound_visitor);
index += 1; // Account for the simple selector.
}
self.has_id_attribute_selectors |= compound_visitor.has_id_attribute_selectors;
self.has_class_attribute_selectors |= compound_visitor.has_class_attribute_selectors;
for class in compound_visitor.classes {
self.class_to_selector
.entry(class, quirks_mode)
.or_insert_with(SelectorMap::new)
.insert(Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
}, quirks_mode);
}
for id in compound_visitor.ids {
self.id_to_selector
.entry(id, quirks_mode)
.or_insert_with(SelectorMap::new)
.insert(Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
}, quirks_mode);
}
if !compound_visitor.state.is_empty() {
self.state_affecting_selectors
.insert(StateDependency {
dep: Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
},
state: compound_visitor.state,
}, quirks_mode);
}
if compound_visitor.other_attributes {
self.other_attribute_affecting_selectors
.insert(Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
}, quirks_mode);
}
combinator = iter.next_sequence();
if combinator.is_none() {
break;
}
index += 1; // Account for the combinator.
}
}
}
/// A struct that collects invalidations for a given compound selector.
struct CompoundSelectorDependencyCollector {
/// The state this compound selector is affected by.
state: ElementState,
/// The classes this compound selector is affected by.
///
/// NB: This will be often a single class, but could be multiple in
/// presence of :not, :-moz-any, .foo.bar.baz, etc.
classes: SmallVec<[Atom; 5]>,
/// The IDs this compound selector is affected by.
///
/// NB: This will be almost always a single id, but could be multiple in
/// presence of :not, :-moz-any, #foo#bar, etc.
ids: SmallVec<[Atom; 5]>,
/// Whether it affects other attribute-dependent selectors that aren't ID or
/// class selectors (NB: We still set this to true in presence of [class] or
/// [id] attribute selectors).
other_attributes: bool,
/// Whether there were attribute selectors with the id attribute.
has_id_attribute_selectors: bool,
/// Whether there were attribute selectors with the class attribute.
has_class_attribute_selectors: bool,
}
impl SelectorVisitor for CompoundSelectorDependencyCollector {
type Impl = SelectorImpl;
fn visit_simple_selector(&mut self, s: &Component<SelectorImpl>) -> bool {
#[cfg(feature = "gecko")]
use selector_parser::NonTSPseudoClass;
match *s {
Component::ID(ref id) => {
self.ids.push(id.clone());
}
Component::Class(ref class) => {
self.classes.push(class.clone());
}
Component::NonTSPseudoClass(ref pc) => {
self.other_attributes |= pc.is_attr_based();
self.state |= match *pc {
#[cfg(feature = "gecko")]
NonTSPseudoClass::Dir(ref s) => {
dir_selector_to_state(s)
}
_ => pc.state_flag(),
};
}
_ => {}
}
true
}
fn visit_attribute_selector(
&mut self,
constraint: &NamespaceConstraint<&Namespace>,
_local_name: &LocalName,
local_name_lower: &LocalName,
) -> bool {
self.other_attributes = true;
let may_match_in_no_namespace = match *constraint {
NamespaceConstraint::Any => true,
NamespaceConstraint::Specific(ref ns) => ns.is_empty(),
};
if may_match_in_no_namespace {
self.has_id_attribute_selectors |= *local_name_lower == local_name!("id");
self.has_class_attribute_selectors |= *local_name_lower == local_name!("class");
}
true
}
}
|
//! Code for invalidations due to state or attribute changes.
|
random_line_split
|
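The `Dependency::affects_*` methods in the invalidation_map.rs record above reduce invalidation scope to the combinator sitting to the right of the stored selector offset. A distilled standalone model of that classification; `Combinator` here is a simplified stand-in for the selectors crate's type:

// Distilled model of Dependency::affects_self/affects_descendants/
// affects_later_siblings above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Combinator {
    Child,        // `a > b`
    Descendant,   // `a b`
    NextSibling,  // `a + b`
    LaterSibling, // `a ~ b`
    PseudoElement,
}

// None models a dependency at the end of the selector (offset 0 above).
fn affects_self(c: Option<Combinator>) -> bool {
    matches!(c, None | Some(Combinator::PseudoElement))
}

fn affects_descendants(c: Option<Combinator>) -> bool {
    matches!(
        c,
        Some(Combinator::PseudoElement) | Some(Combinator::Child) | Some(Combinator::Descendant)
    )
}

fn affects_later_siblings(c: Option<Combinator>) -> bool {
    matches!(
        c,
        Some(Combinator::NextSibling) | Some(Combinator::LaterSibling)
    )
}

fn main() {
    // For `.foo:hover div`, the dependency on `.foo:hover` sits to the left of
    // a descendant combinator, so a :hover flip there can restyle descendants.
    assert!(affects_descendants(Some(Combinator::Descendant)));
    assert!(!affects_later_siblings(Some(Combinator::Descendant)));
    assert!(affects_self(None));
}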
invalidation_map.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Code for invalidations due to state or attribute changes.
use {Atom, LocalName, Namespace};
use context::QuirksMode;
use element_state::ElementState;
use selector_map::{MaybeCaseInsensitiveHashMap, SelectorMap, SelectorMapEntry};
use selector_parser::SelectorImpl;
use selectors::attr::NamespaceConstraint;
use selectors::parser::{Combinator, Component};
use selectors::parser::{Selector, SelectorIter, SelectorMethods};
use selectors::visitor::SelectorVisitor;
use smallvec::SmallVec;
#[cfg(feature = "gecko")]
/// Gets the element state relevant to the given `:dir` pseudo-class selector.
pub fn dir_selector_to_state(s: &[u16]) -> ElementState {
use element_state::{IN_LTR_STATE, IN_RTL_STATE};
// Jump through some hoops to deal with our Box<[u16]> thing.
const LTR: [u16; 4] = [b'l' as u16, b't' as u16, b'r' as u16, 0];
const RTL: [u16; 4] = [b'r' as u16, b't' as u16, b'l' as u16, 0];
if LTR == *s {
IN_LTR_STATE
} else if RTL == *s {
IN_RTL_STATE
} else {
// :dir(something-random) is a valid selector, but shouldn't
// match anything.
ElementState::empty()
}
}
/// Mapping between (partial) CompoundSelectors (and the combinator to their
/// right) and the states and attributes they depend on.
///
/// In general, for all selectors in all applicable stylesheets of the form:
///
/// |a _ b _ c _ d _ e|
///
/// Where:
/// * |b| and |d| are simple selectors that depend on state (like :hover) or
/// attributes (like [attr...], .foo, or #foo).
/// * |a|, |c|, and |e| are arbitrary simple selectors that do not depend on
/// state or attributes.
///
/// We generate a Dependency for both |a _ b:X _| and |a _ b:X _ c _ d:Y _|,
/// even though those selectors may not appear on their own in any stylesheet.
/// This allows us to quickly scan through the dependency sites of all style
/// rules and determine the maximum effect that a given state or attribute
/// change may have on the style of elements in the document.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Dependency {
/// The dependency selector.
#[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")]
pub selector: Selector<SelectorImpl>,
/// The offset into the selector that we should match on.
pub selector_offset: usize,
}
impl Dependency {
/// Returns the combinator to the right of the partial selector this
/// dependency represents.
///
/// TODO(emilio): Consider storing inline if it helps cache locality?
pub fn combinator(&self) -> Option<Combinator> {
if self.selector_offset == 0 {
return None;
}
Some(self.selector.combinator_at(self.selector_offset))
}
/// Whether this dependency affects the style of the element.
///
/// NOTE(emilio): pseudo-elements need to be here to account for eager
/// pseudos, since they just grab the style from the originating element.
///
/// TODO(emilio): We could look at the selector itself to see if it's an
/// eager pseudo, and return false here if not.
pub fn affects_self(&self) -> bool {
matches!(self.combinator(), None | Some(Combinator::PseudoElement))
}
/// Whether this dependency may affect style of any of our descendants.
pub fn affects_descendants(&self) -> bool {
matches!(self.combinator(), Some(Combinator::PseudoElement) |
Some(Combinator::Child) |
Some(Combinator::Descendant))
}
/// Whether this dependency may affect style of any of our later siblings.
pub fn
|
(&self) -> bool {
matches!(self.combinator(), Some(Combinator::NextSibling) |
Some(Combinator::LaterSibling))
}
}
impl SelectorMapEntry for Dependency {
fn selector(&self) -> SelectorIter<SelectorImpl> {
self.selector.iter_from(self.selector_offset)
}
}
/// The same, but for state selectors, which can track more precisely which
/// states they depend on.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct StateDependency {
/// The other dependency fields.
pub dep: Dependency,
/// The state this dependency is affected by.
pub state: ElementState,
}
impl SelectorMapEntry for StateDependency {
fn selector(&self) -> SelectorIter<SelectorImpl> {
self.dep.selector()
}
}
/// A map where we store invalidations.
///
/// This is slightly different from a SelectorMap, in the sense that the same
/// selector may appear multiple times.
///
/// In particular, we want to look up as few things as possible (the fewer
/// candidate selectors the better), so this looks up by id or class, or scans
/// the list of state/other-attribute-affecting selectors.
#[derive(Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct InvalidationMap {
/// A map from a given class name to all the selectors with that class
/// selector.
pub class_to_selector: MaybeCaseInsensitiveHashMap<Atom, SelectorMap<Dependency>>,
/// A map from a given id to all the selectors with that ID in the
/// stylesheets currently applying to the document.
pub id_to_selector: MaybeCaseInsensitiveHashMap<Atom, SelectorMap<Dependency>>,
/// A map of all the state dependencies.
pub state_affecting_selectors: SelectorMap<StateDependency>,
/// A map of other attribute affecting selectors.
pub other_attribute_affecting_selectors: SelectorMap<Dependency>,
/// Whether there are attribute rules of the form `[class~="foo"]` that may
/// match. In that case, we need to look at
/// `other_attribute_affecting_selectors` too even if only the `class` has
/// changed.
pub has_class_attribute_selectors: bool,
/// Whether there are attribute rules of the form `[id|="foo"]` that may
/// match. In that case, we need to look at
/// `other_attribute_affecting_selectors` too even if only the `id` has
/// changed.
pub has_id_attribute_selectors: bool,
}
impl InvalidationMap {
/// Creates an empty `InvalidationMap`.
pub fn new() -> Self {
Self {
class_to_selector: MaybeCaseInsensitiveHashMap::new(),
id_to_selector: MaybeCaseInsensitiveHashMap::new(),
state_affecting_selectors: SelectorMap::new(),
other_attribute_affecting_selectors: SelectorMap::new(),
has_class_attribute_selectors: false,
has_id_attribute_selectors: false,
}
}
/// Returns the number of dependencies stored in the invalidation map.
pub fn len(&self) -> usize {
self.state_affecting_selectors.len() +
self.other_attribute_affecting_selectors.len() +
self.id_to_selector.iter().fold(0, |accum, (_, ref v)| {
accum + v.len()
}) +
self.class_to_selector.iter().fold(0, |accum, (_, ref v)| {
accum + v.len()
})
}
/// Adds a selector to this `InvalidationMap`.
pub fn note_selector(
&mut self,
selector: &Selector<SelectorImpl>,
quirks_mode: QuirksMode)
{
self.collect_invalidations_for(selector, quirks_mode)
}
/// Clears this map, leaving it empty.
pub fn clear(&mut self) {
self.class_to_selector.clear();
self.id_to_selector.clear();
self.state_affecting_selectors.clear();
self.other_attribute_affecting_selectors.clear();
self.has_id_attribute_selectors = false;
self.has_class_attribute_selectors = false;
}
fn collect_invalidations_for(
&mut self,
selector: &Selector<SelectorImpl>,
quirks_mode: QuirksMode)
{
debug!("InvalidationMap::collect_invalidations_for({:?})", selector);
let mut iter = selector.iter();
let mut combinator;
let mut index = 0;
loop {
let sequence_start = index;
let mut compound_visitor = CompoundSelectorDependencyCollector {
classes: SmallVec::new(),
ids: SmallVec::new(),
state: ElementState::empty(),
other_attributes: false,
has_id_attribute_selectors: false,
has_class_attribute_selectors: false,
};
// Visit all the simple selectors in this sequence.
//
// Note that this works because we can't have combinators nested
// inside simple selectors (i.e. in :not() or :-moz-any()).
//
// If we ever support that we'll need to visit nested complex
// selectors as well, in order to mark them as affecting descendants
// at least.
for ss in &mut iter {
ss.visit(&mut compound_visitor);
index += 1; // Account for the simple selector.
}
self.has_id_attribute_selectors |= compound_visitor.has_id_attribute_selectors;
self.has_class_attribute_selectors |= compound_visitor.has_class_attribute_selectors;
for class in compound_visitor.classes {
self.class_to_selector
.entry(class, quirks_mode)
.or_insert_with(SelectorMap::new)
.insert(Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
}, quirks_mode);
}
for id in compound_visitor.ids {
self.id_to_selector
.entry(id, quirks_mode)
.or_insert_with(SelectorMap::new)
.insert(Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
}, quirks_mode);
}
if !compound_visitor.state.is_empty() {
self.state_affecting_selectors
.insert(StateDependency {
dep: Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
},
state: compound_visitor.state,
}, quirks_mode);
}
if compound_visitor.other_attributes {
self.other_attribute_affecting_selectors
.insert(Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
}, quirks_mode);
}
combinator = iter.next_sequence();
if combinator.is_none() {
break;
}
index += 1; // Account for the combinator.
}
}
}
/// A struct that collects invalidations for a given compound selector.
struct CompoundSelectorDependencyCollector {
/// The state this compound selector is affected by.
state: ElementState,
/// The classes this compound selector is affected by.
///
/// NB: This will be often a single class, but could be multiple in
/// presence of :not, :-moz-any, .foo.bar.baz, etc.
classes: SmallVec<[Atom; 5]>,
/// The IDs this compound selector is affected by.
///
/// NB: This will be almost always a single id, but could be multiple in
/// presence of :not, :-moz-any, #foo#bar, etc.
ids: SmallVec<[Atom; 5]>,
/// Whether it affects other attribute-dependent selectors that aren't ID or
/// class selectors (NB: We still set this to true in presence of [class] or
/// [id] attribute selectors).
other_attributes: bool,
/// Whether there were attribute selectors with the id attribute.
has_id_attribute_selectors: bool,
/// Whether there were attribute selectors with the class attribute.
has_class_attribute_selectors: bool,
}
impl SelectorVisitor for CompoundSelectorDependencyCollector {
type Impl = SelectorImpl;
fn visit_simple_selector(&mut self, s: &Component<SelectorImpl>) -> bool {
#[cfg(feature = "gecko")]
use selector_parser::NonTSPseudoClass;
match *s {
Component::ID(ref id) => {
self.ids.push(id.clone());
}
Component::Class(ref class) => {
self.classes.push(class.clone());
}
Component::NonTSPseudoClass(ref pc) => {
self.other_attributes |= pc.is_attr_based();
self.state |= match *pc {
#[cfg(feature = "gecko")]
NonTSPseudoClass::Dir(ref s) => {
dir_selector_to_state(s)
}
_ => pc.state_flag(),
};
}
_ => {}
}
true
}
fn visit_attribute_selector(
&mut self,
constraint: &NamespaceConstraint<&Namespace>,
_local_name: &LocalName,
local_name_lower: &LocalName,
) -> bool {
self.other_attributes = true;
let may_match_in_no_namespace = match *constraint {
NamespaceConstraint::Any => true,
NamespaceConstraint::Specific(ref ns) => ns.is_empty(),
};
if may_match_in_no_namespace {
self.has_id_attribute_selectors |= *local_name_lower == local_name!("id");
self.has_class_attribute_selectors |= *local_name_lower == local_name!("class");
}
true
}
}
|
affects_later_siblings
|
identifier_name
|
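// A minimal, self-contained sketch (using a hypothetical stand-in for the
// selectors crate's Combinator enum, not the real type) of how the affects_*
// predicates above partition dependencies by how far a change can reach.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Combinator { Child, Descendant, NextSibling, LaterSibling, PseudoElement }

fn affects_later_siblings(c: Option<Combinator>) -> bool {
    matches!(c, Some(Combinator::NextSibling) | Some(Combinator::LaterSibling))
}

fn main() {
    // `.foo + .bar` stores a dependency whose combinator is NextSibling, so a
    // class change on `.foo` must restyle later siblings...
    assert!(affects_later_siblings(Some(Combinator::NextSibling)));
    // ...while `.foo > .bar` (Child) only reaches descendants,
    assert!(!affects_later_siblings(Some(Combinator::Child)));
    // and no combinator at all only affects the element itself.
    assert!(!affects_later_siblings(None));
}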
invalidation_map.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Code for invalidations due to state or attribute changes.
use {Atom, LocalName, Namespace};
use context::QuirksMode;
use element_state::ElementState;
use selector_map::{MaybeCaseInsensitiveHashMap, SelectorMap, SelectorMapEntry};
use selector_parser::SelectorImpl;
use selectors::attr::NamespaceConstraint;
use selectors::parser::{Combinator, Component};
use selectors::parser::{Selector, SelectorIter, SelectorMethods};
use selectors::visitor::SelectorVisitor;
use smallvec::SmallVec;
#[cfg(feature = "gecko")]
/// Gets the element state relevant to the given `:dir` pseudo-class selector.
pub fn dir_selector_to_state(s: &[u16]) -> ElementState {
use element_state::{IN_LTR_STATE, IN_RTL_STATE};
// Jump through some hoops to deal with our Box<[u16]> thing.
const LTR: [u16; 4] = [b'l' as u16, b't' as u16, b'r' as u16, 0];
const RTL: [u16; 4] = [b'r' as u16, b't' as u16, b'l' as u16, 0];
if LTR == *s {
IN_LTR_STATE
} else if RTL == *s {
IN_RTL_STATE
} else {
// :dir(something-random) is a valid selector, but shouldn't
// match anything.
ElementState::empty()
}
}
/// Mapping between (partial) CompoundSelectors (and the combinator to their
/// right) and the states and attributes they depend on.
///
/// In general, for all selectors in all applicable stylesheets of the form:
///
/// |a _ b _ c _ d _ e|
///
/// Where:
/// * |b| and |d| are simple selectors that depend on state (like :hover) or
///   attributes (like [attr...], .foo, or #foo).
/// * |a|, |c|, and |e| are arbitrary simple selectors that do not depend on
/// state or attributes.
///
/// We generate a Dependency for both |a _ b:X _| and |a _ b:X _ c _ d:Y _|,
/// even though those selectors may not appear on their own in any stylesheet.
/// This allows us to quickly scan through the dependency sites of all style
/// rules and determine the maximum effect that a given state or attribute
/// change may have on the style of elements in the document.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Dependency {
/// The dependency selector.
#[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")]
pub selector: Selector<SelectorImpl>,
/// The offset into the selector that we should match on.
pub selector_offset: usize,
}
impl Dependency {
/// Returns the combinator to the right of the partial selector this
/// dependency represents.
///
/// TODO(emilio): Consider storing inline if it helps cache locality?
pub fn combinator(&self) -> Option<Combinator> {
if self.selector_offset == 0 {
return None;
}
Some(self.selector.combinator_at(self.selector_offset))
}
/// Whether this dependency affects the style of the element.
///
/// NOTE(emilio): pseudo-elements need to be here to account for eager
/// pseudos, since they just grab the style from the originating element.
///
/// TODO(emilio): We could look at the selector itself to see if it's an
/// eager pseudo, and return false here if not.
pub fn affects_self(&self) -> bool {
matches!(self.combinator(), None | Some(Combinator::PseudoElement))
}
/// Whether this dependency may affect style of any of our descendants.
pub fn affects_descendants(&self) -> bool {
matches!(self.combinator(), Some(Combinator::PseudoElement) |
Some(Combinator::Child) |
Some(Combinator::Descendant))
}
/// Whether this dependency may affect style of any of our later siblings.
pub fn affects_later_siblings(&self) -> bool {
matches!(self.combinator(), Some(Combinator::NextSibling) |
Some(Combinator::LaterSibling))
}
}
impl SelectorMapEntry for Dependency {
fn selector(&self) -> SelectorIter<SelectorImpl> {
self.selector.iter_from(self.selector_offset)
}
}
/// The same, but for state selectors, which can track more precisely which
/// state they depend on.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct StateDependency {
/// The other dependency fields.
pub dep: Dependency,
/// The state this dependency is affected by.
pub state: ElementState,
}
impl SelectorMapEntry for StateDependency {
fn selector(&self) -> SelectorIter<SelectorImpl>
|
}
/// A map where we store invalidations.
///
/// This is slightly different from a SelectorMap, in the sense that the same
/// selector may appear multiple times.
///
/// In particular, we want to look up as few things as possible, and get back
/// as few selectors as possible, so this looks up by id or class, or falls
/// back to the lists of state- and other-attribute-affecting selectors.
#[derive(Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct InvalidationMap {
/// A map from a given class name to all the selectors with that class
/// selector.
pub class_to_selector: MaybeCaseInsensitiveHashMap<Atom, SelectorMap<Dependency>>,
/// A map from a given id to all the selectors with that ID in the
/// stylesheets currently applying to the document.
pub id_to_selector: MaybeCaseInsensitiveHashMap<Atom, SelectorMap<Dependency>>,
/// A map of all the state dependencies.
pub state_affecting_selectors: SelectorMap<StateDependency>,
/// A map of other attribute affecting selectors.
pub other_attribute_affecting_selectors: SelectorMap<Dependency>,
/// Whether there are attribute rules of the form `[class~="foo"]` that may
/// match. In that case, we need to look at
/// `other_attribute_affecting_selectors` too even if only the `class` has
/// changed.
pub has_class_attribute_selectors: bool,
/// Whether there are attribute rules of the form `[id|="foo"]` that may
/// match. In that case, we need to look at
/// `other_attribute_affecting_selectors` too even if only the `id` has
/// changed.
pub has_id_attribute_selectors: bool,
}
impl InvalidationMap {
/// Creates an empty `InvalidationMap`.
pub fn new() -> Self {
Self {
class_to_selector: MaybeCaseInsensitiveHashMap::new(),
id_to_selector: MaybeCaseInsensitiveHashMap::new(),
state_affecting_selectors: SelectorMap::new(),
other_attribute_affecting_selectors: SelectorMap::new(),
has_class_attribute_selectors: false,
has_id_attribute_selectors: false,
}
}
/// Returns the number of dependencies stored in the invalidation map.
pub fn len(&self) -> usize {
self.state_affecting_selectors.len() +
self.other_attribute_affecting_selectors.len() +
self.id_to_selector.iter().fold(0, |accum, (_, ref v)| {
accum + v.len()
}) +
self.class_to_selector.iter().fold(0, |accum, (_, ref v)| {
accum + v.len()
})
}
/// Adds a selector to this `InvalidationMap`.
pub fn note_selector(
&mut self,
selector: &Selector<SelectorImpl>,
quirks_mode: QuirksMode)
{
self.collect_invalidations_for(selector, quirks_mode)
}
/// Clears this map, leaving it empty.
pub fn clear(&mut self) {
self.class_to_selector.clear();
self.id_to_selector.clear();
self.state_affecting_selectors.clear();
self.other_attribute_affecting_selectors.clear();
self.has_id_attribute_selectors = false;
self.has_class_attribute_selectors = false;
}
fn collect_invalidations_for(
&mut self,
selector: &Selector<SelectorImpl>,
quirks_mode: QuirksMode)
{
debug!("InvalidationMap::collect_invalidations_for({:?})", selector);
let mut iter = selector.iter();
let mut combinator;
let mut index = 0;
loop {
let sequence_start = index;
let mut compound_visitor = CompoundSelectorDependencyCollector {
classes: SmallVec::new(),
ids: SmallVec::new(),
state: ElementState::empty(),
other_attributes: false,
has_id_attribute_selectors: false,
has_class_attribute_selectors: false,
};
// Visit all the simple selectors in this sequence.
//
// Note that this works because we can't have combinators nested
// inside simple selectors (i.e. in :not() or :-moz-any()).
//
// If we ever support that we'll need to visit nested complex
// selectors as well, in order to mark them as affecting descendants
// at least.
for ss in &mut iter {
ss.visit(&mut compound_visitor);
index += 1; // Account for the simple selector.
}
self.has_id_attribute_selectors |= compound_visitor.has_id_attribute_selectors;
self.has_class_attribute_selectors |= compound_visitor.has_class_attribute_selectors;
for class in compound_visitor.classes {
self.class_to_selector
.entry(class, quirks_mode)
.or_insert_with(SelectorMap::new)
.insert(Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
}, quirks_mode);
}
for id in compound_visitor.ids {
self.id_to_selector
.entry(id, quirks_mode)
.or_insert_with(SelectorMap::new)
.insert(Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
}, quirks_mode);
}
if !compound_visitor.state.is_empty() {
self.state_affecting_selectors
.insert(StateDependency {
dep: Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
},
state: compound_visitor.state,
}, quirks_mode);
}
if compound_visitor.other_attributes {
self.other_attribute_affecting_selectors
.insert(Dependency {
selector: selector.clone(),
selector_offset: sequence_start,
}, quirks_mode);
}
combinator = iter.next_sequence();
if combinator.is_none() {
break;
}
index += 1; // Account for the combinator.
}
}
}
/// A struct that collects invalidations for a given compound selector.
struct CompoundSelectorDependencyCollector {
/// The state this compound selector is affected by.
state: ElementState,
/// The classes this compound selector is affected by.
///
/// NB: This will be often a single class, but could be multiple in
/// presence of :not, :-moz-any, .foo.bar.baz, etc.
classes: SmallVec<[Atom; 5]>,
/// The IDs this compound selector is affected by.
///
/// NB: This will be almost always a single id, but could be multiple in
/// presence of :not, :-moz-any, #foo#bar, etc.
ids: SmallVec<[Atom; 5]>,
/// Whether it affects other attribute-dependent selectors that aren't ID or
/// class selectors (NB: We still set this to true in presence of [class] or
/// [id] attribute selectors).
other_attributes: bool,
/// Whether there were attribute selectors with the id attribute.
has_id_attribute_selectors: bool,
/// Whether there were attribute selectors with the class attribute.
has_class_attribute_selectors: bool,
}
impl SelectorVisitor for CompoundSelectorDependencyCollector {
type Impl = SelectorImpl;
fn visit_simple_selector(&mut self, s: &Component<SelectorImpl>) -> bool {
#[cfg(feature = "gecko")]
use selector_parser::NonTSPseudoClass;
match *s {
Component::ID(ref id) => {
self.ids.push(id.clone());
}
Component::Class(ref class) => {
self.classes.push(class.clone());
}
Component::NonTSPseudoClass(ref pc) => {
self.other_attributes |= pc.is_attr_based();
self.state |= match *pc {
#[cfg(feature = "gecko")]
NonTSPseudoClass::Dir(ref s) => {
dir_selector_to_state(s)
}
_ => pc.state_flag(),
};
}
_ => {}
}
true
}
fn visit_attribute_selector(
&mut self,
constraint: &NamespaceConstraint<&Namespace>,
_local_name: &LocalName,
local_name_lower: &LocalName,
) -> bool {
self.other_attributes = true;
let may_match_in_no_namespace = match *constraint {
NamespaceConstraint::Any => true,
NamespaceConstraint::Specific(ref ns) => ns.is_empty(),
};
if may_match_in_no_namespace {
self.has_id_attribute_selectors |= *local_name_lower == local_name!("id");
self.has_class_attribute_selectors |= *local_name_lower == local_name!("class");
}
true
}
}
|
{
self.dep.selector()
}
|
identifier_body
|
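// A small self-contained check making the `:dir()` representation above
// concrete: the argument arrives as NUL-terminated UTF-16 code units, hence
// the Box<[u16]> hoops in dir_selector_to_state.
fn main() {
    // Mirrors the LTR constant: "ltr" plus a trailing NUL.
    const LTR: [u16; 4] = [b'l' as u16, b't' as u16, b'r' as u16, 0];
    let arg: Vec<u16> = "ltr".encode_utf16().chain(std::iter::once(0)).collect();
    assert_eq!(&LTR[..], &arg[..]);
    // Anything else fails this comparison: "rtl" hits the RTL arm, and
    // `:dir(something-random)` falls through to ElementState::empty().
    let other: Vec<u16> = "rtl".encode_utf16().chain(std::iter::once(0)).collect();
    assert_ne!(&LTR[..], &other[..]);
}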
main.rs
|
extern crate nanomsg;
use nanomsg::{Socket, Protocol};
use std::io::{Read, Write};
const ADDRESS: &'static str = "ipc:///tmp/a.ipc";
fn main() {
let mut socket = Socket::new(Protocol::Rep).unwrap();
println!("Connecting to address '{}'", ADDRESS);
let mut endpoint = socket.bind(ADDRESS).unwrap();
let mut request = String::new();
loop {
println!("Waiting for a message");
socket.read_to_string(&mut request).expect("Failed to read request");
println!("Received '{}'", request);
match request.as_ref() {
"PING" => {
reply(&mut socket, "PONG");
}
"STOP" => {
reply(&mut socket, "OK");
println!("Shutting down");
break;
},
_ => reply(&mut socket, "UNKNOWN REQUEST")
}
request.clear();
}
endpoint.shutdown().expect("Failed to shutdown gracefully");
}
fn
|
(socket: &mut Socket, reply: &str) {
socket.write_all(reply.as_bytes()).expect("Failed to send reply");
println!("Replied with '{}'", reply)
}
|
reply
|
identifier_name
|
main.rs
|
extern crate nanomsg;
use nanomsg::{Socket, Protocol};
use std::io::{Read, Write};
const ADDRESS: &'static str = "ipc:///tmp/a.ipc";
fn main() {
let mut socket = Socket::new(Protocol::Rep).unwrap();
println!("Connecting to address '{}'", ADDRESS);
let mut endpoint = socket.bind(ADDRESS).unwrap();
let mut request = String::new();
loop {
println!("Waiting for a message");
socket.read_to_string(&mut request).expect("Failed to read request");
println!("Received '{}'", request);
match request.as_ref() {
"PING" => {
reply(&mut socket, "PONG");
}
"STOP" => {
reply(&mut socket, "OK");
println!("Shutting down");
break;
},
_ => reply(&mut socket, "UNKNOWN REQUEST")
}
request.clear();
}
endpoint.shutdown().expect("Failed to shutdown gracefully");
}
fn reply(socket: &mut Socket, reply: &str)
|
{
socket.write_all(reply.as_bytes()).expect("Failed to send reply");
println!("Replied with '{}'", reply)
}
|
identifier_body
|
|
main.rs
|
extern crate nanomsg;
use nanomsg::{Socket, Protocol};
use std::io::{Read, Write};
const ADDRESS: &'static str = "ipc:///tmp/a.ipc";
fn main() {
let mut socket = Socket::new(Protocol::Rep).unwrap();
println!("Connecting to address '{}'", ADDRESS);
let mut endpoint = socket.bind(ADDRESS).unwrap();
let mut request = String::new();
loop {
println!("Waiting for a message");
socket.read_to_string(&mut request).expect("Failed to read request");
println!("Received '{}'", request);
match request.as_ref() {
"PING" => {
reply(&mut socket, "PONG");
|
}
"STOP" => {
reply(&mut socket, "OK");
println!("Shutting down");
break;
},
_ => reply(&mut socket, "UNKNOWN REQUEST")
}
request.clear();
}
endpoint.shutdown().expect("Failed to shutdown gracefully");
}
fn reply(socket: &mut Socket, reply: &str) {
socket.write_all(reply.as_bytes()).expect("Failed to send reply");
println!("Replied with '{}'", reply)
}
|
random_line_split
|
|
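// The main.rs records above all show the REP side of the nanomsg pair. For
// context, a matching REQ client would look roughly like this; it assumes the
// same nanomsg crate API the server uses, with Socket::connect as the
// counterpart of Socket::bind (a sketch, not part of the original dataset).
extern crate nanomsg;
use nanomsg::{Socket, Protocol};
use std::io::{Read, Write};

fn main() {
    let mut socket = Socket::new(Protocol::Req).unwrap();
    let mut endpoint = socket.connect("ipc:///tmp/a.ipc").unwrap();
    // One request/reply round trip per message, then a graceful stop.
    for msg in &["PING", "STOP"] {
        socket.write_all(msg.as_bytes()).expect("Failed to send request");
        let mut reply = String::new();
        socket.read_to_string(&mut reply).expect("Failed to read reply");
        println!("Got '{}'", reply); // "PONG", then "OK"
    }
    endpoint.shutdown().expect("Failed to shutdown gracefully");
}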
memory_io.rs
|
extern crate openexr;
use std::io::Cursor;
use openexr::{FrameBuffer, FrameBufferMut, Header, InputFile, PixelType, ScanlineOutputFile};
#[test]
fn
|
() {
// Target memory for writing
let mut in_memory_buffer = Cursor::new(Vec::<u8>::new());
// Write file to memory
{
let pixel_data = vec![(0.82f32, 1.78f32, 0.21f32); 256 * 256];
let mut exr_file = ScanlineOutputFile::new(
&mut in_memory_buffer,
Header::new()
.set_resolution(256, 256)
.add_channel("R", PixelType::FLOAT)
.add_channel("G", PixelType::FLOAT)
.add_channel("B", PixelType::FLOAT),
)
.unwrap();
let mut fb = FrameBuffer::new(256, 256);
fb.insert_channels(&["R", "G", "B"], &pixel_data);
exr_file.write_pixels(&fb).unwrap();
}
// Read file from memory, and verify its contents
{
let mut pixel_data = vec![(0.0f32, 0.0f32, 0.0f32); 256 * 256];
let mut exr_file = InputFile::from_slice(in_memory_buffer.get_ref()).unwrap();
let (width, height) = exr_file.header().data_dimensions();
// Make sure the image properties are the same.
assert_eq!(width, 256);
assert_eq!(height, 256);
for channel_name in &["R", "G", "B"] {
let channel = exr_file
.header()
.get_channel(channel_name)
.expect(&format!("Didn't find channel {}.", channel_name));
assert_eq!(channel.pixel_type, PixelType::FLOAT);
}
// Read in the pixel data.
{
let mut fb = FrameBufferMut::new(width, height);
fb.insert_channels(&[("R", 0.0), ("G", 0.0), ("B", 0.0)], &mut pixel_data);
exr_file.read_pixels(&mut fb).unwrap();
}
// Verify the data is what we expect
for pixel in pixel_data {
assert_eq!(pixel, (0.82, 1.78, 0.21));
}
}
}
|
memory_io
|
identifier_name
|
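// The memory_io test above leans on the fact that a Cursor<Vec<u8>> is both a
// writer and, via get_ref(), a readable byte source. Stripped of openexr, the
// in-memory round trip reduces to this sketch:
use std::io::{Cursor, Read, Write};

fn main() {
    // Write into memory...
    let mut buf = Cursor::new(Vec::<u8>::new());
    buf.write_all(b"exr bytes would go here").unwrap();
    // ...then read the same bytes back from the underlying Vec.
    let mut readback = Vec::new();
    Cursor::new(buf.get_ref().as_slice())
        .read_to_end(&mut readback)
        .unwrap();
    assert_eq!(readback, b"exr bytes would go here");
}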
memory_io.rs
|
extern crate openexr;
use std::io::Cursor;
use openexr::{FrameBuffer, FrameBufferMut, Header, InputFile, PixelType, ScanlineOutputFile};
#[test]
fn memory_io()
|
exr_file.write_pixels(&fb).unwrap();
}
// Read file from memory, and verify its contents
{
let mut pixel_data = vec![(0.0f32, 0.0f32, 0.0f32); 256 * 256];
let mut exr_file = InputFile::from_slice(in_memory_buffer.get_ref()).unwrap();
let (width, height) = exr_file.header().data_dimensions();
// Make sure the image properties are the same.
assert_eq!(width, 256);
assert_eq!(height, 256);
for channel_name in &["R", "G", "B"] {
let channel = exr_file
.header()
.get_channel(channel_name)
.expect(&format!("Didn't find channel {}.", channel_name));
assert_eq!(channel.pixel_type, PixelType::FLOAT);
}
// Read in the pixel data.
{
let mut fb = FrameBufferMut::new(width, height);
fb.insert_channels(&[("R", 0.0), ("G", 0.0), ("B", 0.0)], &mut pixel_data);
exr_file.read_pixels(&mut fb).unwrap();
}
// Verify the data is what we expect
for pixel in pixel_data {
assert_eq!(pixel, (0.82, 1.78, 0.21));
}
}
}
|
{
// Target memory for writing
let mut in_memory_buffer = Cursor::new(Vec::<u8>::new());
// Write file to memory
{
let pixel_data = vec![(0.82f32, 1.78f32, 0.21f32); 256 * 256];
let mut exr_file = ScanlineOutputFile::new(
&mut in_memory_buffer,
Header::new()
.set_resolution(256, 256)
.add_channel("R", PixelType::FLOAT)
.add_channel("G", PixelType::FLOAT)
.add_channel("B", PixelType::FLOAT),
)
.unwrap();
let mut fb = FrameBuffer::new(256, 256);
fb.insert_channels(&["R", "G", "B"], &pixel_data);
|
identifier_body
|
memory_io.rs
|
extern crate openexr;
use std::io::Cursor;
use openexr::{FrameBuffer, FrameBufferMut, Header, InputFile, PixelType, ScanlineOutputFile};
#[test]
fn memory_io() {
// Target memory for writing
let mut in_memory_buffer = Cursor::new(Vec::<u8>::new());
// Write file to memory
{
let pixel_data = vec![(0.82f32, 1.78f32, 0.21f32); 256 * 256];
let mut exr_file = ScanlineOutputFile::new(
&mut in_memory_buffer,
Header::new()
.set_resolution(256, 256)
.add_channel("R", PixelType::FLOAT)
.add_channel("G", PixelType::FLOAT)
.add_channel("B", PixelType::FLOAT),
)
.unwrap();
|
exr_file.write_pixels(&fb).unwrap();
}
// Read file from memory, and verify its contents
{
let mut pixel_data = vec![(0.0f32, 0.0f32, 0.0f32); 256 * 256];
let mut exr_file = InputFile::from_slice(in_memory_buffer.get_ref()).unwrap();
let (width, height) = exr_file.header().data_dimensions();
// Make sure the image properties are the same.
assert_eq!(width, 256);
assert_eq!(height, 256);
for channel_name in &["R", "G", "B"] {
let channel = exr_file
.header()
.get_channel(channel_name)
.expect(&format!("Didn't find channel {}.", channel_name));
assert_eq!(channel.pixel_type, PixelType::FLOAT);
}
// Read in the pixel data.
{
let mut fb = FrameBufferMut::new(width, height);
fb.insert_channels(&[("R", 0.0), ("G", 0.0), ("B", 0.0)], &mut pixel_data);
exr_file.read_pixels(&mut fb).unwrap();
}
// Verify the data is what we expect
for pixel in pixel_data {
assert_eq!(pixel, (0.82, 1.78, 0.21));
}
}
}
|
let mut fb = FrameBuffer::new(256, 256);
fb.insert_channels(&["R", "G", "B"], &pixel_data);
|
random_line_split
|
column.rs
|
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use super::*;
use tidb_query_datatype::codec::{datum, Datum};
use tidb_query_datatype::expr::EvalContext;
use tipb::{ColumnInfo, FieldType};
pub const TYPE_VAR_CHAR: i32 = 1;
pub const TYPE_LONG: i32 = 2;
#[derive(Clone)]
pub struct Column {
pub id: i64,
pub(crate) col_type: i32,
// negative means not an index key, 0 means primary key, positive means a normal index key.
pub index: i64,
pub(crate) default_val: Option<Datum>,
}
impl Column {
pub fn as_column_info(&self) -> ColumnInfo {
let mut c_info = ColumnInfo::default();
c_info.set_column_id(self.id);
c_info.set_tp(self.col_field_type());
c_info.set_pk_handle(self.index == 0);
if let Some(ref dv) = self.default_val {
c_info.set_default_val(
datum::encode_value(&mut EvalContext::default(), &[dv.clone()]).unwrap(),
)
}
c_info
|
pub fn as_field_type(&self) -> FieldType {
let mut ft = FieldType::default();
ft.set_tp(self.col_field_type());
ft
}
pub fn col_field_type(&self) -> i32 {
match self.col_type {
TYPE_LONG => 8, // FieldTypeTp::LongLong
TYPE_VAR_CHAR => 15, // FieldTypeTp::VarChar
_ => unreachable!("col_type: {}", self.col_type),
}
}
}
pub struct ColumnBuilder {
col_type: i32,
index: i64,
default_val: Option<Datum>,
}
impl ColumnBuilder {
pub fn new() -> ColumnBuilder {
ColumnBuilder {
col_type: TYPE_LONG,
index: -1,
default_val: None,
}
}
pub fn col_type(mut self, t: i32) -> ColumnBuilder {
self.col_type = t;
self
}
pub fn primary_key(mut self, b: bool) -> ColumnBuilder {
if b {
self.index = 0;
} else {
self.index = -1;
}
self
}
pub fn index_key(mut self, idx_id: i64) -> ColumnBuilder {
self.index = idx_id;
self
}
pub fn default(mut self, val: Datum) -> ColumnBuilder {
self.default_val = Some(val);
self
}
pub fn build(self) -> Column {
Column {
id: next_id(),
col_type: self.col_type,
index: self.index,
default_val: self.default_val,
}
}
}
impl Default for ColumnBuilder {
fn default() -> Self {
Self::new()
}
}
|
}
|
random_line_split
|
column.rs
|
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use super::*;
use tidb_query_datatype::codec::{datum, Datum};
use tidb_query_datatype::expr::EvalContext;
use tipb::{ColumnInfo, FieldType};
pub const TYPE_VAR_CHAR: i32 = 1;
pub const TYPE_LONG: i32 = 2;
#[derive(Clone)]
pub struct
|
{
pub id: i64,
pub(crate) col_type: i32,
// negative means not an index key, 0 means primary key, positive means a normal index key.
pub index: i64,
pub(crate) default_val: Option<Datum>,
}
impl Column {
pub fn as_column_info(&self) -> ColumnInfo {
let mut c_info = ColumnInfo::default();
c_info.set_column_id(self.id);
c_info.set_tp(self.col_field_type());
c_info.set_pk_handle(self.index == 0);
if let Some(ref dv) = self.default_val {
c_info.set_default_val(
datum::encode_value(&mut EvalContext::default(), &[dv.clone()]).unwrap(),
)
}
c_info
}
pub fn as_field_type(&self) -> FieldType {
let mut ft = FieldType::default();
ft.set_tp(self.col_field_type());
ft
}
pub fn col_field_type(&self) -> i32 {
match self.col_type {
TYPE_LONG => 8, // FieldTypeTp::LongLong
TYPE_VAR_CHAR => 15, // FieldTypeTp::VarChar
_ => unreachable!("col_type: {}", self.col_type),
}
}
}
pub struct ColumnBuilder {
col_type: i32,
index: i64,
default_val: Option<Datum>,
}
impl ColumnBuilder {
pub fn new() -> ColumnBuilder {
ColumnBuilder {
col_type: TYPE_LONG,
index: -1,
default_val: None,
}
}
pub fn col_type(mut self, t: i32) -> ColumnBuilder {
self.col_type = t;
self
}
pub fn primary_key(mut self, b: bool) -> ColumnBuilder {
if b {
self.index = 0;
} else {
self.index = -1;
}
self
}
pub fn index_key(mut self, idx_id: i64) -> ColumnBuilder {
self.index = idx_id;
self
}
pub fn default(mut self, val: Datum) -> ColumnBuilder {
self.default_val = Some(val);
self
}
pub fn build(self) -> Column {
Column {
id: next_id(),
col_type: self.col_type,
index: self.index,
default_val: self.default_val,
}
}
}
impl Default for ColumnBuilder {
fn default() -> Self {
Self::new()
}
}
|
Column
|
identifier_name
|
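// A hedged usage sketch for the ColumnBuilder above. It assumes the enclosing
// module (where next_id() and the type constants live), so it is written as a
// module-local test rather than a standalone program.
#[cfg(test)]
mod column_builder_usage {
    use super::*;

    #[test]
    fn builds_columns() {
        // index == 0 marks the primary key; -1 (the default) means "not indexed".
        let pk = ColumnBuilder::new().primary_key(true).build();
        assert_eq!(pk.index, 0);

        // A VarChar column maps to FieldTypeTp::VarChar (15) in ColumnInfo.
        let name = ColumnBuilder::new().col_type(TYPE_VAR_CHAR).build();
        assert_eq!(name.col_field_type(), 15);
    }
}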
procedural_mbe_matching.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![crate_type="dylib"]
#![feature(plugin_registrar, quote)]
extern crate syntax;
extern crate rustc;
use syntax::codemap::Span;
use syntax::parse::token::{self, str_to_ident, NtExpr, NtPat};
use syntax::ast::{TokenTree, TtToken, Pat};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager};
use syntax::ext::build::AstBuilder;
use syntax::ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
use syntax::ext::tt::macro_parser::{Success, Failure, Error};
use syntax::ptr::P;
use rustc::plugin::Registry;
fn expand_mbe_matches(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
-> Box<MacResult + 'static>
|
$arm
_ => false
}
)
}
_ => unreachable!()
}
}
Failure(_, s) | Error(_, s) => {
panic!("expected Success, but got Error/Failure: {}", s);
}
};
MacEager::expr(mac_expr)
}
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("matches", expand_mbe_matches);
}
|
{
let mbe_matcher = quote_matcher!(cx, $matched:expr, $($pat:pat)|+);
let mac_expr = match TokenTree::parse(cx, &mbe_matcher[..], args) {
Success(map) => {
match (&*map[str_to_ident("matched")], &*map[str_to_ident("pat")]) {
(&MatchedNonterminal(NtExpr(ref matched_expr)),
&MatchedSeq(ref pats, seq_sp)) => {
let pats: Vec<P<Pat>> = pats.iter().map(|pat_nt|
if let &MatchedNonterminal(NtPat(ref pat)) = &**pat_nt {
pat.clone()
} else {
unreachable!()
}
).collect();
let arm = cx.arm(seq_sp, pats, cx.expr_bool(seq_sp, true));
quote_expr!(cx,
match $matched_expr {
|
identifier_body
|
procedural_mbe_matching.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![crate_type="dylib"]
#![feature(plugin_registrar, quote)]
extern crate syntax;
extern crate rustc;
use syntax::codemap::Span;
use syntax::parse::token::{self, str_to_ident, NtExpr, NtPat};
use syntax::ast::{TokenTree, TtToken, Pat};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager};
use syntax::ext::build::AstBuilder;
use syntax::ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
use syntax::ext::tt::macro_parser::{Success, Failure, Error};
use syntax::ptr::P;
use rustc::plugin::Registry;
fn expand_mbe_matches(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
-> Box<MacResult + 'static> {
let mbe_matcher = quote_matcher!(cx, $matched:expr, $($pat:pat)|+);
let mac_expr = match TokenTree::parse(cx, &mbe_matcher[..], args) {
Success(map) => {
match (&*map[str_to_ident("matched")], &*map[str_to_ident("pat")]) {
(&MatchedNonterminal(NtExpr(ref matched_expr)),
&MatchedSeq(ref pats, seq_sp)) => {
let pats: Vec<P<Pat>> = pats.iter().map(|pat_nt|
if let &MatchedNonterminal(NtPat(ref pat)) = &**pat_nt {
pat.clone()
} else {
unreachable!()
}
).collect();
let arm = cx.arm(seq_sp, pats, cx.expr_bool(seq_sp, true));
quote_expr!(cx,
match $matched_expr {
$arm
_ => false
}
)
}
_ => unreachable!()
}
}
Failure(_, s) | Error(_, s) => {
panic!("expected Success, but got Error/Failure: {}", s);
}
};
MacEager::expr(mac_expr)
}
|
reg.register_macro("matches", expand_mbe_matches);
}
|
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
|
random_line_split
|
procedural_mbe_matching.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![crate_type="dylib"]
#![feature(plugin_registrar, quote)]
extern crate syntax;
extern crate rustc;
use syntax::codemap::Span;
use syntax::parse::token::{self, str_to_ident, NtExpr, NtPat};
use syntax::ast::{TokenTree, TtToken, Pat};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager};
use syntax::ext::build::AstBuilder;
use syntax::ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
use syntax::ext::tt::macro_parser::{Success, Failure, Error};
use syntax::ptr::P;
use rustc::plugin::Registry;
fn
|
(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
-> Box<MacResult + 'static> {
let mbe_matcher = quote_matcher!(cx, $matched:expr, $($pat:pat)|+);
let mac_expr = match TokenTree::parse(cx, &mbe_matcher[..], args) {
Success(map) => {
match (&*map[str_to_ident("matched")], &*map[str_to_ident("pat")]) {
(&MatchedNonterminal(NtExpr(ref matched_expr)),
&MatchedSeq(ref pats, seq_sp)) => {
let pats: Vec<P<Pat>> = pats.iter().map(|pat_nt|
if let &MatchedNonterminal(NtPat(ref pat)) = &**pat_nt {
pat.clone()
} else {
unreachable!()
}
).collect();
let arm = cx.arm(seq_sp, pats, cx.expr_bool(seq_sp, true));
quote_expr!(cx,
match $matched_expr {
$arm
_ => false
}
)
}
_ => unreachable!()
}
}
Failure(_, s) | Error(_, s) => {
panic!("expected Success, but got Error/Failure: {}", s);
}
};
MacEager::expr(mac_expr)
}
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("matches", expand_mbe_matches);
}
|
expand_mbe_matches
|
identifier_name
|
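// What the plugin's quote_expr! above ultimately generates is an ordinary
// `match` that yields a bool. Written out in plain, modern Rust for a
// hypothetical call like matches!(c, 'a' | 'e' | 'i' | 'o' | 'u'):
fn main() {
    let c = 'f';
    let is_vowel = match c {
        'a' | 'e' | 'i' | 'o' | 'u' => true, // the $arm built from the $pat list
        _ => false,                          // appended unconditionally by the plugin
    };
    assert!(!is_vowel);
}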
procedural_mbe_matching.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![crate_type="dylib"]
#![feature(plugin_registrar, quote)]
extern crate syntax;
extern crate rustc;
use syntax::codemap::Span;
use syntax::parse::token::{self, str_to_ident, NtExpr, NtPat};
use syntax::ast::{TokenTree, TtToken, Pat};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager};
use syntax::ext::build::AstBuilder;
use syntax::ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
use syntax::ext::tt::macro_parser::{Success, Failure, Error};
use syntax::ptr::P;
use rustc::plugin::Registry;
fn expand_mbe_matches(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
-> Box<MacResult + 'static> {
let mbe_matcher = quote_matcher!(cx, $matched:expr, $($pat:pat)|+);
let mac_expr = match TokenTree::parse(cx, &mbe_matcher[..], args) {
Success(map) => {
match (&*map[str_to_ident("matched")], &*map[str_to_ident("pat")]) {
(&MatchedNonterminal(NtExpr(ref matched_expr)),
&MatchedSeq(ref pats, seq_sp)) => {
let pats: Vec<P<Pat>> = pats.iter().map(|pat_nt|
if let &MatchedNonterminal(NtPat(ref pat)) = &**pat_nt {
pat.clone()
} else
|
).collect();
let arm = cx.arm(seq_sp, pats, cx.expr_bool(seq_sp, true));
quote_expr!(cx,
match $matched_expr {
$arm
_ => false
}
)
}
_ => unreachable!()
}
}
Failure(_, s) | Error(_, s) => {
panic!("expected Success, but got Error/Failure: {}", s);
}
};
MacEager::expr(mac_expr)
}
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("matches", expand_mbe_matches);
}
|
{
unreachable!()
}
|
conditional_block
|
fetch.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInfo;
use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInit;
use crate::dom::bindings::codegen::Bindings::ResponseBinding::ResponseBinding::ResponseMethods;
use crate::dom::bindings::codegen::Bindings::ResponseBinding::ResponseType as DOMResponseType;
use crate::dom::bindings::error::Error;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::refcounted::{Trusted, TrustedPromise};
use crate::dom::bindings::reflector::DomObject;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::globalscope::GlobalScope;
use crate::dom::headers::Guard;
use crate::dom::promise::Promise;
use crate::dom::request::Request;
use crate::dom::response::Response;
use crate::dom::serviceworkerglobalscope::ServiceWorkerGlobalScope;
use crate::network_listener::{NetworkListener, PreInvoke};
use crate::task_source::TaskSourceName;
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use js::jsapi::JSAutoCompartment;
use net_traits::request::RequestInit as NetTraitsRequestInit;
use net_traits::request::{Request as NetTraitsRequest, ServiceWorkersMode};
use net_traits::CoreResourceMsg::Fetch as NetTraitsFetch;
use net_traits::{FetchChannels, FetchResponseListener, NetworkError};
use net_traits::{FetchMetadata, FilteredMetadata, Metadata};
use servo_url::ServoUrl;
use std::mem;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
struct FetchContext {
fetch_promise: Option<TrustedPromise>,
response_object: Trusted<Response>,
body: Vec<u8>,
}
/// RAII fetch canceller object. By default it holds no canceller; however, you
/// can ask it for a cancellation receiver to send to Fetch, in which case it
/// will store the sender. You can then cancel the fetch manually, or let it
/// cancel on Drop.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct FetchCanceller {
#[ignore_malloc_size_of = "channels are hard"]
cancel_chan: Option<ipc::IpcSender<()>>,
}
impl FetchCanceller {
/// Create an empty FetchCanceller
pub fn new() -> Self
|
/// Obtain an IpcReceiver to send over to Fetch, and initialize
/// the internal sender
pub fn initialize(&mut self) -> ipc::IpcReceiver<()> {
// cancel previous fetch
self.cancel();
let (rx, tx) = ipc::channel().unwrap();
self.cancel_chan = Some(rx);
tx
}
/// Cancel a fetch if it is ongoing
pub fn cancel(&mut self) {
if let Some(chan) = self.cancel_chan.take() {
// stop trying to make fetch happen
// it's not going to happen
// The receiver will be destroyed if the request has already completed,
// so we throw away the error. Cancellation is a courtesy call;
// we don't actually care if the other side heard.
let _ = chan.send(());
}
}
/// Use this if you don't want it to send a cancellation request
/// on drop (e.g. if the fetch completes)
pub fn ignore(&mut self) {
let _ = self.cancel_chan.take();
}
}
impl Drop for FetchCanceller {
fn drop(&mut self) {
self.cancel()
}
}
fn from_referrer_to_referrer_url(request: &NetTraitsRequest) -> Option<ServoUrl> {
request.referrer.to_url().map(|url| url.clone())
}
fn request_init_from_request(request: NetTraitsRequest) -> NetTraitsRequestInit {
NetTraitsRequestInit {
method: request.method.clone(),
url: request.url(),
headers: request.headers.clone(),
unsafe_request: request.unsafe_request,
body: request.body.clone(),
destination: request.destination,
synchronous: request.synchronous,
mode: request.mode.clone(),
use_cors_preflight: request.use_cors_preflight,
credentials_mode: request.credentials_mode,
use_url_credentials: request.use_url_credentials,
origin: GlobalScope::current()
.expect("No current global object")
.origin()
.immutable()
.clone(),
referrer_url: from_referrer_to_referrer_url(&request),
referrer_policy: request.referrer_policy,
pipeline_id: request.pipeline_id,
redirect_mode: request.redirect_mode,
cache_mode: request.cache_mode,
..NetTraitsRequestInit::default()
}
}
// https://fetch.spec.whatwg.org/#fetch-method
#[allow(unrooted_must_root)]
pub fn Fetch(
global: &GlobalScope,
input: RequestInfo,
init: RootedTraceableBox<RequestInit>,
) -> Rc<Promise> {
let core_resource_thread = global.core_resource_thread();
// Step 1
let promise = Promise::new(global);
let response = Response::new(global);
// Step 2
let request = match Request::Constructor(global, input, init) {
Err(e) => {
promise.reject_error(e);
return promise;
},
Ok(r) => r.get_request(),
};
let mut request_init = request_init_from_request(request);
// Step 3
if global.downcast::<ServiceWorkerGlobalScope>().is_some() {
request_init.service_workers_mode = ServiceWorkersMode::Foreign;
}
// Step 4
response.Headers().set_guard(Guard::Immutable);
// Step 5
let (action_sender, action_receiver) = ipc::channel().unwrap();
let fetch_context = Arc::new(Mutex::new(FetchContext {
fetch_promise: Some(TrustedPromise::new(promise.clone())),
response_object: Trusted::new(&*response),
body: vec![],
}));
let listener = NetworkListener {
context: fetch_context,
task_source: global.networking_task_source(),
canceller: Some(global.task_canceller(TaskSourceName::Networking)),
};
ROUTER.add_route(
action_receiver.to_opaque(),
Box::new(move |message| {
listener.notify_fetch(message.to().unwrap());
}),
);
core_resource_thread
.send(NetTraitsFetch(
request_init,
FetchChannels::ResponseMsg(action_sender, None),
))
.unwrap();
promise
}
impl PreInvoke for FetchContext {}
impl FetchResponseListener for FetchContext {
fn process_request_body(&mut self) {
// TODO
}
fn process_request_eof(&mut self) {
// TODO
}
#[allow(unrooted_must_root)]
fn process_response(&mut self, fetch_metadata: Result<FetchMetadata, NetworkError>) {
let promise = self
.fetch_promise
.take()
.expect("fetch promise is missing")
.root();
// The JSAutoCompartment needs to be created manually;
// otherwise, Servo will crash.
let promise_cx = promise.global().get_cx();
let _ac = JSAutoCompartment::new(promise_cx, promise.reflector().get_jsobject().get());
match fetch_metadata {
// Step 4.1
Err(_) => {
promise.reject_error(Error::Type("Network error occurred".to_string()));
self.fetch_promise = Some(TrustedPromise::new(promise));
self.response_object.root().set_type(DOMResponseType::Error);
return;
},
// Step 4.2
Ok(metadata) => match metadata {
FetchMetadata::Unfiltered(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object
.root()
.set_type(DOMResponseType::Default);
},
FetchMetadata::Filtered { filtered, .. } => match filtered {
FilteredMetadata::Basic(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object.root().set_type(DOMResponseType::Basic);
},
FilteredMetadata::Cors(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object.root().set_type(DOMResponseType::Cors);
},
FilteredMetadata::Opaque => self
.response_object
.root()
.set_type(DOMResponseType::Opaque),
FilteredMetadata::OpaqueRedirect => self
.response_object
.root()
.set_type(DOMResponseType::Opaqueredirect),
},
},
}
// Step 4.3
promise.resolve_native(&self.response_object.root());
self.fetch_promise = Some(TrustedPromise::new(promise));
}
fn process_response_chunk(&mut self, mut chunk: Vec<u8>) {
self.body.append(&mut chunk);
}
fn process_response_eof(&mut self, _response: Result<(), NetworkError>) {
let response = self.response_object.root();
let global = response.global();
let cx = global.get_cx();
let _ac = JSAutoCompartment::new(cx, global.reflector().get_jsobject().get());
response.finish(mem::replace(&mut self.body, vec![]));
// TODO
// ... trailerObject is not supported in Servo yet.
}
}
fn fill_headers_with_metadata(r: DomRoot<Response>, m: Metadata) {
r.set_headers(m.headers);
r.set_raw_status(m.status);
r.set_final_url(m.final_url);
}
|
{
Default::default()
}
|
identifier_body
|
fetch.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInfo;
use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInit;
use crate::dom::bindings::codegen::Bindings::ResponseBinding::ResponseBinding::ResponseMethods;
use crate::dom::bindings::codegen::Bindings::ResponseBinding::ResponseType as DOMResponseType;
use crate::dom::bindings::error::Error;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::refcounted::{Trusted, TrustedPromise};
use crate::dom::bindings::reflector::DomObject;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::globalscope::GlobalScope;
use crate::dom::headers::Guard;
use crate::dom::promise::Promise;
use crate::dom::request::Request;
use crate::dom::response::Response;
use crate::dom::serviceworkerglobalscope::ServiceWorkerGlobalScope;
use crate::network_listener::{NetworkListener, PreInvoke};
use crate::task_source::TaskSourceName;
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use js::jsapi::JSAutoCompartment;
use net_traits::request::RequestInit as NetTraitsRequestInit;
use net_traits::request::{Request as NetTraitsRequest, ServiceWorkersMode};
use net_traits::CoreResourceMsg::Fetch as NetTraitsFetch;
use net_traits::{FetchChannels, FetchResponseListener, NetworkError};
use net_traits::{FetchMetadata, FilteredMetadata, Metadata};
use servo_url::ServoUrl;
use std::mem;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
struct FetchContext {
fetch_promise: Option<TrustedPromise>,
response_object: Trusted<Response>,
body: Vec<u8>,
}
/// RAII fetch canceller object. By default it holds no canceller; however, you
/// can ask it for a cancellation receiver to send to Fetch, in which case it
/// will store the sender. You can then cancel the fetch manually, or let it
/// cancel on Drop.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct FetchCanceller {
#[ignore_malloc_size_of = "channels are hard"]
cancel_chan: Option<ipc::IpcSender<()>>,
}
impl FetchCanceller {
/// Create an empty FetchCanceller
pub fn new() -> Self {
Default::default()
}
/// Obtain an IpcReceiver to send over to Fetch, and initialize
/// the internal sender
pub fn initialize(&mut self) -> ipc::IpcReceiver<()> {
// cancel previous fetch
self.cancel();
let (rx, tx) = ipc::channel().unwrap();
self.cancel_chan = Some(rx);
tx
}
/// Cancel a fetch if it is ongoing
pub fn cancel(&mut self) {
if let Some(chan) = self.cancel_chan.take() {
// stop trying to make fetch happen
// it's not going to happen
// The receiver will be destroyed if the request has already completed,
// so we throw away the error. Cancellation is a courtesy call;
// we don't actually care if the other side heard.
let _ = chan.send(());
}
}
/// Use this if you don't want it to send a cancellation request
/// on drop (e.g. if the fetch completes)
pub fn ignore(&mut self) {
let _ = self.cancel_chan.take();
}
}
impl Drop for FetchCanceller {
fn
|
(&mut self) {
self.cancel()
}
}
fn from_referrer_to_referrer_url(request: &NetTraitsRequest) -> Option<ServoUrl> {
request.referrer.to_url().map(|url| url.clone())
}
fn request_init_from_request(request: NetTraitsRequest) -> NetTraitsRequestInit {
NetTraitsRequestInit {
method: request.method.clone(),
url: request.url(),
headers: request.headers.clone(),
unsafe_request: request.unsafe_request,
body: request.body.clone(),
destination: request.destination,
synchronous: request.synchronous,
mode: request.mode.clone(),
use_cors_preflight: request.use_cors_preflight,
credentials_mode: request.credentials_mode,
use_url_credentials: request.use_url_credentials,
origin: GlobalScope::current()
.expect("No current global object")
.origin()
.immutable()
.clone(),
referrer_url: from_referrer_to_referrer_url(&request),
referrer_policy: request.referrer_policy,
pipeline_id: request.pipeline_id,
redirect_mode: request.redirect_mode,
cache_mode: request.cache_mode,
..NetTraitsRequestInit::default()
}
}
// https://fetch.spec.whatwg.org/#fetch-method
#[allow(unrooted_must_root)]
pub fn Fetch(
global: &GlobalScope,
input: RequestInfo,
init: RootedTraceableBox<RequestInit>,
) -> Rc<Promise> {
let core_resource_thread = global.core_resource_thread();
// Step 1
let promise = Promise::new(global);
let response = Response::new(global);
// Step 2
let request = match Request::Constructor(global, input, init) {
Err(e) => {
promise.reject_error(e);
return promise;
},
Ok(r) => r.get_request(),
};
let mut request_init = request_init_from_request(request);
// Step 3
if global.downcast::<ServiceWorkerGlobalScope>().is_some() {
request_init.service_workers_mode = ServiceWorkersMode::Foreign;
}
// Step 4
response.Headers().set_guard(Guard::Immutable);
// Step 5
let (action_sender, action_receiver) = ipc::channel().unwrap();
let fetch_context = Arc::new(Mutex::new(FetchContext {
fetch_promise: Some(TrustedPromise::new(promise.clone())),
response_object: Trusted::new(&*response),
body: vec![],
}));
let listener = NetworkListener {
context: fetch_context,
task_source: global.networking_task_source(),
canceller: Some(global.task_canceller(TaskSourceName::Networking)),
};
ROUTER.add_route(
action_receiver.to_opaque(),
Box::new(move |message| {
listener.notify_fetch(message.to().unwrap());
}),
);
core_resource_thread
.send(NetTraitsFetch(
request_init,
FetchChannels::ResponseMsg(action_sender, None),
))
.unwrap();
promise
}
impl PreInvoke for FetchContext {}
impl FetchResponseListener for FetchContext {
fn process_request_body(&mut self) {
// TODO
}
fn process_request_eof(&mut self) {
// TODO
}
#[allow(unrooted_must_root)]
fn process_response(&mut self, fetch_metadata: Result<FetchMetadata, NetworkError>) {
let promise = self
.fetch_promise
.take()
.expect("fetch promise is missing")
.root();
// The JSAutoCompartment needs to be created manually;
// otherwise, Servo will crash.
let promise_cx = promise.global().get_cx();
let _ac = JSAutoCompartment::new(promise_cx, promise.reflector().get_jsobject().get());
match fetch_metadata {
// Step 4.1
Err(_) => {
promise.reject_error(Error::Type("Network error occurred".to_string()));
self.fetch_promise = Some(TrustedPromise::new(promise));
self.response_object.root().set_type(DOMResponseType::Error);
return;
},
// Step 4.2
Ok(metadata) => match metadata {
FetchMetadata::Unfiltered(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object
.root()
.set_type(DOMResponseType::Default);
},
FetchMetadata::Filtered { filtered, .. } => match filtered {
FilteredMetadata::Basic(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object.root().set_type(DOMResponseType::Basic);
},
FilteredMetadata::Cors(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object.root().set_type(DOMResponseType::Cors);
},
FilteredMetadata::Opaque => self
.response_object
.root()
.set_type(DOMResponseType::Opaque),
FilteredMetadata::OpaqueRedirect => self
.response_object
.root()
.set_type(DOMResponseType::Opaqueredirect),
},
},
}
// Step 4.3
promise.resolve_native(&self.response_object.root());
self.fetch_promise = Some(TrustedPromise::new(promise));
}
fn process_response_chunk(&mut self, mut chunk: Vec<u8>) {
self.body.append(&mut chunk);
}
fn process_response_eof(&mut self, _response: Result<(), NetworkError>) {
let response = self.response_object.root();
let global = response.global();
let cx = global.get_cx();
let _ac = JSAutoCompartment::new(cx, global.reflector().get_jsobject().get());
response.finish(mem::replace(&mut self.body, vec![]));
// TODO
// ... trailerObject is not supported in Servo yet.
}
}
fn fill_headers_with_metadata(r: DomRoot<Response>, m: Metadata) {
r.set_headers(m.headers);
r.set_raw_status(m.status);
r.set_final_url(m.final_url);
}
|
drop
|
identifier_name
|
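// request_init_from_request above relies on Rust's struct-update syntax: the
// named fields are set explicitly and `..NetTraitsRequestInit::default()`
// fills every remaining field. A tiny self-contained illustration, using a
// hypothetical Init struct rather than the real net_traits type:
#[derive(Default, Debug, PartialEq)]
struct Init {
    synchronous: bool,
    use_cors_preflight: bool,
    retries: u8,
}

fn main() {
    // Only `synchronous` is spelled out; the rest come from Default.
    let init = Init { synchronous: true, ..Init::default() };
    assert_eq!(
        init,
        Init { synchronous: true, use_cors_preflight: false, retries: 0 }
    );
}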
fetch.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInfo;
use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInit;
use crate::dom::bindings::codegen::Bindings::ResponseBinding::ResponseBinding::ResponseMethods;
use crate::dom::bindings::codegen::Bindings::ResponseBinding::ResponseType as DOMResponseType;
use crate::dom::bindings::error::Error;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::refcounted::{Trusted, TrustedPromise};
use crate::dom::bindings::reflector::DomObject;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::globalscope::GlobalScope;
use crate::dom::headers::Guard;
use crate::dom::promise::Promise;
use crate::dom::request::Request;
use crate::dom::response::Response;
use crate::dom::serviceworkerglobalscope::ServiceWorkerGlobalScope;
use crate::network_listener::{NetworkListener, PreInvoke};
use crate::task_source::TaskSourceName;
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use js::jsapi::JSAutoCompartment;
use net_traits::request::RequestInit as NetTraitsRequestInit;
use net_traits::request::{Request as NetTraitsRequest, ServiceWorkersMode};
use net_traits::CoreResourceMsg::Fetch as NetTraitsFetch;
use net_traits::{FetchChannels, FetchResponseListener, NetworkError};
use net_traits::{FetchMetadata, FilteredMetadata, Metadata};
use servo_url::ServoUrl;
use std::mem;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
struct FetchContext {
fetch_promise: Option<TrustedPromise>,
response_object: Trusted<Response>,
body: Vec<u8>,
}
/// RAII fetch canceller object. By default it holds no canceller; however, you
/// can ask it for a cancellation receiver to send to Fetch, in which case it
/// will store the sender. You can then cancel the fetch manually, or let it
/// cancel on Drop.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct FetchCanceller {
#[ignore_malloc_size_of = "channels are hard"]
cancel_chan: Option<ipc::IpcSender<()>>,
}
impl FetchCanceller {
/// Create an empty FetchCanceller
pub fn new() -> Self {
Default::default()
}
/// Obtain an IpcReceiver to send over to Fetch, and initialize
/// the internal sender
pub fn initialize(&mut self) -> ipc::IpcReceiver<()> {
// cancel previous fetch
self.cancel();
let (rx, tx) = ipc::channel().unwrap();
self.cancel_chan = Some(rx);
tx
}
/// Cancel a fetch if it is ongoing
pub fn cancel(&mut self) {
if let Some(chan) = self.cancel_chan.take() {
// stop trying to make fetch happen
// it's not going to happen
// The receiver will be destroyed if the request has already completed,
// so we throw away the error. Cancellation is a courtesy call;
// we don't actually care if the other side heard.
let _ = chan.send(());
}
}
/// Use this if you don't want it to send a cancellation request
/// on drop (e.g. if the fetch completes)
pub fn ignore(&mut self) {
let _ = self.cancel_chan.take();
}
}
impl Drop for FetchCanceller {
fn drop(&mut self) {
self.cancel()
}
}
fn from_referrer_to_referrer_url(request: &NetTraitsRequest) -> Option<ServoUrl> {
request.referrer.to_url().map(|url| url.clone())
}
fn request_init_from_request(request: NetTraitsRequest) -> NetTraitsRequestInit {
NetTraitsRequestInit {
method: request.method.clone(),
url: request.url(),
headers: request.headers.clone(),
unsafe_request: request.unsafe_request,
body: request.body.clone(),
destination: request.destination,
synchronous: request.synchronous,
mode: request.mode.clone(),
use_cors_preflight: request.use_cors_preflight,
credentials_mode: request.credentials_mode,
use_url_credentials: request.use_url_credentials,
origin: GlobalScope::current()
.expect("No current global object")
.origin()
.immutable()
.clone(),
referrer_url: from_referrer_to_referrer_url(&request),
referrer_policy: request.referrer_policy,
pipeline_id: request.pipeline_id,
redirect_mode: request.redirect_mode,
cache_mode: request.cache_mode,
..NetTraitsRequestInit::default()
}
}
// https://fetch.spec.whatwg.org/#fetch-method
#[allow(unrooted_must_root)]
pub fn Fetch(
global: &GlobalScope,
input: RequestInfo,
init: RootedTraceableBox<RequestInit>,
) -> Rc<Promise> {
let core_resource_thread = global.core_resource_thread();
// Step 1
let promise = Promise::new(global);
let response = Response::new(global);
// Step 2
let request = match Request::Constructor(global, input, init) {
Err(e) => {
promise.reject_error(e);
return promise;
},
Ok(r) => r.get_request(),
};
let mut request_init = request_init_from_request(request);
// Step 3
if global.downcast::<ServiceWorkerGlobalScope>().is_some() {
request_init.service_workers_mode = ServiceWorkersMode::Foreign;
}
|
// Step 5
let (action_sender, action_receiver) = ipc::channel().unwrap();
let fetch_context = Arc::new(Mutex::new(FetchContext {
fetch_promise: Some(TrustedPromise::new(promise.clone())),
response_object: Trusted::new(&*response),
body: vec![],
}));
let listener = NetworkListener {
context: fetch_context,
task_source: global.networking_task_source(),
canceller: Some(global.task_canceller(TaskSourceName::Networking)),
};
ROUTER.add_route(
action_receiver.to_opaque(),
Box::new(move |message| {
listener.notify_fetch(message.to().unwrap());
}),
);
core_resource_thread
.send(NetTraitsFetch(
request_init,
FetchChannels::ResponseMsg(action_sender, None),
))
.unwrap();
promise
}
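// The `None` passed to FetchChannels::ResponseMsg above is the slot for an
// optional cancellation receiver. A hedged sketch of wiring in the
// FetchCanceller defined earlier (this function does not do so):
//
//     let mut canceller = FetchCanceller::new();
//     FetchChannels::ResponseMsg(action_sender, Some(canceller.initialize()))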
impl PreInvoke for FetchContext {}
impl FetchResponseListener for FetchContext {
fn process_request_body(&mut self) {
// TODO
}
fn process_request_eof(&mut self) {
// TODO
}
#[allow(unrooted_must_root)]
fn process_response(&mut self, fetch_metadata: Result<FetchMetadata, NetworkError>) {
let promise = self
.fetch_promise
.take()
.expect("fetch promise is missing")
.root();
// A JSAutoCompartment for the promise's compartment must be entered
// manually here; otherwise, Servo will crash.
let promise_cx = promise.global().get_cx();
let _ac = JSAutoCompartment::new(promise_cx, promise.reflector().get_jsobject().get());
match fetch_metadata {
// Step 4.1
Err(_) => {
promise.reject_error(Error::Type("Network error occurred".to_string()));
self.fetch_promise = Some(TrustedPromise::new(promise));
self.response_object.root().set_type(DOMResponseType::Error);
return;
},
// Step 4.2
Ok(metadata) => match metadata {
FetchMetadata::Unfiltered(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object
.root()
.set_type(DOMResponseType::Default);
},
FetchMetadata::Filtered { filtered, .. } => match filtered {
FilteredMetadata::Basic(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object.root().set_type(DOMResponseType::Basic);
},
FilteredMetadata::Cors(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object.root().set_type(DOMResponseType::Cors);
},
FilteredMetadata::Opaque => self
.response_object
.root()
.set_type(DOMResponseType::Opaque),
FilteredMetadata::OpaqueRedirect => self
.response_object
.root()
.set_type(DOMResponseType::Opaqueredirect),
},
},
}
// Step 4.3
promise.resolve_native(&self.response_object.root());
self.fetch_promise = Some(TrustedPromise::new(promise));
}
fn process_response_chunk(&mut self, mut chunk: Vec<u8>) {
self.body.append(&mut chunk);
}
fn process_response_eof(&mut self, _response: Result<(), NetworkError>) {
let response = self.response_object.root();
let global = response.global();
let cx = global.get_cx();
let _ac = JSAutoCompartment::new(cx, global.reflector().get_jsobject().get());
response.finish(mem::replace(&mut self.body, vec![]));
// TODO
//... trailerObject is not supported in Servo yet.
}
}
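// Callback order implied by the listener above for a successful fetch:
// process_response delivers the metadata and resolves the promise with the
// Response object, process_response_chunk runs zero or more times to
// accumulate bytes into `body`, and process_response_eof finally hands the
// accumulated body to the Response.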
fn fill_headers_with_metadata(r: DomRoot<Response>, m: Metadata) {
r.set_headers(m.headers);
r.set_raw_status(m.status);
r.set_final_url(m.final_url);
}
|
// Step 4
response.Headers().set_guard(Guard::Immutable);
|
random_line_split
|
fetch.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInfo;
use crate::dom::bindings::codegen::Bindings::RequestBinding::RequestInit;
use crate::dom::bindings::codegen::Bindings::ResponseBinding::ResponseBinding::ResponseMethods;
use crate::dom::bindings::codegen::Bindings::ResponseBinding::ResponseType as DOMResponseType;
use crate::dom::bindings::error::Error;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::refcounted::{Trusted, TrustedPromise};
use crate::dom::bindings::reflector::DomObject;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::globalscope::GlobalScope;
use crate::dom::headers::Guard;
use crate::dom::promise::Promise;
use crate::dom::request::Request;
use crate::dom::response::Response;
use crate::dom::serviceworkerglobalscope::ServiceWorkerGlobalScope;
use crate::network_listener::{NetworkListener, PreInvoke};
use crate::task_source::TaskSourceName;
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use js::jsapi::JSAutoCompartment;
use net_traits::request::RequestInit as NetTraitsRequestInit;
use net_traits::request::{Request as NetTraitsRequest, ServiceWorkersMode};
use net_traits::CoreResourceMsg::Fetch as NetTraitsFetch;
use net_traits::{FetchChannels, FetchResponseListener, NetworkError};
use net_traits::{FetchMetadata, FilteredMetadata, Metadata};
use servo_url::ServoUrl;
use std::mem;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
struct FetchContext {
fetch_promise: Option<TrustedPromise>,
response_object: Trusted<Response>,
body: Vec<u8>,
}
/// An RAII fetch canceller object. By default it holds no canceller; asking
/// it for a cancellation receiver to send to Fetch makes it store the
/// corresponding sender. You can then cancel the fetch manually, or let it
/// be cancelled on Drop.
#[derive(Default, JSTraceable, MallocSizeOf)]
pub struct FetchCanceller {
#[ignore_malloc_size_of = "channels are hard"]
cancel_chan: Option<ipc::IpcSender<()>>,
}
impl FetchCanceller {
/// Create an empty FetchCanceller
pub fn new() -> Self {
Default::default()
}
/// Obtain an IpcReceiver to send over to Fetch, and initialize
/// the internal sender
pub fn initialize(&mut self) -> ipc::IpcReceiver<()> {
// cancel previous fetch
self.cancel();
let (cancel_sender, cancel_receiver) = ipc::channel().unwrap();
self.cancel_chan = Some(cancel_sender);
cancel_receiver
}
/// Cancel a fetch if it is ongoing
pub fn cancel(&mut self) {
if let Some(chan) = self.cancel_chan.take() {
// stop trying to make fetch happen
// it's not going to happen
// The receiver will be destroyed if the request has already completed;
// so we throw away the error. Cancellation is a courtesy call,
// we don't actually care if the other side heard.
let _ = chan.send(());
}
}
/// Use this if you don't want it to send a cancellation request
/// on drop (e.g. if the fetch completes)
pub fn ignore(&mut self) {
let _ = self.cancel_chan.take();
}
}
impl Drop for FetchCanceller {
fn drop(&mut self) {
self.cancel()
}
}
fn from_referrer_to_referrer_url(request: &NetTraitsRequest) -> Option<ServoUrl> {
request.referrer.to_url().map(|url| url.clone())
}
fn request_init_from_request(request: NetTraitsRequest) -> NetTraitsRequestInit {
NetTraitsRequestInit {
method: request.method.clone(),
url: request.url(),
headers: request.headers.clone(),
unsafe_request: request.unsafe_request,
body: request.body.clone(),
destination: request.destination,
synchronous: request.synchronous,
mode: request.mode.clone(),
use_cors_preflight: request.use_cors_preflight,
credentials_mode: request.credentials_mode,
use_url_credentials: request.use_url_credentials,
origin: GlobalScope::current()
.expect("No current global object")
.origin()
.immutable()
.clone(),
referrer_url: from_referrer_to_referrer_url(&request),
referrer_policy: request.referrer_policy,
pipeline_id: request.pipeline_id,
redirect_mode: request.redirect_mode,
cache_mode: request.cache_mode,
..NetTraitsRequestInit::default()
}
}
// https://fetch.spec.whatwg.org/#fetch-method
#[allow(unrooted_must_root)]
pub fn Fetch(
global: &GlobalScope,
input: RequestInfo,
init: RootedTraceableBox<RequestInit>,
) -> Rc<Promise> {
let core_resource_thread = global.core_resource_thread();
// Step 1
let promise = Promise::new(global);
let response = Response::new(global);
// Step 2
let request = match Request::Constructor(global, input, init) {
Err(e) => {
promise.reject_error(e);
return promise;
},
Ok(r) => r.get_request(),
};
let mut request_init = request_init_from_request(request);
// Step 3
if global.downcast::<ServiceWorkerGlobalScope>().is_some() {
request_init.service_workers_mode = ServiceWorkersMode::Foreign;
}
// Step 4
response.Headers().set_guard(Guard::Immutable);
// Step 5
let (action_sender, action_receiver) = ipc::channel().unwrap();
let fetch_context = Arc::new(Mutex::new(FetchContext {
fetch_promise: Some(TrustedPromise::new(promise.clone())),
response_object: Trusted::new(&*response),
body: vec![],
}));
let listener = NetworkListener {
context: fetch_context,
task_source: global.networking_task_source(),
canceller: Some(global.task_canceller(TaskSourceName::Networking)),
};
ROUTER.add_route(
action_receiver.to_opaque(),
Box::new(move |message| {
listener.notify_fetch(message.to().unwrap());
}),
);
core_resource_thread
.send(NetTraitsFetch(
request_init,
FetchChannels::ResponseMsg(action_sender, None),
))
.unwrap();
promise
}
impl PreInvoke for FetchContext {}
impl FetchResponseListener for FetchContext {
fn process_request_body(&mut self) {
// TODO
}
fn process_request_eof(&mut self) {
// TODO
}
#[allow(unrooted_must_root)]
fn process_response(&mut self, fetch_metadata: Result<FetchMetadata, NetworkError>) {
let promise = self
.fetch_promise
.take()
.expect("fetch promise is missing")
.root();
// A JSAutoCompartment for the promise's compartment must be entered
// manually here; otherwise, Servo will crash.
let promise_cx = promise.global().get_cx();
let _ac = JSAutoCompartment::new(promise_cx, promise.reflector().get_jsobject().get());
match fetch_metadata {
// Step 4.1
Err(_) =>
|
,
// Step 4.2
Ok(metadata) => match metadata {
FetchMetadata::Unfiltered(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object
.root()
.set_type(DOMResponseType::Default);
},
FetchMetadata::Filtered { filtered, .. } => match filtered {
FilteredMetadata::Basic(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object.root().set_type(DOMResponseType::Basic);
},
FilteredMetadata::Cors(m) => {
fill_headers_with_metadata(self.response_object.root(), m);
self.response_object.root().set_type(DOMResponseType::Cors);
},
FilteredMetadata::Opaque => self
.response_object
.root()
.set_type(DOMResponseType::Opaque),
FilteredMetadata::OpaqueRedirect => self
.response_object
.root()
.set_type(DOMResponseType::Opaqueredirect),
},
},
}
// Step 4.3
promise.resolve_native(&self.response_object.root());
self.fetch_promise = Some(TrustedPromise::new(promise));
}
fn process_response_chunk(&mut self, mut chunk: Vec<u8>) {
self.body.append(&mut chunk);
}
fn process_response_eof(&mut self, _response: Result<(), NetworkError>) {
let response = self.response_object.root();
let global = response.global();
let cx = global.get_cx();
let _ac = JSAutoCompartment::new(cx, global.reflector().get_jsobject().get());
response.finish(mem::replace(&mut self.body, vec![]));
// TODO
//... trailerObject is not supported in Servo yet.
}
}
fn fill_headers_with_metadata(r: DomRoot<Response>, m: Metadata) {
r.set_headers(m.headers);
r.set_raw_status(m.status);
r.set_final_url(m.final_url);
}
|
{
promise.reject_error(Error::Type("Network error occurred".to_string()));
self.fetch_promise = Some(TrustedPromise::new(promise));
self.response_object.root().set_type(DOMResponseType::Error);
return;
}
|
conditional_block
|
details_window.rs
|
// Copyright 2015 Virgil Dupras
//
// This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
// which should be included with this package. The terms are also available at
// http://www.gnu.org/licenses/gpl-3.0.html
//
use rustty::{CellAccessor, Cell, HasSize};
use rustty::ui::{Painter, Widget, Alignable, HorizontalAlign, VerticalAlign};
use hexpos::Pos;
use map::LiveMap;
pub struct
|
{
window: Widget,
}
impl DetailsWindow {
pub fn new(parent: &HasSize) -> DetailsWindow {
let mut window = Widget::new(16, 7);
window.align(parent, HorizontalAlign::Right, VerticalAlign::Bottom, 0);
DetailsWindow { window: window }
}
pub fn draw_into(&self, cells: &mut CellAccessor) {
self.window.draw_into(cells);
}
pub fn update(&mut self, selected_pos: Option<Pos>, map: &LiveMap, turn: u16, movemode: &str) {
let turn_line = format!("Turn {}", turn);
let (terrain_name, maybe_unit_id) = match selected_pos {
Some(pos) => {
(map.terrain().get_terrain(pos).name().to_owned(),
map.units().unit_at_pos(pos))
}
None => ("".to_owned(), None),
};
let (unit_name, unit_stats) = if let Some(uid) = maybe_unit_id {
let unit = map.units().get(uid);
(unit.name(),
format!("MV {} / HP {}", unit.movements(), unit.hp()))
} else {
("", "".to_owned())
};
let lines = [unit_name, &unit_stats[..], &terrain_name[..], &turn_line[..], movemode];
self.window.clear(Cell::default());
for (index, line) in lines.iter().enumerate() {
self.window.printline(2, index + 1, line);
}
self.window.draw_box();
}
}
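// A minimal usage sketch, assuming a rustty Terminal as the parent (it
// provides both HasSize and CellAccessor); `term`, `live_map`, and `pos`
// are hypothetical values:
//
//     let mut window = DetailsWindow::new(&term);
//     window.update(Some(pos), &live_map, 1, "normal");
//     window.draw_into(&mut term);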
|
DetailsWindow
|
identifier_name
|
details_window.rs
|
// Copyright 2015 Virgil Dupras
//
// This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
// which should be included with this package. The terms are also available at
// http://www.gnu.org/licenses/gpl-3.0.html
//
use rustty::{CellAccessor, Cell, HasSize};
use rustty::ui::{Painter, Widget, Alignable, HorizontalAlign, VerticalAlign};
use hexpos::Pos;
use map::LiveMap;
pub struct DetailsWindow {
window: Widget,
}
impl DetailsWindow {
pub fn new(parent: &HasSize) -> DetailsWindow
|
pub fn draw_into(&self, cells: &mut CellAccessor) {
self.window.draw_into(cells);
}
pub fn update(&mut self, selected_pos: Option<Pos>, map: &LiveMap, turn: u16, movemode: &str) {
let turn_line = format!("Turn {}", turn);
let (terrain_name, maybe_unit_id) = match selected_pos {
Some(pos) => {
(map.terrain().get_terrain(pos).name().to_owned(),
map.units().unit_at_pos(pos))
}
None => ("".to_owned(), None),
};
let (unit_name, unit_stats) = if let Some(uid) = maybe_unit_id {
let unit = map.units().get(uid);
(unit.name(),
format!("MV {} / HP {}", unit.movements(), unit.hp()))
} else {
("", "".to_owned())
};
let lines = [unit_name, &unit_stats[..], &terrain_name[..], &turn_line[..], movemode];
self.window.clear(Cell::default());
for (index, line) in lines.iter().enumerate() {
self.window.printline(2, index + 1, line);
}
self.window.draw_box();
}
}
|
{
let mut window = Widget::new(16, 7);
window.align(parent, HorizontalAlign::Right, VerticalAlign::Bottom, 0);
DetailsWindow { window: window }
}
|
identifier_body
|
details_window.rs
|
// Copyright 2015 Virgil Dupras
//
// This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
// which should be included with this package. The terms are also available at
// http://www.gnu.org/licenses/gpl-3.0.html
//
use rustty::{CellAccessor, Cell, HasSize};
use rustty::ui::{Painter, Widget, Alignable, HorizontalAlign, VerticalAlign};
use hexpos::Pos;
use map::LiveMap;
pub struct DetailsWindow {
window: Widget,
}
impl DetailsWindow {
pub fn new(parent: &HasSize) -> DetailsWindow {
let mut window = Widget::new(16, 7);
window.align(parent, HorizontalAlign::Right, VerticalAlign::Bottom, 0);
DetailsWindow { window: window }
}
pub fn draw_into(&self, cells: &mut CellAccessor) {
self.window.draw_into(cells);
}
pub fn update(&mut self, selected_pos: Option<Pos>, map: &LiveMap, turn: u16, movemode: &str) {
let turn_line = format!("Turn {}", turn);
let (terrain_name, maybe_unit_id) = match selected_pos {
Some(pos) => {
(map.terrain().get_terrain(pos).name().to_owned(),
map.units().unit_at_pos(pos))
}
None => ("".to_owned(), None),
};
let (unit_name, unit_stats) = if let Some(uid) = maybe_unit_id {
let unit = map.units().get(uid);
|
("", "".to_owned())
};
let lines = [unit_name, &unit_stats[..], &terrain_name[..], &turn_line[..], movemode];
self.window.clear(Cell::default());
for (index, line) in lines.iter().enumerate() {
self.window.printline(2, index + 1, line);
}
self.window.draw_box();
}
}
|
(unit.name(),
format!("MV {} / HP {}", unit.movements(), unit.hp()))
} else {
|
random_line_split
|
column.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<%helpers:shorthand name="columns" sub_properties="column-count column-width" experimental="True"
extra_prefixes="moz" spec="https://drafts.csswg.org/css-multicol/#propdef-columns">
use properties::longhands::{column_count, column_width};
pub fn parse_value<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Longhands, ParseError<'i>> {
let mut column_count = None;
let mut column_width = None;
let mut autos = 0;
loop {
if input.try(|input| input.expect_ident_matching("auto")).is_ok() {
// Leave the options as None; 'auto' is the initial value.
autos += 1;
continue
}
if column_count.is_none() {
if let Ok(value) = input.try(|input| column_count::parse(context, input)) {
column_count = Some(value);
continue
}
}
if column_width.is_none() {
if let Ok(value) = input.try(|input| column_width::parse(context, input)) {
column_width = Some(value);
continue
}
}
break
}
let values = autos + column_count.iter().len() + column_width.iter().len();
if values == 0 || values > 2 {
Err(StyleParseError::UnspecifiedError.into())
} else {
Ok(expanded! {
column_count: unwrap_or_initial!(column_count),
column_width: unwrap_or_initial!(column_width),
})
}
}
impl<'a> ToCss for LonghandsToSerialize<'a> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(self.column_width.to_css(dest));
try!(write!(dest, " "));
self.column_count.to_css(dest)
}
}
</%helpers:shorthand>
<%helpers:shorthand name="column-rule" products="gecko" extra_prefixes="moz"
sub_properties="column-rule-width column-rule-style column-rule-color"
spec="https://drafts.csswg.org/css-multicol/#propdef-column-rule">
use properties::longhands::{column_rule_width, column_rule_style};
use properties::longhands::column_rule_color;
pub fn parse_value<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Longhands, ParseError<'i>> {
% for name in "width style color".split():
let mut column_rule_${name} = None;
% endfor
let mut any = false;
loop {
% for name in "width style color".split():
if column_rule_${name}.is_none() {
if let Ok(value) = input.try(|input|
column_rule_${name}::parse(context, input)) {
column_rule_${name} = Some(value);
any = true;
continue
}
}
% endfor
break
}
if any {
Ok(expanded! {
column_rule_width: unwrap_or_initial!(column_rule_width),
column_rule_style: unwrap_or_initial!(column_rule_style),
column_rule_color: unwrap_or_initial!(column_rule_color),
})
} else {
Err(StyleParseError::UnspecifiedError.into())
}
}
impl<'a> ToCss for LonghandsToSerialize<'a> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write
|
}
</%helpers:shorthand>
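// Illustrative inputs for the shorthands above (assumed, not from the
// template): `columns: auto`, `columns: 2`, and `columns: 12em auto` each
// supply one or two component values and parse; an empty value or three
// components such as `columns: auto auto auto` fails the
// `values == 0 || values > 2` check and is rejected.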
|
{
self.column_rule_width.to_css(dest)?;
dest.write_str(" ")?;
self.column_rule_style.to_css(dest)?;
dest.write_str(" ")?;
self.column_rule_color.to_css(dest)
}
|
identifier_body
|
column.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<%helpers:shorthand name="columns" sub_properties="column-count column-width" experimental="True"
extra_prefixes="moz" spec="https://drafts.csswg.org/css-multicol/#propdef-columns">
use properties::longhands::{column_count, column_width};
pub fn parse_value<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Longhands, ParseError<'i>> {
let mut column_count = None;
let mut column_width = None;
let mut autos = 0;
loop {
if input.try(|input| input.expect_ident_matching("auto")).is_ok() {
// Leave the options as None; 'auto' is the initial value.
autos += 1;
continue
}
if column_count.is_none() {
if let Ok(value) = input.try(|input| column_count::parse(context, input)) {
column_count = Some(value);
continue
}
}
if column_width.is_none() {
if let Ok(value) = input.try(|input| column_width::parse(context, input)) {
column_width = Some(value);
continue
}
}
break
}
let values = autos + column_count.iter().len() + column_width.iter().len();
if values == 0 || values > 2 {
Err(StyleParseError::UnspecifiedError.into())
} else {
Ok(expanded! {
column_count: unwrap_or_initial!(column_count),
column_width: unwrap_or_initial!(column_width),
})
}
}
impl<'a> ToCss for LonghandsToSerialize<'a> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(self.column_width.to_css(dest));
try!(write!(dest, " "));
self.column_count.to_css(dest)
}
}
</%helpers:shorthand>
<%helpers:shorthand name="column-rule" products="gecko" extra_prefixes="moz"
sub_properties="column-rule-width column-rule-style column-rule-color"
spec="https://drafts.csswg.org/css-multicol/#propdef-column-rule">
use properties::longhands::{column_rule_width, column_rule_style};
use properties::longhands::column_rule_color;
pub fn
|
<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Longhands, ParseError<'i>> {
% for name in "width style color".split():
let mut column_rule_${name} = None;
% endfor
let mut any = false;
loop {
% for name in "width style color".split():
if column_rule_${name}.is_none() {
if let Ok(value) = input.try(|input|
column_rule_${name}::parse(context, input)) {
column_rule_${name} = Some(value);
any = true;
continue
}
}
% endfor
break
}
if any {
Ok(expanded! {
column_rule_width: unwrap_or_initial!(column_rule_width),
column_rule_style: unwrap_or_initial!(column_rule_style),
column_rule_color: unwrap_or_initial!(column_rule_color),
})
} else {
Err(StyleParseError::UnspecifiedError.into())
}
}
impl<'a> ToCss for LonghandsToSerialize<'a> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.column_rule_width.to_css(dest)?;
dest.write_str(" ")?;
self.column_rule_style.to_css(dest)?;
dest.write_str(" ")?;
self.column_rule_color.to_css(dest)
}
}
</%helpers:shorthand>
|
parse_value
|
identifier_name
|
column.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<%helpers:shorthand name="columns" sub_properties="column-count column-width" experimental="True"
extra_prefixes="moz" spec="https://drafts.csswg.org/css-multicol/#propdef-columns">
use properties::longhands::{column_count, column_width};
pub fn parse_value<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Longhands, ParseError<'i>> {
let mut column_count = None;
let mut column_width = None;
let mut autos = 0;
|
// Leave the options as None; 'auto' is the initial value.
autos += 1;
continue
}
if column_count.is_none() {
if let Ok(value) = input.try(|input| column_count::parse(context, input)) {
column_count = Some(value);
continue
}
}
if column_width.is_none() {
if let Ok(value) = input.try(|input| column_width::parse(context, input)) {
column_width = Some(value);
continue
}
}
break
}
let values = autos + column_count.iter().len() + column_width.iter().len();
if values == 0 || values > 2 {
Err(StyleParseError::UnspecifiedError.into())
} else {
Ok(expanded! {
column_count: unwrap_or_initial!(column_count),
column_width: unwrap_or_initial!(column_width),
})
}
}
impl<'a> ToCss for LonghandsToSerialize<'a> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(self.column_width.to_css(dest));
try!(write!(dest, " "));
self.column_count.to_css(dest)
}
}
</%helpers:shorthand>
<%helpers:shorthand name="column-rule" products="gecko" extra_prefixes="moz"
sub_properties="column-rule-width column-rule-style column-rule-color"
spec="https://drafts.csswg.org/css-multicol/#propdef-column-rule">
use properties::longhands::{column_rule_width, column_rule_style};
use properties::longhands::column_rule_color;
pub fn parse_value<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Longhands, ParseError<'i>> {
% for name in "width style color".split():
let mut column_rule_${name} = None;
% endfor
let mut any = false;
loop {
% for name in "width style color".split():
if column_rule_${name}.is_none() {
if let Ok(value) = input.try(|input|
column_rule_${name}::parse(context, input)) {
column_rule_${name} = Some(value);
any = true;
continue
}
}
% endfor
break
}
if any {
Ok(expanded! {
column_rule_width: unwrap_or_initial!(column_rule_width),
column_rule_style: unwrap_or_initial!(column_rule_style),
column_rule_color: unwrap_or_initial!(column_rule_color),
})
} else {
Err(StyleParseError::UnspecifiedError.into())
}
}
impl<'a> ToCss for LonghandsToSerialize<'a> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.column_rule_width.to_css(dest)?;
dest.write_str(" ")?;
self.column_rule_style.to_css(dest)?;
dest.write_str(" ")?;
self.column_rule_color.to_css(dest)
}
}
</%helpers:shorthand>
|
loop {
if input.try(|input| input.expect_ident_matching("auto")).is_ok() {
|
random_line_split
|
column.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<%helpers:shorthand name="columns" sub_properties="column-count column-width" experimental="True"
extra_prefixes="moz" spec="https://drafts.csswg.org/css-multicol/#propdef-columns">
use properties::longhands::{column_count, column_width};
pub fn parse_value<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Longhands, ParseError<'i>> {
let mut column_count = None;
let mut column_width = None;
let mut autos = 0;
loop {
if input.try(|input| input.expect_ident_matching("auto")).is_ok() {
// Leave the options as None; 'auto' is the initial value.
autos += 1;
continue
}
if column_count.is_none() {
if let Ok(value) = input.try(|input| column_count::parse(context, input)) {
column_count = Some(value);
continue
}
}
if column_width.is_none() {
if let Ok(value) = input.try(|input| column_width::parse(context, input)) {
column_width = Some(value);
continue
}
}
break
}
let values = autos + column_count.iter().len() + column_width.iter().len();
if values == 0 || values > 2 {
Err(StyleParseError::UnspecifiedError.into())
} else
|
}
impl<'a> ToCss for LonghandsToSerialize<'a> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(self.column_width.to_css(dest));
try!(write!(dest, " "));
self.column_count.to_css(dest)
}
}
</%helpers:shorthand>
<%helpers:shorthand name="column-rule" products="gecko" extra_prefixes="moz"
sub_properties="column-rule-width column-rule-style column-rule-color"
spec="https://drafts.csswg.org/css-multicol/#propdef-column-rule">
use properties::longhands::{column_rule_width, column_rule_style};
use properties::longhands::column_rule_color;
pub fn parse_value<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Longhands, ParseError<'i>> {
% for name in "width style color".split():
let mut column_rule_${name} = None;
% endfor
let mut any = false;
loop {
% for name in "width style color".split():
if column_rule_${name}.is_none() {
if let Ok(value) = input.try(|input|
column_rule_${name}::parse(context, input)) {
column_rule_${name} = Some(value);
any = true;
continue
}
}
% endfor
break
}
if any {
Ok(expanded! {
column_rule_width: unwrap_or_initial!(column_rule_width),
column_rule_style: unwrap_or_initial!(column_rule_style),
column_rule_color: unwrap_or_initial!(column_rule_color),
})
} else {
Err(StyleParseError::UnspecifiedError.into())
}
}
impl<'a> ToCss for LonghandsToSerialize<'a> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.column_rule_width.to_css(dest)?;
dest.write_str(" ")?;
self.column_rule_style.to_css(dest)?;
dest.write_str(" ")?;
self.column_rule_color.to_css(dest)
}
}
</%helpers:shorthand>
|
{
Ok(expanded! {
column_count: unwrap_or_initial!(column_count),
column_width: unwrap_or_initial!(column_width),
})
}
|
conditional_block
|
mod.rs
|
//! Various code related to computing outlives relations.
pub mod env;
pub mod obligations;
pub mod verify;
use rustc_middle::traits::query::OutlivesBound;
use rustc_middle::ty;
|
param_env: ty::ParamEnv<'tcx>,
) -> impl Iterator<Item = OutlivesBound<'tcx>> + 'tcx {
debug!("explicit_outlives_bounds()");
param_env
.caller_bounds()
.into_iter()
.map(ty::Predicate::kind)
.filter_map(ty::Binder::no_bound_vars)
.filter_map(move |kind| match kind {
ty::PredicateKind::Projection(..)
| ty::PredicateKind::Trait(..)
| ty::PredicateKind::Coerce(..)
| ty::PredicateKind::Subtype(..)
| ty::PredicateKind::WellFormed(..)
| ty::PredicateKind::ObjectSafe(..)
| ty::PredicateKind::ClosureKind(..)
| ty::PredicateKind::TypeOutlives(..)
| ty::PredicateKind::ConstEvaluatable(..)
| ty::PredicateKind::ConstEquate(..)
| ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(r_a, r_b)) => {
Some(OutlivesBound::RegionSubRegion(r_b, r_a))
}
})
}
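// Reading the filter above: of all the caller bounds, only region-outlives
// predicates yield bounds, and the argument order flips. A where-clause
// `'a: 'b` arrives as OutlivesPredicate(r_a, r_b) and becomes
// OutlivesBound::RegionSubRegion(r_b, r_a), i.e. 'b is a subregion of 'a.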
|
pub fn explicit_outlives_bounds<'tcx>(
|
random_line_split
|
mod.rs
|
//! Various code related to computing outlives relations.
pub mod env;
pub mod obligations;
pub mod verify;
use rustc_middle::traits::query::OutlivesBound;
use rustc_middle::ty;
pub fn explicit_outlives_bounds<'tcx>(
param_env: ty::ParamEnv<'tcx>,
) -> impl Iterator<Item = OutlivesBound<'tcx>> + 'tcx
|
Some(OutlivesBound::RegionSubRegion(r_b, r_a))
}
})
}
|
{
debug!("explicit_outlives_bounds()");
param_env
.caller_bounds()
.into_iter()
.map(ty::Predicate::kind)
.filter_map(ty::Binder::no_bound_vars)
.filter_map(move |kind| match kind {
ty::PredicateKind::Projection(..)
| ty::PredicateKind::Trait(..)
| ty::PredicateKind::Coerce(..)
| ty::PredicateKind::Subtype(..)
| ty::PredicateKind::WellFormed(..)
| ty::PredicateKind::ObjectSafe(..)
| ty::PredicateKind::ClosureKind(..)
| ty::PredicateKind::TypeOutlives(..)
| ty::PredicateKind::ConstEvaluatable(..)
| ty::PredicateKind::ConstEquate(..)
| ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(r_a, r_b)) => {
|
identifier_body
|
mod.rs
|
//! Various code related to computing outlives relations.
pub mod env;
pub mod obligations;
pub mod verify;
use rustc_middle::traits::query::OutlivesBound;
use rustc_middle::ty;
pub fn
|
<'tcx>(
param_env: ty::ParamEnv<'tcx>,
) -> impl Iterator<Item = OutlivesBound<'tcx>> + 'tcx {
debug!("explicit_outlives_bounds()");
param_env
.caller_bounds()
.into_iter()
.map(ty::Predicate::kind)
.filter_map(ty::Binder::no_bound_vars)
.filter_map(move |kind| match kind {
ty::PredicateKind::Projection(..)
| ty::PredicateKind::Trait(..)
| ty::PredicateKind::Coerce(..)
| ty::PredicateKind::Subtype(..)
| ty::PredicateKind::WellFormed(..)
| ty::PredicateKind::ObjectSafe(..)
| ty::PredicateKind::ClosureKind(..)
| ty::PredicateKind::TypeOutlives(..)
| ty::PredicateKind::ConstEvaluatable(..)
| ty::PredicateKind::ConstEquate(..)
| ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(r_a, r_b)) => {
Some(OutlivesBound::RegionSubRegion(r_b, r_a))
}
})
}
|
explicit_outlives_bounds
|
identifier_name
|
files.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::num::NonZeroU64;
use anyhow::{format_err, Context, Error};
use async_trait::async_trait;
use bytes::Bytes;
use context::PerfCounterType;
use futures::{stream, Stream, StreamExt, TryStreamExt};
use gotham::state::{FromState, State};
use gotham_derive::{StateData, StaticResponseExtender};
use hyper::Body;
use serde::Deserialize;
use std::str::FromStr;
use edenapi_types::{
wire::ToWire, AnyFileContentId, AnyId, Batch, FileAttributes, FileAuxData, FileContent,
FileContentTokenMetadata, FileEntry, FileRequest, FileResponse, FileSpec, ServerError,
UploadHgFilenodeRequest, UploadToken, UploadTokenMetadata, UploadTokensResponse,
};
use ephemeral_blobstore::BubbleId;
use gotham_ext::{error::HttpError, response::TryIntoResponse};
use mercurial_types::{HgFileNodeId, HgNodeHash};
use mononoke_api_hg::{HgDataContext, HgDataId, HgRepoContext};
use rate_limiting::Metric;
use types::Key;
use crate::context::ServerContext;
use crate::errors::ErrorKind;
use crate::middleware::RequestContext;
use crate::utils::{cbor_stream_filtered_errors, get_repo};
use super::{EdenApiHandler, EdenApiMethod, HandlerInfo, HandlerResult};
/// XXX: This number was chosen arbitrarily.
const MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST: usize = 10;
const MAX_CONCURRENT_UPLOAD_FILENODES_PER_REQUEST: usize = 1000;
#[derive(Debug, Deserialize, StateData, StaticResponseExtender)]
pub struct UploadFileParams {
repo: String,
idtype: String,
id: String,
}
#[derive(Debug, Deserialize, StateData, StaticResponseExtender)]
pub struct UploadFileQueryString {
bubble_id: Option<NonZeroU64>,
content_size: u64,
}
/// Fetch the content of the files requested by the client.
pub struct FilesHandler;
#[async_trait]
impl EdenApiHandler for FilesHandler {
type Request = FileRequest;
type Response = FileEntry;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::Files;
const ENDPOINT: &'static str = "/files";
fn sampling_rate(request: &Self::Request) -> NonZeroU64 {
// Sample trivial requests
if request.keys.len() + request.reqs.len() == 1 {
nonzero_ext::nonzero!(100u64)
} else {
nonzero_ext::nonzero!(1u64)
}
}
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let ctx = repo.ctx().clone();
let len = request.keys.len() + request.reqs.len();
let reqs = request
.keys
.into_iter()
.map(|key| FileSpec {
key,
attrs: FileAttributes {
content: true,
aux_data: false,
},
})
.chain(request.reqs.into_iter());
ctx.perf_counters()
.add_to_counter(PerfCounterType::EdenapiFiles, len as i64);
let fetches = reqs.map(move |FileSpec { key, attrs }| fetch_file(repo.clone(), key, attrs));
Ok(stream::iter(fetches)
.buffer_unordered(MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST)
.inspect_ok(move |_| {
ctx.session().bump_load(Metric::GetpackFiles, 1.0);
})
.boxed())
}
}
/// Fetch the content of the files requested by the client.
pub struct Files2Handler;
#[async_trait]
impl EdenApiHandler for Files2Handler {
type Request = FileRequest;
type Response = FileResponse;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::Files2;
const ENDPOINT: &'static str = "/files2";
fn sampling_rate(request: &Self::Request) -> NonZeroU64 {
// Sample trivial requests
if request.keys.len() + request.reqs.len() == 1 {
nonzero_ext::nonzero!(100u64)
} else {
nonzero_ext::nonzero!(1u64)
}
}
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response>
|
Ok(stream::iter(fetches)
.buffer_unordered(MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST)
.inspect(move |response| {
if let Ok(result) = &response {
if result.result.is_ok() {
ctx.session().bump_load(Metric::GetpackFiles, 1.0);
}
}
})
.boxed())
}
}
async fn fetch_file_response(
repo: HgRepoContext,
key: Key,
attrs: FileAttributes,
) -> Result<FileResponse, Error> {
let result = fetch_file(repo, key.clone(), attrs)
.await
.map_err(|e| ServerError::generic(format!("{}", e)));
Ok(FileResponse { key, result })
}
/// Fetch the requested file for a single key.
/// Note that this function consumes the repo context in order
/// to construct a file context for the requested blob.
async fn fetch_file(
repo: HgRepoContext,
key: Key,
attrs: FileAttributes,
) -> Result<FileEntry, Error> {
let id = HgFileNodeId::from_node_hash(HgNodeHash::from(key.hgid));
let ctx = id
.context(repo)
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?
.with_context(|| ErrorKind::KeyDoesNotExist(key.clone()))?;
let parents = ctx.hg_parents().into();
let mut file = FileEntry::new(key.clone(), parents);
if attrs.content {
let (data, metadata) = ctx
.content()
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?;
file = file.with_content(FileContent {
hg_file_blob: data,
metadata,
});
}
if attrs.aux_data {
let content_metadata = ctx
.content_metadata()
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?;
file = file.with_aux_data(FileAuxData {
total_size: content_metadata.total_size,
content_id: content_metadata.content_id.into(),
sha1: content_metadata.sha1.into(),
sha256: content_metadata.sha256.into(),
});
}
Ok(file)
}
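// A hedged convenience sketch (not part of the original API): fetch only
// the file content for a key by reusing fetch_file with fixed attributes.
async fn fetch_file_content_only(repo: HgRepoContext, key: Key) -> Result<FileEntry, Error> {
    // Request the hg file blob itself and skip the aux-data lookups.
    let attrs = FileAttributes {
        content: true,
        aux_data: false,
    };
    fetch_file(repo, key, attrs).await
}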
/// Generate an upload token for already uploaded content
async fn generate_upload_token(
_repo: HgRepoContext,
id: AnyFileContentId,
content_size: u64,
bubble_id: Option<NonZeroU64>,
) -> Result<UploadToken, Error> {
// For now, return a fake token.
Ok(UploadToken::new_fake_token_with_metadata(
AnyId::AnyFileContentId(id),
bubble_id,
UploadTokenMetadata::FileContentTokenMetadata(FileContentTokenMetadata { content_size }),
))
}
/// Upload content of a file
async fn store_file(
repo: HgRepoContext,
id: AnyFileContentId,
data: impl Stream<Item = Result<Bytes, Error>> + Send,
content_size: u64,
bubble_id: Option<BubbleId>,
) -> Result<(), Error> {
repo.store_file(id, content_size, data, bubble_id).await?;
Ok(())
}
/// Upload content of a file requested by the client.
pub async fn upload_file(state: &mut State) -> Result<impl TryIntoResponse, HttpError> {
let params = UploadFileParams::take_from(state);
let query_string = UploadFileQueryString::take_from(state);
state.put(HandlerInfo::new(&params.repo, EdenApiMethod::UploadFile));
let rctx = RequestContext::borrow_from(state).clone();
let sctx = ServerContext::borrow_from(state);
let repo = get_repo(&sctx, &rctx, &params.repo, None).await?;
let id = AnyFileContentId::from_str(&format!("{}/{}", &params.idtype, &params.id))
.map_err(HttpError::e400)?;
let body = Body::take_from(state).map_err(Error::from);
let content_size = query_string.content_size;
store_file(
repo.clone(),
id.clone(),
body,
content_size,
query_string.bubble_id.map(BubbleId::new),
)
.await
.map_err(HttpError::e500)?;
let token = generate_upload_token(repo, id, content_size, query_string.bubble_id)
.await
.map(|v| v.to_wire());
Ok(cbor_stream_filtered_errors(stream::iter(vec![token])))
}
/// Store the content of a single HgFilenode
async fn store_hg_filenode(
repo: HgRepoContext,
item: UploadHgFilenodeRequest,
) -> Result<UploadTokensResponse, Error> {
// TODO(liubovd): validate signature of the upload token (item.token) and
// return 'ErrorKind::UploadHgFilenodeRequestInvalidToken' if it's invalid.
// This will be added later; for now, assume tokens are always valid.
let node_id = item.data.node_id;
let token = item.data.file_content_upload_token;
let filenode: HgFileNodeId = HgFileNodeId::from_node_hash(HgNodeHash::from(node_id));
let p1: Option<HgFileNodeId> = item
.data
.parents
.p1()
.cloned()
.map(HgNodeHash::from)
.map(HgFileNodeId::from_node_hash);
let p2: Option<HgFileNodeId> = item
.data
.parents
.p2()
.cloned()
.map(HgNodeHash::from)
.map(HgFileNodeId::from_node_hash);
let any_file_content_id = match token.data.id {
AnyId::AnyFileContentId(id) => Some(id),
_ => None,
}
.ok_or_else(|| {
ErrorKind::UploadHgFilenodeRequestInvalidToken(
node_id.clone(),
"the provided token is not for file content".into(),
)
})?;
let content_id = repo
.convert_file_to_content_id(any_file_content_id, None)
.await?
.ok_or_else(|| format_err!("File from upload token should be present"))?;
let content_size = match token.data.metadata {
Some(UploadTokenMetadata::FileContentTokenMetadata(meta)) => meta.content_size,
_ => repo.fetch_file_content_size(content_id, None).await?,
};
let metadata = Bytes::from(item.data.metadata);
repo.store_hg_filenode(filenode, p1, p2, content_id, content_size, metadata)
.await?;
Ok(UploadTokensResponse {
token: UploadToken::new_fake_token(AnyId::HgFilenodeId(node_id), None),
})
}
/// Upload list of hg filenodes requested by the client (batch request).
pub struct UploadHgFilenodesHandler;
#[async_trait]
impl EdenApiHandler for UploadHgFilenodesHandler {
type Request = Batch<UploadHgFilenodeRequest>;
type Response = UploadTokensResponse;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::UploadHgFilenodes;
const ENDPOINT: &'static str = "/upload/filenodes";
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let tokens = request
.batch
.into_iter()
.map(move |item| store_hg_filenode(repo.clone(), item));
Ok(stream::iter(tokens)
.buffer_unordered(MAX_CONCURRENT_UPLOAD_FILENODES_PER_REQUEST)
.boxed())
}
}
/// Downloads a file given an upload token
pub struct DownloadFileHandler;
#[async_trait]
impl EdenApiHandler for DownloadFileHandler {
type Request = UploadToken;
type Response = Bytes;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::DownloadFile;
const ENDPOINT: &'static str = "/download/file";
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let content = repo
.download_file(request)
.await?
.context("File not found")?;
Ok(content.boxed())
}
}
|
{
let ctx = repo.ctx().clone();
let len = request.keys.len() + request.reqs.len();
let reqs = request
.keys
.into_iter()
.map(|key| FileSpec {
key,
attrs: FileAttributes {
content: true,
aux_data: false,
},
})
.chain(request.reqs.into_iter());
ctx.perf_counters()
.add_to_counter(PerfCounterType::EdenapiFiles, len as i64);
let fetches = reqs.map(move |FileSpec { key, attrs }| {
fetch_file_response(repo.clone(), key.clone(), attrs)
});
|
identifier_body
|
files.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::num::NonZeroU64;
use anyhow::{format_err, Context, Error};
use async_trait::async_trait;
use bytes::Bytes;
use context::PerfCounterType;
use futures::{stream, Stream, StreamExt, TryStreamExt};
use gotham::state::{FromState, State};
use gotham_derive::{StateData, StaticResponseExtender};
use hyper::Body;
use serde::Deserialize;
use std::str::FromStr;
use edenapi_types::{
wire::ToWire, AnyFileContentId, AnyId, Batch, FileAttributes, FileAuxData, FileContent,
FileContentTokenMetadata, FileEntry, FileRequest, FileResponse, FileSpec, ServerError,
UploadHgFilenodeRequest, UploadToken, UploadTokenMetadata, UploadTokensResponse,
};
use ephemeral_blobstore::BubbleId;
use gotham_ext::{error::HttpError, response::TryIntoResponse};
use mercurial_types::{HgFileNodeId, HgNodeHash};
use mononoke_api_hg::{HgDataContext, HgDataId, HgRepoContext};
use rate_limiting::Metric;
use types::Key;
use crate::context::ServerContext;
use crate::errors::ErrorKind;
use crate::middleware::RequestContext;
use crate::utils::{cbor_stream_filtered_errors, get_repo};
use super::{EdenApiHandler, EdenApiMethod, HandlerInfo, HandlerResult};
/// XXX: This number was chosen arbitrarily.
const MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST: usize = 10;
const MAX_CONCURRENT_UPLOAD_FILENODES_PER_REQUEST: usize = 1000;
#[derive(Debug, Deserialize, StateData, StaticResponseExtender)]
pub struct UploadFileParams {
repo: String,
idtype: String,
id: String,
}
#[derive(Debug, Deserialize, StateData, StaticResponseExtender)]
pub struct UploadFileQueryString {
bubble_id: Option<NonZeroU64>,
content_size: u64,
}
/// Fetch the content of the files requested by the client.
pub struct FilesHandler;
#[async_trait]
impl EdenApiHandler for FilesHandler {
type Request = FileRequest;
type Response = FileEntry;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::Files;
const ENDPOINT: &'static str = "/files";
fn sampling_rate(request: &Self::Request) -> NonZeroU64 {
// Sample trivial requests
if request.keys.len() + request.reqs.len() == 1 {
nonzero_ext::nonzero!(100u64)
} else {
nonzero_ext::nonzero!(1u64)
}
}
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let ctx = repo.ctx().clone();
let len = request.keys.len() + request.reqs.len();
let reqs = request
.keys
.into_iter()
.map(|key| FileSpec {
key,
attrs: FileAttributes {
content: true,
aux_data: false,
},
})
.chain(request.reqs.into_iter());
ctx.perf_counters()
.add_to_counter(PerfCounterType::EdenapiFiles, len as i64);
let fetches = reqs.map(move |FileSpec { key, attrs }| fetch_file(repo.clone(), key, attrs));
Ok(stream::iter(fetches)
.buffer_unordered(MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST)
.inspect_ok(move |_| {
ctx.session().bump_load(Metric::GetpackFiles, 1.0);
})
.boxed())
}
}
/// Fetch the content of the files requested by the client.
pub struct Files2Handler;
#[async_trait]
impl EdenApiHandler for Files2Handler {
type Request = FileRequest;
type Response = FileResponse;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::Files2;
const ENDPOINT: &'static str = "/files2";
fn sampling_rate(request: &Self::Request) -> NonZeroU64 {
// Sample trivial requests
if request.keys.len() + request.reqs.len() == 1 {
nonzero_ext::nonzero!(100u64)
} else {
nonzero_ext::nonzero!(1u64)
}
}
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let ctx = repo.ctx().clone();
let len = request.keys.len() + request.reqs.len();
let reqs = request
.keys
.into_iter()
.map(|key| FileSpec {
key,
attrs: FileAttributes {
content: true,
aux_data: false,
},
})
.chain(request.reqs.into_iter());
ctx.perf_counters()
.add_to_counter(PerfCounterType::EdenapiFiles, len as i64);
let fetches = reqs.map(move |FileSpec { key, attrs }| {
fetch_file_response(repo.clone(), key.clone(), attrs)
});
Ok(stream::iter(fetches)
.buffer_unordered(MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST)
.inspect(move |response| {
if let Ok(result) = &response {
if result.result.is_ok() {
ctx.session().bump_load(Metric::GetpackFiles, 1.0);
}
}
})
.boxed())
}
}
async fn fetch_file_response(
repo: HgRepoContext,
key: Key,
attrs: FileAttributes,
) -> Result<FileResponse, Error> {
let result = fetch_file(repo, key.clone(), attrs)
.await
.map_err(|e| ServerError::generic(format!("{}", e)));
Ok(FileResponse { key, result })
}
/// Fetch the requested file for a single key.
/// Note that this function consumes the repo context in order
/// to construct a file context for the requested blob.
async fn fetch_file(
repo: HgRepoContext,
key: Key,
attrs: FileAttributes,
) -> Result<FileEntry, Error> {
let id = HgFileNodeId::from_node_hash(HgNodeHash::from(key.hgid));
let ctx = id
.context(repo)
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?
.with_context(|| ErrorKind::KeyDoesNotExist(key.clone()))?;
let parents = ctx.hg_parents().into();
let mut file = FileEntry::new(key.clone(), parents);
if attrs.content {
let (data, metadata) = ctx
.content()
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?;
file = file.with_content(FileContent {
hg_file_blob: data,
metadata,
});
}
if attrs.aux_data {
let content_metadata = ctx
.content_metadata()
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?;
file = file.with_aux_data(FileAuxData {
total_size: content_metadata.total_size,
content_id: content_metadata.content_id.into(),
sha1: content_metadata.sha1.into(),
sha256: content_metadata.sha256.into(),
});
}
Ok(file)
}
/// Generate an upload token for already uploaded content
async fn generate_upload_token(
_repo: HgRepoContext,
id: AnyFileContentId,
content_size: u64,
bubble_id: Option<NonZeroU64>,
) -> Result<UploadToken, Error> {
// For now, return a fake token.
Ok(UploadToken::new_fake_token_with_metadata(
AnyId::AnyFileContentId(id),
bubble_id,
UploadTokenMetadata::FileContentTokenMetadata(FileContentTokenMetadata { content_size }),
))
}
/// Upload content of a file
async fn store_file(
repo: HgRepoContext,
id: AnyFileContentId,
data: impl Stream<Item = Result<Bytes, Error>> + Send,
content_size: u64,
bubble_id: Option<BubbleId>,
) -> Result<(), Error> {
repo.store_file(id, content_size, data, bubble_id).await?;
Ok(())
}
/// Upload content of a file requested by the client.
pub async fn upload_file(state: &mut State) -> Result<impl TryIntoResponse, HttpError> {
let params = UploadFileParams::take_from(state);
let query_string = UploadFileQueryString::take_from(state);
state.put(HandlerInfo::new(&params.repo, EdenApiMethod::UploadFile));
let rctx = RequestContext::borrow_from(state).clone();
let sctx = ServerContext::borrow_from(state);
let repo = get_repo(&sctx, &rctx, &params.repo, None).await?;
let id = AnyFileContentId::from_str(&format!("{}/{}", &params.idtype, &params.id))
.map_err(HttpError::e400)?;
let body = Body::take_from(state).map_err(Error::from);
let content_size = query_string.content_size;
store_file(
repo.clone(),
id.clone(),
body,
content_size,
query_string.bubble_id.map(BubbleId::new),
)
.await
.map_err(HttpError::e500)?;
let token = generate_upload_token(repo, id, content_size, query_string.bubble_id)
.await
.map(|v| v.to_wire());
Ok(cbor_stream_filtered_errors(stream::iter(vec![token])))
}
/// Store the content of a single HgFilenode
async fn store_hg_filenode(
repo: HgRepoContext,
item: UploadHgFilenodeRequest,
) -> Result<UploadTokensResponse, Error> {
// TODO(liubovd): validate signature of the upload token (item.token) and
// return 'ErrorKind::UploadHgFilenodeRequestInvalidToken' if it's invalid.
// This will be added later; for now, assume tokens are always valid.
let node_id = item.data.node_id;
let token = item.data.file_content_upload_token;
let filenode: HgFileNodeId = HgFileNodeId::from_node_hash(HgNodeHash::from(node_id));
let p1: Option<HgFileNodeId> = item
.data
.parents
.p1()
.cloned()
.map(HgNodeHash::from)
.map(HgFileNodeId::from_node_hash);
let p2: Option<HgFileNodeId> = item
.data
.parents
.p2()
.cloned()
.map(HgNodeHash::from)
.map(HgFileNodeId::from_node_hash);
let any_file_content_id = match token.data.id {
AnyId::AnyFileContentId(id) => Some(id),
_ => None,
}
.ok_or_else(|| {
ErrorKind::UploadHgFilenodeRequestInvalidToken(
node_id.clone(),
"the provided token is not for file content".into(),
)
})?;
let content_id = repo
.convert_file_to_content_id(any_file_content_id, None)
.await?
.ok_or_else(|| format_err!("File from upload token should be present"))?;
let content_size = match token.data.metadata {
Some(UploadTokenMetadata::FileContentTokenMetadata(meta)) => meta.content_size,
_ => repo.fetch_file_content_size(content_id, None).await?,
};
let metadata = Bytes::from(item.data.metadata);
repo.store_hg_filenode(filenode, p1, p2, content_id, content_size, metadata)
.await?;
Ok(UploadTokensResponse {
token: UploadToken::new_fake_token(AnyId::HgFilenodeId(node_id), None),
})
}
/// Upload list of hg filenodes requested by the client (batch request).
pub struct UploadHgFilenodesHandler;
#[async_trait]
impl EdenApiHandler for UploadHgFilenodesHandler {
type Request = Batch<UploadHgFilenodeRequest>;
type Response = UploadTokensResponse;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::UploadHgFilenodes;
|
const ENDPOINT: &'static str = "/upload/filenodes";
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let tokens = request
.batch
.into_iter()
.map(move |item| store_hg_filenode(repo.clone(), item));
Ok(stream::iter(tokens)
.buffer_unordered(MAX_CONCURRENT_UPLOAD_FILENODES_PER_REQUEST)
.boxed())
}
}
/// Downloads a file given an upload token
pub struct DownloadFileHandler;
#[async_trait]
impl EdenApiHandler for DownloadFileHandler {
type Request = UploadToken;
type Response = Bytes;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::DownloadFile;
const ENDPOINT: &'static str = "/download/file";
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let content = repo
.download_file(request)
.await?
.context("File not found")?;
Ok(content.boxed())
}
}
|
random_line_split
|
|
files.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::num::NonZeroU64;
use anyhow::{format_err, Context, Error};
use async_trait::async_trait;
use bytes::Bytes;
use context::PerfCounterType;
use futures::{stream, Stream, StreamExt, TryStreamExt};
use gotham::state::{FromState, State};
use gotham_derive::{StateData, StaticResponseExtender};
use hyper::Body;
use serde::Deserialize;
use std::str::FromStr;
use edenapi_types::{
wire::ToWire, AnyFileContentId, AnyId, Batch, FileAttributes, FileAuxData, FileContent,
FileContentTokenMetadata, FileEntry, FileRequest, FileResponse, FileSpec, ServerError,
UploadHgFilenodeRequest, UploadToken, UploadTokenMetadata, UploadTokensResponse,
};
use ephemeral_blobstore::BubbleId;
use gotham_ext::{error::HttpError, response::TryIntoResponse};
use mercurial_types::{HgFileNodeId, HgNodeHash};
use mononoke_api_hg::{HgDataContext, HgDataId, HgRepoContext};
use rate_limiting::Metric;
use types::Key;
use crate::context::ServerContext;
use crate::errors::ErrorKind;
use crate::middleware::RequestContext;
use crate::utils::{cbor_stream_filtered_errors, get_repo};
use super::{EdenApiHandler, EdenApiMethod, HandlerInfo, HandlerResult};
/// XXX: This number was chosen arbitrarily.
const MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST: usize = 10;
const MAX_CONCURRENT_UPLOAD_FILENODES_PER_REQUEST: usize = 1000;
#[derive(Debug, Deserialize, StateData, StaticResponseExtender)]
pub struct UploadFileParams {
repo: String,
idtype: String,
id: String,
}
#[derive(Debug, Deserialize, StateData, StaticResponseExtender)]
pub struct UploadFileQueryString {
bubble_id: Option<NonZeroU64>,
content_size: u64,
}
/// Fetch the content of the files requested by the client.
pub struct FilesHandler;
#[async_trait]
impl EdenApiHandler for FilesHandler {
type Request = FileRequest;
type Response = FileEntry;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::Files;
const ENDPOINT: &'static str = "/files";
fn sampling_rate(request: &Self::Request) -> NonZeroU64 {
// Sample trivial requests
if request.keys.len() + request.reqs.len() == 1 {
nonzero_ext::nonzero!(100u64)
} else {
nonzero_ext::nonzero!(1u64)
}
}
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let ctx = repo.ctx().clone();
let len = request.keys.len() + request.reqs.len();
let reqs = request
.keys
.into_iter()
.map(|key| FileSpec {
key,
attrs: FileAttributes {
content: true,
aux_data: false,
},
})
.chain(request.reqs.into_iter());
ctx.perf_counters()
.add_to_counter(PerfCounterType::EdenapiFiles, len as i64);
let fetches = reqs.map(move |FileSpec { key, attrs }| fetch_file(repo.clone(), key, attrs));
Ok(stream::iter(fetches)
.buffer_unordered(MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST)
.inspect_ok(move |_| {
ctx.session().bump_load(Metric::GetpackFiles, 1.0);
})
.boxed())
}
}
/// Fetch the content of the files requested by the client.
pub struct Files2Handler;
#[async_trait]
impl EdenApiHandler for Files2Handler {
type Request = FileRequest;
type Response = FileResponse;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::Files2;
const ENDPOINT: &'static str = "/files2";
fn sampling_rate(request: &Self::Request) -> NonZeroU64 {
// Sample trivial requests
if request.keys.len() + request.reqs.len() == 1 {
nonzero_ext::nonzero!(100u64)
} else {
nonzero_ext::nonzero!(1u64)
}
}
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let ctx = repo.ctx().clone();
let len = request.keys.len() + request.reqs.len();
let reqs = request
.keys
.into_iter()
.map(|key| FileSpec {
key,
attrs: FileAttributes {
content: true,
aux_data: false,
},
})
.chain(request.reqs.into_iter());
ctx.perf_counters()
.add_to_counter(PerfCounterType::EdenapiFiles, len as i64);
let fetches = reqs.map(move |FileSpec { key, attrs }| {
fetch_file_response(repo.clone(), key.clone(), attrs)
});
Ok(stream::iter(fetches)
.buffer_unordered(MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST)
.inspect(move |response| {
if let Ok(result) = &response {
if result.result.is_ok()
|
}
})
.boxed())
}
}
async fn fetch_file_response(
repo: HgRepoContext,
key: Key,
attrs: FileAttributes,
) -> Result<FileResponse, Error> {
let result = fetch_file(repo, key.clone(), attrs)
.await
.map_err(|e| ServerError::generic(format!("{}", e)));
Ok(FileResponse { key, result })
}
/// Fetch the requested file for a single key.
/// Note that this function consumes the repo context in order
/// to construct a file context for the requested blob.
async fn fetch_file(
repo: HgRepoContext,
key: Key,
attrs: FileAttributes,
) -> Result<FileEntry, Error> {
let id = HgFileNodeId::from_node_hash(HgNodeHash::from(key.hgid));
let ctx = id
.context(repo)
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?
.with_context(|| ErrorKind::KeyDoesNotExist(key.clone()))?;
let parents = ctx.hg_parents().into();
let mut file = FileEntry::new(key.clone(), parents);
if attrs.content {
let (data, metadata) = ctx
.content()
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?;
file = file.with_content(FileContent {
hg_file_blob: data,
metadata,
});
}
if attrs.aux_data {
let content_metadata = ctx
.content_metadata()
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?;
file = file.with_aux_data(FileAuxData {
total_size: content_metadata.total_size,
content_id: content_metadata.content_id.into(),
sha1: content_metadata.sha1.into(),
sha256: content_metadata.sha256.into(),
});
}
Ok(file)
}
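// A minimal usage sketch (hypothetical caller, not part of the handlers
// above) showing how `FileAttributes` drives `fetch_file`: a content-only
// fetch skips the aux-data lookup entirely.
#[allow(dead_code)]
async fn fetch_content_only(repo: HgRepoContext, key: Key) -> Result<FileEntry, Error> {
    fetch_file(
        repo,
        key,
        FileAttributes {
            content: true,
            aux_data: false,
        },
    )
    .await
}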
/// Generate an upload token for already-uploaded content.
async fn generate_upload_token(
_repo: HgRepoContext,
id: AnyFileContentId,
content_size: u64,
bubble_id: Option<NonZeroU64>,
) -> Result<UploadToken, Error> {
// For now, return a fake token.
Ok(UploadToken::new_fake_token_with_metadata(
AnyId::AnyFileContentId(id),
bubble_id,
UploadTokenMetadata::FileContentTokenMetadata(FileContentTokenMetadata { content_size }),
))
}
/// Upload content of a file
async fn store_file(
repo: HgRepoContext,
id: AnyFileContentId,
data: impl Stream<Item = Result<Bytes, Error>> + Send,
content_size: u64,
bubble_id: Option<BubbleId>,
) -> Result<(), Error> {
repo.store_file(id, content_size, data, bubble_id).await?;
Ok(())
}
/// Upload content of a file requested by the client.
pub async fn upload_file(state: &mut State) -> Result<impl TryIntoResponse, HttpError> {
let params = UploadFileParams::take_from(state);
let query_string = UploadFileQueryString::take_from(state);
state.put(HandlerInfo::new(¶ms.repo, EdenApiMethod::UploadFile));
let rctx = RequestContext::borrow_from(state).clone();
let sctx = ServerContext::borrow_from(state);
let repo = get_repo(&sctx, &rctx, ¶ms.repo, None).await?;
let id = AnyFileContentId::from_str(&format!("{}/{}", ¶ms.idtype, ¶ms.id))
.map_err(HttpError::e400)?;
let body = Body::take_from(state).map_err(Error::from);
let content_size = query_string.content_size;
store_file(
repo.clone(),
id.clone(),
body,
content_size,
query_string.bubble_id.map(BubbleId::new),
)
.await
.map_err(HttpError::e500)?;
let token = generate_upload_token(repo, id, content_size, query_string.bubble_id)
.await
.map(|v| v.to_wire());
Ok(cbor_stream_filtered_errors(stream::iter(vec![token])))
}
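// Sketch of a hypothetical helper mirroring the id handling above: the
// ":idtype" and ":id" path segments are rejoined with '/' and handed to
// `AnyFileContentId::from_str`, so a malformed id surfaces as HTTP 400
// rather than an internal error.
#[allow(dead_code)]
fn parse_upload_id(idtype: &str, id: &str) -> Result<AnyFileContentId, HttpError> {
    AnyFileContentId::from_str(&format!("{}/{}", idtype, id)).map_err(HttpError::e400)
}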
/// Store the content of a single HgFilenode
async fn store_hg_filenode(
repo: HgRepoContext,
item: UploadHgFilenodeRequest,
) -> Result<UploadTokensResponse, Error> {
    // TODO(liubovd): validate the signature of the upload token (item.token) and
    // return 'ErrorKind::UploadHgFilenodeRequestInvalidToken' if it is invalid.
    // This will be added later; for now, assume tokens are always valid.
let node_id = item.data.node_id;
let token = item.data.file_content_upload_token;
let filenode: HgFileNodeId = HgFileNodeId::from_node_hash(HgNodeHash::from(node_id));
let p1: Option<HgFileNodeId> = item
.data
.parents
.p1()
.cloned()
.map(HgNodeHash::from)
.map(HgFileNodeId::from_node_hash);
let p2: Option<HgFileNodeId> = item
.data
.parents
.p2()
.cloned()
.map(HgNodeHash::from)
.map(HgFileNodeId::from_node_hash);
let any_file_content_id = match token.data.id {
AnyId::AnyFileContentId(id) => Some(id),
_ => None,
}
.ok_or_else(|| {
ErrorKind::UploadHgFilenodeRequestInvalidToken(
node_id.clone(),
"the provided token is not for file content".into(),
)
})?;
let content_id = repo
.convert_file_to_content_id(any_file_content_id, None)
.await?
.ok_or_else(|| format_err!("File from upload token should be present"))?;
let content_size = match token.data.metadata {
Some(UploadTokenMetadata::FileContentTokenMetadata(meta)) => meta.content_size,
_ => repo.fetch_file_content_size(content_id, None).await?,
};
let metadata = Bytes::from(item.data.metadata);
repo.store_hg_filenode(filenode, p1, p2, content_id, content_size, metadata)
.await?;
Ok(UploadTokensResponse {
token: UploadToken::new_fake_token(AnyId::HgFilenodeId(node_id), None),
})
}
/// Upload a list of hg filenodes requested by the client (batch request).
pub struct UploadHgFilenodesHandler;
#[async_trait]
impl EdenApiHandler for UploadHgFilenodesHandler {
type Request = Batch<UploadHgFilenodeRequest>;
type Response = UploadTokensResponse;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::UploadHgFilenodes;
const ENDPOINT: &'static str = "/upload/filenodes";
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let tokens = request
.batch
.into_iter()
.map(move |item| store_hg_filenode(repo.clone(), item));
Ok(stream::iter(tokens)
.buffer_unordered(MAX_CONCURRENT_UPLOAD_FILENODES_PER_REQUEST)
.boxed())
}
}
/// Downloads a file given an upload token
pub struct DownloadFileHandler;
#[async_trait]
impl EdenApiHandler for DownloadFileHandler {
type Request = UploadToken;
type Response = Bytes;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::DownloadFile;
const ENDPOINT: &'static str = "/download/file";
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let content = repo
.download_file(request)
.await?
.context("File not found")?;
Ok(content.boxed())
}
}
|
{
ctx.session().bump_load(Metric::GetpackFiles, 1.0);
}
|
conditional_block
|
files.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::num::NonZeroU64;
use anyhow::{format_err, Context, Error};
use async_trait::async_trait;
use bytes::Bytes;
use context::PerfCounterType;
use futures::{stream, Stream, StreamExt, TryStreamExt};
use gotham::state::{FromState, State};
use gotham_derive::{StateData, StaticResponseExtender};
use hyper::Body;
use serde::Deserialize;
use std::str::FromStr;
use edenapi_types::{
wire::ToWire, AnyFileContentId, AnyId, Batch, FileAttributes, FileAuxData, FileContent,
FileContentTokenMetadata, FileEntry, FileRequest, FileResponse, FileSpec, ServerError,
UploadHgFilenodeRequest, UploadToken, UploadTokenMetadata, UploadTokensResponse,
};
use ephemeral_blobstore::BubbleId;
use gotham_ext::{error::HttpError, response::TryIntoResponse};
use mercurial_types::{HgFileNodeId, HgNodeHash};
use mononoke_api_hg::{HgDataContext, HgDataId, HgRepoContext};
use rate_limiting::Metric;
use types::Key;
use crate::context::ServerContext;
use crate::errors::ErrorKind;
use crate::middleware::RequestContext;
use crate::utils::{cbor_stream_filtered_errors, get_repo};
use super::{EdenApiHandler, EdenApiMethod, HandlerInfo, HandlerResult};
/// XXX: This number was chosen arbitrarily.
const MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST: usize = 10;
const MAX_CONCURRENT_UPLOAD_FILENODES_PER_REQUEST: usize = 1000;
#[derive(Debug, Deserialize, StateData, StaticResponseExtender)]
pub struct UploadFileParams {
repo: String,
idtype: String,
id: String,
}
#[derive(Debug, Deserialize, StateData, StaticResponseExtender)]
pub struct UploadFileQueryString {
bubble_id: Option<NonZeroU64>,
content_size: u64,
}
/// Fetch the content of the files requested by the client.
pub struct FilesHandler;
#[async_trait]
impl EdenApiHandler for FilesHandler {
type Request = FileRequest;
type Response = FileEntry;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::Files;
const ENDPOINT: &'static str = "/files";
fn sampling_rate(request: &Self::Request) -> NonZeroU64 {
// Downsample trivial single-key requests; sample everything else at the full rate.
if request.keys.len() + request.reqs.len() == 1 {
nonzero_ext::nonzero!(100u64)
} else {
nonzero_ext::nonzero!(1u64)
}
}
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let ctx = repo.ctx().clone();
let len = request.keys.len() + request.reqs.len();
let reqs = request
.keys
.into_iter()
.map(|key| FileSpec {
key,
attrs: FileAttributes {
content: true,
aux_data: false,
},
})
.chain(request.reqs.into_iter());
ctx.perf_counters()
.add_to_counter(PerfCounterType::EdenapiFiles, len as i64);
let fetches = reqs.map(move |FileSpec { key, attrs }| fetch_file(repo.clone(), key, attrs));
Ok(stream::iter(fetches)
.buffer_unordered(MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST)
.inspect_ok(move |_| {
ctx.session().bump_load(Metric::GetpackFiles, 1.0);
})
.boxed())
}
}
/// Fetch the content of the files requested by the client, reporting
/// per-key errors in the response rather than failing the whole stream.
pub struct Files2Handler;
#[async_trait]
impl EdenApiHandler for Files2Handler {
type Request = FileRequest;
type Response = FileResponse;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::Files2;
const ENDPOINT: &'static str = "/files2";
fn sampling_rate(request: &Self::Request) -> NonZeroU64 {
// Downsample trivial single-key requests; sample everything else at the full rate.
if request.keys.len() + request.reqs.len() == 1 {
nonzero_ext::nonzero!(100u64)
} else {
nonzero_ext::nonzero!(1u64)
}
}
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let ctx = repo.ctx().clone();
let len = request.keys.len() + request.reqs.len();
let reqs = request
.keys
.into_iter()
.map(|key| FileSpec {
key,
attrs: FileAttributes {
content: true,
aux_data: false,
},
})
.chain(request.reqs.into_iter());
ctx.perf_counters()
.add_to_counter(PerfCounterType::EdenapiFiles, len as i64);
let fetches = reqs.map(move |FileSpec { key, attrs }| {
fetch_file_response(repo.clone(), key.clone(), attrs)
});
Ok(stream::iter(fetches)
.buffer_unordered(MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST)
.inspect(move |response| {
if let Ok(result) = &response {
if result.result.is_ok() {
ctx.session().bump_load(Metric::GetpackFiles, 1.0);
}
}
})
.boxed())
}
}
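// A standalone sketch of the fan-out pattern both handlers above rely on:
// turn an iterator of fetch futures into a stream and poll it with bounded
// concurrency, so at most MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST fetches
// are in flight for a single request.
#[allow(dead_code)]
async fn bounded_fetch_demo(
    repo: HgRepoContext,
    keys: Vec<Key>,
) -> Vec<Result<FileEntry, Error>> {
    stream::iter(keys.into_iter().map(|key| {
        fetch_file(
            repo.clone(),
            key,
            FileAttributes {
                content: true,
                aux_data: false,
            },
        )
    }))
    .buffer_unordered(MAX_CONCURRENT_FILE_FETCHES_PER_REQUEST)
    .collect()
    .await
}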
async fn fetch_file_response(
repo: HgRepoContext,
key: Key,
attrs: FileAttributes,
) -> Result<FileResponse, Error> {
let result = fetch_file(repo, key.clone(), attrs)
.await
.map_err(|e| ServerError::generic(format!("{}", e)));
Ok(FileResponse { key, result })
}
/// Fetch the requested file for a single key.
/// Note that this function consumes the repo context in order
/// to construct a file context for the requested blob.
async fn fetch_file(
repo: HgRepoContext,
key: Key,
attrs: FileAttributes,
) -> Result<FileEntry, Error> {
let id = HgFileNodeId::from_node_hash(HgNodeHash::from(key.hgid));
let ctx = id
.context(repo)
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?
.with_context(|| ErrorKind::KeyDoesNotExist(key.clone()))?;
let parents = ctx.hg_parents().into();
let mut file = FileEntry::new(key.clone(), parents);
if attrs.content {
let (data, metadata) = ctx
.content()
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?;
file = file.with_content(FileContent {
hg_file_blob: data,
metadata,
});
}
if attrs.aux_data {
let content_metadata = ctx
.content_metadata()
.await
.with_context(|| ErrorKind::FileFetchFailed(key.clone()))?;
file = file.with_aux_data(FileAuxData {
total_size: content_metadata.total_size,
content_id: content_metadata.content_id.into(),
sha1: content_metadata.sha1.into(),
sha256: content_metadata.sha256.into(),
});
}
Ok(file)
}
/// Generate an upload token for already-uploaded content.
async fn generate_upload_token(
_repo: HgRepoContext,
id: AnyFileContentId,
content_size: u64,
bubble_id: Option<NonZeroU64>,
) -> Result<UploadToken, Error> {
// For now, return a fake token.
Ok(UploadToken::new_fake_token_with_metadata(
AnyId::AnyFileContentId(id),
bubble_id,
UploadTokenMetadata::FileContentTokenMetadata(FileContentTokenMetadata { content_size }),
))
}
/// Upload content of a file
async fn store_file(
repo: HgRepoContext,
id: AnyFileContentId,
data: impl Stream<Item = Result<Bytes, Error>> + Send,
content_size: u64,
bubble_id: Option<BubbleId>,
) -> Result<(), Error> {
repo.store_file(id, content_size, data, bubble_id).await?;
Ok(())
}
/// Upload content of a file requested by the client.
pub async fn upload_file(state: &mut State) -> Result<impl TryIntoResponse, HttpError> {
let params = UploadFileParams::take_from(state);
let query_string = UploadFileQueryString::take_from(state);
state.put(HandlerInfo::new(¶ms.repo, EdenApiMethod::UploadFile));
let rctx = RequestContext::borrow_from(state).clone();
let sctx = ServerContext::borrow_from(state);
let repo = get_repo(&sctx, &rctx, ¶ms.repo, None).await?;
let id = AnyFileContentId::from_str(&format!("{}/{}", ¶ms.idtype, ¶ms.id))
.map_err(HttpError::e400)?;
let body = Body::take_from(state).map_err(Error::from);
let content_size = query_string.content_size;
store_file(
repo.clone(),
id.clone(),
body,
content_size,
query_string.bubble_id.map(BubbleId::new),
)
.await
.map_err(HttpError::e500)?;
let token = generate_upload_token(repo, id, content_size, query_string.bubble_id)
.await
.map(|v| v.to_wire());
Ok(cbor_stream_filtered_errors(stream::iter(vec![token])))
}
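// Illustrative request shape for the handler above. The concrete mount
// point depends on how the router binds UploadFileParams, so treat the
// exact path layout as an assumption:
//
//   POST .../:repo/upload/file/:idtype/:id?content_size=<bytes>[&bubble_id=<n>]
//
// The raw request body is streamed into the blobstore via `store_file`,
// after which a (currently fake) upload token is returned as a CBOR stream.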
/// Store the content of a single HgFilenode
async fn store_hg_filenode(
repo: HgRepoContext,
item: UploadHgFilenodeRequest,
) -> Result<UploadTokensResponse, Error> {
    // TODO(liubovd): validate the signature of the upload token (item.token) and
    // return 'ErrorKind::UploadHgFilenodeRequestInvalidToken' if it is invalid.
    // This will be added later; for now, assume tokens are always valid.
let node_id = item.data.node_id;
let token = item.data.file_content_upload_token;
let filenode: HgFileNodeId = HgFileNodeId::from_node_hash(HgNodeHash::from(node_id));
let p1: Option<HgFileNodeId> = item
.data
.parents
.p1()
.cloned()
.map(HgNodeHash::from)
.map(HgFileNodeId::from_node_hash);
let p2: Option<HgFileNodeId> = item
.data
.parents
.p2()
.cloned()
.map(HgNodeHash::from)
.map(HgFileNodeId::from_node_hash);
let any_file_content_id = match token.data.id {
AnyId::AnyFileContentId(id) => Some(id),
_ => None,
}
.ok_or_else(|| {
ErrorKind::UploadHgFilenodeRequestInvalidToken(
node_id.clone(),
"the provided token is not for file content".into(),
)
})?;
let content_id = repo
.convert_file_to_content_id(any_file_content_id, None)
.await?
.ok_or_else(|| format_err!("File from upload token should be present"))?;
let content_size = match token.data.metadata {
Some(UploadTokenMetadata::FileContentTokenMetadata(meta)) => meta.content_size,
_ => repo.fetch_file_content_size(content_id, None).await?,
};
let metadata = Bytes::from(item.data.metadata);
repo.store_hg_filenode(filenode, p1, p2, content_id, content_size, metadata)
.await?;
Ok(UploadTokensResponse {
token: UploadToken::new_fake_token(AnyId::HgFilenodeId(node_id), None),
})
}
/// Upload a list of hg filenodes requested by the client (batch request).
pub struct UploadHgFilenodesHandler;
#[async_trait]
impl EdenApiHandler for UploadHgFilenodesHandler {
type Request = Batch<UploadHgFilenodeRequest>;
type Response = UploadTokensResponse;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::UploadHgFilenodes;
const ENDPOINT: &'static str = "/upload/filenodes";
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let tokens = request
.batch
.into_iter()
.map(move |item| store_hg_filenode(repo.clone(), item));
Ok(stream::iter(tokens)
.buffer_unordered(MAX_CONCURRENT_UPLOAD_FILENODES_PER_REQUEST)
.boxed())
}
}
/// Downloads a file given an upload token
pub struct
|
;
#[async_trait]
impl EdenApiHandler for DownloadFileHandler {
type Request = UploadToken;
type Response = Bytes;
const HTTP_METHOD: hyper::Method = hyper::Method::POST;
const API_METHOD: EdenApiMethod = EdenApiMethod::DownloadFile;
const ENDPOINT: &'static str = "/download/file";
async fn handler(
repo: HgRepoContext,
_path: Self::PathExtractor,
_query: Self::QueryStringExtractor,
request: Self::Request,
) -> HandlerResult<'async_trait, Self::Response> {
let content = repo
.download_file(request)
.await?
.context("File not found")?;
Ok(content.boxed())
}
}
|
DownloadFileHandler
|
identifier_name
|
errors.rs
|
use tokio_timer::TimerError;
use getopts;
use log;
use nom;
use std::cell;
use std::io;
use std::sync;
use toml;
use serde_json;
error_chain! {
foreign_links {
Timer(TimerError);
IO(io::Error);
SetLogger(log::SetLoggerError);
Getopts(getopts::Fail);
BorrowMut(cell::BorrowMutError);
JsonError(serde_json::Error);
}
errors {
Message(msg: String) {
description("error")
display("error: {}", msg)
}
Poison(msg: String) {
description("poison error")
display("poison error: {}", msg)
}
TomlParse(errors: Vec<toml::ParserError>) {
description("parse error")
display("parse error: {:?}", errors)
}
TomlDecode {
description("decode error")
display("decode error")
}
TomlKey(errors: Vec<toml::ParserError>) {
description("parse error")
display("parse error: {:?}", errors)
}
Config(path: String) {
|
ConfigSection(section: String) {
description("error in section")
display("error in section: {}", section)
}
ConfigField(field: String, reason: String) {
description("error in field")
display("error in field: {}: {}", field, reason)
}
MissingPlugin(key: String) {
description("no such plugin")
display("no such plugin: {}", key)
}
MissingField(name: String) {
description("missing field")
display("missing field: {}", name)
}
Nom(info: String) {
description("nom error")
display("nom error: {}", info)
}
Shutdown {
description("shutdown")
display("shutdown")
}
Poll {
}
Update {
}
Setup {
}
}
}
impl<T> From<sync::PoisonError<T>> for Error {
fn from(err: sync::PoisonError<T>) -> Error {
ErrorKind::Poison(err.to_string()).into()
}
}
impl From<nom::IError> for Error {
fn from(err: nom::IError) -> Error {
match err {
nom::IError::Error(err) => ErrorKind::Nom(err.to_string()).into(),
nom::IError::Incomplete(_) => ErrorKind::Nom("input incomplete".to_owned()).into(),
}
}
}
impl From<toml::DecodeError> for Error {
fn from(err: toml::DecodeError) -> Error {
if let Some(ref field) = err.field {
ErrorKind::ConfigField(field.clone(), format!("{}", err)).into()
} else {
ErrorKind::Message(format!("{}", err)).into()
}
}
}
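// A minimal sketch (standalone illustration) of how the conversions above
// compose with `?`: locking a poisoned mutex yields `sync::PoisonError`,
// which the `From` impl above turns into this crate's `Error` automatically.
#[allow(dead_code)]
fn read_shared(data: &sync::Mutex<String>) -> Result<String> {
    let guard = data.lock()?;
    Ok(guard.clone())
}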
|
description("error in config")
display("error in config: {}", path)
}
|
random_line_split
|
errors.rs
|
use tokio_timer::TimerError;
use getopts;
use log;
use nom;
use std::cell;
use std::io;
use std::sync;
use toml;
use serde_json;
error_chain! {
foreign_links {
Timer(TimerError);
IO(io::Error);
SetLogger(log::SetLoggerError);
Getopts(getopts::Fail);
BorrowMut(cell::BorrowMutError);
JsonError(serde_json::Error);
}
errors {
Message(msg: String) {
description("error")
display("error: {}", msg)
}
Poison(msg: String) {
description("poison error")
display("poison error: {}", msg)
}
TomlParse(errors: Vec<toml::ParserError>) {
description("parse error")
display("parse error: {:?}", errors)
}
TomlDecode {
description("decode error")
display("decode error")
}
TomlKey(errors: Vec<toml::ParserError>) {
description("parse error")
display("parse error: {:?}", errors)
}
Config(path: String) {
description("error in config")
display("error in config: {}", path)
}
ConfigSection(section: String) {
description("error in section")
display("error in section: {}", section)
}
ConfigField(field: String, reason: String) {
description("error in field")
display("error in field: {}: {}", field, reason)
}
MissingPlugin(key: String) {
description("no such plugin")
display("no such plugin: {}", key)
}
MissingField(name: String) {
description("missing field")
display("missing field: {}", name)
}
Nom(info: String) {
description("nom error")
display("nom error: {}", info)
}
Shutdown {
description("shutdown")
display("shutdown")
}
Poll {
}
Update {
}
Setup {
}
}
}
impl<T> From<sync::PoisonError<T>> for Error {
fn from(err: sync::PoisonError<T>) -> Error {
ErrorKind::Poison(err.to_string()).into()
}
}
impl From<nom::IError> for Error {
fn from(err: nom::IError) -> Error {
match err {
nom::IError::Error(err) => ErrorKind::Nom(err.to_string()).into(),
nom::IError::Incomplete(_) => ErrorKind::Nom("input incomplete".to_owned()).into(),
}
}
}
impl From<toml::DecodeError> for Error {
fn
|
(err: toml::DecodeError) -> Error {
if let Some(ref field) = err.field {
ErrorKind::ConfigField(field.clone(), format!("{}", err)).into()
} else {
ErrorKind::Message(format!("{}", err)).into()
}
}
}
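// A sketch (hypothetical helper) of how the `Config` variant is meant to be
// used: wrap a lower-level failure with the offending path via `chain_err`,
// which comes from the `ResultExt` trait that `error_chain!` generates in
// this module.
#[allow(dead_code)]
fn load_config_file(path: &str) -> Result<String> {
    use std::io::Read;
    let mut file = std::fs::File::open(path).chain_err(|| ErrorKind::Config(path.to_owned()))?;
    let mut buf = String::new();
    file.read_to_string(&mut buf)
        .chain_err(|| ErrorKind::Config(path.to_owned()))?;
    Ok(buf)
}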
|
from
|
identifier_name
|
errors.rs
|
use tokio_timer::TimerError;
use getopts;
use log;
use nom;
use std::cell;
use std::io;
use std::sync;
use toml;
use serde_json;
error_chain! {
foreign_links {
Timer(TimerError);
IO(io::Error);
SetLogger(log::SetLoggerError);
Getopts(getopts::Fail);
BorrowMut(cell::BorrowMutError);
JsonError(serde_json::Error);
}
errors {
Message(msg: String) {
description("error")
display("error: {}", msg)
}
Poison(msg: String) {
description("poison error")
display("poison error: {}", msg)
}
TomlParse(errors: Vec<toml::ParserError>) {
description("parse error")
display("parse error: {:?}", errors)
}
TomlDecode {
description("decode error")
display("decode error")
}
TomlKey(errors: Vec<toml::ParserError>) {
description("parse error")
display("parse error: {:?}", errors)
}
Config(path: String) {
description("error in config")
display("error in config: {}", path)
}
ConfigSection(section: String) {
description("error in section")
display("error in section: {}", section)
}
ConfigField(field: String, reason: String) {
description("error in field")
display("error in field: {}: {}", field, reason)
}
MissingPlugin(key: String) {
description("no such plugin")
display("no such plugin: {}", key)
}
MissingField(name: String) {
description("missing field")
display("missing field: {}", name)
}
Nom(info: String) {
description("nom error")
display("nom error: {}", info)
}
Shutdown {
description("shutdown")
display("shutdown")
}
Poll {
}
Update {
}
Setup {
}
}
}
impl<T> From<sync::PoisonError<T>> for Error {
fn from(err: sync::PoisonError<T>) -> Error
|
}
impl From<nom::IError> for Error {
fn from(err: nom::IError) -> Error {
match err {
nom::IError::Error(err) => ErrorKind::Nom(err.to_string()).into(),
nom::IError::Incomplete(_) => ErrorKind::Nom("input incomplete".to_owned()).into(),
}
}
}
impl From<toml::DecodeError> for Error {
fn from(err: toml::DecodeError) -> Error {
if let Some(ref field) = err.field {
ErrorKind::ConfigField(field.clone(), format!("{}", err)).into()
} else {
ErrorKind::Message(format!("{}", err)).into()
}
}
}
|
{
ErrorKind::Poison(err.to_string()).into()
}
|
identifier_body
|