file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 values) |
---|---|---|---|---|
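Each row below pairs a `prefix`, a `middle` (the span a fill-in-the-middle model must predict), and a `suffix` cut from a single source file; concatenating the three columns in that order reconstructs the original file, and `fim_type` records how the masked span was chosen (the four values seen below are `identifier_name`, `identifier_body`, `conditional_block`, and `random_line_split`). A minimal reassembly sketch in Rust, matching the language of the row contents, is shown here; the helper name and the plain-string signature are illustrative assumptions, not part of the dataset or its tooling:

```rust
/// Rebuild the original source file from one row of the table above.
/// The arguments correspond to the `prefix`, `middle`, and `suffix` columns.
fn reassemble_row(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut file = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    file.push_str(prefix);
    file.push_str(middle); // ground-truth completion for the FIM task
    file.push_str(suffix);
    file
}
```

In the first row, for example, the `middle` is just the identifier `build_address_book`, so reassembly yields the complete `addressbook_send.rs` example.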
addressbook_send.rs | // Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
extern crate capnp;
extern crate core;
pub mod addressbook_capnp {
include!(concat!(env!("OUT_DIR"), "/addressbook_capnp.rs"));
}
use capnp::message::{Builder, HeapAllocator, TypedReader};
use std::sync::mpsc;
use std::thread;
pub mod addressbook {
use addressbook_capnp::{address_book, person};
use capnp::message::{Builder, HeapAllocator, TypedReader};
pub fn | () -> TypedReader<Builder<HeapAllocator>, address_book::Owned> {
let mut message = Builder::new_default();
{
let address_book = message.init_root::<address_book::Builder>();
let mut people = address_book.init_people(2);
{
let mut alice = people.reborrow().get(0);
alice.set_id(123);
alice.set_name("Alice");
alice.set_email("[email protected]");
{
let mut alice_phones = alice.reborrow().init_phones(1);
alice_phones.reborrow().get(0).set_number("555-1212");
alice_phones.reborrow().get(0).set_type(person::phone_number::Type::Mobile);
}
alice.get_employment().set_school("MIT");
}
{
let mut bob = people.get(1);
bob.set_id(456);
bob.set_name("Bob");
bob.set_email("[email protected]");
{
let mut bob_phones = bob.reborrow().init_phones(2);
bob_phones.reborrow().get(0).set_number("555-4567");
bob_phones.reborrow().get(0).set_type(person::phone_number::Type::Home);
bob_phones.reborrow().get(1).set_number("555-7654");
bob_phones.reborrow().get(1).set_type(person::phone_number::Type::Work);
}
bob.get_employment().set_unemployed(());
}
}
// There are a few ways to get a TypedReader from our `message`:
//
// Option 1: Go through the full process manually
// message.into_reader().into_typed()
//
// Option 2: Use the "Into" trait defined on the builder
// message.into()
//
// Option 3: Use the "From" trait defined on the builder
TypedReader::from(message)
}
}
pub fn main() {
let book = addressbook::build_address_book();
let (tx_book, rx_book) = mpsc::channel::<TypedReader<Builder<HeapAllocator>, addressbook_capnp::address_book::Owned>>();
let (tx_id, rx_id) = mpsc::channel::<u32>();
thread::spawn(move || {
let addressbook_reader = rx_book.recv().unwrap();
let addressbook = addressbook_reader.get().unwrap();
let first_person = addressbook.get_people().unwrap().get(0);
let first_id = first_person.get_id();
tx_id.send(first_id)
});
tx_book.send(book).unwrap();
let first_id = rx_id.recv().unwrap();
assert_eq!(first_id, 123);
}
| build_address_book | identifier_name |
addressbook_send.rs | // Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
extern crate capnp;
extern crate core;
pub mod addressbook_capnp {
include!(concat!(env!("OUT_DIR"), "/addressbook_capnp.rs"));
}
use capnp::message::{Builder, HeapAllocator, TypedReader};
use std::sync::mpsc;
use std::thread;
pub mod addressbook {
use addressbook_capnp::{address_book, person};
use capnp::message::{Builder, HeapAllocator, TypedReader};
pub fn build_address_book() -> TypedReader<Builder<HeapAllocator>, address_book::Owned> {
let mut message = Builder::new_default();
{
let address_book = message.init_root::<address_book::Builder>();
let mut people = address_book.init_people(2);
{
let mut alice = people.reborrow().get(0);
alice.set_id(123);
alice.set_name("Alice");
alice.set_email("[email protected]");
{
let mut alice_phones = alice.reborrow().init_phones(1);
alice_phones.reborrow().get(0).set_number("555-1212");
alice_phones.reborrow().get(0).set_type(person::phone_number::Type::Mobile);
}
alice.get_employment().set_school("MIT");
}
{
let mut bob = people.get(1);
bob.set_id(456);
bob.set_name("Bob");
bob.set_email("[email protected]");
{
let mut bob_phones = bob.reborrow().init_phones(2);
bob_phones.reborrow().get(0).set_number("555-4567");
bob_phones.reborrow().get(0).set_type(person::phone_number::Type::Home);
bob_phones.reborrow().get(1).set_number("555-7654");
bob_phones.reborrow().get(1).set_type(person::phone_number::Type::Work);
}
bob.get_employment().set_unemployed(());
}
}
// There are a few ways to get a TypedReader from our `message`:
//
// Option 1: Go through the full process manually
// message.into_reader().into_typed()
//
// Option 2: Use the "Into" trait defined on the builder
// message.into()
//
// Option 3: Use the "From" trait defined on the builder
TypedReader::from(message)
}
}
pub fn main() | {
let book = addressbook::build_address_book();
let (tx_book, rx_book) = mpsc::channel::<TypedReader<Builder<HeapAllocator>, addressbook_capnp::address_book::Owned>>();
let (tx_id, rx_id) = mpsc::channel::<u32>();
thread::spawn(move || {
let addressbook_reader = rx_book.recv().unwrap();
let addressbook = addressbook_reader.get().unwrap();
let first_person = addressbook.get_people().unwrap().get(0);
let first_id = first_person.get_id();
tx_id.send(first_id)
});
tx_book.send(book).unwrap();
let first_id = rx_id.recv().unwrap();
assert_eq!(first_id, 123);
} | identifier_body |
|
htmllielement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLLIElementBinding;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use util::str::DOMString;
#[dom_struct]
pub struct | {
htmlelement: HTMLElement,
}
impl HTMLLIElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLLIElement {
HTMLLIElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLLIElement> {
let element = HTMLLIElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLLIElementBinding::Wrap)
}
}
| HTMLLIElement | identifier_name |
htmllielement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLLIElementBinding;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLLIElement {
htmlelement: HTMLElement,
}
impl HTMLLIElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLLIElement {
HTMLLIElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)] | Node::reflect_node(box element, document, HTMLLIElementBinding::Wrap)
}
} | pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLLIElement> {
let element = HTMLLIElement::new_inherited(localName, prefix, document); | random_line_split |
htmllielement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLLIElementBinding;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLLIElement {
htmlelement: HTMLElement,
}
impl HTMLLIElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLLIElement {
HTMLLIElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLLIElement> |
}
| {
let element = HTMLLIElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLLIElementBinding::Wrap)
} | identifier_body |
trait-bounds-in-arc.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at | // option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that a heterogeneous list of existential types can be put inside an Arc
// and shared between tasks as long as all types fulfill Send.
// ignore-pretty
#![allow(unknown_features)]
#![feature(box_syntax)]
#![feature(unboxed_closures)]
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread::Thread;
trait Pet {
fn name(&self, blk: Box<FnMut(&str)>);
fn num_legs(&self) -> uint;
fn of_good_pedigree(&self) -> bool;
}
struct Catte {
num_whiskers: uint,
name: String,
}
struct Dogge {
bark_decibels: uint,
tricks_known: uint,
name: String,
}
struct Goldfyshe {
swim_speed: uint,
name: String,
}
impl Pet for Catte {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(self.name.as_slice()) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool { self.num_whiskers >= 4 }
}
impl Pet for Dogge {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(self.name.as_slice()) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool {
self.bark_decibels < 70 || self.tricks_known > 20
}
}
impl Pet for Goldfyshe {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(self.name.as_slice()) }
fn num_legs(&self) -> uint { 0 }
fn of_good_pedigree(&self) -> bool { self.swim_speed >= 500 }
}
pub fn main() {
let catte = Catte { num_whiskers: 7, name: "alonzo_church".to_string() };
let dogge1 = Dogge {
bark_decibels: 100,
tricks_known: 42,
name: "alan_turing".to_string(),
};
let dogge2 = Dogge {
bark_decibels: 55,
tricks_known: 11,
name: "albert_einstein".to_string(),
};
let fishe = Goldfyshe {
swim_speed: 998,
name: "alec_guinness".to_string(),
};
let arc = Arc::new(vec!(box catte as Box<Pet+Sync+Send>,
box dogge1 as Box<Pet+Sync+Send>,
box fishe as Box<Pet+Sync+Send>,
box dogge2 as Box<Pet+Sync+Send>));
let (tx1, rx1) = channel();
let arc1 = arc.clone();
let _t1 = Thread::spawn(move|| { check_legs(arc1); tx1.send(()); });
let (tx2, rx2) = channel();
let arc2 = arc.clone();
let _t2 = Thread::spawn(move|| { check_names(arc2); tx2.send(()); });
let (tx3, rx3) = channel();
let arc3 = arc.clone();
let _t3 = Thread::spawn(move|| { check_pedigree(arc3); tx3.send(()); });
rx1.recv();
rx2.recv();
rx3.recv();
}
fn check_legs(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
let mut legs = 0;
for pet in arc.iter() {
legs += pet.num_legs();
}
assert!(legs == 12);
}
fn check_names(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in arc.iter() {
pet.name(box |name| {
assert!(name.as_bytes()[0] == 'a' as u8 && name.as_bytes()[1] == 'l' as u8);
})
}
}
fn check_pedigree(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in arc.iter() {
assert!(pet.of_good_pedigree());
}
} | // http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | random_line_split |
trait-bounds-in-arc.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that a heterogeneous list of existential types can be put inside an Arc
// and shared between tasks as long as all types fulfill Send.
// ignore-pretty
#![allow(unknown_features)]
#![feature(box_syntax)]
#![feature(unboxed_closures)]
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread::Thread;
trait Pet {
fn name(&self, blk: Box<FnMut(&str)>);
fn num_legs(&self) -> uint;
fn of_good_pedigree(&self) -> bool;
}
struct Catte {
num_whiskers: uint,
name: String,
}
struct Dogge {
bark_decibels: uint,
tricks_known: uint,
name: String,
}
struct Goldfyshe {
swim_speed: uint,
name: String,
}
impl Pet for Catte {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(self.name.as_slice()) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool { self.num_whiskers >= 4 }
}
impl Pet for Dogge {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(self.name.as_slice()) }
fn num_legs(&self) -> uint { 4 }
fn | (&self) -> bool {
self.bark_decibels < 70 || self.tricks_known > 20
}
}
impl Pet for Goldfyshe {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(self.name.as_slice()) }
fn num_legs(&self) -> uint { 0 }
fn of_good_pedigree(&self) -> bool { self.swim_speed >= 500 }
}
pub fn main() {
let catte = Catte { num_whiskers: 7, name: "alonzo_church".to_string() };
let dogge1 = Dogge {
bark_decibels: 100,
tricks_known: 42,
name: "alan_turing".to_string(),
};
let dogge2 = Dogge {
bark_decibels: 55,
tricks_known: 11,
name: "albert_einstein".to_string(),
};
let fishe = Goldfyshe {
swim_speed: 998,
name: "alec_guinness".to_string(),
};
let arc = Arc::new(vec!(box catte as Box<Pet+Sync+Send>,
box dogge1 as Box<Pet+Sync+Send>,
box fishe as Box<Pet+Sync+Send>,
box dogge2 as Box<Pet+Sync+Send>));
let (tx1, rx1) = channel();
let arc1 = arc.clone();
let _t1 = Thread::spawn(move|| { check_legs(arc1); tx1.send(()); });
let (tx2, rx2) = channel();
let arc2 = arc.clone();
let _t2 = Thread::spawn(move|| { check_names(arc2); tx2.send(()); });
let (tx3, rx3) = channel();
let arc3 = arc.clone();
let _t3 = Thread::spawn(move|| { check_pedigree(arc3); tx3.send(()); });
rx1.recv();
rx2.recv();
rx3.recv();
}
fn check_legs(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
let mut legs = 0;
for pet in arc.iter() {
legs += pet.num_legs();
}
assert!(legs == 12);
}
fn check_names(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in arc.iter() {
pet.name(box |name| {
assert!(name.as_bytes()[0] == 'a' as u8 && name.as_bytes()[1] == 'l' as u8);
})
}
}
fn check_pedigree(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in arc.iter() {
assert!(pet.of_good_pedigree());
}
}
| of_good_pedigree | identifier_name |
trait-bounds-in-arc.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that a heterogeneous list of existential types can be put inside an Arc
// and shared between tasks as long as all types fulfill Send.
// ignore-pretty
#![allow(unknown_features)]
#![feature(box_syntax)]
#![feature(unboxed_closures)]
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread::Thread;
trait Pet {
fn name(&self, blk: Box<FnMut(&str)>);
fn num_legs(&self) -> uint;
fn of_good_pedigree(&self) -> bool;
}
struct Catte {
num_whiskers: uint,
name: String,
}
struct Dogge {
bark_decibels: uint,
tricks_known: uint,
name: String,
}
struct Goldfyshe {
swim_speed: uint,
name: String,
}
impl Pet for Catte {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(self.name.as_slice()) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool { self.num_whiskers >= 4 }
}
impl Pet for Dogge {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(self.name.as_slice()) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool {
self.bark_decibels < 70 || self.tricks_known > 20
}
}
impl Pet for Goldfyshe {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(self.name.as_slice()) }
fn num_legs(&self) -> uint { 0 }
fn of_good_pedigree(&self) -> bool { self.swim_speed >= 500 }
}
pub fn main() | let (tx1, rx1) = channel();
let arc1 = arc.clone();
let _t1 = Thread::spawn(move|| { check_legs(arc1); tx1.send(()); });
let (tx2, rx2) = channel();
let arc2 = arc.clone();
let _t2 = Thread::spawn(move|| { check_names(arc2); tx2.send(()); });
let (tx3, rx3) = channel();
let arc3 = arc.clone();
let _t3 = Thread::spawn(move|| { check_pedigree(arc3); tx3.send(()); });
rx1.recv();
rx2.recv();
rx3.recv();
}
fn check_legs(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
let mut legs = 0;
for pet in arc.iter() {
legs += pet.num_legs();
}
assert!(legs == 12);
}
fn check_names(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in arc.iter() {
pet.name(box |name| {
assert!(name.as_bytes()[0] == 'a' as u8 && name.as_bytes()[1] == 'l' as u8);
})
}
}
fn check_pedigree(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in arc.iter() {
assert!(pet.of_good_pedigree());
}
}
| {
let catte = Catte { num_whiskers: 7, name: "alonzo_church".to_string() };
let dogge1 = Dogge {
bark_decibels: 100,
tricks_known: 42,
name: "alan_turing".to_string(),
};
let dogge2 = Dogge {
bark_decibels: 55,
tricks_known: 11,
name: "albert_einstein".to_string(),
};
let fishe = Goldfyshe {
swim_speed: 998,
name: "alec_guinness".to_string(),
};
let arc = Arc::new(vec!(box catte as Box<Pet+Sync+Send>,
box dogge1 as Box<Pet+Sync+Send>,
box fishe as Box<Pet+Sync+Send>,
box dogge2 as Box<Pet+Sync+Send>)); | identifier_body |
check_txn_status.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use txn_types::{Key, Lock, TimeStamp, WriteType};
use crate::storage::{
mvcc::txn::MissingLockAction,
mvcc::{
metrics::MVCC_CHECK_TXN_STATUS_COUNTER_VEC, ErrorInner, LockType, MvccTxn, ReleasedLock,
Result, TxnCommitRecord,
},
Snapshot, TxnStatus,
};
pub fn check_txn_status_lock_exists<S: Snapshot>(
txn: &mut MvccTxn<S>,
primary_key: Key,
mut lock: Lock,
current_ts: TimeStamp,
caller_start_ts: TimeStamp,
force_sync_commit: bool,
resolving_pessimistic_lock: bool,
) -> Result<(TxnStatus, Option<ReleasedLock>)> {
// Never rollback or push forward min_commit_ts in check_txn_status if it's using async commit.
// Rollback of async-commit locks are done during ResolveLock.
if lock.use_async_commit {
if force_sync_commit {
info!(
"fallback is set, check_txn_status treats it as a non-async-commit txn";
"start_ts" => txn.start_ts,
"primary_key" =>?primary_key,
);
} else {
return Ok((TxnStatus::uncommitted(lock, false), None));
}
}
let is_pessimistic_txn = !lock.for_update_ts.is_zero();
if lock.ts.physical() + lock.ttl < current_ts.physical() {
// If the lock is expired, clean it up.
// If the resolving and primary key lock are both pessimistic locks, just unlock the
// primary pessimistic lock and do not write rollback records.
return if resolving_pessimistic_lock && lock.lock_type == LockType::Pessimistic {
let released = txn.unlock_key(primary_key, is_pessimistic_txn);
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.pessimistic_rollback.inc();
Ok((TxnStatus::PessimisticRollBack, released))
} else {
let released =
txn.check_write_and_rollback_lock(primary_key, &lock, is_pessimistic_txn)?;
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.rollback.inc();
Ok((TxnStatus::TtlExpire, released))
};
}
// If lock.min_commit_ts is 0, it's not a large transaction and we can't push forward
// its min_commit_ts otherwise the transaction can't be committed by old version TiDB
// during rolling update.
if !lock.min_commit_ts.is_zero()
&& !caller_start_ts.is_max()
// Push forward the min_commit_ts so that reading won't be blocked by locks.
&& caller_start_ts >= lock.min_commit_ts
{
lock.min_commit_ts = caller_start_ts.next();
if lock.min_commit_ts < current_ts {
lock.min_commit_ts = current_ts;
}
txn.put_lock(primary_key, &lock);
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.update_ts.inc();
}
// As long as the primary lock's min_commit_ts > caller_start_ts, locks belong to the same transaction
// can't block reading. Return MinCommitTsPushed result to the client to let it bypass locks.
let min_commit_ts_pushed = (!caller_start_ts.is_zero() && lock.min_commit_ts > caller_start_ts)
// If the caller_start_ts is max, it's a point get in the autocommit transaction.
// We don't push forward lock's min_commit_ts and the point get can ignore the lock
// next time because it's not committed yet.
|| caller_start_ts.is_max();
Ok((TxnStatus::uncommitted(lock, min_commit_ts_pushed), None))
}
pub fn | <S: Snapshot>(
txn: &mut MvccTxn<S>,
primary_key: Key,
mismatch_lock: Option<Lock>,
action: MissingLockAction,
resolving_pessimistic_lock: bool,
) -> Result<TxnStatus> {
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.get_commit_info.inc();
match txn
.reader
.get_txn_commit_record(&primary_key, txn.start_ts)?
{
TxnCommitRecord::SingleRecord { commit_ts, write } => {
if write.write_type == WriteType::Rollback {
Ok(TxnStatus::RolledBack)
} else {
Ok(TxnStatus::committed(commit_ts))
}
}
TxnCommitRecord::OverlappedRollback { .. } => Ok(TxnStatus::RolledBack),
TxnCommitRecord::None { overlapped_write } => {
if MissingLockAction::ReturnError == action {
return Err(ErrorInner::TxnNotFound {
start_ts: txn.start_ts,
key: primary_key.into_raw()?,
}
.into());
}
if resolving_pessimistic_lock {
return Ok(TxnStatus::LockNotExistDoNothing);
}
let ts = txn.start_ts;
// collapse previous rollback if exist.
if txn.collapse_rollback {
txn.collapse_prev_rollback(primary_key.clone())?;
}
if let (Some(l), None) = (mismatch_lock, overlapped_write.as_ref()) {
txn.mark_rollback_on_mismatching_lock(
&primary_key,
l,
action == MissingLockAction::ProtectedRollback,
);
}
// Insert a Rollback to Write CF in case that a stale prewrite
// command is received after a cleanup command.
if let Some(write) = action.construct_write(ts, overlapped_write) {
txn.put_write(primary_key, ts, write.as_ref().to_bytes());
}
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.rollback.inc();
Ok(TxnStatus::LockNotExist)
}
}
}
| check_txn_status_missing_lock | identifier_name |
check_txn_status.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use txn_types::{Key, Lock, TimeStamp, WriteType};
use crate::storage::{
mvcc::txn::MissingLockAction,
mvcc::{
metrics::MVCC_CHECK_TXN_STATUS_COUNTER_VEC, ErrorInner, LockType, MvccTxn, ReleasedLock,
Result, TxnCommitRecord,
},
Snapshot, TxnStatus,
};
pub fn check_txn_status_lock_exists<S: Snapshot>(
txn: &mut MvccTxn<S>,
primary_key: Key,
mut lock: Lock,
current_ts: TimeStamp,
caller_start_ts: TimeStamp,
force_sync_commit: bool,
resolving_pessimistic_lock: bool,
) -> Result<(TxnStatus, Option<ReleasedLock>)> {
// Never rollback or push forward min_commit_ts in check_txn_status if it's using async commit.
// Rollback of async-commit locks are done during ResolveLock.
if lock.use_async_commit {
if force_sync_commit {
info!(
"fallback is set, check_txn_status treats it as a non-async-commit txn";
"start_ts" => txn.start_ts,
"primary_key" =>?primary_key,
);
} else {
return Ok((TxnStatus::uncommitted(lock, false), None));
}
}
let is_pessimistic_txn = !lock.for_update_ts.is_zero();
if lock.ts.physical() + lock.ttl < current_ts.physical() {
// If the lock is expired, clean it up.
// If the resolving and primary key lock are both pessimistic locks, just unlock the
// primary pessimistic lock and do not write rollback records.
return if resolving_pessimistic_lock && lock.lock_type == LockType::Pessimistic {
let released = txn.unlock_key(primary_key, is_pessimistic_txn);
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.pessimistic_rollback.inc();
Ok((TxnStatus::PessimisticRollBack, released))
} else {
let released =
txn.check_write_and_rollback_lock(primary_key, &lock, is_pessimistic_txn)?;
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.rollback.inc();
Ok((TxnStatus::TtlExpire, released))
};
}
// If lock.min_commit_ts is 0, it's not a large transaction and we can't push forward
// its min_commit_ts otherwise the transaction can't be committed by old version TiDB
// during rolling update.
if !lock.min_commit_ts.is_zero()
&& !caller_start_ts.is_max()
// Push forward the min_commit_ts so that reading won't be blocked by locks.
&& caller_start_ts >= lock.min_commit_ts
{
lock.min_commit_ts = caller_start_ts.next();
| txn.put_lock(primary_key, &lock);
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.update_ts.inc();
}
// As long as the primary lock's min_commit_ts > caller_start_ts, locks belong to the same transaction
// can't block reading. Return MinCommitTsPushed result to the client to let it bypass locks.
let min_commit_ts_pushed = (!caller_start_ts.is_zero() && lock.min_commit_ts > caller_start_ts)
// If the caller_start_ts is max, it's a point get in the autocommit transaction.
// We don't push forward lock's min_commit_ts and the point get can ignore the lock
// next time because it's not committed yet.
|| caller_start_ts.is_max();
Ok((TxnStatus::uncommitted(lock, min_commit_ts_pushed), None))
}
pub fn check_txn_status_missing_lock<S: Snapshot>(
txn: &mut MvccTxn<S>,
primary_key: Key,
mismatch_lock: Option<Lock>,
action: MissingLockAction,
resolving_pessimistic_lock: bool,
) -> Result<TxnStatus> {
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.get_commit_info.inc();
match txn
.reader
.get_txn_commit_record(&primary_key, txn.start_ts)?
{
TxnCommitRecord::SingleRecord { commit_ts, write } => {
if write.write_type == WriteType::Rollback {
Ok(TxnStatus::RolledBack)
} else {
Ok(TxnStatus::committed(commit_ts))
}
}
TxnCommitRecord::OverlappedRollback { .. } => Ok(TxnStatus::RolledBack),
TxnCommitRecord::None { overlapped_write } => {
if MissingLockAction::ReturnError == action {
return Err(ErrorInner::TxnNotFound {
start_ts: txn.start_ts,
key: primary_key.into_raw()?,
}
.into());
}
if resolving_pessimistic_lock {
return Ok(TxnStatus::LockNotExistDoNothing);
}
let ts = txn.start_ts;
// collapse previous rollback if exist.
if txn.collapse_rollback {
txn.collapse_prev_rollback(primary_key.clone())?;
}
if let (Some(l), None) = (mismatch_lock, overlapped_write.as_ref()) {
txn.mark_rollback_on_mismatching_lock(
&primary_key,
l,
action == MissingLockAction::ProtectedRollback,
);
}
// Insert a Rollback to Write CF in case that a stale prewrite
// command is received after a cleanup command.
if let Some(write) = action.construct_write(ts, overlapped_write) {
txn.put_write(primary_key, ts, write.as_ref().to_bytes());
}
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.rollback.inc();
Ok(TxnStatus::LockNotExist)
}
}
} | if lock.min_commit_ts < current_ts {
lock.min_commit_ts = current_ts;
}
| random_line_split |
check_txn_status.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use txn_types::{Key, Lock, TimeStamp, WriteType};
use crate::storage::{
mvcc::txn::MissingLockAction,
mvcc::{
metrics::MVCC_CHECK_TXN_STATUS_COUNTER_VEC, ErrorInner, LockType, MvccTxn, ReleasedLock,
Result, TxnCommitRecord,
},
Snapshot, TxnStatus,
};
pub fn check_txn_status_lock_exists<S: Snapshot>(
txn: &mut MvccTxn<S>,
primary_key: Key,
mut lock: Lock,
current_ts: TimeStamp,
caller_start_ts: TimeStamp,
force_sync_commit: bool,
resolving_pessimistic_lock: bool,
) -> Result<(TxnStatus, Option<ReleasedLock>)> {
// Never rollback or push forward min_commit_ts in check_txn_status if it's using async commit.
// Rollback of async-commit locks are done during ResolveLock.
if lock.use_async_commit {
if force_sync_commit {
info!(
"fallback is set, check_txn_status treats it as a non-async-commit txn";
"start_ts" => txn.start_ts,
"primary_key" =>?primary_key,
);
} else {
return Ok((TxnStatus::uncommitted(lock, false), None));
}
}
let is_pessimistic_txn = !lock.for_update_ts.is_zero();
if lock.ts.physical() + lock.ttl < current_ts.physical() {
// If the lock is expired, clean it up.
// If the resolving and primary key lock are both pessimistic locks, just unlock the
// primary pessimistic lock and do not write rollback records.
return if resolving_pessimistic_lock && lock.lock_type == LockType::Pessimistic {
let released = txn.unlock_key(primary_key, is_pessimistic_txn);
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.pessimistic_rollback.inc();
Ok((TxnStatus::PessimisticRollBack, released))
} else {
let released =
txn.check_write_and_rollback_lock(primary_key, &lock, is_pessimistic_txn)?;
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.rollback.inc();
Ok((TxnStatus::TtlExpire, released))
};
}
// If lock.min_commit_ts is 0, it's not a large transaction and we can't push forward
// its min_commit_ts otherwise the transaction can't be committed by old version TiDB
// during rolling update.
if !lock.min_commit_ts.is_zero()
&& !caller_start_ts.is_max()
// Push forward the min_commit_ts so that reading won't be blocked by locks.
&& caller_start_ts >= lock.min_commit_ts
{
lock.min_commit_ts = caller_start_ts.next();
if lock.min_commit_ts < current_ts {
lock.min_commit_ts = current_ts;
}
txn.put_lock(primary_key, &lock);
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.update_ts.inc();
}
// As long as the primary lock's min_commit_ts > caller_start_ts, locks belong to the same transaction
// can't block reading. Return MinCommitTsPushed result to the client to let it bypass locks.
let min_commit_ts_pushed = (!caller_start_ts.is_zero() && lock.min_commit_ts > caller_start_ts)
// If the caller_start_ts is max, it's a point get in the autocommit transaction.
// We don't push forward lock's min_commit_ts and the point get can ignore the lock
// next time because it's not committed yet.
|| caller_start_ts.is_max();
Ok((TxnStatus::uncommitted(lock, min_commit_ts_pushed), None))
}
pub fn check_txn_status_missing_lock<S: Snapshot>(
txn: &mut MvccTxn<S>,
primary_key: Key,
mismatch_lock: Option<Lock>,
action: MissingLockAction,
resolving_pessimistic_lock: bool,
) -> Result<TxnStatus> | }
.into());
}
if resolving_pessimistic_lock {
return Ok(TxnStatus::LockNotExistDoNothing);
}
let ts = txn.start_ts;
// collapse previous rollback if exist.
if txn.collapse_rollback {
txn.collapse_prev_rollback(primary_key.clone())?;
}
if let (Some(l), None) = (mismatch_lock, overlapped_write.as_ref()) {
txn.mark_rollback_on_mismatching_lock(
&primary_key,
l,
action == MissingLockAction::ProtectedRollback,
);
}
// Insert a Rollback to Write CF in case that a stale prewrite
// command is received after a cleanup command.
if let Some(write) = action.construct_write(ts, overlapped_write) {
txn.put_write(primary_key, ts, write.as_ref().to_bytes());
}
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.rollback.inc();
Ok(TxnStatus::LockNotExist)
}
}
}
| {
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.get_commit_info.inc();
match txn
.reader
.get_txn_commit_record(&primary_key, txn.start_ts)?
{
TxnCommitRecord::SingleRecord { commit_ts, write } => {
if write.write_type == WriteType::Rollback {
Ok(TxnStatus::RolledBack)
} else {
Ok(TxnStatus::committed(commit_ts))
}
}
TxnCommitRecord::OverlappedRollback { .. } => Ok(TxnStatus::RolledBack),
TxnCommitRecord::None { overlapped_write } => {
if MissingLockAction::ReturnError == action {
return Err(ErrorInner::TxnNotFound {
start_ts: txn.start_ts,
key: primary_key.into_raw()?, | identifier_body |
check_txn_status.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use txn_types::{Key, Lock, TimeStamp, WriteType};
use crate::storage::{
mvcc::txn::MissingLockAction,
mvcc::{
metrics::MVCC_CHECK_TXN_STATUS_COUNTER_VEC, ErrorInner, LockType, MvccTxn, ReleasedLock,
Result, TxnCommitRecord,
},
Snapshot, TxnStatus,
};
pub fn check_txn_status_lock_exists<S: Snapshot>(
txn: &mut MvccTxn<S>,
primary_key: Key,
mut lock: Lock,
current_ts: TimeStamp,
caller_start_ts: TimeStamp,
force_sync_commit: bool,
resolving_pessimistic_lock: bool,
) -> Result<(TxnStatus, Option<ReleasedLock>)> {
// Never rollback or push forward min_commit_ts in check_txn_status if it's using async commit.
// Rollback of async-commit locks are done during ResolveLock.
if lock.use_async_commit {
if force_sync_commit {
info!(
"fallback is set, check_txn_status treats it as a non-async-commit txn";
"start_ts" => txn.start_ts,
"primary_key" =>?primary_key,
);
} else {
return Ok((TxnStatus::uncommitted(lock, false), None));
}
}
let is_pessimistic_txn = !lock.for_update_ts.is_zero();
if lock.ts.physical() + lock.ttl < current_ts.physical() {
// If the lock is expired, clean it up.
// If the resolving and primary key lock are both pessimistic locks, just unlock the
// primary pessimistic lock and do not write rollback records.
return if resolving_pessimistic_lock && lock.lock_type == LockType::Pessimistic {
let released = txn.unlock_key(primary_key, is_pessimistic_txn);
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.pessimistic_rollback.inc();
Ok((TxnStatus::PessimisticRollBack, released))
} else {
let released =
txn.check_write_and_rollback_lock(primary_key, &lock, is_pessimistic_txn)?;
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.rollback.inc();
Ok((TxnStatus::TtlExpire, released))
};
}
// If lock.min_commit_ts is 0, it's not a large transaction and we can't push forward
// its min_commit_ts otherwise the transaction can't be committed by old version TiDB
// during rolling update.
if !lock.min_commit_ts.is_zero()
&& !caller_start_ts.is_max()
// Push forward the min_commit_ts so that reading won't be blocked by locks.
&& caller_start_ts >= lock.min_commit_ts
{
lock.min_commit_ts = caller_start_ts.next();
if lock.min_commit_ts < current_ts {
lock.min_commit_ts = current_ts;
}
txn.put_lock(primary_key, &lock);
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.update_ts.inc();
}
// As long as the primary lock's min_commit_ts > caller_start_ts, locks belong to the same transaction
// can't block reading. Return MinCommitTsPushed result to the client to let it bypass locks.
let min_commit_ts_pushed = (!caller_start_ts.is_zero() && lock.min_commit_ts > caller_start_ts)
// If the caller_start_ts is max, it's a point get in the autocommit transaction.
// We don't push forward lock's min_commit_ts and the point get can ignore the lock
// next time because it's not committed yet.
|| caller_start_ts.is_max();
Ok((TxnStatus::uncommitted(lock, min_commit_ts_pushed), None))
}
pub fn check_txn_status_missing_lock<S: Snapshot>(
txn: &mut MvccTxn<S>,
primary_key: Key,
mismatch_lock: Option<Lock>,
action: MissingLockAction,
resolving_pessimistic_lock: bool,
) -> Result<TxnStatus> {
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.get_commit_info.inc();
match txn
.reader
.get_txn_commit_record(&primary_key, txn.start_ts)?
{
TxnCommitRecord::SingleRecord { commit_ts, write } => {
if write.write_type == WriteType::Rollback {
Ok(TxnStatus::RolledBack)
} else {
Ok(TxnStatus::committed(commit_ts))
}
}
TxnCommitRecord::OverlappedRollback { .. } => Ok(TxnStatus::RolledBack),
TxnCommitRecord::None { overlapped_write } => {
if MissingLockAction::ReturnError == action {
return Err(ErrorInner::TxnNotFound {
start_ts: txn.start_ts,
key: primary_key.into_raw()?,
}
.into());
}
if resolving_pessimistic_lock |
let ts = txn.start_ts;
// collapse previous rollback if exist.
if txn.collapse_rollback {
txn.collapse_prev_rollback(primary_key.clone())?;
}
if let (Some(l), None) = (mismatch_lock, overlapped_write.as_ref()) {
txn.mark_rollback_on_mismatching_lock(
&primary_key,
l,
action == MissingLockAction::ProtectedRollback,
);
}
// Insert a Rollback to Write CF in case that a stale prewrite
// command is received after a cleanup command.
if let Some(write) = action.construct_write(ts, overlapped_write) {
txn.put_write(primary_key, ts, write.as_ref().to_bytes());
}
MVCC_CHECK_TXN_STATUS_COUNTER_VEC.rollback.inc();
Ok(TxnStatus::LockNotExist)
}
}
}
| {
return Ok(TxnStatus::LockNotExistDoNothing);
} | conditional_block |
scene.rs | //! The Scene system is basically for transitioning between
//! *completely* different states that have entirely different game
//! loops but which all share a state. It operates as a stack, with new
//! scenes getting pushed to the stack (while the old ones stay in
//! memory unchanged). Apparently this is basically a push-down automata.
//!
//! Also there's no reason you can't have a Scene contain its own
//! Scene subsystem to do its own indirection. With a different state
//! type, as well! What fun! Though whether you want to go that deep
//! down the rabbit-hole is up to you. I haven't found it necessary
//! yet.
//!
//! This is basically identical in concept to the Amethyst engine's scene
//! system, the only difference is the details of how the pieces are put
//! together.
use ggez;
/// A command to change to a new scene, either by pushing a new one,
/// popping one or replacing the current scene (pop and then push).
pub enum SceneSwitch<C, Ev> {
None,
Push(Box<Scene<C, Ev>>),
Replace(Box<Scene<C, Ev>>),
Pop,
}
/// A trait for you to implement on a scene.
/// Defines the callbacks the scene uses:
/// a common context type `C`, and an input event type `Ev`.
pub trait Scene<C, Ev> {
fn update(&mut self, gameworld: &mut C, ctx: &mut ggez::Context) -> SceneSwitch<C, Ev>;
fn draw(&mut self, gameworld: &mut C, ctx: &mut ggez::Context) -> ggez::GameResult<()>;
fn input(&mut self, gameworld: &mut C, event: Ev, started: bool);
/// Only used for human-readable convenience (or not at all, tbh)
fn name(&self) -> &str;
/// This returns whether or not to draw the next scene down on the
/// stack as well; this is useful for layers or GUI stuff that
/// only partially covers the screen.
fn draw_previous(&self) -> bool {
false
}
}
impl<C, Ev> SceneSwitch<C, Ev> {
/// Convenient shortcut function for boxing scenes.
///
/// Slightly nicer than writing
/// `SceneSwitch::Replace(Box::new(x))` all the damn time.
pub fn replace<S>(scene: S) -> Self
where
S: Scene<C, Ev> + 'static,
{
SceneSwitch::Replace(Box::new(scene))
}
/// Same as `replace()` but returns SceneSwitch::Push
pub fn push<S>(scene: S) -> Self
where
S: Scene<C, Ev> + 'static,
{
SceneSwitch::Push(Box::new(scene))
}
}
/// A stack of `Scene`'s, together with a context object.
pub struct SceneStack<C, Ev> {
pub world: C,
scenes: Vec<Box<Scene<C, Ev>>>,
}
impl<C, Ev> SceneStack<C, Ev> {
pub fn new(_ctx: &mut ggez::Context, global_state: C) -> Self {
Self {
world: global_state,
scenes: Vec::new(),
}
}
/// Add a new scene to the top of the stack.
pub fn push(&mut self, scene: Box<Scene<C, Ev>>) {
self.scenes.push(scene)
}
/// Remove the top scene from the stack and returns it;
/// panics if there is none.
pub fn pop(&mut self) -> Box<Scene<C, Ev>> {
self.scenes
.pop()
.expect("ERROR: Popped an empty scene stack.")
}
/// Returns the current scene; panics if there is none.
pub fn current(&self) -> &Scene<C, Ev> {
&**self
.scenes
.last()
.expect("ERROR: Tried to get current scene of an empty scene stack.")
}
/// Executes the given SceneSwitch command; if it is a pop or replace
/// it returns `Some(old_scene)`, otherwise `None`
pub fn switch(&mut self, next_scene: SceneSwitch<C, Ev>) -> Option<Box<Scene<C, Ev>>> {
match next_scene {
SceneSwitch::None => None,
SceneSwitch::Pop => {
let s = self.pop();
Some(s)
}
SceneSwitch::Push(s) => {
self.push(s);
None
}
SceneSwitch::Replace(s) => {
let old_scene = self.pop();
self.push(s);
Some(old_scene)
}
}
}
// These functions must be on the SceneStack because otherwise
// if you try to get the current scene and the world to call
// update() on the current scene it causes a double-borrow. :/
pub fn update(&mut self, ctx: &mut ggez::Context) {
let next_scene = {
let current_scene = &mut **self
.scenes
.last_mut()
.expect("Tried to update empty scene stack");
current_scene.update(&mut self.world, ctx)
};
self.switch(next_scene);
}
/// We walk down the scene stack until we find a scene where we aren't
/// supposed to draw the previous one, then draw them from the bottom up.
///
/// This allows for layering GUI's and such.
fn draw_scenes(scenes: &mut [Box<Scene<C, Ev>>], world: &mut C, ctx: &mut ggez::Context) {
assert!(scenes.len() > 0);
if let Some((current, rest)) = scenes.split_last_mut() {
if current.draw_previous() {
SceneStack::draw_scenes(rest, world, ctx);
}
current
.draw(world, ctx)
.expect("I would hope drawing a scene never fails!");
}
}
/// Draw the current scene.
pub fn draw(&mut self, ctx: &mut ggez::Context) {
SceneStack::draw_scenes(&mut self.scenes, &mut self.world, ctx)
}
/// Feeds the given input event to the current scene.
pub fn input(&mut self, event: Ev, started: bool) |
}
#[cfg(test)]
mod tests {
use super::*;
struct Thing {
scenes: Vec<SceneStack<u32, u32>>,
}
#[test]
fn test1() {
let x = Thing { scenes: vec![] };
}
}
| {
let current_scene = &mut **self
.scenes
.last_mut()
.expect("Tried to do input for empty scene stack");
current_scene.input(&mut self.world, event, started);
} | identifier_body |
scene.rs | //! The Scene system is basically for transitioning between
//! *completely* different states that have entirely different game
//! loops but which all share a state. It operates as a stack, with new
//! scenes getting pushed to the stack (while the old ones stay in
//! memory unchanged). Apparently this is basically a push-down automata.
//!
//! Also there's no reason you can't have a Scene contain its own
//! Scene subsystem to do its own indirection. With a different state
//! type, as well! What fun! Though whether you want to go that deep
//! down the rabbit-hole is up to you. I haven't found it necessary
//! yet.
//!
//! This is basically identical in concept to the Amethyst engine's scene
//! system, the only difference is the details of how the pieces are put
//! together.
use ggez;
/// A command to change to a new scene, either by pushing a new one,
/// popping one or replacing the current scene (pop and then push).
pub enum SceneSwitch<C, Ev> {
None,
Push(Box<Scene<C, Ev>>),
Replace(Box<Scene<C, Ev>>),
Pop,
}
/// A trait for you to implement on a scene.
/// Defines the callbacks the scene uses:
/// a common context type `C`, and an input event type `Ev`.
pub trait Scene<C, Ev> {
fn update(&mut self, gameworld: &mut C, ctx: &mut ggez::Context) -> SceneSwitch<C, Ev>;
fn draw(&mut self, gameworld: &mut C, ctx: &mut ggez::Context) -> ggez::GameResult<()>;
fn input(&mut self, gameworld: &mut C, event: Ev, started: bool);
/// Only used for human-readable convenience (or not at all, tbh)
fn name(&self) -> &str;
/// This returns whether or not to draw the next scene down on the
/// stack as well; this is useful for layers or GUI stuff that
/// only partially covers the screen.
fn draw_previous(&self) -> bool {
false
}
}
impl<C, Ev> SceneSwitch<C, Ev> {
/// Convenient shortcut function for boxing scenes.
///
/// Slightly nicer than writing
/// `SceneSwitch::Replace(Box::new(x))` all the damn time.
pub fn replace<S>(scene: S) -> Self
where
S: Scene<C, Ev> + 'static,
{
SceneSwitch::Replace(Box::new(scene))
}
/// Same as `replace()` but returns SceneSwitch::Push
pub fn push<S>(scene: S) -> Self
where
S: Scene<C, Ev> + 'static,
{
SceneSwitch::Push(Box::new(scene))
}
}
/// A stack of `Scene`'s, together with a context object.
pub struct SceneStack<C, Ev> {
pub world: C,
scenes: Vec<Box<Scene<C, Ev>>>,
}
impl<C, Ev> SceneStack<C, Ev> {
pub fn new(_ctx: &mut ggez::Context, global_state: C) -> Self {
Self {
world: global_state,
scenes: Vec::new(),
}
}
/// Add a new scene to the top of the stack.
pub fn push(&mut self, scene: Box<Scene<C, Ev>>) {
self.scenes.push(scene)
}
/// Remove the top scene from the stack and returns it;
/// panics if there is none.
pub fn pop(&mut self) -> Box<Scene<C, Ev>> {
self.scenes
.pop()
.expect("ERROR: Popped an empty scene stack.")
}
/// Returns the current scene; panics if there is none.
pub fn current(&self) -> &Scene<C, Ev> {
&**self
.scenes
.last()
.expect("ERROR: Tried to get current scene of an empty scene stack.")
}
/// Executes the given SceneSwitch command; if it is a pop or replace
/// it returns `Some(old_scene)`, otherwise `None`
pub fn switch(&mut self, next_scene: SceneSwitch<C, Ev>) -> Option<Box<Scene<C, Ev>>> {
match next_scene {
SceneSwitch::None => None,
SceneSwitch::Pop => {
let s = self.pop();
Some(s)
}
SceneSwitch::Push(s) => {
self.push(s);
None
}
SceneSwitch::Replace(s) => |
}
}
// These functions must be on the SceneStack because otherwise
// if you try to get the current scene and the world to call
// update() on the current scene it causes a double-borrow. :/
pub fn update(&mut self, ctx: &mut ggez::Context) {
let next_scene = {
let current_scene = &mut **self
.scenes
.last_mut()
.expect("Tried to update empty scene stack");
current_scene.update(&mut self.world, ctx)
};
self.switch(next_scene);
}
/// We walk down the scene stack until we find a scene where we aren't
/// supposed to draw the previous one, then draw them from the bottom up.
///
/// This allows for layering GUI's and such.
fn draw_scenes(scenes: &mut [Box<Scene<C, Ev>>], world: &mut C, ctx: &mut ggez::Context) {
assert!(scenes.len() > 0);
if let Some((current, rest)) = scenes.split_last_mut() {
if current.draw_previous() {
SceneStack::draw_scenes(rest, world, ctx);
}
current
.draw(world, ctx)
.expect("I would hope drawing a scene never fails!");
}
}
/// Draw the current scene.
pub fn draw(&mut self, ctx: &mut ggez::Context) {
SceneStack::draw_scenes(&mut self.scenes, &mut self.world, ctx)
}
/// Feeds the given input event to the current scene.
pub fn input(&mut self, event: Ev, started: bool) {
let current_scene = &mut **self
.scenes
.last_mut()
.expect("Tried to do input for empty scene stack");
current_scene.input(&mut self.world, event, started);
}
}
#[cfg(test)]
mod tests {
use super::*;
struct Thing {
scenes: Vec<SceneStack<u32, u32>>,
}
#[test]
fn test1() {
let x = Thing { scenes: vec![] };
}
}
| {
let old_scene = self.pop();
self.push(s);
Some(old_scene)
} | conditional_block |
scene.rs | //! The Scene system is basically for transitioning between
//! *completely* different states that have entirely different game
//! loops but which all share a state. It operates as a stack, with new
//! scenes getting pushed to the stack (while the old ones stay in
//! memory unchanged). Apparently this is basically a push-down automata.
//!
//! Also there's no reason you can't have a Scene contain its own
//! Scene subsystem to do its own indirection. With a different state
//! type, as well! What fun! Though whether you want to go that deep
//! down the rabbit-hole is up to you. I haven't found it necessary
//! yet.
//!
//! This is basically identical in concept to the Amethyst engine's scene
//! system, the only difference is the details of how the pieces are put
//! together.
use ggez;
/// A command to change to a new scene, either by pushing a new one,
/// popping one or replacing the current scene (pop and then push).
pub enum SceneSwitch<C, Ev> {
None,
Push(Box<Scene<C, Ev>>),
Replace(Box<Scene<C, Ev>>),
Pop,
}
/// A trait for you to implement on a scene.
/// Defines the callbacks the scene uses:
/// a common context type `C`, and an input event type `Ev`.
pub trait Scene<C, Ev> {
fn update(&mut self, gameworld: &mut C, ctx: &mut ggez::Context) -> SceneSwitch<C, Ev>;
fn draw(&mut self, gameworld: &mut C, ctx: &mut ggez::Context) -> ggez::GameResult<()>;
fn input(&mut self, gameworld: &mut C, event: Ev, started: bool);
/// Only used for human-readable convenience (or not at all, tbh)
fn name(&self) -> &str;
/// This returns whether or not to draw the next scene down on the
/// stack as well; this is useful for layers or GUI stuff that
/// only partially covers the screen.
fn draw_previous(&self) -> bool {
false
}
}
impl<C, Ev> SceneSwitch<C, Ev> {
/// Convenient shortcut function for boxing scenes.
///
/// Slightly nicer than writing
/// `SceneSwitch::Replace(Box::new(x))` all the damn time.
pub fn replace<S>(scene: S) -> Self
where
S: Scene<C, Ev> +'static, | pub fn push<S>(scene: S) -> Self
where
S: Scene<C, Ev> + 'static,
{
SceneSwitch::Push(Box::new(scene))
}
}
/// A stack of `Scene`'s, together with a context object.
pub struct SceneStack<C, Ev> {
pub world: C,
scenes: Vec<Box<Scene<C, Ev>>>,
}
impl<C, Ev> SceneStack<C, Ev> {
pub fn new(_ctx: &mut ggez::Context, global_state: C) -> Self {
Self {
world: global_state,
scenes: Vec::new(),
}
}
/// Add a new scene to the top of the stack.
pub fn push(&mut self, scene: Box<Scene<C, Ev>>) {
self.scenes.push(scene)
}
/// Remove the top scene from the stack and returns it;
/// panics if there is none.
pub fn pop(&mut self) -> Box<Scene<C, Ev>> {
self.scenes
.pop()
.expect("ERROR: Popped an empty scene stack.")
}
/// Returns the current scene; panics if there is none.
pub fn current(&self) -> &Scene<C, Ev> {
&**self
.scenes
.last()
.expect("ERROR: Tried to get current scene of an empty scene stack.")
}
/// Executes the given SceneSwitch command; if it is a pop or replace
/// it returns `Some(old_scene)`, otherwise `None`
pub fn switch(&mut self, next_scene: SceneSwitch<C, Ev>) -> Option<Box<Scene<C, Ev>>> {
match next_scene {
SceneSwitch::None => None,
SceneSwitch::Pop => {
let s = self.pop();
Some(s)
}
SceneSwitch::Push(s) => {
self.push(s);
None
}
SceneSwitch::Replace(s) => {
let old_scene = self.pop();
self.push(s);
Some(old_scene)
}
}
}
// These functions must be on the SceneStack because otherwise
// if you try to get the current scene and the world to call
// update() on the current scene it causes a double-borrow. :/
pub fn update(&mut self, ctx: &mut ggez::Context) {
let next_scene = {
let current_scene = &mut **self
.scenes
.last_mut()
.expect("Tried to update empty scene stack");
current_scene.update(&mut self.world, ctx)
};
self.switch(next_scene);
}
/// We walk down the scene stack until we find a scene where we aren't
/// supposed to draw the previous one, then draw them from the bottom up.
///
/// This allows for layering GUI's and such.
fn draw_scenes(scenes: &mut [Box<Scene<C, Ev>>], world: &mut C, ctx: &mut ggez::Context) {
assert!(scenes.len() > 0);
if let Some((current, rest)) = scenes.split_last_mut() {
if current.draw_previous() {
SceneStack::draw_scenes(rest, world, ctx);
}
current
.draw(world, ctx)
.expect("I would hope drawing a scene never fails!");
}
}
/// Draw the current scene.
pub fn draw(&mut self, ctx: &mut ggez::Context) {
SceneStack::draw_scenes(&mut self.scenes, &mut self.world, ctx)
}
/// Feeds the given input event to the current scene.
pub fn input(&mut self, event: Ev, started: bool) {
let current_scene = &mut **self
.scenes
.last_mut()
.expect("Tried to do input for empty scene stack");
current_scene.input(&mut self.world, event, started);
}
}
#[cfg(test)]
mod tests {
use super::*;
struct Thing {
scenes: Vec<SceneStack<u32, u32>>,
}
#[test]
fn test1() {
let x = Thing { scenes: vec![] };
}
} | {
SceneSwitch::Replace(Box::new(scene))
}
/// Same as `replace()` but returns SceneSwitch::Push | random_line_split |
scene.rs | //! The Scene system is basically for transitioning between
//! *completely* different states that have entirely different game
//! loops but which all share a state. It operates as a stack, with new
//! scenes getting pushed to the stack (while the old ones stay in
//! memory unchanged). Apparently this is basically a push-down automata.
//!
//! Also there's no reason you can't have a Scene contain its own
//! Scene subsystem to do its own indirection. With a different state
//! type, as well! What fun! Though whether you want to go that deep
//! down the rabbit-hole is up to you. I haven't found it necessary
//! yet.
//!
//! This is basically identical in concept to the Amethyst engine's scene
//! system, the only difference is the details of how the pieces are put
//! together.
use ggez;
/// A command to change to a new scene, either by pushing a new one,
/// popping one or replacing the current scene (pop and then push).
pub enum SceneSwitch<C, Ev> {
None,
Push(Box<Scene<C, Ev>>),
Replace(Box<Scene<C, Ev>>),
Pop,
}
/// A trait for you to implement on a scene.
/// Defines the callbacks the scene uses:
/// a common context type `C`, and an input event type `Ev`.
pub trait Scene<C, Ev> {
fn update(&mut self, gameworld: &mut C, ctx: &mut ggez::Context) -> SceneSwitch<C, Ev>;
fn draw(&mut self, gameworld: &mut C, ctx: &mut ggez::Context) -> ggez::GameResult<()>;
fn input(&mut self, gameworld: &mut C, event: Ev, started: bool);
/// Only used for human-readable convenience (or not at all, tbh)
fn name(&self) -> &str;
/// This returns whether or not to draw the next scene down on the
/// stack as well; this is useful for layers or GUI stuff that
/// only partially covers the screen.
fn draw_previous(&self) -> bool {
false
}
}
impl<C, Ev> SceneSwitch<C, Ev> {
/// Convenient shortcut function for boxing scenes.
///
/// Slightly nicer than writing
/// `SceneSwitch::Replace(Box::new(x))` all the damn time.
pub fn replace<S>(scene: S) -> Self
where
S: Scene<C, Ev> + 'static,
{
SceneSwitch::Replace(Box::new(scene))
}
/// Same as `replace()` but returns SceneSwitch::Push
pub fn push<S>(scene: S) -> Self
where
S: Scene<C, Ev> + 'static,
{
SceneSwitch::Push(Box::new(scene))
}
}
/// A stack of `Scene`'s, together with a context object.
pub struct SceneStack<C, Ev> {
pub world: C,
scenes: Vec<Box<Scene<C, Ev>>>,
}
impl<C, Ev> SceneStack<C, Ev> {
pub fn new(_ctx: &mut ggez::Context, global_state: C) -> Self {
Self {
world: global_state,
scenes: Vec::new(),
}
}
/// Add a new scene to the top of the stack.
pub fn push(&mut self, scene: Box<Scene<C, Ev>>) {
self.scenes.push(scene)
}
/// Remove the top scene from the stack and returns it;
/// panics if there is none.
pub fn pop(&mut self) -> Box<Scene<C, Ev>> {
self.scenes
.pop()
.expect("ERROR: Popped an empty scene stack.")
}
/// Returns the current scene; panics if there is none.
pub fn current(&self) -> &Scene<C, Ev> {
&**self
.scenes
.last()
.expect("ERROR: Tried to get current scene of an empty scene stack.")
}
/// Executes the given SceneSwitch command; if it is a pop or replace
/// it returns `Some(old_scene)`, otherwise `None`
pub fn | (&mut self, next_scene: SceneSwitch<C, Ev>) -> Option<Box<Scene<C, Ev>>> {
match next_scene {
SceneSwitch::None => None,
SceneSwitch::Pop => {
let s = self.pop();
Some(s)
}
SceneSwitch::Push(s) => {
self.push(s);
None
}
SceneSwitch::Replace(s) => {
let old_scene = self.pop();
self.push(s);
Some(old_scene)
}
}
}
// These functions must be on the SceneStack because otherwise
// if you try to get the current scene and the world to call
// update() on the current scene it causes a double-borrow. :/
pub fn update(&mut self, ctx: &mut ggez::Context) {
let next_scene = {
let current_scene = &mut **self
.scenes
.last_mut()
.expect("Tried to update empty scene stack");
current_scene.update(&mut self.world, ctx)
};
self.switch(next_scene);
}
/// We walk down the scene stack until we find a scene where we aren't
/// supposed to draw the previous one, then draw them from the bottom up.
///
/// This allows for layering GUI's and such.
fn draw_scenes(scenes: &mut [Box<Scene<C, Ev>>], world: &mut C, ctx: &mut ggez::Context) {
assert!(scenes.len() > 0);
if let Some((current, rest)) = scenes.split_last_mut() {
if current.draw_previous() {
SceneStack::draw_scenes(rest, world, ctx);
}
current
.draw(world, ctx)
.expect("I would hope drawing a scene never fails!");
}
}
/// Draw the current scene.
pub fn draw(&mut self, ctx: &mut ggez::Context) {
SceneStack::draw_scenes(&mut self.scenes, &mut self.world, ctx)
}
/// Feeds the given input event to the current scene.
pub fn input(&mut self, event: Ev, started: bool) {
let current_scene = &mut **self
.scenes
.last_mut()
.expect("Tried to do input for empty scene stack");
current_scene.input(&mut self.world, event, started);
}
}
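// Example (added for illustration; not part of the original module): wiring the
// pieces together. This uses only the `SceneStack` API defined above plus the
// hypothetical `MyWorld`/`MyEvent`/`PauseScene` types from the sketch further up;
// `ctx` is assumed to come from the usual ggez setup.
fn _scene_stack_usage_example(ctx: &mut ggez::Context) {
    let world = MyWorld { paused: true };
    let mut stack: SceneStack<MyWorld, MyEvent> = SceneStack::new(ctx, world);
    stack.push(Box::new(PauseScene));
    // A game loop would forward these calls every frame / on every input event:
    stack.update(ctx);
    stack.draw(ctx);
    stack.input(MyEvent, true);
}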
#[cfg(test)]
mod tests {
use super::*;
struct Thing {
scenes: Vec<SceneStack<u32, u32>>,
}
#[test]
fn test1() {
let x = Thing { scenes: vec![] };
}
}
| switch | identifier_name |
treiber.rs | //! Treiber stacks.
use std::sync::atomic::{self, AtomicPtr};
use std::marker::PhantomData;
use std::ptr;
use {Guard, add_garbage_box};
/// A Treiber stack.
///
/// Treiber stacks are one way to implement a concurrent LIFO stack.
///
/// Treiber stacks build on linked lists. They are lock-free and non-blocking. They can be compared
/// to transactional memory in that they retry an operation if another thread changed the stack in
/// the meantime.
///
/// The ABA problem is of course addressed through the API of this crate.
pub struct Treiber<T> {
/// The head node.
head: AtomicPtr<Node<T>>,
/// Make the `Sync` and `Send` (and other OIBITs) transitive.
_marker: PhantomData<T>,
}
impl<T> Treiber<T> {
/// Create a new, empty Treiber stack.
pub fn new() -> Treiber<T> {
Treiber {
head: AtomicPtr::default(),
_marker: PhantomData,
}
}
/// Pop an item from the stack.
// TODO: Change this return type.
pub fn pop(&self) -> Option<Guard<T>> {
// TODO: Use `catch {}` here when it lands.
// Read the head snapshot.
let mut snapshot = Guard::maybe_new(|| unsafe {
self.head.load(atomic::Ordering::Acquire).as_ref()
});
// Unless the head snapshot is `None`, try to replace it with the tail.
while let Some(old) = snapshot {
// Attempt to replace the head with the tail of the head.
snapshot = Guard::maybe_new(|| unsafe {
self.head.compare_and_swap(
old.as_ptr() as *mut _,
old.next as *mut Node<T>,
atomic::Ordering::Release,
).as_ref()
});
            // If it matches, we are done as the previous head node was replaced by the tail, popping
// the top element. The element we return is the one carried by the previous head.
if let Some(ref new) = snapshot {
if new.as_ptr() == old.as_ptr() {
// As we overwrote the old head (the CAS was successful), we must queue its
// deletion.
unsafe { add_garbage_box(old.as_ptr()); }
// Map the guard to refer the item.
return Some(old.map(|x| &x.item));
}
} else {
// Short-circuit.
break;
}
}
// As the head was empty, there is nothing to pop.
None
}
/// Push an item to the stack.
pub fn push(&self, item: T)
    where T: 'static {
// Load the head snapshot.
let mut snapshot = Guard::maybe_new(|| unsafe {
self.head.load(atomic::Ordering::Relaxed).as_ref()
});
// TODO: Use `catch {}` here when it lands.
// Construct a node, which will be the new head.
let mut node = Box::into_raw(Box::new(Node {
item: item,
// Placeholder; we will replace it with an actual value in the loop.
next: ptr::null_mut(),
}));
loop {
// Construct the next-pointer of the new node from the head snapshot.
let next = snapshot.map_or(ptr::null_mut(), |x| x.as_ptr() as *mut _);
unsafe { (*node).next = next; }
// CAS from the read pointer (that is, the one we placed as `node.next`) to the new
// head.
match Guard::maybe_new(|| unsafe {
// TODO: This should be something that ignores the guard creation when the CAS
// succeeds, because it's expensive to do and not used anyway. It should be easy
// enough to implement, but I am struggling to come up with a good name for the
// method.
self.head.compare_and_swap(next, node, atomic::Ordering::Release).as_ref()
}) {
// If it succeeds (that is, the pointers matched and the CAS ran), the item has
// been pushed.
Some(ref new) if new.as_ptr() == next => break,
None if next.is_null() => break,
// If it fails, we will retry the CAS with updated values.
new => snapshot = new,
}
}
}
}
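// Example (added for illustration; not part of the original module): basic use of
// the stack from several threads, relying only on the `push`/`pop` API above. The
// thread count and the values pushed are arbitrary; this mirrors what the tests
// below do.
fn _treiber_usage_example() {
    use std::sync::Arc;
    use std::thread;
    let stack = Arc::new(Treiber::new());
    let handles: Vec<_> = (0..4)
        .map(|n| {
            let s = stack.clone();
            thread::spawn(move || s.push(n))
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    // `pop` hands back a `Guard<T>` that dereferences to the popped item.
    while let Some(guard) = stack.pop() {
        let _value: i32 = *guard;
    }
}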
impl<T> Drop for Treiber<T> {
fn drop(&mut self) {
// Due to the nature of Treiber stacks, there are no active guards of things within the
// structure. They're all gone, thus we can safely mess with the inner structure.
unsafe {
let ptr = *self.head.get_mut();
            if !ptr.is_null() {
// Call destructors on the stack.
(*ptr).destroy();
// Finally deallocate the pointer itself.
// TODO: Figure out if it is sound if this destructor panics.
drop(Box::from_raw(ptr));
}
}
}
}
/// A node in the stack.
struct Node<T> {
/// The data this node holds.
item: T,
/// The next node.
next: *mut Node<T>,
}
impl<T> Node<T> {
    /// Destroy this node and every node reachable through its `next` pointer.
///
/// This doesn't call the destructor on `T`.
///
/// # Safety
///
/// As this can be called multiple times, it is marked unsafe.
unsafe fn destroy(&mut self) {
// FIXME: Since this is recursive (and although it is likely optimized out), there might be
// cases where this leads to stack overflow, given correct compilation flags and
// sufficiently many elements.
// Recursively drop the next node, if it exists.
        if !self.next.is_null() {
// Recurse to the next node.
(*self.next).destroy();
            // Now that all of the children of the next node have been dropped, drop the node
// itself.
drop(Box::from_raw(self.next as *mut Node<T>));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
#[derive(Clone)]
struct Dropper {
d: Arc<AtomicUsize>,
}
impl Drop for Dropper {
fn drop(&mut self) {
self.d.fetch_add(1, atomic::Ordering::Relaxed);
}
}
#[test]
fn empty() {
for _ in 0..1000 {
let b = Box::new(20);
Treiber::<u8>::new();
assert_eq!(*b, 20);
}
}
#[test]
fn just_push() {
let stack = Treiber::new();
stack.push(1);
stack.push(2);
stack.push(3);
drop(stack);
}
#[test]
fn simple1() {
let stack = Treiber::new();
stack.push(1);
stack.push(200);
stack.push(44);
assert_eq!(*stack.pop().unwrap(), 44);
assert_eq!(*stack.pop().unwrap(), 200);
assert_eq!(*stack.pop().unwrap(), 1);
assert!(stack.pop().is_none());
::gc();
}
#[test]
fn simple2() {
let stack = Treiber::new();
for _ in 0..16 {
stack.push(1);
stack.push(200);
stack.push(44);
assert_eq!(*stack.pop().unwrap(), 44);
assert_eq!(*stack.pop().unwrap(), 200);
stack.push(20000);
assert_eq!(*stack.pop().unwrap(), 20000);
assert_eq!(*stack.pop().unwrap(), 1);
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
}
::gc();
}
#[test]
fn simple3() {
let stack = Treiber::new();
for i in 0..10000 {
stack.push(i);
}
for i in (0..10000).rev() {
assert_eq!(*stack.pop().unwrap(), i);
}
for i in 0..10000 {
stack.push(i);
}
for i in (0..10000).rev() {
assert_eq!(*stack.pop().unwrap(), i);
}
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
}
#[test]
fn push_pop() {
let stack = Arc::new(Treiber::new());
let mut j = Vec::new();
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..1_000_000 {
s.push(23);
assert_eq!(*s.pop().unwrap(), 23);
}
}));
}
for i in j {
i.join().unwrap();
}
}
#[test]
fn increment() | for i in j {
i.join().unwrap();
}
assert_eq!(*stack.pop().unwrap(), 16 * 1000 * 1001 / 2);
}
#[test]
fn sum() {
let stack = Arc::new(Treiber::<i64>::new());
let mut j = Vec::new();
for _ in 0..1000 {
stack.push(10);
}
// We preserve the sum of the stack's elements.
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..100000 {
loop {
if let Some(a) = s.pop() {
loop {
if let Some(b) = s.pop() {
s.push(*a + 1);
s.push(*b - 1);
break;
}
}
break;
}
}
}
}));
}
for i in j {
i.join().unwrap();
}
let mut sum = 0;
while let Some(x) = stack.pop() {
sum += *x;
}
assert_eq!(sum, 10000);
}
#[test]
fn drop1() {
let drops = Arc::new(AtomicUsize::default());
let stack = Arc::new(Treiber::new());
let d = Dropper {
d: drops.clone(),
};
let mut j = Vec::new();
for _ in 0..16 {
let d = d.clone();
let stack = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..20 {
stack.push(d.clone());
}
stack.pop();
stack.pop();
}))
}
for i in j {
i.join().unwrap();
}
::gc();
// The 16 are for the `d` variable in the loop above.
assert_eq!(drops.load(atomic::Ordering::Relaxed), 32 + 16);
// Drop the last arc.
drop(stack);
::gc();
assert_eq!(drops.load(atomic::Ordering::Relaxed), 20 * 16 + 16);
}
#[test]
#[should_panic]
fn panic_in_dtor() {
struct A;
impl Drop for A {
fn drop(&mut self) {
panic!();
}
}
let stack = Treiber::new();
stack.push(Box::new(A));
stack.push(Box::new(A));
stack.push(Box::new(A));
}
}
| {
let stack = Arc::new(Treiber::<u64>::new());
stack.push(0);
let mut j = Vec::new();
// 16 times, we add the numbers from 0 to 1000 to the only element in the stack.
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for n in 0..1001 {
loop {
if let Some(x) = s.pop() {
s.push(*x + n);
break;
}
}
}
}));
}
| identifier_body |
treiber.rs | //! Treiber stacks.
use std::sync::atomic::{self, AtomicPtr};
use std::marker::PhantomData;
use std::ptr;
use {Guard, add_garbage_box};
/// A Treiber stack.
///
/// Treiber stacks are one way to implement a concurrent LIFO stack.
///
/// Treiber stacks build on linked lists. They are lock-free and non-blocking. They can be compared
/// to transactional memory in that they retry an operation if another thread changed the stack in
/// the meantime.
///
/// The ABA problem is of course addressed through the API of this crate.
pub struct Treiber<T> {
/// The head node.
head: AtomicPtr<Node<T>>,
/// Make the `Sync` and `Send` (and other OIBITs) transitive.
_marker: PhantomData<T>,
}
impl<T> Treiber<T> {
/// Create a new, empty Treiber stack.
pub fn new() -> Treiber<T> {
Treiber {
head: AtomicPtr::default(),
_marker: PhantomData,
}
}
/// Pop an item from the stack.
// TODO: Change this return type.
pub fn pop(&self) -> Option<Guard<T>> {
// TODO: Use `catch {}` here when it lands.
// Read the head snapshot.
let mut snapshot = Guard::maybe_new(|| unsafe {
self.head.load(atomic::Ordering::Acquire).as_ref()
});
// Unless the head snapshot is `None`, try to replace it with the tail.
while let Some(old) = snapshot {
// Attempt to replace the head with the tail of the head.
snapshot = Guard::maybe_new(|| unsafe {
self.head.compare_and_swap(
old.as_ptr() as *mut _,
old.next as *mut Node<T>,
atomic::Ordering::Release,
).as_ref()
});
            // If it matches, we are done as the previous head node was replaced by the tail, popping
// the top element. The element we return is the one carried by the previous head.
if let Some(ref new) = snapshot {
if new.as_ptr() == old.as_ptr() {
// As we overwrote the old head (the CAS was successful), we must queue its
// deletion.
unsafe { add_garbage_box(old.as_ptr()); }
// Map the guard to refer the item.
return Some(old.map(|x| &x.item));
}
} else {
// Short-circuit.
break;
}
}
// As the head was empty, there is nothing to pop.
None
}
/// Push an item to the stack.
pub fn push(&self, item: T)
    where T: 'static {
// Load the head snapshot.
let mut snapshot = Guard::maybe_new(|| unsafe {
self.head.load(atomic::Ordering::Relaxed).as_ref()
});
// TODO: Use `catch {}` here when it lands.
// Construct a node, which will be the new head.
let mut node = Box::into_raw(Box::new(Node {
item: item,
// Placeholder; we will replace it with an actual value in the loop.
next: ptr::null_mut(),
}));
loop {
// Construct the next-pointer of the new node from the head snapshot.
let next = snapshot.map_or(ptr::null_mut(), |x| x.as_ptr() as *mut _);
unsafe { (*node).next = next; }
// CAS from the read pointer (that is, the one we placed as `node.next`) to the new
// head.
match Guard::maybe_new(|| unsafe {
// TODO: This should be something that ignores the guard creation when the CAS
// succeeds, because it's expensive to do and not used anyway. It should be easy
// enough to implement, but I am struggling to come up with a good name for the
// method.
self.head.compare_and_swap(next, node, atomic::Ordering::Release).as_ref()
}) {
// If it succeeds (that is, the pointers matched and the CAS ran), the item has
// been pushed.
Some(ref new) if new.as_ptr() == next => break,
None if next.is_null() => break,
// If it fails, we will retry the CAS with updated values.
new => snapshot = new,
}
}
}
}
impl<T> Drop for Treiber<T> {
fn drop(&mut self) {
// Due to the nature of Treiber stacks, there are no active guards of things within the
// structure. They're all gone, thus we can safely mess with the inner structure.
unsafe {
let ptr = *self.head.get_mut();
            if !ptr.is_null() {
// Call destructors on the stack.
(*ptr).destroy();
// Finally deallocate the pointer itself.
// TODO: Figure out if it is sound if this destructor panics.
drop(Box::from_raw(ptr));
}
}
}
}
/// A node in the stack.
struct Node<T> {
/// The data this node holds.
item: T,
/// The next node.
next: *mut Node<T>,
}
impl<T> Node<T> {
    /// Destroy this node and every node reachable through its `next` pointer.
///
/// This doesn't call the destructor on `T`.
///
/// # Safety
///
/// As this can be called multiple times, it is marked unsafe.
unsafe fn destroy(&mut self) {
// FIXME: Since this is recursive (and although it is likely optimized out), there might be
// cases where this leads to stack overflow, given correct compilation flags and
// sufficiently many elements.
// Recursively drop the next node, if it exists.
        if !self.next.is_null() {
// Recurse to the next node.
(*self.next).destroy();
            // Now that all of the children of the next node have been dropped, drop the node
// itself.
drop(Box::from_raw(self.next as *mut Node<T>));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
#[derive(Clone)]
struct Dropper {
d: Arc<AtomicUsize>,
}
impl Drop for Dropper {
fn drop(&mut self) {
self.d.fetch_add(1, atomic::Ordering::Relaxed);
}
}
#[test]
fn empty() {
for _ in 0..1000 {
let b = Box::new(20);
Treiber::<u8>::new();
assert_eq!(*b, 20);
}
}
#[test]
fn just_push() {
let stack = Treiber::new();
stack.push(1);
stack.push(2);
stack.push(3);
drop(stack);
}
| stack.push(200);
stack.push(44);
assert_eq!(*stack.pop().unwrap(), 44);
assert_eq!(*stack.pop().unwrap(), 200);
assert_eq!(*stack.pop().unwrap(), 1);
assert!(stack.pop().is_none());
::gc();
}
#[test]
fn simple2() {
let stack = Treiber::new();
for _ in 0..16 {
stack.push(1);
stack.push(200);
stack.push(44);
assert_eq!(*stack.pop().unwrap(), 44);
assert_eq!(*stack.pop().unwrap(), 200);
stack.push(20000);
assert_eq!(*stack.pop().unwrap(), 20000);
assert_eq!(*stack.pop().unwrap(), 1);
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
}
::gc();
}
#[test]
fn simple3() {
let stack = Treiber::new();
for i in 0..10000 {
stack.push(i);
}
for i in (0..10000).rev() {
assert_eq!(*stack.pop().unwrap(), i);
}
for i in 0..10000 {
stack.push(i);
}
for i in (0..10000).rev() {
assert_eq!(*stack.pop().unwrap(), i);
}
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
}
#[test]
fn push_pop() {
let stack = Arc::new(Treiber::new());
let mut j = Vec::new();
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..1_000_000 {
s.push(23);
assert_eq!(*s.pop().unwrap(), 23);
}
}));
}
for i in j {
i.join().unwrap();
}
}
#[test]
fn increment() {
let stack = Arc::new(Treiber::<u64>::new());
stack.push(0);
let mut j = Vec::new();
// 16 times, we add the numbers from 0 to 1000 to the only element in the stack.
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for n in 0..1001 {
loop {
if let Some(x) = s.pop() {
s.push(*x + n);
break;
}
}
}
}));
}
for i in j {
i.join().unwrap();
}
assert_eq!(*stack.pop().unwrap(), 16 * 1000 * 1001 / 2);
}
#[test]
fn sum() {
let stack = Arc::new(Treiber::<i64>::new());
let mut j = Vec::new();
for _ in 0..1000 {
stack.push(10);
}
// We preserve the sum of the stack's elements.
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..100000 {
loop {
if let Some(a) = s.pop() {
loop {
if let Some(b) = s.pop() {
s.push(*a + 1);
s.push(*b - 1);
break;
}
}
break;
}
}
}
}));
}
for i in j {
i.join().unwrap();
}
let mut sum = 0;
while let Some(x) = stack.pop() {
sum += *x;
}
assert_eq!(sum, 10000);
}
#[test]
fn drop1() {
let drops = Arc::new(AtomicUsize::default());
let stack = Arc::new(Treiber::new());
let d = Dropper {
d: drops.clone(),
};
let mut j = Vec::new();
for _ in 0..16 {
let d = d.clone();
let stack = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..20 {
stack.push(d.clone());
}
stack.pop();
stack.pop();
}))
}
for i in j {
i.join().unwrap();
}
::gc();
// The 16 are for the `d` variable in the loop above.
assert_eq!(drops.load(atomic::Ordering::Relaxed), 32 + 16);
// Drop the last arc.
drop(stack);
::gc();
assert_eq!(drops.load(atomic::Ordering::Relaxed), 20 * 16 + 16);
}
#[test]
#[should_panic]
fn panic_in_dtor() {
struct A;
impl Drop for A {
fn drop(&mut self) {
panic!();
}
}
let stack = Treiber::new();
stack.push(Box::new(A));
stack.push(Box::new(A));
stack.push(Box::new(A));
}
} | #[test]
fn simple1() {
let stack = Treiber::new();
stack.push(1); | random_line_split |
treiber.rs | //! Treiber stacks.
use std::sync::atomic::{self, AtomicPtr};
use std::marker::PhantomData;
use std::ptr;
use {Guard, add_garbage_box};
/// A Treiber stack.
///
/// Treiber stacks are one way to implement a concurrent LIFO stack.
///
/// Treiber stacks build on linked lists. They are lock-free and non-blocking. They can be compared
/// to transactional memory in that they retry an operation if another thread changed the stack in
/// the meantime.
///
/// The ABA problem is of course addressed through the API of this crate.
pub struct Treiber<T> {
/// The head node.
head: AtomicPtr<Node<T>>,
/// Make the `Sync` and `Send` (and other OIBITs) transitive.
_marker: PhantomData<T>,
}
impl<T> Treiber<T> {
/// Create a new, empty Treiber stack.
pub fn new() -> Treiber<T> {
Treiber {
head: AtomicPtr::default(),
_marker: PhantomData,
}
}
/// Pop an item from the stack.
// TODO: Change this return type.
pub fn pop(&self) -> Option<Guard<T>> {
// TODO: Use `catch {}` here when it lands.
// Read the head snapshot.
let mut snapshot = Guard::maybe_new(|| unsafe {
self.head.load(atomic::Ordering::Acquire).as_ref()
});
// Unless the head snapshot is `None`, try to replace it with the tail.
while let Some(old) = snapshot {
// Attempt to replace the head with the tail of the head.
snapshot = Guard::maybe_new(|| unsafe {
self.head.compare_and_swap(
old.as_ptr() as *mut _,
old.next as *mut Node<T>,
atomic::Ordering::Release,
).as_ref()
});
            // If it matches, we are done as the previous head node was replaced by the tail, popping
// the top element. The element we return is the one carried by the previous head.
if let Some(ref new) = snapshot {
if new.as_ptr() == old.as_ptr() {
// As we overwrote the old head (the CAS was successful), we must queue its
// deletion.
unsafe { add_garbage_box(old.as_ptr()); }
// Map the guard to refer the item.
return Some(old.map(|x| &x.item));
}
} else {
// Short-circuit.
break;
}
}
// As the head was empty, there is nothing to pop.
None
}
/// Push an item to the stack.
pub fn push(&self, item: T)
    where T: 'static {
// Load the head snapshot.
let mut snapshot = Guard::maybe_new(|| unsafe {
self.head.load(atomic::Ordering::Relaxed).as_ref()
});
// TODO: Use `catch {}` here when it lands.
// Construct a node, which will be the new head.
let mut node = Box::into_raw(Box::new(Node {
item: item,
// Placeholder; we will replace it with an actual value in the loop.
next: ptr::null_mut(),
}));
loop {
// Construct the next-pointer of the new node from the head snapshot.
let next = snapshot.map_or(ptr::null_mut(), |x| x.as_ptr() as *mut _);
unsafe { (*node).next = next; }
// CAS from the read pointer (that is, the one we placed as `node.next`) to the new
// head.
match Guard::maybe_new(|| unsafe {
// TODO: This should be something that ignores the guard creation when the CAS
// succeeds, because it's expensive to do and not used anyway. It should be easy
// enough to implement, but I am struggling to come up with a good name for the
// method.
self.head.compare_and_swap(next, node, atomic::Ordering::Release).as_ref()
}) {
// If it succeeds (that is, the pointers matched and the CAS ran), the item has
// been pushed.
Some(ref new) if new.as_ptr() == next => break,
None if next.is_null() => break,
// If it fails, we will retry the CAS with updated values.
new => snapshot = new,
}
}
}
}
impl<T> Drop for Treiber<T> {
fn drop(&mut self) {
// Due to the nature of Treiber stacks, there are no active guards of things within the
// structure. They're all gone, thus we can safely mess with the inner structure.
unsafe {
let ptr = *self.head.get_mut();
            if !ptr.is_null() {
// Call destructors on the stack.
(*ptr).destroy();
// Finally deallocate the pointer itself.
// TODO: Figure out if it is sound if this destructor panics.
drop(Box::from_raw(ptr));
}
}
}
}
/// A node in the stack.
struct Node<T> {
/// The data this node holds.
item: T,
/// The next node.
next: *mut Node<T>,
}
impl<T> Node<T> {
    /// Destroy this node and every node reachable through its `next` pointer.
///
/// This doesn't call the destructor on `T`.
///
/// # Safety
///
/// As this can be called multiple times, it is marked unsafe.
unsafe fn destroy(&mut self) {
// FIXME: Since this is recursive (and although it is likely optimized out), there might be
// cases where this leads to stack overflow, given correct compilation flags and
// sufficiently many elements.
// Recursively drop the next node, if it exists.
        if !self.next.is_null() {
// Recurse to the next node.
(*self.next).destroy();
            // Now that all of the children of the next node have been dropped, drop the node
// itself.
drop(Box::from_raw(self.next as *mut Node<T>));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
#[derive(Clone)]
struct Dropper {
d: Arc<AtomicUsize>,
}
impl Drop for Dropper {
fn drop(&mut self) {
self.d.fetch_add(1, atomic::Ordering::Relaxed);
}
}
#[test]
fn empty() {
for _ in 0..1000 {
let b = Box::new(20);
Treiber::<u8>::new();
assert_eq!(*b, 20);
}
}
#[test]
fn just_push() {
let stack = Treiber::new();
stack.push(1);
stack.push(2);
stack.push(3);
drop(stack);
}
#[test]
fn simple1() {
let stack = Treiber::new();
stack.push(1);
stack.push(200);
stack.push(44);
assert_eq!(*stack.pop().unwrap(), 44);
assert_eq!(*stack.pop().unwrap(), 200);
assert_eq!(*stack.pop().unwrap(), 1);
assert!(stack.pop().is_none());
::gc();
}
#[test]
fn simple2() {
let stack = Treiber::new();
for _ in 0..16 {
stack.push(1);
stack.push(200);
stack.push(44);
assert_eq!(*stack.pop().unwrap(), 44);
assert_eq!(*stack.pop().unwrap(), 200);
stack.push(20000);
assert_eq!(*stack.pop().unwrap(), 20000);
assert_eq!(*stack.pop().unwrap(), 1);
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
}
::gc();
}
#[test]
fn simple3() {
let stack = Treiber::new();
for i in 0..10000 {
stack.push(i);
}
for i in (0..10000).rev() {
assert_eq!(*stack.pop().unwrap(), i);
}
for i in 0..10000 {
stack.push(i);
}
for i in (0..10000).rev() {
assert_eq!(*stack.pop().unwrap(), i);
}
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
}
#[test]
fn | () {
let stack = Arc::new(Treiber::new());
let mut j = Vec::new();
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..1_000_000 {
s.push(23);
assert_eq!(*s.pop().unwrap(), 23);
}
}));
}
for i in j {
i.join().unwrap();
}
}
#[test]
fn increment() {
let stack = Arc::new(Treiber::<u64>::new());
stack.push(0);
let mut j = Vec::new();
// 16 times, we add the numbers from 0 to 1000 to the only element in the stack.
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for n in 0..1001 {
loop {
if let Some(x) = s.pop() {
s.push(*x + n);
break;
}
}
}
}));
}
for i in j {
i.join().unwrap();
}
assert_eq!(*stack.pop().unwrap(), 16 * 1000 * 1001 / 2);
}
#[test]
fn sum() {
let stack = Arc::new(Treiber::<i64>::new());
let mut j = Vec::new();
for _ in 0..1000 {
stack.push(10);
}
// We preserve the sum of the stack's elements.
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..100000 {
loop {
if let Some(a) = s.pop() {
loop {
if let Some(b) = s.pop() {
s.push(*a + 1);
s.push(*b - 1);
break;
}
}
break;
}
}
}
}));
}
for i in j {
i.join().unwrap();
}
let mut sum = 0;
while let Some(x) = stack.pop() {
sum += *x;
}
assert_eq!(sum, 10000);
}
#[test]
fn drop1() {
let drops = Arc::new(AtomicUsize::default());
let stack = Arc::new(Treiber::new());
let d = Dropper {
d: drops.clone(),
};
let mut j = Vec::new();
for _ in 0..16 {
let d = d.clone();
let stack = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..20 {
stack.push(d.clone());
}
stack.pop();
stack.pop();
}))
}
for i in j {
i.join().unwrap();
}
::gc();
// The 16 are for the `d` variable in the loop above.
assert_eq!(drops.load(atomic::Ordering::Relaxed), 32 + 16);
// Drop the last arc.
drop(stack);
::gc();
assert_eq!(drops.load(atomic::Ordering::Relaxed), 20 * 16 + 16);
}
#[test]
#[should_panic]
fn panic_in_dtor() {
struct A;
impl Drop for A {
fn drop(&mut self) {
panic!();
}
}
let stack = Treiber::new();
stack.push(Box::new(A));
stack.push(Box::new(A));
stack.push(Box::new(A));
}
}
| push_pop | identifier_name |
treiber.rs | //! Treiber stacks.
use std::sync::atomic::{self, AtomicPtr};
use std::marker::PhantomData;
use std::ptr;
use {Guard, add_garbage_box};
/// A Treiber stack.
///
/// Treiber stacks are one way to implement a concurrent LIFO stack.
///
/// Treiber stacks build on linked lists. They are lock-free and non-blocking. They can be compared
/// to transactional memory in that they retry an operation if another thread changed the stack in
/// the meantime.
///
/// The ABA problem is of course addressed through the API of this crate.
pub struct Treiber<T> {
/// The head node.
head: AtomicPtr<Node<T>>,
/// Make the `Sync` and `Send` (and other OIBITs) transitive.
_marker: PhantomData<T>,
}
impl<T> Treiber<T> {
/// Create a new, empty Treiber stack.
pub fn new() -> Treiber<T> {
Treiber {
head: AtomicPtr::default(),
_marker: PhantomData,
}
}
/// Pop an item from the stack.
// TODO: Change this return type.
pub fn pop(&self) -> Option<Guard<T>> {
// TODO: Use `catch {}` here when it lands.
// Read the head snapshot.
let mut snapshot = Guard::maybe_new(|| unsafe {
self.head.load(atomic::Ordering::Acquire).as_ref()
});
// Unless the head snapshot is `None`, try to replace it with the tail.
while let Some(old) = snapshot {
// Attempt to replace the head with the tail of the head.
snapshot = Guard::maybe_new(|| unsafe {
self.head.compare_and_swap(
old.as_ptr() as *mut _,
old.next as *mut Node<T>,
atomic::Ordering::Release,
).as_ref()
});
            // If it matches, we are done as the previous head node was replaced by the tail, popping
// the top element. The element we return is the one carried by the previous head.
if let Some(ref new) = snapshot {
if new.as_ptr() == old.as_ptr() {
// As we overwrote the old head (the CAS was successful), we must queue its
// deletion.
unsafe { add_garbage_box(old.as_ptr()); }
// Map the guard to refer the item.
return Some(old.map(|x| &x.item));
}
} else {
// Short-circuit.
break;
}
}
// As the head was empty, there is nothing to pop.
None
}
/// Push an item to the stack.
pub fn push(&self, item: T)
    where T: 'static {
// Load the head snapshot.
let mut snapshot = Guard::maybe_new(|| unsafe {
self.head.load(atomic::Ordering::Relaxed).as_ref()
});
// TODO: Use `catch {}` here when it lands.
// Construct a node, which will be the new head.
let mut node = Box::into_raw(Box::new(Node {
item: item,
// Placeholder; we will replace it with an actual value in the loop.
next: ptr::null_mut(),
}));
loop {
// Construct the next-pointer of the new node from the head snapshot.
let next = snapshot.map_or(ptr::null_mut(), |x| x.as_ptr() as *mut _);
unsafe { (*node).next = next; }
// CAS from the read pointer (that is, the one we placed as `node.next`) to the new
// head.
match Guard::maybe_new(|| unsafe {
// TODO: This should be something that ignores the guard creation when the CAS
// succeeds, because it's expensive to do and not used anyway. It should be easy
// enough to implement, but I am struggling to come up with a good name for the
// method.
self.head.compare_and_swap(next, node, atomic::Ordering::Release).as_ref()
}) {
// If it succeeds (that is, the pointers matched and the CAS ran), the item has
// been pushed.
Some(ref new) if new.as_ptr() == next => break,
None if next.is_null() => break,
// If it fails, we will retry the CAS with updated values.
new => snapshot = new,
}
}
}
}
impl<T> Drop for Treiber<T> {
fn drop(&mut self) {
// Due to the nature of Treiber stacks, there are no active guards of things within the
// structure. They're all gone, thus we can safely mess with the inner structure.
unsafe {
let ptr = *self.head.get_mut();
            if !ptr.is_null() {
// Call destructors on the stack.
(*ptr).destroy();
// Finally deallocate the pointer itself.
// TODO: Figure out if it is sound if this destructor panics.
drop(Box::from_raw(ptr));
}
}
}
}
/// A node in the stack.
struct Node<T> {
/// The data this node holds.
item: T,
/// The next node.
next: *mut Node<T>,
}
impl<T> Node<T> {
    /// Destroy this node and every node reachable through its `next` pointer.
///
/// This doesn't call the destructor on `T`.
///
/// # Safety
///
/// As this can be called multiple times, it is marked unsafe.
unsafe fn destroy(&mut self) {
// FIXME: Since this is recursive (and although it is likely optimized out), there might be
// cases where this leads to stack overflow, given correct compilation flags and
// sufficiently many elements.
// Recursively drop the next node, if it exists.
        if !self.next.is_null() {
// Recurse to the next node.
(*self.next).destroy();
            // Now that all of the children of the next node have been dropped, drop the node
// itself.
drop(Box::from_raw(self.next as *mut Node<T>));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
#[derive(Clone)]
struct Dropper {
d: Arc<AtomicUsize>,
}
impl Drop for Dropper {
fn drop(&mut self) {
self.d.fetch_add(1, atomic::Ordering::Relaxed);
}
}
#[test]
fn empty() {
for _ in 0..1000 {
let b = Box::new(20);
Treiber::<u8>::new();
assert_eq!(*b, 20);
}
}
#[test]
fn just_push() {
let stack = Treiber::new();
stack.push(1);
stack.push(2);
stack.push(3);
drop(stack);
}
#[test]
fn simple1() {
let stack = Treiber::new();
stack.push(1);
stack.push(200);
stack.push(44);
assert_eq!(*stack.pop().unwrap(), 44);
assert_eq!(*stack.pop().unwrap(), 200);
assert_eq!(*stack.pop().unwrap(), 1);
assert!(stack.pop().is_none());
::gc();
}
#[test]
fn simple2() {
let stack = Treiber::new();
for _ in 0..16 {
stack.push(1);
stack.push(200);
stack.push(44);
assert_eq!(*stack.pop().unwrap(), 44);
assert_eq!(*stack.pop().unwrap(), 200);
stack.push(20000);
assert_eq!(*stack.pop().unwrap(), 20000);
assert_eq!(*stack.pop().unwrap(), 1);
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
}
::gc();
}
#[test]
fn simple3() {
let stack = Treiber::new();
for i in 0..10000 {
stack.push(i);
}
for i in (0..10000).rev() {
assert_eq!(*stack.pop().unwrap(), i);
}
for i in 0..10000 {
stack.push(i);
}
for i in (0..10000).rev() {
assert_eq!(*stack.pop().unwrap(), i);
}
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
assert!(stack.pop().is_none());
}
#[test]
fn push_pop() {
let stack = Arc::new(Treiber::new());
let mut j = Vec::new();
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..1_000_000 {
s.push(23);
assert_eq!(*s.pop().unwrap(), 23);
}
}));
}
for i in j {
i.join().unwrap();
}
}
#[test]
fn increment() {
let stack = Arc::new(Treiber::<u64>::new());
stack.push(0);
let mut j = Vec::new();
// 16 times, we add the numbers from 0 to 1000 to the only element in the stack.
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for n in 0..1001 {
loop {
if let Some(x) = s.pop() {
s.push(*x + n);
break;
}
}
}
}));
}
for i in j {
i.join().unwrap();
}
assert_eq!(*stack.pop().unwrap(), 16 * 1000 * 1001 / 2);
}
#[test]
fn sum() {
let stack = Arc::new(Treiber::<i64>::new());
let mut j = Vec::new();
for _ in 0..1000 {
stack.push(10);
}
// We preserve the sum of the stack's elements.
for _ in 0..16 {
let s = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..100000 {
loop {
if let Some(a) = s.pop() {
loop {
if let Some(b) = s.pop() |
}
break;
}
}
}
}));
}
for i in j {
i.join().unwrap();
}
let mut sum = 0;
while let Some(x) = stack.pop() {
sum += *x;
}
assert_eq!(sum, 10000);
}
#[test]
fn drop1() {
let drops = Arc::new(AtomicUsize::default());
let stack = Arc::new(Treiber::new());
let d = Dropper {
d: drops.clone(),
};
let mut j = Vec::new();
for _ in 0..16 {
let d = d.clone();
let stack = stack.clone();
j.push(thread::spawn(move || {
for _ in 0..20 {
stack.push(d.clone());
}
stack.pop();
stack.pop();
}))
}
for i in j {
i.join().unwrap();
}
::gc();
// The 16 are for the `d` variable in the loop above.
assert_eq!(drops.load(atomic::Ordering::Relaxed), 32 + 16);
// Drop the last arc.
drop(stack);
::gc();
assert_eq!(drops.load(atomic::Ordering::Relaxed), 20 * 16 + 16);
}
#[test]
#[should_panic]
fn panic_in_dtor() {
struct A;
impl Drop for A {
fn drop(&mut self) {
panic!();
}
}
let stack = Treiber::new();
stack.push(Box::new(A));
stack.push(Box::new(A));
stack.push(Box::new(A));
}
}
| {
s.push(*a + 1);
s.push(*b - 1);
break;
} | conditional_block |
main.rs | fn main() {
use std::env;
// these seem to return the same things
for (key, value) in env::vars_os() {
println!("{:?}: {:?}", key, value);
}
for (key, value) in env::vars() {
println!("{:?}: {:?}", key, value);
}
let key = "OS";
match env::var_os(key) {
Some(val) => println!("{}: {:?}", key, val),
None => println!("{} is not defined in the environment.", key)
} | }
match env::var("SWARMIP") {
Ok(val) => println!("{:?}", val),
Err(e) => println!("couldn't interpret {}", e),
}
let data = env::var_os("OS");
println!("{:?}", data);
match env::var("SWARMIP") {
Ok(mediakraken_ip) => {
println!("{:?}", mediakraken_ip);
},
Err(e) => {
println!("couldn't interpret swarm {}", e);
match env::var("HOST_IP") {
Ok(mediakraken_ip) => {
println!("{:?}", mediakraken_ip);
},
Err(e) => {
println!("couldn't interpret host {}", e);
}
}
}
}
} |
match env::var("PATH") {
Ok(val) => println!("{:?}", val),
Err(e) => println!("couldn't interpret {}", e), | random_line_split |
main.rs | fn | () {
use std::env;
// these seem to return the same things
for (key, value) in env::vars_os() {
println!("{:?}: {:?}", key, value);
}
for (key, value) in env::vars() {
println!("{:?}: {:?}", key, value);
}
let key = "OS";
match env::var_os(key) {
Some(val) => println!("{}: {:?}", key, val),
None => println!("{} is not defined in the environment.", key)
}
match env::var("PATH") {
Ok(val) => println!("{:?}", val),
Err(e) => println!("couldn't interpret {}", e),
}
match env::var("SWARMIP") {
Ok(val) => println!("{:?}", val),
Err(e) => println!("couldn't interpret {}", e),
}
let data = env::var_os("OS");
println!("{:?}", data);
match env::var("SWARMIP") {
Ok(mediakraken_ip) => {
println!("{:?}", mediakraken_ip);
},
Err(e) => {
println!("couldn't interpret swarm {}", e);
match env::var("HOST_IP") {
Ok(mediakraken_ip) => {
println!("{:?}", mediakraken_ip);
},
Err(e) => {
println!("couldn't interpret host {}", e);
}
}
}
}
}
| main | identifier_name |
main.rs | fn main() | Err(e) => println!("couldn't interpret {}", e),
}
match env::var("SWARMIP") {
Ok(val) => println!("{:?}", val),
Err(e) => println!("couldn't interpret {}", e),
}
let data = env::var_os("OS");
println!("{:?}", data);
match env::var("SWARMIP") {
Ok(mediakraken_ip) => {
println!("{:?}", mediakraken_ip);
},
Err(e) => {
println!("couldn't interpret swarm {}", e);
match env::var("HOST_IP") {
Ok(mediakraken_ip) => {
println!("{:?}", mediakraken_ip);
},
Err(e) => {
println!("couldn't interpret host {}", e);
}
}
}
}
}
| {
use std::env;
// these seem to return the same things
for (key, value) in env::vars_os() {
println!("{:?}: {:?}", key, value);
}
for (key, value) in env::vars() {
println!("{:?}: {:?}", key, value);
}
let key = "OS";
match env::var_os(key) {
Some(val) => println!("{}: {:?}", key, val),
None => println!("{} is not defined in the environment.", key)
}
match env::var("PATH") {
Ok(val) => println!("{:?}", val), | identifier_body |
workernavigator.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::WorkerNavigatorBinding;
use dom::bindings::codegen::Bindings::WorkerNavigatorBinding::WorkerNavigatorMethods;
use dom::bindings::js::{MutNullableJS, Root};
use dom::bindings::reflector::{DomObject, Reflector, reflect_dom_object};
use dom::bindings::str::DOMString;
use dom::navigatorinfo;
use dom::permissions::Permissions;
use dom::workerglobalscope::WorkerGlobalScope;
use dom_struct::dom_struct;
// https://html.spec.whatwg.org/multipage/#workernavigator
#[dom_struct]
pub struct WorkerNavigator {
reflector_: Reflector,
permissions: MutNullableJS<Permissions>,
}
impl WorkerNavigator {
fn new_inherited() -> WorkerNavigator {
WorkerNavigator {
reflector_: Reflector::new(),
permissions: Default::default(),
}
}
pub fn new(global: &WorkerGlobalScope) -> Root<WorkerNavigator> {
reflect_dom_object(box WorkerNavigator::new_inherited(),
global,
WorkerNavigatorBinding::Wrap)
}
}
impl WorkerNavigatorMethods for WorkerNavigator {
// https://html.spec.whatwg.org/multipage/#dom-navigator-product
fn Product(&self) -> DOMString {
navigatorinfo::Product()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-taintenabled
fn TaintEnabled(&self) -> bool {
navigatorinfo::TaintEnabled()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appname
fn AppName(&self) -> DOMString {
navigatorinfo::AppName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appcodename
fn AppCodeName(&self) -> DOMString {
navigatorinfo::AppCodeName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-platform
fn Platform(&self) -> DOMString {
navigatorinfo::Platform()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-useragent
fn UserAgent(&self) -> DOMString {
navigatorinfo::UserAgent()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appversion
fn AppVersion(&self) -> DOMString {
navigatorinfo::AppVersion()
}
// https://html.spec.whatwg.org/multipage/#navigatorlanguage
fn Language(&self) -> DOMString {
navigatorinfo::Language()
}
// https://w3c.github.io/permissions/#navigator-and-workernavigator-extension
fn Permissions(&self) -> Root<Permissions> { | self.permissions.or_init(|| Permissions::new(&self.global()))
}
} | random_line_split |
|
workernavigator.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::WorkerNavigatorBinding;
use dom::bindings::codegen::Bindings::WorkerNavigatorBinding::WorkerNavigatorMethods;
use dom::bindings::js::{MutNullableJS, Root};
use dom::bindings::reflector::{DomObject, Reflector, reflect_dom_object};
use dom::bindings::str::DOMString;
use dom::navigatorinfo;
use dom::permissions::Permissions;
use dom::workerglobalscope::WorkerGlobalScope;
use dom_struct::dom_struct;
// https://html.spec.whatwg.org/multipage/#workernavigator
#[dom_struct]
pub struct WorkerNavigator {
reflector_: Reflector,
permissions: MutNullableJS<Permissions>,
}
impl WorkerNavigator {
fn new_inherited() -> WorkerNavigator {
WorkerNavigator {
reflector_: Reflector::new(),
permissions: Default::default(),
}
}
pub fn new(global: &WorkerGlobalScope) -> Root<WorkerNavigator> {
reflect_dom_object(box WorkerNavigator::new_inherited(),
global,
WorkerNavigatorBinding::Wrap)
}
}
impl WorkerNavigatorMethods for WorkerNavigator {
// https://html.spec.whatwg.org/multipage/#dom-navigator-product
fn Product(&self) -> DOMString {
navigatorinfo::Product()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-taintenabled
fn TaintEnabled(&self) -> bool {
navigatorinfo::TaintEnabled()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appname
fn AppName(&self) -> DOMString {
navigatorinfo::AppName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appcodename
fn | (&self) -> DOMString {
navigatorinfo::AppCodeName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-platform
fn Platform(&self) -> DOMString {
navigatorinfo::Platform()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-useragent
fn UserAgent(&self) -> DOMString {
navigatorinfo::UserAgent()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appversion
fn AppVersion(&self) -> DOMString {
navigatorinfo::AppVersion()
}
// https://html.spec.whatwg.org/multipage/#navigatorlanguage
fn Language(&self) -> DOMString {
navigatorinfo::Language()
}
// https://w3c.github.io/permissions/#navigator-and-workernavigator-extension
fn Permissions(&self) -> Root<Permissions> {
self.permissions.or_init(|| Permissions::new(&self.global()))
}
}
| AppCodeName | identifier_name |
mem.rs | // mem.rs
// AltOSRust
//
// Created by Daniel Seitz on 12/6/16
/*
#[no_mangle]
pub unsafe extern fn __aeabi_memclr4(dest: *mut u32, mut n: isize) {
while n > 0 {
n -= 1;
*dest.offset(n) = 0;
}
}
#[no_mangle]
// TODO: Implement this, right now we don't do any reallocations, so it should never get called,
// but in the future we might want to do some memory reallocations
pub unsafe extern fn __aeabi_memmove(dest: *mut u8, src: *const u8, len: isize) {
panic!("Don't Reallocate Memory yet!");
//if dest.offset(0) >= src.offset(len)
}
*/
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn memclr() {
let mut block: [u32; 10] = [0xAAAAAAAA; 10];
for i in 0..10 {
assert_eq!(block[i], 0xAAAAAAAA);
}
unsafe { __aeabi_memclr4(block.as_mut_ptr(), 10) }; |
for i in 0..10 {
assert_eq!(block[i], 0x0);
}
}
} | random_line_split |
|
mem.rs | // mem.rs
// AltOSRust
//
// Created by Daniel Seitz on 12/6/16
/*
#[no_mangle]
pub unsafe extern fn __aeabi_memclr4(dest: *mut u32, mut n: isize) {
while n > 0 {
n -= 1;
*dest.offset(n) = 0;
}
}
#[no_mangle]
// TODO: Implement this, right now we don't do any reallocations, so it should never get called,
// but in the future we might want to do some memory reallocations
pub unsafe extern fn __aeabi_memmove(dest: *mut u8, src: *const u8, len: isize) {
panic!("Don't Reallocate Memory yet!");
//if dest.offset(0) >= src.offset(len)
}
*/
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn | () {
let mut block: [u32; 10] = [0xAAAAAAAA; 10];
for i in 0..10 {
assert_eq!(block[i], 0xAAAAAAAA);
}
unsafe { __aeabi_memclr4(block.as_mut_ptr(), 10) };
for i in 0..10 {
assert_eq!(block[i], 0x0);
}
}
}
| memclr | identifier_name |
mem.rs | // mem.rs
// AltOSRust
//
// Created by Daniel Seitz on 12/6/16
/*
#[no_mangle]
pub unsafe extern fn __aeabi_memclr4(dest: *mut u32, mut n: isize) {
while n > 0 {
n -= 1;
*dest.offset(n) = 0;
}
}
#[no_mangle]
// TODO: Implement this, right now we don't do any reallocations, so it should never get called,
// but in the future we might want to do some memory reallocations
pub unsafe extern fn __aeabi_memmove(dest: *mut u8, src: *const u8, len: isize) {
panic!("Don't Reallocate Memory yet!");
//if dest.offset(0) >= src.offset(len)
}
*/
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn memclr() |
}
| {
let mut block: [u32; 10] = [0xAAAAAAAA; 10];
for i in 0..10 {
assert_eq!(block[i], 0xAAAAAAAA);
}
unsafe { __aeabi_memclr4(block.as_mut_ptr(), 10) };
for i in 0..10 {
assert_eq!(block[i], 0x0);
}
} | identifier_body |
struct_with_packing.rs | /* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case, | non_camel_case_types,
non_upper_case_globals
)]
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct a {
pub b: ::std::os::raw::c_char,
pub c: ::std::os::raw::c_short,
}
#[test]
fn bindgen_test_layout_a() {
assert_eq!(
::std::mem::size_of::<a>(),
3usize,
concat!("Size of: ", stringify!(a))
);
assert_eq!(
::std::mem::align_of::<a>(),
1usize,
concat!("Alignment of ", stringify!(a))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<a>())).b as *const _ as usize },
0usize,
concat!("Offset of field: ", stringify!(a), "::", stringify!(b))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<a>())).c as *const _ as usize },
1usize,
concat!("Offset of field: ", stringify!(a), "::", stringify!(c))
);
} | random_line_split |
|
struct_with_packing.rs | /* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct | {
pub b: ::std::os::raw::c_char,
pub c: ::std::os::raw::c_short,
}
#[test]
fn bindgen_test_layout_a() {
assert_eq!(
::std::mem::size_of::<a>(),
3usize,
concat!("Size of: ", stringify!(a))
);
assert_eq!(
::std::mem::align_of::<a>(),
1usize,
concat!("Alignment of ", stringify!(a))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<a>())).b as *const _ as usize },
0usize,
concat!("Offset of field: ", stringify!(a), "::", stringify!(b))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<a>())).c as *const _ as usize },
1usize,
concat!("Offset of field: ", stringify!(a), "::", stringify!(c))
);
}
| a | identifier_name |
logf32.rs | #![feature(core, core_intrinsics, core_float)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::logf32;
use core::num::Float;
use core::f32;
use core::f32::consts::E;
// pub fn logf32(x: f32) -> f32;
#[test]
fn logf32_test1() {
let x: f32 = f32::nan();
let result: f32 = unsafe { logf32(x) };
assert_eq!(result.is_nan(), true);
}
#[test]
fn logf32_test2() {
let x: f32 = f32::infinity();
let result: f32 = unsafe { logf32(x) };
assert_eq!(result, f32::infinity());
}
#[test]
fn logf32_test3() |
#[test]
fn logf32_test4() {
let x: f32 = 1.0;
let result: f32 = unsafe { logf32(x) };
assert_eq!(result, 0.0);
}
#[test]
fn logf32_test5() {
let x: f32 = E;
let result: f32 = unsafe { logf32(x) };
assert_eq!(result, 0.99999994);
}
}
| {
let x: f32 = f32::neg_infinity();
let result: f32 = unsafe { logf32(x) };
assert_eq!(result.is_nan(), true);
} | identifier_body |
logf32.rs | #![feature(core, core_intrinsics, core_float)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::logf32;
use core::num::Float;
use core::f32;
use core::f32::consts::E;
// pub fn logf32(x: f32) -> f32;
#[test]
fn logf32_test1() {
let x: f32 = f32::nan();
let result: f32 = unsafe { logf32(x) };
assert_eq!(result.is_nan(), true);
}
#[test]
fn logf32_test2() {
let x: f32 = f32::infinity();
let result: f32 = unsafe { logf32(x) };
assert_eq!(result, f32::infinity());
}
#[test]
fn logf32_test3() {
let x: f32 = f32::neg_infinity();
let result: f32 = unsafe { logf32(x) };
assert_eq!(result.is_nan(), true);
}
#[test]
fn logf32_test4() {
let x: f32 = 1.0;
let result: f32 = unsafe { logf32(x) };
assert_eq!(result, 0.0);
}
#[test]
fn | () {
let x: f32 = E;
let result: f32 = unsafe { logf32(x) };
assert_eq!(result, 0.99999994);
}
}
| logf32_test5 | identifier_name |
logf32.rs | #![feature(core, core_intrinsics, core_float)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::logf32;
use core::num::Float;
use core::f32;
use core::f32::consts::E;
// pub fn logf32(x: f32) -> f32;
#[test]
fn logf32_test1() {
let x: f32 = f32::nan();
let result: f32 = unsafe { logf32(x) };
assert_eq!(result.is_nan(), true);
}
#[test]
fn logf32_test2() { |
#[test]
fn logf32_test3() {
let x: f32 = f32::neg_infinity();
let result: f32 = unsafe { logf32(x) };
assert_eq!(result.is_nan(), true);
}
#[test]
fn logf32_test4() {
let x: f32 = 1.0;
let result: f32 = unsafe { logf32(x) };
assert_eq!(result, 0.0);
}
#[test]
fn logf32_test5() {
let x: f32 = E;
let result: f32 = unsafe { logf32(x) };
assert_eq!(result, 0.99999994);
}
} | let x: f32 = f32::infinity();
let result: f32 = unsafe { logf32(x) };
assert_eq!(result, f32::infinity());
} | random_line_split |
synom.rs | // Copyright 2018 Syn Developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Parsing interface for parsing a token stream into a syntax tree node.
//!
//! Parsing in Syn is built on parser functions that take in a [`Cursor`] and
//! produce a [`PResult<T>`] where `T` is some syntax tree node. `Cursor` is a
//! cheaply copyable cursor over a range of tokens in a token stream, and
//! `PResult` is a result that packages together a parsed syntax tree node `T`
//! with a stream of remaining unparsed tokens after `T` represented as another
//! `Cursor`, or a [`ParseError`] if parsing failed.
//!
//! [`Cursor`]:../buffer/index.html
//! [`PResult<T>`]: type.PResult.html
//! [`ParseError`]: struct.ParseError.html
//!
//! This `Cursor`- and `PResult`-based interface is convenient for parser
//! combinators and parser implementations, but not necessarily when you just
//! have some tokens that you want to parse. For that we expose the following
//! two entry points.
//!
//! ## The `syn::parse*` functions
//!
//! The [`syn::parse`], [`syn::parse2`], and [`syn::parse_str`] functions serve
//! as an entry point for parsing syntax tree nodes that can be parsed in an
//! obvious default way. These functions can return any syntax tree node that
//! implements the [`Synom`] trait, which includes most types in Syn.
//!
//! [`syn::parse`]:../fn.parse.html
//! [`syn::parse2`]:../fn.parse2.html
//! [`syn::parse_str`]:../fn.parse_str.html
//! [`Synom`]: trait.Synom.html
//!
//! ```
//! use syn::Type;
//!
//! # fn run_parser() -> Result<(), syn::synom::ParseError> {
//! let t: Type = syn::parse_str("std::collections::HashMap<String, Value>")?;
//! # Ok(())
//! # }
//! #
//! # fn main() {
//! # run_parser().unwrap();
//! # }
//! ```
//!
//! The [`parse_quote!`] macro also uses this approach.
//!
//! [`parse_quote!`]:../macro.parse_quote.html
//!
//! ## The `Parser` trait
//!
//! Some types can be parsed in several ways depending on context. For example
//! an [`Attribute`] can be either "outer" like `#[...]` or "inner" like
//! `#![...]` and parsing the wrong one would be a bug. Similarly [`Punctuated`]
//! may or may not allow trailing punctuation, and parsing it the wrong way
//! would either reject valid input or accept invalid input.
//!
//! [`Attribute`]:../struct.Attribute.html
//! [`Punctuated`]:../punctuated/index.html
//!
//! The `Synom` trait is not implemented in these cases because there is no good
//! behavior to consider the default.
//!
//! ```ignore
//! // Can't parse `Punctuated` without knowing whether trailing punctuation
//! // should be allowed in this context.
//! let path: Punctuated<PathSegment, Token![::]> = syn::parse(tokens)?;
//! ```
//!
//! In these cases the types provide a choice of parser functions rather than a
//! single `Synom` implementation, and those parser functions can be invoked
//! through the [`Parser`] trait.
//!
//! [`Parser`]: trait.Parser.html
//!
//! ```
//! # #[macro_use]
//! # extern crate syn;
//! #
//! # extern crate proc_macro2;
//! # use proc_macro2::TokenStream;
//! #
//! use syn::synom::Parser;
//! use syn::punctuated::Punctuated;
//! use syn::{PathSegment, Expr, Attribute};
//!
//! # fn run_parsers() -> Result<(), syn::synom::ParseError> {
//! # let tokens = TokenStream::new().into();
//! // Parse a nonempty sequence of path segments separated by `::` punctuation
//! // with no trailing punctuation.
//! let parser = Punctuated::<PathSegment, Token![::]>::parse_separated_nonempty;
//! let path = parser.parse(tokens)?;
//!
//! # let tokens = TokenStream::new().into();
//! // Parse a possibly empty sequence of expressions terminated by commas with
//! // an optional trailing punctuation.
//! let parser = Punctuated::<Expr, Token![,]>::parse_terminated;
//! let args = parser.parse(tokens)?;
//!
//! # let tokens = TokenStream::new().into();
//! // Parse zero or more outer attributes but not inner attributes.
//! named!(outer_attrs -> Vec<Attribute>, many0!(Attribute::parse_outer));
//! let attrs = outer_attrs.parse(tokens)?;
//! #
//! # Ok(())
//! # }
//! #
//! # fn main() {}
//! ```
//!
//! # Implementing a parser function
//!
//! Parser functions are usually implemented using the [`nom`]-style parser
//! combinator macros provided by Syn, but may also be implemented without
//! macros by using the low-level [`Cursor`] API directly.
//!
//! [`nom`]: https://github.com/Geal/nom
//!
//! The following parser combinator macros are available and a `Synom` parsing
//! example is provided for each one.
//!
//! - [`alt!`](../macro.alt.html)
//! - [`braces!`](../macro.braces.html)
//! - [`brackets!`](../macro.brackets.html)
//! - [`call!`](../macro.call.html)
//! - [`cond!`](../macro.cond.html)
//! - [`cond_reduce!`](../macro.cond_reduce.html)
//! - [`custom_keyword!`](../macro.custom_keyword.html)
//! - [`do_parse!`](../macro.do_parse.html)
//! - [`epsilon!`](../macro.epsilon.html)
//! - [`input_end!`](../macro.input_end.html)
//! - [`keyword!`](../macro.keyword.html)
//! - [`many0!`](../macro.many0.html)
//! - [`map!`](../macro.map.html)
//! - [`not!`](../macro.not.html)
//! - [`option!`](../macro.option.html)
//! - [`parens!`](../macro.parens.html)
//! - [`punct!`](../macro.punct.html)
//! - [`reject!`](../macro.reject.html)
//! - [`switch!`](../macro.switch.html)
//! - [`syn!`](../macro.syn.html)
//! - [`tuple!`](../macro.tuple.html)
//! - [`value!`](../macro.value.html)
//!
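//! As a small illustrative sketch (the parser name and grammar here are made up
//! for this example, not taken from Syn itself), several of these combinators
//! compose like so:
//!
//! ```ignore
//! // Parse two identifiers separated by a comma, e.g. `foo, bar`.
//! named!(two_idents -> (Ident, Ident), do_parse!(
//!     first: syn!(Ident) >>
//!     punct!(,) >>
//!     second: syn!(Ident) >>
//!     ((first, second))
//! ));
//! ```
//!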
//! *This module is available if Syn is built with the `"parsing"` feature.*
#[cfg(feature = "proc-macro")]
use proc_macro;
use proc_macro2::{Delimiter, Group, Ident, Literal, Punct, TokenStream, TokenTree};
use error::parse_error;
pub use error::{PResult, ParseError};
use buffer::{Cursor, TokenBuffer};
/// Parsing interface implemented by all types that can be parsed in a default
/// way from a token stream.
///
/// Refer to the [module documentation] for details about parsing in Syn.
///
/// [module documentation]: index.html
///
/// *This trait is available if Syn is built with the `"parsing"` feature.*
pub trait Synom: Sized {
fn parse(input: Cursor) -> PResult<Self>;
/// A short name of the type being parsed.
///
/// The description should only be used for a simple name. It should not
/// contain newlines or sentence-ending punctuation, to facilitate embedding in
/// larger user-facing strings. Syn will use this description when building
/// error messages about parse failures.
///
/// # Examples
///
/// ```
/// # use syn::buffer::Cursor;
/// # use syn::synom::{Synom, PResult};
/// #
/// struct ExprMacro {
/// //...
/// }
///
/// impl Synom for ExprMacro {
/// # fn parse(input: Cursor) -> PResult<Self> { unimplemented!() }
/// // fn parse(...) ->... {... }
///
/// fn description() -> Option<&'static str> {
/// // Will result in messages like
/// //
/// // "failed to parse macro invocation expression: $reason"
/// Some("macro invocation expression")
/// }
/// }
/// ```
fn description() -> Option<&'static str> {
None
}
}
impl Synom for TokenStream {
fn parse(input: Cursor) -> PResult<Self> {
Ok((input.token_stream(), Cursor::empty()))
}
fn description() -> Option<&'static str> {
Some("arbitrary token stream")
}
}
impl Synom for TokenTree {
fn parse(input: Cursor) -> PResult<Self> {
match input.token_tree() {
Some((tt, rest)) => Ok((tt, rest)),
None => parse_error(),
}
}
fn description() -> Option<&'static str> {
Some("token tree")
}
}
impl Synom for Group {
fn parse(input: Cursor) -> PResult<Self> {
for delim in &[Delimiter::Parenthesis, Delimiter::Brace, Delimiter::Bracket] {
match input.group(*delim) {
Some((inside, span, rest)) => {
let mut group = Group::new(*delim, inside.token_stream());
group.set_span(span);
return Ok((group, rest));
}
None => {}
}
}
parse_error()
}
fn description() -> Option<&'static str> {
Some("group token")
}
}
impl Synom for Ident {
fn parse(input: Cursor) -> PResult<Self> {
let (ident, rest) = match input.ident() {
Some(ident) => ident,
_ => return parse_error(),
};
match &ident.to_string()[..] {
"_"
// From https://doc.rust-lang.org/grammar.html#keywords
| "abstract" | "alignof" | "as" | "become" | "box" | "break" | "const"
| "continue" | "crate" | "do" | "else" | "enum" | "extern" | "false" | "final"
| "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" | "macro" | "match"
| "mod" | "move" | "mut" | "offsetof" | "override" | "priv" | "proc" | "pub"
| "pure" | "ref" | "return" | "Self" | "self" | "sizeof" | "static" | "struct"
| "super" | "trait" | "true" | "type" | "typeof" | "unsafe" | "unsized" | "use"
| "virtual" | "where" | "while" | "yield" => return parse_error(),
_ => {}
}
Ok((ident, rest))
}
fn description() -> Option<&'static str> {
Some("identifier")
}
}
impl Synom for Punct {
fn parse(input: Cursor) -> PResult<Self> {
match input.punct() {
Some((punct, rest)) => Ok((punct, rest)),
None => parse_error(),
}
}
fn description() -> Option<&'static str> {
Some("punctuation token")
}
}
impl Synom for Literal {
fn parse(input: Cursor) -> PResult<Self> {
match input.literal() {
Some((literal, rest)) => Ok((literal, rest)),
None => parse_error(),
}
}
fn description() -> Option<&'static str> {
Some("literal token")
}
}
/// Parser that can parse Rust tokens into a particular syntax tree node.
///
/// Refer to the [module documentation] for details about parsing in Syn.
///
/// [module documentation]: index.html
///
/// *This trait is available if Syn is built with the `"parsing"` feature.*
pub trait Parser: Sized {
type Output;
/// Parse a proc-macro2 token stream into the chosen syntax tree node.
fn parse2(self, tokens: TokenStream) -> Result<Self::Output, ParseError>;
/// Parse tokens of source code into the chosen syntax tree node.
///
/// *This method is available if Syn is built with both the `"parsing"` and
/// `"proc-macro"` features.*
#[cfg(feature = "proc-macro")]
fn parse(self, tokens: proc_macro::TokenStream) -> Result<Self::Output, ParseError> {
self.parse2(tokens.into())
}
/// Parse a string of Rust code into the chosen syntax tree node.
///
/// # Hygiene
///
/// Every span in the resulting syntax tree will be set to resolve at the
/// macro call site.
fn | (self, s: &str) -> Result<Self::Output, ParseError> {
match s.parse() {
Ok(tts) => self.parse2(tts),
Err(_) => Err(ParseError::new("error while lexing input string")),
}
}
}
impl<F, T> Parser for F
where
F: FnOnce(Cursor) -> PResult<T>,
{
type Output = T;
fn parse2(self, tokens: TokenStream) -> Result<T, ParseError> {
let buf = TokenBuffer::new2(tokens);
let (t, rest) = self(buf.begin())?;
if rest.eof() {
Ok(t)
} else if rest == buf.begin() {
// parsed nothing
Err(ParseError::new("failed to parse anything"))
} else {
Err(ParseError::new("failed to parse all tokens"))
}
}
}
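// The blanket impl above means any plain function or closure of type
// `Fn(Cursor) -> PResult<T>` already is a `Parser`, which is what allows
// `named!`-generated functions to be driven directly. A rough usage sketch:
//
//     use proc_macro2::TokenStream;
//     use syn::synom::{Parser, Synom};
//
//     let parser = <TokenStream as Synom>::parse;
//     let tokens = parser.parse_str("a + b").unwrap();
//
// `parse2` (and `parse_str`, which lexes the string and then delegates to it)
// builds a `TokenBuffer`, runs the function from the start of the buffer, and
// reports an error unless the entire input was consumed.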
/// Extension traits that are made available within the `call!` parser.
///
/// *This module is available if Syn is built with the `"parsing"` feature.*
pub mod ext {
use super::*;
use proc_macro2::Ident;
/// Additional parsing methods for `Ident`.
///
/// This trait is sealed and cannot be implemented for types outside of Syn.
///
/// *This trait is available if Syn is built with the `"parsing"` feature.*
pub trait IdentExt: Sized + private::Sealed {
/// Parses any identifier including keywords.
///
/// This is useful when parsing a DSL which allows Rust keywords as
/// identifiers.
///
/// ```rust
/// #[macro_use]
/// extern crate syn;
///
/// use syn::Ident;
///
/// // Parses input that looks like `name = NAME` where `NAME` can be
/// // any identifier.
/// //
/// // Examples:
/// //
/// // name = anything
/// // name = impl
/// named!(parse_dsl -> Ident, do_parse!(
/// custom_keyword!(name) >>
/// punct!(=) >>
/// name: call!(Ident::parse_any) >>
/// (name)
/// ));
/// #
/// # fn main() {}
/// ```
fn parse_any(input: Cursor) -> PResult<Self>;
}
impl IdentExt for Ident {
fn parse_any(input: Cursor) -> PResult<Self> {
input.ident().map_or_else(parse_error, Ok)
}
}
mod private {
use proc_macro2::Ident;
pub trait Sealed {}
impl Sealed for Ident {}
}
}
| parse_str | identifier_name |
synom.rs | // Copyright 2018 Syn Developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Parsing interface for parsing a token stream into a syntax tree node.
//!
//! Parsing in Syn is built on parser functions that take in a [`Cursor`] and
//! produce a [`PResult<T>`] where `T` is some syntax tree node. `Cursor` is a
//! cheaply copyable cursor over a range of tokens in a token stream, and
//! `PResult` is a result that packages together a parsed syntax tree node `T`
//! with a stream of remaining unparsed tokens after `T` represented as another
//! `Cursor`, or a [`ParseError`] if parsing failed.
//!
//! [`Cursor`]: ../buffer/index.html
//! [`PResult<T>`]: type.PResult.html
//! [`ParseError`]: struct.ParseError.html
//!
//! This `Cursor`- and `PResult`-based interface is convenient for parser
//! combinators and parser implementations, but not necessarily when you just
//! have some tokens that you want to parse. For that we expose the following
//! two entry points.
//!
//! ## The `syn::parse*` functions
//!
//! The [`syn::parse`], [`syn::parse2`], and [`syn::parse_str`] functions serve
//! as an entry point for parsing syntax tree nodes that can be parsed in an
//! obvious default way. These functions can return any syntax tree node that
//! implements the [`Synom`] trait, which includes most types in Syn.
//!
//! [`syn::parse`]: ../fn.parse.html
//! [`syn::parse2`]: ../fn.parse2.html
//! [`syn::parse_str`]: ../fn.parse_str.html
//! [`Synom`]: trait.Synom.html
//!
//! ```
//! use syn::Type;
//!
//! # fn run_parser() -> Result<(), syn::synom::ParseError> {
//! let t: Type = syn::parse_str("std::collections::HashMap<String, Value>")?;
//! # Ok(())
//! # }
//! #
//! # fn main() {
//! # run_parser().unwrap();
//! # }
//! ```
//!
//! The [`parse_quote!`] macro also uses this approach.
//!
//! [`parse_quote!`]: ../macro.parse_quote.html
//!
//! ## The `Parser` trait
//!
//! Some types can be parsed in several ways depending on context. For example
//! an [`Attribute`] can be either "outer" like `#[...]` or "inner" like
//! `#![...]` and parsing the wrong one would be a bug. Similarly [`Punctuated`]
//! may or may not allow trailing punctuation, and parsing it the wrong way
//! would either reject valid input or accept invalid input.
//!
//! [`Attribute`]: ../struct.Attribute.html
//! [`Punctuated`]: ../punctuated/index.html
//!
//! The `Synom` trait is not implemented in these cases because there is no good
//! behavior to consider the default.
//!
//! ```ignore
//! // Can't parse `Punctuated` without knowing whether trailing punctuation
//! // should be allowed in this context.
//! let path: Punctuated<PathSegment, Token![::]> = syn::parse(tokens)?;
//! ```
//!
//! In these cases the types provide a choice of parser functions rather than a
//! single `Synom` implementation, and those parser functions can be invoked
//! through the [`Parser`] trait.
//!
//! [`Parser`]: trait.Parser.html
//!
//! ```
//! # #[macro_use]
//! # extern crate syn;
//! #
//! # extern crate proc_macro2;
//! # use proc_macro2::TokenStream;
//! #
//! use syn::synom::Parser;
//! use syn::punctuated::Punctuated;
//! use syn::{PathSegment, Expr, Attribute};
//!
//! # fn run_parsers() -> Result<(), syn::synom::ParseError> {
//! # let tokens = TokenStream::new().into();
//! // Parse a nonempty sequence of path segments separated by `::` punctuation
//! // with no trailing punctuation.
//! let parser = Punctuated::<PathSegment, Token![::]>::parse_separated_nonempty;
//! let path = parser.parse(tokens)?;
//!
//! # let tokens = TokenStream::new().into();
//! // Parse a possibly empty sequence of expressions terminated by commas with
//! // an optional trailing punctuation.
//! let parser = Punctuated::<Expr, Token![,]>::parse_terminated;
//! let args = parser.parse(tokens)?;
//!
//! # let tokens = TokenStream::new().into();
//! // Parse zero or more outer attributes but not inner attributes.
//! named!(outer_attrs -> Vec<Attribute>, many0!(Attribute::parse_outer));
//! let attrs = outer_attrs.parse(tokens)?;
//! #
//! # Ok(())
//! # }
//! #
//! # fn main() {}
//! ```
//!
//! # Implementing a parser function
//!
//! Parser functions are usually implemented using the [`nom`]-style parser
//! combinator macros provided by Syn, but may also be implemented without
//! macros by using the low-level [`Cursor`] API directly.
//!
//! [`nom`]: https://github.com/Geal/nom
//!
//! The following parser combinator macros are available and a `Synom` parsing
//! example is provided for each one.
//!
//! - [`alt!`](../macro.alt.html)
//! - [`braces!`](../macro.braces.html)
//! - [`brackets!`](../macro.brackets.html)
//! - [`call!`](../macro.call.html)
//! - [`cond!`](../macro.cond.html)
//! - [`cond_reduce!`](../macro.cond_reduce.html)
//! - [`custom_keyword!`](../macro.custom_keyword.html)
//! - [`do_parse!`](../macro.do_parse.html)
//! - [`epsilon!`](../macro.epsilon.html)
//! - [`input_end!`](../macro.input_end.html)
//! - [`keyword!`](../macro.keyword.html)
//! - [`many0!`](../macro.many0.html)
//! - [`map!`](../macro.map.html)
//! - [`not!`](../macro.not.html)
//! - [`option!`](../macro.option.html)
//! - [`parens!`](../macro.parens.html)
//! - [`punct!`](../macro.punct.html)
//! - [`reject!`](../macro.reject.html)
//! - [`switch!`](../macro.switch.html)
//! - [`syn!`](../macro.syn.html)
//! - [`tuple!`](../macro.tuple.html)
//! - [`value!`](../macro.value.html)
//!
//! *This module is available if Syn is built with the `"parsing"` feature.*
#[cfg(feature = "proc-macro")]
use proc_macro;
use proc_macro2::{Delimiter, Group, Ident, Literal, Punct, TokenStream, TokenTree};
use error::parse_error;
pub use error::{PResult, ParseError};
use buffer::{Cursor, TokenBuffer};
/// Parsing interface implemented by all types that can be parsed in a default
/// way from a token stream.
///
/// Refer to the [module documentation] for details about parsing in Syn.
///
/// [module documentation]: index.html
///
/// *This trait is available if Syn is built with the `"parsing"` feature.*
pub trait Synom: Sized {
fn parse(input: Cursor) -> PResult<Self>;
/// A short name of the type being parsed.
///
/// The description should only be used for a simple name. It should not
/// contain newlines or sentence-ending punctuation, to facilitate embedding in
/// larger user-facing strings. Syn will use this description when building
/// error messages about parse failures.
///
/// # Examples
///
/// ```
/// # use syn::buffer::Cursor;
/// # use syn::synom::{Synom, PResult};
/// #
/// struct ExprMacro {
/// // ...
/// }
///
/// impl Synom for ExprMacro {
/// # fn parse(input: Cursor) -> PResult<Self> { unimplemented!() }
/// // fn parse(...) -> ... { ... }
///
/// fn description() -> Option<&'static str> {
/// // Will result in messages like
/// //
/// // "failed to parse macro invocation expression: $reason"
/// Some("macro invocation expression")
/// }
/// }
/// ```
fn description() -> Option<&'static str> {
None
}
}
impl Synom for TokenStream {
fn parse(input: Cursor) -> PResult<Self> {
Ok((input.token_stream(), Cursor::empty()))
}
fn description() -> Option<&'static str> {
Some("arbitrary token stream")
}
}
impl Synom for TokenTree {
fn parse(input: Cursor) -> PResult<Self> {
match input.token_tree() {
Some((tt, rest)) => Ok((tt, rest)),
None => parse_error(),
}
}
fn description() -> Option<&'static str> {
Some("token tree")
}
}
impl Synom for Group {
fn parse(input: Cursor) -> PResult<Self> {
for delim in &[Delimiter::Parenthesis, Delimiter::Brace, Delimiter::Bracket] {
match input.group(*delim) {
Some((inside, span, rest)) => {
let mut group = Group::new(*delim, inside.token_stream());
group.set_span(span);
return Ok((group, rest));
}
None => {}
}
}
parse_error()
}
fn description() -> Option<&'static str> {
Some("group token")
}
}
impl Synom for Ident {
fn parse(input: Cursor) -> PResult<Self> {
let (ident, rest) = match input.ident() {
Some(ident) => ident,
_ => return parse_error(),
};
match &ident.to_string()[..] {
"_"
// From https://doc.rust-lang.org/grammar.html#keywords
| "abstract" | "alignof" | "as" | "become" | "box" | "break" | "const"
| "continue" | "crate" | "do" | "else" | "enum" | "extern" | "false" | "final"
| "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" | "macro" | "match"
| "mod" | "move" | "mut" | "offsetof" | "override" | "priv" | "proc" | "pub"
| "pure" | "ref" | "return" | "Self" | "self" | "sizeof" | "static" | "struct"
| "super" | "trait" | "true" | "type" | "typeof" | "unsafe" | "unsized" | "use"
| "virtual" | "where" | "while" | "yield" => return parse_error(),
_ => {}
}
Ok((ident, rest))
}
fn description() -> Option<&'static str> {
Some("identifier")
}
}
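// Note that the impl above deliberately rejects every Rust keyword, so a
// default `Ident` parse (e.g. `syn!(Ident)`) will not match input such as
// `impl` or `self`. DSLs that want to accept keywords as names can use the
// `ext::IdentExt::parse_any` helper defined at the bottom of this module, for
// example `name: call!(Ident::parse_any)` inside a `do_parse!` chain.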
impl Synom for Punct {
fn parse(input: Cursor) -> PResult<Self> {
match input.punct() {
Some((punct, rest)) => Ok((punct, rest)),
None => parse_error(),
}
}
fn description() -> Option<&'static str> {
Some("punctuation token")
}
}
impl Synom for Literal {
fn parse(input: Cursor) -> PResult<Self> {
match input.literal() {
Some((literal, rest)) => Ok((literal, rest)),
None => parse_error(),
}
}
fn description() -> Option<&'static str> {
Some("literal token")
}
}
/// Parser that can parse Rust tokens into a particular syntax tree node.
///
/// Refer to the [module documentation] for details about parsing in Syn.
///
/// [module documentation]: index.html
///
/// *This trait is available if Syn is built with the `"parsing"` feature.*
pub trait Parser: Sized {
type Output;
/// Parse a proc-macro2 token stream into the chosen syntax tree node.
fn parse2(self, tokens: TokenStream) -> Result<Self::Output, ParseError>;
/// Parse tokens of source code into the chosen syntax tree node.
///
/// *This method is available if Syn is built with both the `"parsing"` and
/// `"proc-macro"` features.*
#[cfg(feature = "proc-macro")]
fn parse(self, tokens: proc_macro::TokenStream) -> Result<Self::Output, ParseError> {
self.parse2(tokens.into())
}
/// Parse a string of Rust code into the chosen syntax tree node.
///
/// # Hygiene
///
/// Every span in the resulting syntax tree will be set to resolve at the
/// macro call site.
fn parse_str(self, s: &str) -> Result<Self::Output, ParseError> {
match s.parse() {
Ok(tts) => self.parse2(tts),
Err(_) => Err(ParseError::new("error while lexing input string")),
}
}
}
impl<F, T> Parser for F
where
F: FnOnce(Cursor) -> PResult<T>,
{
type Output = T;
fn parse2(self, tokens: TokenStream) -> Result<T, ParseError> {
let buf = TokenBuffer::new2(tokens);
let (t, rest) = self(buf.begin())?;
if rest.eof() {
Ok(t)
} else if rest == buf.begin() {
// parsed nothing | }
/// Extension traits that are made available within the `call!` parser.
///
/// *This module is available if Syn is built with the `"parsing"` feature.*
pub mod ext {
use super::*;
use proc_macro2::Ident;
/// Additional parsing methods for `Ident`.
///
/// This trait is sealed and cannot be implemented for types outside of Syn.
///
/// *This trait is available if Syn is built with the `"parsing"` feature.*
pub trait IdentExt: Sized + private::Sealed {
/// Parses any identifier including keywords.
///
/// This is useful when parsing a DSL which allows Rust keywords as
/// identifiers.
///
/// ```rust
/// #[macro_use]
/// extern crate syn;
///
/// use syn::Ident;
///
/// // Parses input that looks like `name = NAME` where `NAME` can be
/// // any identifier.
/// //
/// // Examples:
/// //
/// // name = anything
/// // name = impl
/// named!(parse_dsl -> Ident, do_parse!(
/// custom_keyword!(name) >>
/// punct!(=) >>
/// name: call!(Ident::parse_any) >>
/// (name)
/// ));
/// #
/// # fn main() {}
/// ```
fn parse_any(input: Cursor) -> PResult<Self>;
}
impl IdentExt for Ident {
fn parse_any(input: Cursor) -> PResult<Self> {
input.ident().map_or_else(parse_error, Ok)
}
}
mod private {
use proc_macro2::Ident;
pub trait Sealed {}
impl Sealed for Ident {}
}
} | Err(ParseError::new("failed to parse anything"))
} else {
Err(ParseError::new("failed to parse all tokens"))
}
} | random_line_split |
synom.rs | // Copyright 2018 Syn Developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Parsing interface for parsing a token stream into a syntax tree node.
//!
//! Parsing in Syn is built on parser functions that take in a [`Cursor`] and
//! produce a [`PResult<T>`] where `T` is some syntax tree node. `Cursor` is a
//! cheaply copyable cursor over a range of tokens in a token stream, and
//! `PResult` is a result that packages together a parsed syntax tree node `T`
//! with a stream of remaining unparsed tokens after `T` represented as another
//! `Cursor`, or a [`ParseError`] if parsing failed.
//!
//! [`Cursor`]: ../buffer/index.html
//! [`PResult<T>`]: type.PResult.html
//! [`ParseError`]: struct.ParseError.html
//!
//! This `Cursor`- and `PResult`-based interface is convenient for parser
//! combinators and parser implementations, but not necessarily when you just
//! have some tokens that you want to parse. For that we expose the following
//! two entry points.
//!
//! ## The `syn::parse*` functions
//!
//! The [`syn::parse`], [`syn::parse2`], and [`syn::parse_str`] functions serve
//! as an entry point for parsing syntax tree nodes that can be parsed in an
//! obvious default way. These functions can return any syntax tree node that
//! implements the [`Synom`] trait, which includes most types in Syn.
//!
//! [`syn::parse`]: ../fn.parse.html
//! [`syn::parse2`]: ../fn.parse2.html
//! [`syn::parse_str`]: ../fn.parse_str.html
//! [`Synom`]: trait.Synom.html
//!
//! ```
//! use syn::Type;
//!
//! # fn run_parser() -> Result<(), syn::synom::ParseError> {
//! let t: Type = syn::parse_str("std::collections::HashMap<String, Value>")?;
//! # Ok(())
//! # }
//! #
//! # fn main() {
//! # run_parser().unwrap();
//! # }
//! ```
//!
//! The [`parse_quote!`] macro also uses this approach.
//!
//! [`parse_quote!`]: ../macro.parse_quote.html
//!
//! ## The `Parser` trait
//!
//! Some types can be parsed in several ways depending on context. For example
//! an [`Attribute`] can be either "outer" like `#[...]` or "inner" like
//! `#![...]` and parsing the wrong one would be a bug. Similarly [`Punctuated`]
//! may or may not allow trailing punctuation, and parsing it the wrong way
//! would either reject valid input or accept invalid input.
//!
//! [`Attribute`]: ../struct.Attribute.html
//! [`Punctuated`]: ../punctuated/index.html
//!
//! The `Synom` trait is not implemented in these cases because there is no good
//! behavior to consider the default.
//!
//! ```ignore
//! // Can't parse `Punctuated` without knowing whether trailing punctuation
//! // should be allowed in this context.
//! let path: Punctuated<PathSegment, Token![::]> = syn::parse(tokens)?;
//! ```
//!
//! In these cases the types provide a choice of parser functions rather than a
//! single `Synom` implementation, and those parser functions can be invoked
//! through the [`Parser`] trait.
//!
//! [`Parser`]: trait.Parser.html
//!
//! ```
//! # #[macro_use]
//! # extern crate syn;
//! #
//! # extern crate proc_macro2;
//! # use proc_macro2::TokenStream;
//! #
//! use syn::synom::Parser;
//! use syn::punctuated::Punctuated;
//! use syn::{PathSegment, Expr, Attribute};
//!
//! # fn run_parsers() -> Result<(), syn::synom::ParseError> {
//! # let tokens = TokenStream::new().into();
//! // Parse a nonempty sequence of path segments separated by `::` punctuation
//! // with no trailing punctuation.
//! let parser = Punctuated::<PathSegment, Token![::]>::parse_separated_nonempty;
//! let path = parser.parse(tokens)?;
//!
//! # let tokens = TokenStream::new().into();
//! // Parse a possibly empty sequence of expressions terminated by commas with
//! // an optional trailing punctuation.
//! let parser = Punctuated::<Expr, Token![,]>::parse_terminated;
//! let args = parser.parse(tokens)?;
//!
//! # let tokens = TokenStream::new().into();
//! // Parse zero or more outer attributes but not inner attributes.
//! named!(outer_attrs -> Vec<Attribute>, many0!(Attribute::parse_outer));
//! let attrs = outer_attrs.parse(tokens)?;
//! #
//! # Ok(())
//! # }
//! #
//! # fn main() {}
//! ```
//!
//! # Implementing a parser function
//!
//! Parser functions are usually implemented using the [`nom`]-style parser
//! combinator macros provided by Syn, but may also be implemented without
//! macros by using the low-level [`Cursor`] API directly.
//!
//! [`nom`]: https://github.com/Geal/nom
//!
//! The following parser combinator macros are available and a `Synom` parsing
//! example is provided for each one.
//!
//! - [`alt!`](../macro.alt.html)
//! - [`braces!`](../macro.braces.html)
//! - [`brackets!`](../macro.brackets.html)
//! - [`call!`](../macro.call.html)
//! - [`cond!`](../macro.cond.html)
//! - [`cond_reduce!`](../macro.cond_reduce.html)
//! - [`custom_keyword!`](../macro.custom_keyword.html)
//! - [`do_parse!`](../macro.do_parse.html)
//! - [`epsilon!`](../macro.epsilon.html)
//! - [`input_end!`](../macro.input_end.html)
//! - [`keyword!`](../macro.keyword.html)
//! - [`many0!`](../macro.many0.html)
//! - [`map!`](../macro.map.html)
//! - [`not!`](../macro.not.html)
//! - [`option!`](../macro.option.html)
//! - [`parens!`](../macro.parens.html)
//! - [`punct!`](../macro.punct.html)
//! - [`reject!`](../macro.reject.html)
//! - [`switch!`](../macro.switch.html)
//! - [`syn!`](../macro.syn.html)
//! - [`tuple!`](../macro.tuple.html)
//! - [`value!`](../macro.value.html)
//!
//! *This module is available if Syn is built with the `"parsing"` feature.*
#[cfg(feature = "proc-macro")]
use proc_macro;
use proc_macro2::{Delimiter, Group, Ident, Literal, Punct, TokenStream, TokenTree};
use error::parse_error;
pub use error::{PResult, ParseError};
use buffer::{Cursor, TokenBuffer};
/// Parsing interface implemented by all types that can be parsed in a default
/// way from a token stream.
///
/// Refer to the [module documentation] for details about parsing in Syn.
///
/// [module documentation]: index.html
///
/// *This trait is available if Syn is built with the `"parsing"` feature.*
pub trait Synom: Sized {
fn parse(input: Cursor) -> PResult<Self>;
/// A short name of the type being parsed.
///
/// The description should only be used for a simple name. It should not
/// contain newlines or sentence-ending punctuation, to facilitate embedding in
/// larger user-facing strings. Syn will use this description when building
/// error messages about parse failures.
///
/// # Examples
///
/// ```
/// # use syn::buffer::Cursor;
/// # use syn::synom::{Synom, PResult};
/// #
/// struct ExprMacro {
/// // ...
/// }
///
/// impl Synom for ExprMacro {
/// # fn parse(input: Cursor) -> PResult<Self> { unimplemented!() }
/// // fn parse(...) -> ... { ... }
///
/// fn description() -> Option<&'static str> {
/// // Will result in messages like
/// //
/// // "failed to parse macro invocation expression: $reason"
/// Some("macro invocation expression")
/// }
/// }
/// ```
fn description() -> Option<&'static str> {
None
}
}
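// A rough sketch of a typical downstream impl, for a hypothetical
// `key = value` syntax node, pairing `named!` with the combinator macros:
//
//     struct KeyValue {
//         key: Ident,
//         eq_token: Token![=],
//         value: Lit,
//     }
//
//     impl Synom for KeyValue {
//         named!(parse -> Self, do_parse!(
//             key: syn!(Ident) >>
//             eq_token: punct!(=) >>
//             value: syn!(Lit) >>
//             (KeyValue { key: key, eq_token: eq_token, value: value })
//         ));
//
//         fn description() -> Option<&'static str> {
//             Some("key-value pair")
//         }
//     }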
impl Synom for TokenStream {
fn parse(input: Cursor) -> PResult<Self> {
Ok((input.token_stream(), Cursor::empty()))
}
fn description() -> Option<&'static str> {
Some("arbitrary token stream")
}
}
impl Synom for TokenTree {
fn parse(input: Cursor) -> PResult<Self> {
match input.token_tree() {
Some((tt, rest)) => Ok((tt, rest)),
None => parse_error(),
}
}
fn description() -> Option<&'static str> {
Some("token tree")
}
}
impl Synom for Group {
fn parse(input: Cursor) -> PResult<Self> {
for delim in &[Delimiter::Parenthesis, Delimiter::Brace, Delimiter::Bracket] {
match input.group(*delim) {
Some((inside, span, rest)) => {
let mut group = Group::new(*delim, inside.token_stream());
group.set_span(span);
return Ok((group, rest));
}
None => {}
}
}
parse_error()
}
fn description() -> Option<&'static str> {
Some("group token")
}
}
impl Synom for Ident {
fn parse(input: Cursor) -> PResult<Self> {
let (ident, rest) = match input.ident() {
Some(ident) => ident,
_ => return parse_error(),
};
match &ident.to_string()[..] {
"_"
// From https://doc.rust-lang.org/grammar.html#keywords
| "abstract" | "alignof" | "as" | "become" | "box" | "break" | "const"
| "continue" | "crate" | "do" | "else" | "enum" | "extern" | "false" | "final"
| "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" | "macro" | "match"
| "mod" | "move" | "mut" | "offsetof" | "override" | "priv" | "proc" | "pub"
| "pure" | "ref" | "return" | "Self" | "self" | "sizeof" | "static" | "struct"
| "super" | "trait" | "true" | "type" | "typeof" | "unsafe" | "unsized" | "use"
| "virtual" | "where" | "while" | "yield" => return parse_error(),
_ => {}
}
Ok((ident, rest))
}
fn description() -> Option<&'static str> {
Some("identifier")
}
}
impl Synom for Punct {
fn parse(input: Cursor) -> PResult<Self> {
match input.punct() {
Some((punct, rest)) => Ok((punct, rest)),
None => parse_error(),
}
}
fn description() -> Option<&'static str> {
Some("punctuation token")
}
}
impl Synom for Literal {
fn parse(input: Cursor) -> PResult<Self> {
match input.literal() {
Some((literal, rest)) => Ok((literal, rest)),
None => parse_error(),
}
}
fn description() -> Option<&'static str> {
Some("literal token")
}
}
/// Parser that can parse Rust tokens into a particular syntax tree node.
///
/// Refer to the [module documentation] for details about parsing in Syn.
///
/// [module documentation]: index.html
///
/// *This trait is available if Syn is built with the `"parsing"` feature.*
pub trait Parser: Sized {
type Output;
/// Parse a proc-macro2 token stream into the chosen syntax tree node.
fn parse2(self, tokens: TokenStream) -> Result<Self::Output, ParseError>;
/// Parse tokens of source code into the chosen syntax tree node.
///
/// *This method is available if Syn is built with both the `"parsing"` and
/// `"proc-macro"` features.*
#[cfg(feature = "proc-macro")]
fn parse(self, tokens: proc_macro::TokenStream) -> Result<Self::Output, ParseError> {
self.parse2(tokens.into())
}
/// Parse a string of Rust code into the chosen syntax tree node.
///
/// # Hygiene
///
/// Every span in the resulting syntax tree will be set to resolve at the
/// macro call site.
fn parse_str(self, s: &str) -> Result<Self::Output, ParseError> |
}
impl<F, T> Parser for F
where
F: FnOnce(Cursor) -> PResult<T>,
{
type Output = T;
fn parse2(self, tokens: TokenStream) -> Result<T, ParseError> {
let buf = TokenBuffer::new2(tokens);
let (t, rest) = self(buf.begin())?;
if rest.eof() {
Ok(t)
} else if rest == buf.begin() {
// parsed nothing
Err(ParseError::new("failed to parse anything"))
} else {
Err(ParseError::new("failed to parse all tokens"))
}
}
}
/// Extension traits that are made available within the `call!` parser.
///
/// *This module is available if Syn is built with the `"parsing"` feature.*
pub mod ext {
use super::*;
use proc_macro2::Ident;
/// Additional parsing methods for `Ident`.
///
/// This trait is sealed and cannot be implemented for types outside of Syn.
///
/// *This trait is available if Syn is built with the `"parsing"` feature.*
pub trait IdentExt: Sized + private::Sealed {
/// Parses any identifier including keywords.
///
/// This is useful when parsing a DSL which allows Rust keywords as
/// identifiers.
///
/// ```rust
/// #[macro_use]
/// extern crate syn;
///
/// use syn::Ident;
///
/// // Parses input that looks like `name = NAME` where `NAME` can be
/// // any identifier.
/// //
/// // Examples:
/// //
/// // name = anything
/// // name = impl
/// named!(parse_dsl -> Ident, do_parse!(
/// custom_keyword!(name) >>
/// punct!(=) >>
/// name: call!(Ident::parse_any) >>
/// (name)
/// ));
/// #
/// # fn main() {}
/// ```
fn parse_any(input: Cursor) -> PResult<Self>;
}
impl IdentExt for Ident {
fn parse_any(input: Cursor) -> PResult<Self> {
input.ident().map_or_else(parse_error, Ok)
}
}
mod private {
use proc_macro2::Ident;
pub trait Sealed {}
impl Sealed for Ident {}
}
}
| {
match s.parse() {
Ok(tts) => self.parse2(tts),
Err(_) => Err(ParseError::new("error while lexing input string")),
}
} | identifier_body |
chunk.rs | use std::cell::RefCell;
use std::collections::HashMap;
use crate::array::*;
use crate::shader::Vertex;
use gfx;
#[derive(Copy, Clone)]
pub struct BlockState {
pub value: u16,
}
pub const EMPTY_BLOCK: BlockState = BlockState { value: 0 };
#[derive(Copy, Clone)]
pub struct BiomeId {
pub value: u8,
}
#[derive(Copy, Clone)]
pub struct LightLevel {
pub value: u8,
}
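// `value` packs two 4-bit quantities into a single byte. The accessors below
// assume the low nibble holds the block light and the high nibble holds the
// sky light, presumably matching the packed light arrays in the Minecraft
// chunk data this is loaded from.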
impl LightLevel {
pub fn block_light(self) -> u8 {
self.value & 0xf
}
pub fn sky_light(self) -> u8 {
self.value >> 4
}
} |
pub const SIZE: usize = 16;
/// A chunk of SIZE x SIZE x SIZE blocks, in YZX order.
#[derive(Copy, Clone)]
pub struct Chunk {
pub blocks: [[[BlockState; SIZE]; SIZE]; SIZE],
pub light_levels: [[[LightLevel; SIZE]; SIZE]; SIZE],
}
// TODO: Change to const pointer.
pub const EMPTY_CHUNK: &Chunk = &Chunk {
blocks: [[[EMPTY_BLOCK; SIZE]; SIZE]; SIZE],
light_levels: [[[LightLevel { value: 0xf0 }; SIZE]; SIZE]; SIZE],
};
pub struct ChunkColumn<R: gfx::Resources> {
pub chunks: Vec<Chunk>,
pub buffers: [RefCell<Option<gfx::handle::Buffer<R, Vertex>>>; SIZE],
pub biomes: [[BiomeId; SIZE]; SIZE],
}
pub struct ChunkManager<R: gfx::Resources> {
chunk_columns: HashMap<(i32, i32), ChunkColumn<R>>,
}
impl<R: gfx::Resources> ChunkManager<R> {
pub fn new() -> ChunkManager<R> {
ChunkManager {
chunk_columns: HashMap::new(),
}
}
pub fn add_chunk_column(&mut self, x: i32, z: i32, c: ChunkColumn<R>) {
self.chunk_columns.insert((x, z), c);
}
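// For each loaded chunk, the method below calls `f` with the chunk's
// (x, y, z) coordinates, the RefCell slot holding its vertex buffer, a 3x3x3
// array of the chunk plus its neighbors (missing neighbors fall back to
// EMPTY_CHUNK), and the 3x3 grid of neighboring biome maps: the context a
// mesher presumably needs to light and cull faces across chunk borders.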
pub fn each_chunk_and_neighbors<'a, F>(&'a self, mut f: F)
where
F: FnMut(
/*coords:*/ [i32; 3],
/*buffer:*/ &'a RefCell<Option<gfx::handle::Buffer<R, Vertex>>>,
/*chunks:*/ [[[&'a Chunk; 3]; 3]; 3],
/*biomes:*/ [[Option<&'a [[BiomeId; SIZE]; SIZE]>; 3]; 3],
),
{
for &(x, z) in self.chunk_columns.keys() {
let columns =
[-1, 0, 1].map(|dz| [-1, 0, 1].map(|dx| self.chunk_columns.get(&(x + dx, z + dz))));
let central = columns[1][1].unwrap();
for y in 0..central.chunks.len() {
let chunks = [-1, 0, 1].map(|dy| {
let y = y as i32 + dy;
columns.map(|cz| {
cz.map(|cx| {
cx.and_then(|c| c.chunks[..].get(y as usize))
.unwrap_or(EMPTY_CHUNK)
})
})
});
f(
[x, y as i32, z],
&central.buffers[y],
chunks,
columns.map(|cz| cz.map(|cx| cx.map(|c| &c.biomes))),
)
}
}
}
pub fn each_chunk<F>(&self, mut f: F)
where
F: FnMut(
/*x:*/ i32,
/*y:*/ i32,
/*z:*/ i32,
/*c:*/ &Chunk,
/*b:*/ &RefCell<Option<gfx::handle::Buffer<R, Vertex>>>,
),
{
for (&(x, z), c) in self.chunk_columns.iter() {
for (y, (c, b)) in c.chunks.iter().zip(c.buffers.iter()).enumerate() {
f(x, y as i32, z, c, b)
}
}
}
} | random_line_split |
|
chunk.rs | use std::cell::RefCell;
use std::collections::HashMap;
use crate::array::*;
use crate::shader::Vertex;
use gfx;
#[derive(Copy, Clone)]
pub struct BlockState {
pub value: u16,
}
pub const EMPTY_BLOCK: BlockState = BlockState { value: 0 };
#[derive(Copy, Clone)]
pub struct BiomeId {
pub value: u8,
}
#[derive(Copy, Clone)]
pub struct LightLevel {
pub value: u8,
}
impl LightLevel {
pub fn | (self) -> u8 {
self.value & 0xf
}
pub fn sky_light(self) -> u8 {
self.value >> 4
}
}
pub const SIZE: usize = 16;
/// A chunk of SIZE x SIZE x SIZE blocks, in YZX order.
#[derive(Copy, Clone)]
pub struct Chunk {
pub blocks: [[[BlockState; SIZE]; SIZE]; SIZE],
pub light_levels: [[[LightLevel; SIZE]; SIZE]; SIZE],
}
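// Given the YZX ordering documented above, a block at local coordinates
// (x, y, z) is addressed with Y as the outermost index, e.g. (sketch):
//
//     let state = chunk.blocks[y][z][x];
//     let light = chunk.light_levels[y][z][x];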
// TODO: Change to const pointer.
pub const EMPTY_CHUNK: &Chunk = &Chunk {
blocks: [[[EMPTY_BLOCK; SIZE]; SIZE]; SIZE],
light_levels: [[[LightLevel { value: 0xf0 }; SIZE]; SIZE]; SIZE],
};
pub struct ChunkColumn<R: gfx::Resources> {
pub chunks: Vec<Chunk>,
pub buffers: [RefCell<Option<gfx::handle::Buffer<R, Vertex>>>; SIZE],
pub biomes: [[BiomeId; SIZE]; SIZE],
}
pub struct ChunkManager<R: gfx::Resources> {
chunk_columns: HashMap<(i32, i32), ChunkColumn<R>>,
}
impl<R: gfx::Resources> ChunkManager<R> {
pub fn new() -> ChunkManager<R> {
ChunkManager {
chunk_columns: HashMap::new(),
}
}
pub fn add_chunk_column(&mut self, x: i32, z: i32, c: ChunkColumn<R>) {
self.chunk_columns.insert((x, z), c);
}
pub fn each_chunk_and_neighbors<'a, F>(&'a self, mut f: F)
where
F: FnMut(
/*coords:*/ [i32; 3],
/*buffer:*/ &'a RefCell<Option<gfx::handle::Buffer<R, Vertex>>>,
/*chunks:*/ [[[&'a Chunk; 3]; 3]; 3],
/*biomes:*/ [[Option<&'a [[BiomeId; SIZE]; SIZE]>; 3]; 3],
),
{
for &(x, z) in self.chunk_columns.keys() {
let columns =
[-1, 0, 1].map(|dz| [-1, 0, 1].map(|dx| self.chunk_columns.get(&(x + dx, z + dz))));
let central = columns[1][1].unwrap();
for y in 0..central.chunks.len() {
let chunks = [-1, 0, 1].map(|dy| {
let y = y as i32 + dy;
columns.map(|cz| {
cz.map(|cx| {
cx.and_then(|c| c.chunks[..].get(y as usize))
.unwrap_or(EMPTY_CHUNK)
})
})
});
f(
[x, y as i32, z],
&central.buffers[y],
chunks,
columns.map(|cz| cz.map(|cx| cx.map(|c| &c.biomes))),
)
}
}
}
pub fn each_chunk<F>(&self, mut f: F)
where
F: FnMut(
/*x:*/ i32,
/*y:*/ i32,
/*z:*/ i32,
/*c:*/ &Chunk,
/*b:*/ &RefCell<Option<gfx::handle::Buffer<R, Vertex>>>,
),
{
for (&(x, z), c) in self.chunk_columns.iter() {
for (y, (c, b)) in c.chunks.iter().zip(c.buffers.iter()).enumerate() {
f(x, y as i32, z, c, b)
}
}
}
}
| block_light | identifier_name |
get_capabilities.rs | //! `GET /_matrix/client/*/capabilities`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3capabilities
use ruma_common::api::ruma_api;
use crate::capabilities::Capabilities;
ruma_api! {
metadata: {
description: "Gets information about the server's supported feature set and other relevant capabilities.",
method: GET,
name: "get_capabilities",
r0_path: "/_matrix/client/r0/capabilities",
stable_path: "/_matrix/client/v3/capabilities",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
#[derive(Default)]
request: {}
response: {
/// The capabilities the server supports
pub capabilities: Capabilities,
}
error: crate::Error
}
impl Request {
/// Creates an empty `Request`.
pub fn new() -> Self {
Self {}
}
}
impl Response {
/// Creates a new `Response` with the given capabilities.
pub fn | (capabilities: Capabilities) -> Self {
Self { capabilities }
}
}
impl From<Capabilities> for Response {
fn from(capabilities: Capabilities) -> Self {
Self::new(capabilities)
}
}
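// Rough usage sketch, assuming some ruma-style client with a `send_request`
// method (the client API is not part of this crate):
//
//     let response = client.send_request(Request::new())?;
//     let caps = response.capabilities;
//
// `Request::new()` takes no arguments because the endpoint has no parameters;
// all of the useful data lives on `Response::capabilities`.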
}
| new | identifier_name |
get_capabilities.rs | //! `GET /_matrix/client/*/capabilities`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3capabilities
use ruma_common::api::ruma_api;
use crate::capabilities::Capabilities;
ruma_api! {
metadata: {
description: "Gets information about the server's supported feature set and other relevant capabilities.",
method: GET,
name: "get_capabilities",
r0_path: "/_matrix/client/r0/capabilities",
stable_path: "/_matrix/client/v3/capabilities",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
#[derive(Default)]
request: {}
response: {
/// The capabilities the server supports
pub capabilities: Capabilities,
}
error: crate::Error
}
impl Request {
/// Creates an empty `Request`.
pub fn new() -> Self |
}
impl Response {
/// Creates a new `Response` with the given capabilities.
pub fn new(capabilities: Capabilities) -> Self {
Self { capabilities }
}
}
impl From<Capabilities> for Response {
fn from(capabilities: Capabilities) -> Self {
Self::new(capabilities)
}
}
}
| {
Self {}
} | identifier_body |
get_capabilities.rs | //! `GET /_matrix/client/*/capabilities`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3capabilities
use ruma_common::api::ruma_api;
use crate::capabilities::Capabilities;
ruma_api! {
metadata: {
description: "Gets information about the server's supported feature set and other relevant capabilities.",
method: GET,
name: "get_capabilities",
r0_path: "/_matrix/client/r0/capabilities",
stable_path: "/_matrix/client/v3/capabilities",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
#[derive(Default)]
request: {}
response: {
/// The capabilities the server supports
pub capabilities: Capabilities,
} | }
impl Request {
/// Creates an empty `Request`.
pub fn new() -> Self {
Self {}
}
}
impl Response {
/// Creates a new `Response` with the given capabilities.
pub fn new(capabilities: Capabilities) -> Self {
Self { capabilities }
}
}
impl From<Capabilities> for Response {
fn from(capabilities: Capabilities) -> Self {
Self::new(capabilities)
}
}
} |
error: crate::Error | random_line_split |
state_manager.rs | use status::*;
use std::sync::mpsc::Sender;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
#[derive(Clone)]
pub struct StateManager {
current_state: Status,
prev_state: Status,
is_web_requesting: bool,
is_bg_requesting: bool,
tx_state: Sender<(Status,Status)>,
to_print_screen: Arc<AtomicBool>
}
impl StateManager {
pub fn new (tx_state: Sender<(Status,Status)>) -> Self {
StateManager {
current_state: Status::Startup,
prev_state: Status::Startup,
is_web_requesting: false,
is_bg_requesting: false,
tx_state: tx_state,
to_print_screen: Arc::new(AtomicBool::new(false))
}
}
pub fn is_web_request (&self) -> bool {
self.is_web_requesting
}
pub fn set_web_request(&mut self, value: bool) {
self.is_web_requesting = value;
}
pub fn is_bg_request (&self) -> bool {
self.is_bg_requesting
}
pub fn set_bg_request(&mut self, value: bool) {
self.is_bg_requesting = value;
}
pub fn update_state(&mut self, value: Status) {
if self.current_state!= value |
}
pub fn get_state(&self) -> Status {
self.current_state
}
pub fn is_to_print_screen(&self) -> bool {
(*self.to_print_screen.clone()).load(Ordering::Relaxed)
}
pub fn set_to_print_screen(&mut self, value: bool) {
(*self.to_print_screen).store(value, Ordering::Relaxed)
}
}
| {
let prev_state_tmp = self.prev_state.clone();
let current_state_tmp = self.current_state.clone();
self.prev_state = self.current_state;
self.current_state = value;
self.tx_state.send((prev_state_tmp, current_state_tmp));
} | conditional_block |
state_manager.rs | use status::*;
use std::sync::mpsc::Sender;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
#[derive(Clone)]
pub struct StateManager {
current_state: Status,
prev_state: Status,
is_web_requesting: bool,
is_bg_requesting: bool,
tx_state: Sender<(Status,Status)>,
to_print_screen: Arc<AtomicBool>
}
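// Rough usage sketch: the manager is built around an mpsc channel so another
// thread can observe state transitions:
//
//     let (tx_state, rx_state) = std::sync::mpsc::channel();
//     let mut state = StateManager::new(tx_state);
//     state.update_state(next_status); // `next_status`: any other Status value
//     // `rx_state` then yields the (prev, current) pair captured just before
//     // the update was applied.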
impl StateManager {
pub fn new (tx_state: Sender<(Status,Status)>) -> Self {
StateManager {
current_state: Status::Startup,
prev_state: Status::Startup,
is_web_requesting: false,
is_bg_requesting: false,
tx_state: tx_state,
to_print_screen: Arc::new(AtomicBool::new(false))
}
}
pub fn is_web_request (&self) -> bool {
self.is_web_requesting
}
pub fn set_web_request(&mut self, value: bool) {
self.is_web_requesting = value;
}
pub fn is_bg_request (&self) -> bool {
self.is_bg_requesting
}
pub fn set_bg_request(&mut self, value: bool) {
self.is_bg_requesting = value;
}
pub fn update_state(&mut self, value: Status) {
if self.current_state != value {
let prev_state_tmp = self.prev_state.clone();
let current_state_tmp = self.current_state.clone();
self.prev_state = self.current_state;
self.current_state = value;
self.tx_state.send((prev_state_tmp, current_state_tmp));
}
}
pub fn get_state(&self) -> Status { | self.current_state
}
pub fn is_to_print_screen(&self) -> bool {
(*self.to_print_screen.clone()).load(Ordering::Relaxed)
}
pub fn set_to_print_screen(&mut self, value: bool) {
(*self.to_print_screen).store(value, Ordering::Relaxed)
}
} | random_line_split |
|
state_manager.rs | use status::*;
use std::sync::mpsc::Sender;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
#[derive(Clone)]
pub struct StateManager {
current_state: Status,
prev_state: Status,
is_web_requesting: bool,
is_bg_requesting: bool,
tx_state: Sender<(Status,Status)>,
to_print_screen: Arc<AtomicBool>
}
impl StateManager {
pub fn new (tx_state: Sender<(Status,Status)>) -> Self {
StateManager {
current_state: Status::Startup,
prev_state: Status::Startup,
is_web_requesting: false,
is_bg_requesting: false,
tx_state: tx_state,
to_print_screen: Arc::new(AtomicBool::new(false))
}
}
pub fn is_web_request (&self) -> bool {
self.is_web_requesting
}
pub fn set_web_request(&mut self, value: bool) |
pub fn is_bg_request (&self) -> bool {
self.is_bg_requesting
}
pub fn set_bg_request(&mut self, value: bool) {
self.is_bg_requesting = value;
}
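// The method below only records a transition when the value actually changes;
// the tuple pushed onto `tx_state` is the (prev_state, current_state)
// snapshot taken before the new value is applied, and the result of the send
// is ignored.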
pub fn update_state(&mut self, value: Status) {
if self.current_state != value {
let prev_state_tmp = self.prev_state.clone();
let current_state_tmp = self.current_state.clone();
self.prev_state = self.current_state;
self.current_state = value;
self.tx_state.send((prev_state_tmp, current_state_tmp));
}
}
pub fn get_state(&self) -> Status {
self.current_state
}
pub fn is_to_print_screen(&self) -> bool {
(*self.to_print_screen.clone()).load(Ordering::Relaxed)
}
pub fn set_to_print_screen(&mut self, value: bool) {
(*self.to_print_screen).store(value, Ordering::Relaxed)
}
}
| {
self.is_web_requesting = value;
} | identifier_body |
state_manager.rs | use status::*;
use std::sync::mpsc::Sender;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
#[derive(Clone)]
pub struct StateManager {
current_state: Status,
prev_state: Status,
is_web_requesting: bool,
is_bg_requesting: bool,
tx_state: Sender<(Status,Status)>,
to_print_screen: Arc<AtomicBool>
}
impl StateManager {
pub fn new (tx_state: Sender<(Status,Status)>) -> Self {
StateManager {
current_state: Status::Startup,
prev_state: Status::Startup,
is_web_requesting: false,
is_bg_requesting: false,
tx_state: tx_state,
to_print_screen: Arc::new(AtomicBool::new(false))
}
}
pub fn is_web_request (&self) -> bool {
self.is_web_requesting
}
pub fn set_web_request(&mut self, value: bool) {
self.is_web_requesting = value;
}
pub fn is_bg_request (&self) -> bool {
self.is_bg_requesting
}
pub fn set_bg_request(&mut self, value: bool) {
self.is_bg_requesting = value;
}
pub fn update_state(&mut self, value: Status) {
if self.current_state != value {
let prev_state_tmp = self.prev_state.clone();
let current_state_tmp = self.current_state.clone();
self.prev_state = self.current_state;
self.current_state = value;
self.tx_state.send((prev_state_tmp, current_state_tmp));
}
}
pub fn | (&self) -> Status {
self.current_state
}
pub fn is_to_print_screen(&self) -> bool {
(*self.to_print_screen.clone()).load(Ordering::Relaxed)
}
pub fn set_to_print_screen(&mut self, value: bool) {
(*self.to_print_screen).store(value, Ordering::Relaxed)
}
}
| get_state | identifier_name |
vec.rs | /*
Precached - A Linux process monitor and pre-caching daemon
Copyright (C) 2017-2020 the precached developers
This file is part of precached.
Precached is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Precached is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Precached. If not, see <http://www.gnu.org/licenses/>.
*/
use rayon::prelude::*;
use std::cmp::PartialEq;
pub trait Contains<T> {
fn contains(&self, p: &T) -> bool;
}
impl<T: PartialEq> Contains<T> for Vec<T> {
fn contains(&self, p: &T) -> bool |
}
| {
self.iter().any(|e| e == p)
} | identifier_body |
vec.rs | /*
Precached - A Linux process monitor and pre-caching daemon
Copyright (C) 2017-2020 the precached developers
This file is part of precached.
Precached is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Precached is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Precached. If not, see <http://www.gnu.org/licenses/>.
*/
use rayon::prelude::*;
use std::cmp::PartialEq;
pub trait Contains<T> {
fn contains(&self, p: &T) -> bool;
}
impl<T: PartialEq> Contains<T> for Vec<T> {
fn | (&self, p: &T) -> bool {
self.iter().any(|e| e == p)
}
}
| contains | identifier_name |
vec.rs | /*
Precached - A Linux process monitor and pre-caching daemon
Copyright (C) 2017-2020 the precached developers
This file is part of precached.
Precached is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Precached is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
| You should have received a copy of the GNU General Public License
along with Precached. If not, see <http://www.gnu.org/licenses/>.
*/
use rayon::prelude::*;
use std::cmp::PartialEq;
pub trait Contains<T> {
fn contains(&self, p: &T) -> bool;
}
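// `Vec` already exposes an inherent `contains` through slices, and inherent
// methods win under method-call syntax, so the trait method below is reached
// explicitly, e.g. (sketch):
//
//     let v = vec![1, 2, 3];
//     assert!(Contains::contains(&v, &2));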
impl<T: PartialEq> Contains<T> for Vec<T> {
fn contains(&self, p: &T) -> bool {
self.iter().any(|e| e == p)
}
} | random_line_split |
|
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(step_by)]
#![deny(unsafe_code)]
extern crate cookie as cookie_rs;
extern crate heapsize;
#[macro_use]
extern crate heapsize_derive;
extern crate hyper;
extern crate hyper_serde;
extern crate image as piston_image;
extern crate ipc_channel;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate msg;
extern crate num_traits;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate servo_config;
extern crate servo_url;
extern crate url;
extern crate uuid;
extern crate webrender_traits;
use cookie_rs::Cookie;
use filemanager_thread::FileManagerThreadMsg;
use heapsize::HeapSizeOf;
use hyper::Error as HyperError;
use hyper::header::{ContentType, Headers, ReferrerPolicy as ReferrerPolicyHeader};
use hyper::http::RawStatus;
use hyper::mime::{Attr, Mime};
use hyper_serde::Serde;
use ipc_channel::Error as IpcError;
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use ipc_channel::router::ROUTER;
use request::{Request, RequestInit};
use response::{HttpsState, Response, ResponseInit};
use servo_url::ServoUrl;
use std::error::Error;
use storage_thread::StorageThreadMsg;
pub mod blob_url_store;
pub mod filemanager_thread;
pub mod image_cache;
pub mod net_error_list;
pub mod pub_domains;
pub mod request;
pub mod response;
pub mod storage_thread;
/// Image handling.
///
/// It may be surprising that this goes in the network crate as opposed to the graphics crate.
/// However, image handling is generally very integrated with the network stack (especially where
/// caching is involved) and as a result it must live in here.
pub mod image {
pub mod base;
}
/// A loading context, for context-specific sniffing, as defined in
/// https://mimesniff.spec.whatwg.org/#context-specific-sniffing
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum LoadContext {
Browsing,
Image,
AudioVideo,
Plugin,
Style,
Script,
Font,
TextTrack,
CacheManifest,
}
#[derive(Clone, Debug, Deserialize, Serialize, HeapSizeOf)]
pub struct CustomResponse {
#[ignore_heap_size_of = "Defined in hyper"]
#[serde(deserialize_with = "::hyper_serde::deserialize",
serialize_with = "::hyper_serde::serialize")]
pub headers: Headers,
#[ignore_heap_size_of = "Defined in hyper"]
#[serde(deserialize_with = "::hyper_serde::deserialize",
serialize_with = "::hyper_serde::serialize")]
pub raw_status: RawStatus,
pub body: Vec<u8>,
} | raw_status: raw_status,
body: body,
}
}
}
#[derive(Clone, Deserialize, Serialize)]
pub struct CustomResponseMediator {
pub response_chan: IpcSender<Option<CustomResponse>>,
pub load_url: ServoUrl,
}
/// [Policies](https://w3c.github.io/webappsec-referrer-policy/#referrer-policy-states)
/// for providing a referrer header for a request
#[derive(Clone, Copy, Debug, Deserialize, HeapSizeOf, Serialize)]
pub enum ReferrerPolicy {
/// "no-referrer"
NoReferrer,
/// "no-referrer-when-downgrade"
NoReferrerWhenDowngrade,
/// "origin"
Origin,
/// "same-origin"
SameOrigin,
/// "origin-when-cross-origin"
OriginWhenCrossOrigin,
/// "unsafe-url"
UnsafeUrl,
/// "strict-origin"
StrictOrigin,
/// "strict-origin-when-cross-origin"
StrictOriginWhenCrossOrigin,
}
impl<'a> From<&'a ReferrerPolicyHeader> for ReferrerPolicy {
fn from(policy: &'a ReferrerPolicyHeader) -> Self {
match *policy {
ReferrerPolicyHeader::NoReferrer =>
ReferrerPolicy::NoReferrer,
ReferrerPolicyHeader::NoReferrerWhenDowngrade =>
ReferrerPolicy::NoReferrerWhenDowngrade,
ReferrerPolicyHeader::SameOrigin =>
ReferrerPolicy::SameOrigin,
ReferrerPolicyHeader::Origin =>
ReferrerPolicy::Origin,
ReferrerPolicyHeader::OriginWhenCrossOrigin =>
ReferrerPolicy::OriginWhenCrossOrigin,
ReferrerPolicyHeader::UnsafeUrl =>
ReferrerPolicy::UnsafeUrl,
ReferrerPolicyHeader::StrictOrigin =>
ReferrerPolicy::StrictOrigin,
ReferrerPolicyHeader::StrictOriginWhenCrossOrigin =>
ReferrerPolicy::StrictOriginWhenCrossOrigin,
}
}
}
#[derive(Deserialize, Serialize)]
pub enum FetchResponseMsg {
// todo: should have fields for transmitted/total bytes
ProcessRequestBody,
ProcessRequestEOF,
// todo: send more info about the response (or perhaps the entire Response)
ProcessResponse(Result<FetchMetadata, NetworkError>),
ProcessResponseChunk(Vec<u8>),
ProcessResponseEOF(Result<(), NetworkError>),
}
pub trait FetchTaskTarget {
/// https://fetch.spec.whatwg.org/#process-request-body
///
/// Fired when a chunk of the request body is transmitted
fn process_request_body(&mut self, request: &Request);
/// https://fetch.spec.whatwg.org/#process-request-end-of-file
///
/// Fired when the entire request finishes being transmitted
fn process_request_eof(&mut self, request: &Request);
/// https://fetch.spec.whatwg.org/#process-response
///
/// Fired when headers are received
fn process_response(&mut self, response: &Response);
/// Fired when a chunk of response content is received
fn process_response_chunk(&mut self, chunk: Vec<u8>);
/// https://fetch.spec.whatwg.org/#process-response-end-of-file
///
/// Fired when the response is fully fetched
fn process_response_eof(&mut self, response: &Response);
}
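// Per the spec links above, a successful fetch drives a listener roughly in
// this order: zero or more process_request_body calls, then
// process_request_eof, then process_response once headers are available, then
// any number of process_response_chunk calls, and finally
// process_response_eof.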
#[derive(Clone, Serialize, Deserialize)]
pub enum FilteredMetadata {
Basic(Metadata),
Cors(Metadata),
Opaque,
OpaqueRedirect
}
#[derive(Clone, Serialize, Deserialize)]
pub enum FetchMetadata {
Unfiltered(Metadata),
Filtered {
filtered: FilteredMetadata,
unsafe_: Metadata,
},
}
pub trait FetchResponseListener {
fn process_request_body(&mut self);
fn process_request_eof(&mut self);
fn process_response(&mut self, metadata: Result<FetchMetadata, NetworkError>);
fn process_response_chunk(&mut self, chunk: Vec<u8>);
fn process_response_eof(&mut self, response: Result<(), NetworkError>);
}
impl FetchTaskTarget for IpcSender<FetchResponseMsg> {
fn process_request_body(&mut self, _: &Request) {
let _ = self.send(FetchResponseMsg::ProcessRequestBody);
}
fn process_request_eof(&mut self, _: &Request) {
let _ = self.send(FetchResponseMsg::ProcessRequestEOF);
}
fn process_response(&mut self, response: &Response) {
let _ = self.send(FetchResponseMsg::ProcessResponse(response.metadata()));
}
fn process_response_chunk(&mut self, chunk: Vec<u8>) {
let _ = self.send(FetchResponseMsg::ProcessResponseChunk(chunk));
}
fn process_response_eof(&mut self, response: &Response) {
if response.is_network_error() {
// todo: finer grained errors
let _ =
self.send(FetchResponseMsg::ProcessResponseEOF(Err(NetworkError::Internal("Network error".into()))));
} else {
let _ = self.send(FetchResponseMsg::ProcessResponseEOF(Ok(())));
}
}
}
pub trait Action<Listener> {
fn process(self, listener: &mut Listener);
}
impl<T: FetchResponseListener> Action<T> for FetchResponseMsg {
/// Execute the default action on a provided listener.
fn process(self, listener: &mut T) {
match self {
FetchResponseMsg::ProcessRequestBody => listener.process_request_body(),
FetchResponseMsg::ProcessRequestEOF => listener.process_request_eof(),
FetchResponseMsg::ProcessResponse(meta) => listener.process_response(meta),
FetchResponseMsg::ProcessResponseChunk(data) => listener.process_response_chunk(data),
FetchResponseMsg::ProcessResponseEOF(data) => listener.process_response_eof(data),
}
}
}
/// Handle to a resource thread
pub type CoreResourceThread = IpcSender<CoreResourceMsg>;
pub type IpcSendResult = Result<(), IpcError>;
/// Abstraction of the ability to send a particular type of message,
/// used by net_traits::ResourceThreads to ease the use its IpcSender sub-fields
/// XXX: If this trait will be used more in future, some auto derive might be appealing
pub trait IpcSend<T>
where T: serde::Serialize + serde::Deserialize,
{
/// send message T
fn send(&self, T) -> IpcSendResult;
/// get underlying sender
fn sender(&self) -> IpcSender<T>;
}
// FIXME: Originally we will construct an Arc<ResourceThread> from ResourceThread
// in script_thread to avoid some performance pitfall. Now we decide to deal with
// the "Arc" hack implicitly in future.
// See discussion: http://logs.glob.uno/?c=mozilla%23servo&s=16+May+2016&e=16+May+2016#c430412
// See also: https://github.com/servo/servo/blob/735480/components/script/script_thread.rs#L313
#[derive(Clone, Serialize, Deserialize)]
pub struct ResourceThreads {
core_thread: CoreResourceThread,
storage_thread: IpcSender<StorageThreadMsg>,
}
impl ResourceThreads {
pub fn new(c: CoreResourceThread, s: IpcSender<StorageThreadMsg>) -> ResourceThreads {
ResourceThreads {
core_thread: c,
storage_thread: s,
}
}
}
impl IpcSend<CoreResourceMsg> for ResourceThreads {
fn send(&self, msg: CoreResourceMsg) -> IpcSendResult {
self.core_thread.send(msg)
}
fn sender(&self) -> IpcSender<CoreResourceMsg> {
self.core_thread.clone()
}
}
impl IpcSend<StorageThreadMsg> for ResourceThreads {
fn send(&self, msg: StorageThreadMsg) -> IpcSendResult {
self.storage_thread.send(msg)
}
fn sender(&self) -> IpcSender<StorageThreadMsg> {
self.storage_thread.clone()
}
}
// Ignore the sub-fields
impl HeapSizeOf for ResourceThreads {
fn heap_size_of_children(&self) -> usize {
0
}
}
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum IncludeSubdomains {
Included,
NotIncluded,
}
#[derive(HeapSizeOf, Deserialize, Serialize)]
pub enum MessageData {
Text(String),
Binary(Vec<u8>),
}
#[derive(Deserialize, Serialize)]
pub enum WebSocketDomAction {
SendMessage(MessageData),
Close(Option<u16>, Option<String>),
}
#[derive(Deserialize, Serialize)]
pub enum WebSocketNetworkEvent {
ConnectionEstablished {
protocol_in_use: Option<String>,
},
MessageReceived(MessageData),
Close(Option<u16>, String),
Fail,
}
#[derive(Deserialize, Serialize)]
pub struct WebSocketCommunicate {
pub event_sender: IpcSender<WebSocketNetworkEvent>,
pub action_receiver: IpcReceiver<WebSocketDomAction>,
}
#[derive(Deserialize, Serialize)]
pub struct WebSocketConnectData {
pub resource_url: ServoUrl,
pub origin: String,
pub protocols: Vec<String>,
}
#[derive(Deserialize, Serialize)]
pub enum CoreResourceMsg {
Fetch(RequestInit, IpcSender<FetchResponseMsg>),
/// Initiate a fetch in response to processing a redirection
FetchRedirect(RequestInit, ResponseInit, IpcSender<FetchResponseMsg>),
/// Try to make a websocket connection to a URL.
WebsocketConnect(WebSocketCommunicate, WebSocketConnectData),
/// Store a cookie for a given originating URL
SetCookieForUrl(ServoUrl, Serde<Cookie<'static>>, CookieSource),
/// Store a set of cookies for a given originating URL
SetCookiesForUrl(ServoUrl, Vec<Serde<Cookie<'static>>>, CookieSource),
/// Retrieve the stored cookies for a given URL
GetCookiesForUrl(ServoUrl, IpcSender<Option<String>>, CookieSource),
/// Get a cookie by name for a given originating URL
GetCookiesDataForUrl(ServoUrl, IpcSender<Vec<Serde<Cookie<'static>>>>, CookieSource),
/// Cancel a network request corresponding to a given `ResourceId`
Cancel(ResourceId),
/// Synchronization message solely for knowing the state of the ResourceChannelManager loop
Synchronize(IpcSender<()>),
/// Send the network sender in constellation to CoreResourceThread
NetworkMediator(IpcSender<CustomResponseMediator>),
/// Message forwarded to file manager's handler
ToFileManager(FileManagerThreadMsg),
/// Break the load handler loop, send a reply when done cleaning up local resources
/// and exit
Exit(IpcSender<()>),
}
/// Instruct the resource thread to make a new request.
pub fn fetch_async<F>(request: RequestInit, core_resource_thread: &CoreResourceThread, f: F)
where F: Fn(FetchResponseMsg) + Send + 'static,
{
let (action_sender, action_receiver) = ipc::channel().unwrap();
ROUTER.add_route(action_receiver.to_opaque(),
box move |message| f(message.to().unwrap()));
core_resource_thread.send(CoreResourceMsg::Fetch(request, action_sender)).unwrap();
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct ResourceCorsData {
/// CORS Preflight flag
pub preflight: bool,
/// Origin of CORS Request
pub origin: ServoUrl,
}
/// Metadata about a loaded resource, such as is obtained from HTTP headers.
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct Metadata {
/// Final URL after redirects.
pub final_url: ServoUrl,
#[ignore_heap_size_of = "Defined in hyper"]
/// MIME type / subtype.
pub content_type: Option<Serde<ContentType>>,
/// Character set.
pub charset: Option<String>,
#[ignore_heap_size_of = "Defined in hyper"]
/// Headers
pub headers: Option<Serde<Headers>>,
/// HTTP Status
pub status: Option<(u16, Vec<u8>)>,
/// Is successful HTTPS connection
pub https_state: HttpsState,
/// Referrer Url
pub referrer: Option<ServoUrl>,
/// Referrer Policy of the Request used to obtain Response
pub referrer_policy: Option<ReferrerPolicy>,
}
impl Metadata {
/// Metadata with defaults for everything optional.
pub fn default(url: ServoUrl) -> Self {
Metadata {
final_url: url,
content_type: None,
charset: None,
headers: None,
// https://fetch.spec.whatwg.org/#concept-response-status-message
status: Some((200, b"OK".to_vec())),
https_state: HttpsState::None,
referrer: None,
referrer_policy: None,
}
}
/// Extract the parts of a Mime that we care about.
pub fn set_content_type(&mut self, content_type: Option<&Mime>) {
if self.headers.is_none() {
self.headers = Some(Serde(Headers::new()));
}
if let Some(mime) = content_type {
self.headers.as_mut().unwrap().set(ContentType(mime.clone()));
self.content_type = Some(Serde(ContentType(mime.clone())));
let Mime(_, _, ref parameters) = *mime;
for &(ref k, ref v) in parameters {
if Attr::Charset == *k {
self.charset = Some(v.to_string());
}
}
}
}
}
/// The creator of a given cookie
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum CookieSource {
/// An HTTP API
HTTP,
/// A non-HTTP API
NonHTTP,
}
/// Convenience function for synchronously loading a whole resource.
pub fn load_whole_resource(request: RequestInit,
core_resource_thread: &CoreResourceThread)
-> Result<(Metadata, Vec<u8>), NetworkError> {
let (action_sender, action_receiver) = ipc::channel().unwrap();
core_resource_thread.send(CoreResourceMsg::Fetch(request, action_sender)).unwrap();
let mut buf = vec![];
let mut metadata = None;
loop {
match action_receiver.recv().unwrap() {
FetchResponseMsg::ProcessRequestBody |
FetchResponseMsg::ProcessRequestEOF => (),
FetchResponseMsg::ProcessResponse(Ok(m)) => {
metadata = Some(match m {
FetchMetadata::Unfiltered(m) => m,
FetchMetadata::Filtered { unsafe_, .. } => unsafe_,
})
},
FetchResponseMsg::ProcessResponseChunk(data) => buf.extend_from_slice(&data),
FetchResponseMsg::ProcessResponseEOF(Ok(())) => return Ok((metadata.unwrap(), buf)),
FetchResponseMsg::ProcessResponse(Err(e)) |
FetchResponseMsg::ProcessResponseEOF(Err(e)) => return Err(e),
}
}
}
/// A unique identifier to keep track of each load message in the resource handler
#[derive(Clone, PartialEq, Eq, Copy, Hash, Debug, Deserialize, Serialize, HeapSizeOf)]
pub struct ResourceId(pub u32);
/// Network errors that have to be exported out of the loaders
#[derive(Clone, PartialEq, Eq, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum NetworkError {
/// Could be any of the internal errors, like unsupported scheme, connection errors, etc.
Internal(String),
LoadCancelled,
/// SSL validation error that has to be handled in the HTML parser
SslValidation(ServoUrl, String),
}
impl NetworkError {
pub fn from_hyper_error(url: &ServoUrl, error: HyperError) -> Self {
if let HyperError::Ssl(ref ssl_error) = error {
return NetworkError::from_ssl_error(url, &**ssl_error);
}
NetworkError::Internal(error.description().to_owned())
}
pub fn from_ssl_error(url: &ServoUrl, error: &Error) -> Self {
NetworkError::SslValidation(url.clone(), error.description().to_owned())
}
}
/// Normalize `slice`, as defined by
/// [the Fetch Spec](https://fetch.spec.whatwg.org/#concept-header-value-normalize).
pub fn trim_http_whitespace(mut slice: &[u8]) -> &[u8] {
const HTTP_WS_BYTES: &'static [u8] = b"\x09\x0A\x0D\x20";
loop {
match slice.split_first() {
Some((first, remainder)) if HTTP_WS_BYTES.contains(first) => slice = remainder,
_ => break,
}
}
loop {
match slice.split_last() {
Some((last, remainder)) if HTTP_WS_BYTES.contains(last) => slice = remainder,
_ => break,
}
}
slice
} |
impl CustomResponse {
pub fn new(headers: Headers, raw_status: RawStatus, body: Vec<u8>) -> CustomResponse {
CustomResponse {
headers: headers, | random_line_split |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(step_by)]
#![deny(unsafe_code)]
extern crate cookie as cookie_rs;
extern crate heapsize;
#[macro_use]
extern crate heapsize_derive;
extern crate hyper;
extern crate hyper_serde;
extern crate image as piston_image;
extern crate ipc_channel;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate msg;
extern crate num_traits;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate servo_config;
extern crate servo_url;
extern crate url;
extern crate uuid;
extern crate webrender_traits;
use cookie_rs::Cookie;
use filemanager_thread::FileManagerThreadMsg;
use heapsize::HeapSizeOf;
use hyper::Error as HyperError;
use hyper::header::{ContentType, Headers, ReferrerPolicy as ReferrerPolicyHeader};
use hyper::http::RawStatus;
use hyper::mime::{Attr, Mime};
use hyper_serde::Serde;
use ipc_channel::Error as IpcError;
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use ipc_channel::router::ROUTER;
use request::{Request, RequestInit};
use response::{HttpsState, Response, ResponseInit};
use servo_url::ServoUrl;
use std::error::Error;
use storage_thread::StorageThreadMsg;
pub mod blob_url_store;
pub mod filemanager_thread;
pub mod image_cache;
pub mod net_error_list;
pub mod pub_domains;
pub mod request;
pub mod response;
pub mod storage_thread;
/// Image handling.
///
/// It may be surprising that this goes in the network crate as opposed to the graphics crate.
/// However, image handling is generally very integrated with the network stack (especially where
/// caching is involved) and as a result it must live in here.
pub mod image {
pub mod base;
}
/// A loading context, for context-specific sniffing, as defined in
/// https://mimesniff.spec.whatwg.org/#context-specific-sniffing
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum LoadContext {
Browsing,
Image,
AudioVideo,
Plugin,
Style,
Script,
Font,
TextTrack,
CacheManifest,
}
#[derive(Clone, Debug, Deserialize, Serialize, HeapSizeOf)]
pub struct CustomResponse {
#[ignore_heap_size_of = "Defined in hyper"]
#[serde(deserialize_with = "::hyper_serde::deserialize",
serialize_with = "::hyper_serde::serialize")]
pub headers: Headers,
#[ignore_heap_size_of = "Defined in hyper"]
#[serde(deserialize_with = "::hyper_serde::deserialize",
serialize_with = "::hyper_serde::serialize")]
pub raw_status: RawStatus,
pub body: Vec<u8>,
}
impl CustomResponse {
pub fn new(headers: Headers, raw_status: RawStatus, body: Vec<u8>) -> CustomResponse {
CustomResponse {
headers: headers,
raw_status: raw_status,
body: body,
}
}
}
#[derive(Clone, Deserialize, Serialize)]
pub struct CustomResponseMediator {
pub response_chan: IpcSender<Option<CustomResponse>>,
pub load_url: ServoUrl,
}
/// [Policies](https://w3c.github.io/webappsec-referrer-policy/#referrer-policy-states)
/// for providing a referrer header for a request
#[derive(Clone, Copy, Debug, Deserialize, HeapSizeOf, Serialize)]
pub enum ReferrerPolicy {
/// "no-referrer"
NoReferrer,
/// "no-referrer-when-downgrade"
NoReferrerWhenDowngrade,
/// "origin"
Origin,
/// "same-origin"
SameOrigin,
/// "origin-when-cross-origin"
OriginWhenCrossOrigin,
/// "unsafe-url"
UnsafeUrl,
/// "strict-origin"
StrictOrigin,
/// "strict-origin-when-cross-origin"
StrictOriginWhenCrossOrigin,
}
impl<'a> From<&'a ReferrerPolicyHeader> for ReferrerPolicy {
fn from(policy: &'a ReferrerPolicyHeader) -> Self {
match *policy {
ReferrerPolicyHeader::NoReferrer =>
ReferrerPolicy::NoReferrer,
ReferrerPolicyHeader::NoReferrerWhenDowngrade =>
ReferrerPolicy::NoReferrerWhenDowngrade,
ReferrerPolicyHeader::SameOrigin =>
ReferrerPolicy::SameOrigin,
ReferrerPolicyHeader::Origin =>
ReferrerPolicy::Origin,
ReferrerPolicyHeader::OriginWhenCrossOrigin =>
ReferrerPolicy::OriginWhenCrossOrigin,
ReferrerPolicyHeader::UnsafeUrl =>
ReferrerPolicy::UnsafeUrl,
ReferrerPolicyHeader::StrictOrigin =>
ReferrerPolicy::StrictOrigin,
ReferrerPolicyHeader::StrictOriginWhenCrossOrigin =>
ReferrerPolicy::StrictOriginWhenCrossOrigin,
}
}
}
#[derive(Deserialize, Serialize)]
pub enum FetchResponseMsg {
// todo: should have fields for transmitted/total bytes
ProcessRequestBody,
ProcessRequestEOF,
// todo: send more info about the response (or perhaps the entire Response)
ProcessResponse(Result<FetchMetadata, NetworkError>),
ProcessResponseChunk(Vec<u8>),
ProcessResponseEOF(Result<(), NetworkError>),
}
pub trait FetchTaskTarget {
/// https://fetch.spec.whatwg.org/#process-request-body
///
/// Fired when a chunk of the request body is transmitted
fn process_request_body(&mut self, request: &Request);
/// https://fetch.spec.whatwg.org/#process-request-end-of-file
///
/// Fired when the entire request finishes being transmitted
fn process_request_eof(&mut self, request: &Request);
/// https://fetch.spec.whatwg.org/#process-response
///
/// Fired when headers are received
fn process_response(&mut self, response: &Response);
/// Fired when a chunk of response content is received
fn process_response_chunk(&mut self, chunk: Vec<u8>);
/// https://fetch.spec.whatwg.org/#process-response-end-of-file
///
/// Fired when the response is fully fetched
fn process_response_eof(&mut self, response: &Response);
}
#[derive(Clone, Serialize, Deserialize)]
pub enum FilteredMetadata {
Basic(Metadata),
Cors(Metadata),
Opaque,
OpaqueRedirect
}
#[derive(Clone, Serialize, Deserialize)]
pub enum FetchMetadata {
Unfiltered(Metadata),
Filtered {
filtered: FilteredMetadata,
unsafe_: Metadata,
},
}
pub trait FetchResponseListener {
fn process_request_body(&mut self);
fn process_request_eof(&mut self);
fn process_response(&mut self, metadata: Result<FetchMetadata, NetworkError>);
fn process_response_chunk(&mut self, chunk: Vec<u8>);
fn process_response_eof(&mut self, response: Result<(), NetworkError>);
}
impl FetchTaskTarget for IpcSender<FetchResponseMsg> {
fn process_request_body(&mut self, _: &Request) {
let _ = self.send(FetchResponseMsg::ProcessRequestBody);
}
fn process_request_eof(&mut self, _: &Request) {
let _ = self.send(FetchResponseMsg::ProcessRequestEOF);
}
fn process_response(&mut self, response: &Response) {
let _ = self.send(FetchResponseMsg::ProcessResponse(response.metadata()));
}
fn process_response_chunk(&mut self, chunk: Vec<u8>) {
let _ = self.send(FetchResponseMsg::ProcessResponseChunk(chunk));
}
fn process_response_eof(&mut self, response: &Response) {
if response.is_network_error() {
// todo: finer grained errors
let _ =
self.send(FetchResponseMsg::ProcessResponseEOF(Err(NetworkError::Internal("Network error".into()))));
} else {
let _ = self.send(FetchResponseMsg::ProcessResponseEOF(Ok(())));
}
}
}
pub trait Action<Listener> {
fn process(self, listener: &mut Listener);
}
impl<T: FetchResponseListener> Action<T> for FetchResponseMsg {
/// Execute the default action on a provided listener.
fn process(self, listener: &mut T) {
match self {
FetchResponseMsg::ProcessRequestBody => listener.process_request_body(),
FetchResponseMsg::ProcessRequestEOF => listener.process_request_eof(),
FetchResponseMsg::ProcessResponse(meta) => listener.process_response(meta),
FetchResponseMsg::ProcessResponseChunk(data) => listener.process_response_chunk(data),
FetchResponseMsg::ProcessResponseEOF(data) => listener.process_response_eof(data),
}
}
}
/// Handle to a resource thread
pub type CoreResourceThread = IpcSender<CoreResourceMsg>;
pub type IpcSendResult = Result<(), IpcError>;
/// Abstraction of the ability to send a particular type of message,
/// used by net_traits::ResourceThreads to ease the use of its IpcSender sub-fields
/// XXX: If this trait is used more in the future, some auto derive might be appealing
pub trait IpcSend<T>
where T: serde::Serialize + serde::Deserialize,
{
/// send message T
fn send(&self, T) -> IpcSendResult;
/// get underlying sender
fn sender(&self) -> IpcSender<T>;
}
// FIXME: Originally we will construct an Arc<ResourceThread> from ResourceThread
// in script_thread to avoid some performance pitfall. Now we decide to deal with
// the "Arc" hack implicitly in future.
// See discussion: http://logs.glob.uno/?c=mozilla%23servo&s=16+May+2016&e=16+May+2016#c430412
// See also: https://github.com/servo/servo/blob/735480/components/script/script_thread.rs#L313
#[derive(Clone, Serialize, Deserialize)]
pub struct ResourceThreads {
core_thread: CoreResourceThread,
storage_thread: IpcSender<StorageThreadMsg>,
}
impl ResourceThreads {
pub fn new(c: CoreResourceThread, s: IpcSender<StorageThreadMsg>) -> ResourceThreads {
ResourceThreads {
core_thread: c,
storage_thread: s,
}
}
}
impl IpcSend<CoreResourceMsg> for ResourceThreads {
fn send(&self, msg: CoreResourceMsg) -> IpcSendResult {
self.core_thread.send(msg)
}
fn sender(&self) -> IpcSender<CoreResourceMsg> {
self.core_thread.clone()
}
}
impl IpcSend<StorageThreadMsg> for ResourceThreads {
fn send(&self, msg: StorageThreadMsg) -> IpcSendResult {
self.storage_thread.send(msg)
}
fn sender(&self) -> IpcSender<StorageThreadMsg> {
self.storage_thread.clone()
}
}
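// Editor's sketch (added commentary, not part of the original source): implementing
// `IpcSend` once per message type keeps callers generic over the concrete sender.
// A hypothetical helper built only from the trait above could look like:
//
//     fn forward<T, S>(threads: &S, msg: T) -> IpcSendResult
//     where
//         T: serde::Serialize + serde::Deserialize,
//         S: IpcSend<T>,
//     {
//         threads.send(msg)
//     }
//
// so the same call site can forward either `CoreResourceMsg` or `StorageThreadMsg`
// values through a `ResourceThreads` without naming the underlying IpcSender.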
// Ignore the sub-fields
impl HeapSizeOf for ResourceThreads {
fn heap_size_of_children(&self) -> usize {
0
}
}
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum IncludeSubdomains {
Included,
NotIncluded,
}
#[derive(HeapSizeOf, Deserialize, Serialize)]
pub enum MessageData {
Text(String),
Binary(Vec<u8>),
}
#[derive(Deserialize, Serialize)]
pub enum WebSocketDomAction {
SendMessage(MessageData),
Close(Option<u16>, Option<String>),
}
#[derive(Deserialize, Serialize)]
pub enum WebSocketNetworkEvent {
ConnectionEstablished {
protocol_in_use: Option<String>,
},
MessageReceived(MessageData),
Close(Option<u16>, String),
Fail,
}
#[derive(Deserialize, Serialize)]
pub struct WebSocketCommunicate {
pub event_sender: IpcSender<WebSocketNetworkEvent>,
pub action_receiver: IpcReceiver<WebSocketDomAction>,
}
#[derive(Deserialize, Serialize)]
pub struct WebSocketConnectData {
pub resource_url: ServoUrl,
pub origin: String,
pub protocols: Vec<String>,
}
#[derive(Deserialize, Serialize)]
pub enum CoreResourceMsg {
Fetch(RequestInit, IpcSender<FetchResponseMsg>),
/// Initiate a fetch in response to processing a redirection
FetchRedirect(RequestInit, ResponseInit, IpcSender<FetchResponseMsg>),
/// Try to make a websocket connection to a URL.
WebsocketConnect(WebSocketCommunicate, WebSocketConnectData),
/// Store a cookie for a given originating URL
SetCookieForUrl(ServoUrl, Serde<Cookie<'static>>, CookieSource),
/// Store a set of cookies for a given originating URL
SetCookiesForUrl(ServoUrl, Vec<Serde<Cookie<'static>>>, CookieSource),
/// Retrieve the stored cookies for a given URL
GetCookiesForUrl(ServoUrl, IpcSender<Option<String>>, CookieSource),
/// Get a cookie by name for a given originating URL
GetCookiesDataForUrl(ServoUrl, IpcSender<Vec<Serde<Cookie<'static>>>>, CookieSource),
/// Cancel a network request corresponding to a given `ResourceId`
Cancel(ResourceId),
/// Synchronization message solely for knowing the state of the ResourceChannelManager loop
Synchronize(IpcSender<()>),
/// Send the network sender in constellation to CoreResourceThread
NetworkMediator(IpcSender<CustomResponseMediator>),
/// Message forwarded to file manager's handler
ToFileManager(FileManagerThreadMsg),
/// Break the load handler loop, send a reply when done cleaning up local resources
/// and exit
Exit(IpcSender<()>),
}
/// Instruct the resource thread to make a new request.
pub fn fetch_async<F>(request: RequestInit, core_resource_thread: &CoreResourceThread, f: F)
where F: Fn(FetchResponseMsg) + Send + 'static,
{
let (action_sender, action_receiver) = ipc::channel().unwrap();
ROUTER.add_route(action_receiver.to_opaque(),
box move |message| f(message.to().unwrap()));
core_resource_thread.send(CoreResourceMsg::Fetch(request, action_sender)).unwrap();
}
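// Editor's sketch (added commentary, not part of the original source): a typical
// caller passes a closure that is invoked on the router thread for every
// FetchResponseMsg; `handle_chunk` and `handle_eof` below are placeholder names.
//
//     fetch_async(request, &core_resource_thread, move |msg| {
//         match msg {
//             FetchResponseMsg::ProcessResponseChunk(chunk) => handle_chunk(chunk),
//             FetchResponseMsg::ProcessResponseEOF(result) => handle_eof(result),
//             _ => {}
//         }
//     });
//
// The closure only needs to satisfy `Fn(FetchResponseMsg) + Send + 'static`.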
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct ResourceCorsData {
/// CORS Preflight flag
pub preflight: bool,
/// Origin of CORS Request
pub origin: ServoUrl,
}
/// Metadata about a loaded resource, such as is obtained from HTTP headers.
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct Metadata {
/// Final URL after redirects.
pub final_url: ServoUrl,
#[ignore_heap_size_of = "Defined in hyper"]
/// MIME type / subtype.
pub content_type: Option<Serde<ContentType>>,
/// Character set.
pub charset: Option<String>,
#[ignore_heap_size_of = "Defined in hyper"]
/// Headers
pub headers: Option<Serde<Headers>>,
/// HTTP Status
pub status: Option<(u16, Vec<u8>)>,
/// Is successful HTTPS connection
pub https_state: HttpsState,
/// Referrer Url
pub referrer: Option<ServoUrl>,
/// Referrer Policy of the Request used to obtain Response
pub referrer_policy: Option<ReferrerPolicy>,
}
impl Metadata {
/// Metadata with defaults for everything optional.
pub fn default(url: ServoUrl) -> Self {
Metadata {
final_url: url,
content_type: None,
charset: None,
headers: None,
// https://fetch.spec.whatwg.org/#concept-response-status-message
status: Some((200, b"OK".to_vec())),
https_state: HttpsState::None,
referrer: None,
referrer_policy: None,
}
}
/// Extract the parts of a Mime that we care about.
pub fn set_content_type(&mut self, content_type: Option<&Mime>) {
if self.headers.is_none() {
self.headers = Some(Serde(Headers::new()));
}
if let Some(mime) = content_type {
self.headers.as_mut().unwrap().set(ContentType(mime.clone()));
self.content_type = Some(Serde(ContentType(mime.clone())));
let Mime(_, _, ref parameters) = *mime;
for &(ref k, ref v) in parameters {
if Attr::Charset == *k {
self.charset = Some(v.to_string());
}
}
}
}
}
/// The creator of a given cookie
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum CookieSource {
/// An HTTP API
HTTP,
/// A non-HTTP API
NonHTTP,
}
/// Convenience function for synchronously loading a whole resource.
pub fn load_whole_resource(request: RequestInit,
core_resource_thread: &CoreResourceThread)
-> Result<(Metadata, Vec<u8>), NetworkError> {
let (action_sender, action_receiver) = ipc::channel().unwrap();
core_resource_thread.send(CoreResourceMsg::Fetch(request, action_sender)).unwrap();
let mut buf = vec![];
let mut metadata = None;
loop {
match action_receiver.recv().unwrap() {
FetchResponseMsg::ProcessRequestBody |
FetchResponseMsg::ProcessRequestEOF => (),
FetchResponseMsg::ProcessResponse(Ok(m)) => {
metadata = Some(match m {
FetchMetadata::Unfiltered(m) => m,
FetchMetadata::Filtered { unsafe_, .. } => unsafe_,
})
},
FetchResponseMsg::ProcessResponseChunk(data) => buf.extend_from_slice(&data),
FetchResponseMsg::ProcessResponseEOF(Ok(())) => return Ok((metadata.unwrap(), buf)),
FetchResponseMsg::ProcessResponse(Err(e)) |
FetchResponseMsg::ProcessResponseEOF(Err(e)) => return Err(e),
}
}
}
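// Editor's sketch (added commentary, not part of the original source): callers that
// need the whole body up front use this instead of `fetch_async`, e.g.
//
//     let (metadata, body) = load_whole_resource(request, &core_resource_thread)?;
//     assert_eq!(metadata.status.as_ref().map(|s| s.0), Some(200));
//
// Note that the loop above blocks on `action_receiver.recv()` until
// `ProcessResponseEOF` arrives, so the call is synchronous from the caller's view.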
/// A unique identifier to keep track of each load message in the resource handler
#[derive(Clone, PartialEq, Eq, Copy, Hash, Debug, Deserialize, Serialize, HeapSizeOf)]
pub struct ResourceId(pub u32);
/// Network errors that have to be exported out of the loaders
#[derive(Clone, PartialEq, Eq, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum NetworkError {
/// Could be any of the internal errors, like unsupported scheme, connection errors, etc.
Internal(String),
LoadCancelled,
/// SSL validation error that has to be handled in the HTML parser
SslValidation(ServoUrl, String),
}
impl NetworkError {
pub fn from_hyper_error(url: &ServoUrl, error: HyperError) -> Self {
if let HyperError::Ssl(ref ssl_error) = error {
return NetworkError::from_ssl_error(url, &**ssl_error);
}
NetworkError::Internal(error.description().to_owned())
}
pub fn from_ssl_error(url: &ServoUrl, error: &Error) -> Self {
NetworkError::SslValidation(url.clone(), error.description().to_owned())
}
}
/// Normalize `slice`, as defined by
/// [the Fetch Spec](https://fetch.spec.whatwg.org/#concept-header-value-normalize).
pub fn | (mut slice: &[u8]) -> &[u8] {
const HTTP_WS_BYTES: &'static [u8] = b"\x09\x0A\x0D\x20";
loop {
match slice.split_first() {
Some((first, remainder)) if HTTP_WS_BYTES.contains(first) => slice = remainder,
_ => break,
}
}
loop {
match slice.split_last() {
Some((last, remainder)) if HTTP_WS_BYTES.contains(last) => slice = remainder,
_ => break,
}
}
slice
}
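// Editor's example (added commentary, not part of the original source): the function
// strips the four HTTP whitespace bytes (HTAB, LF, CR, SP) from both ends of the
// slice without copying, e.g.
//
//     assert_eq!(trim_http_whitespace(b"  text/html \r\n"), b"text/html");
//     assert_eq!(trim_http_whitespace(b" \t\r\n"), b"");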
| trim_http_whitespace | identifier_name |
handler.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::*;
impl<SD: SendDescUnicast, IC> SendDescUnicast for Handler<SD, IC> {}
impl<SD: SendDescMulticast, IC> SendDescMulticast for Handler<SD, IC> {}
/// Combinator for Send Descriptors created by [`SendDescExt::use_handler`].
#[derive(Debug)]
pub struct Handler<SD, F> {
pub(super) inner: SD,
pub(super) handler: F,
}
impl<SD, F, IC, R> SendDesc<IC, R> for Handler<SD, F>
where
SD: SendDesc<IC, ()> + Send,
IC: InboundContext,
R: Send,
F: FnMut(
Result<&dyn InboundContext<SocketAddr = IC::SocketAddr>, Error>,
) -> Result<ResponseStatus<R>, Error>
+ Send,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_options!(inner);
send_desc_passthru_payload!(inner);
send_desc_passthru_supports_option!(inner);
fn handler(&mut self, context: Result<&IC, Error>) -> Result<ResponseStatus<R>, Error> {
let inner_result = self.inner.handler(context);
let outer_result = (self.handler)(
context.map(|ic| ic as &dyn InboundContext<SocketAddr = IC::SocketAddr>),
);
if inner_result.is_err() || outer_result.is_err() | else {
outer_result
}
}
}
| {
Err(inner_result.err().or(outer_result.err()).unwrap())
} | conditional_block |
handler.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// | //
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::*;
impl<SD: SendDescUnicast, IC> SendDescUnicast for Handler<SD, IC> {}
impl<SD: SendDescMulticast, IC> SendDescMulticast for Handler<SD, IC> {}
/// Combinator for Send Descriptors created by [`SendDescExt::use_handler`].
#[derive(Debug)]
pub struct Handler<SD, F> {
pub(super) inner: SD,
pub(super) handler: F,
}
impl<SD, F, IC, R> SendDesc<IC, R> for Handler<SD, F>
where
SD: SendDesc<IC, ()> + Send,
IC: InboundContext,
R: Send,
F: FnMut(
Result<&dyn InboundContext<SocketAddr = IC::SocketAddr>, Error>,
) -> Result<ResponseStatus<R>, Error>
+ Send,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_options!(inner);
send_desc_passthru_payload!(inner);
send_desc_passthru_supports_option!(inner);
fn handler(&mut self, context: Result<&IC, Error>) -> Result<ResponseStatus<R>, Error> {
let inner_result = self.inner.handler(context);
let outer_result = (self.handler)(
context.map(|ic| ic as &dyn InboundContext<SocketAddr = IC::SocketAddr>),
);
if inner_result.is_err() || outer_result.is_err() {
Err(inner_result.err().or(outer_result.err()).unwrap())
} else {
outer_result
}
}
} | // https://www.apache.org/licenses/LICENSE-2.0 | random_line_split |
handler.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::*;
impl<SD: SendDescUnicast, IC> SendDescUnicast for Handler<SD, IC> {}
impl<SD: SendDescMulticast, IC> SendDescMulticast for Handler<SD, IC> {}
/// Combinator for Send Descriptors created by [`SendDescExt::use_handler`].
#[derive(Debug)]
pub struct | <SD, F> {
pub(super) inner: SD,
pub(super) handler: F,
}
impl<SD, F, IC, R> SendDesc<IC, R> for Handler<SD, F>
where
SD: SendDesc<IC, ()> + Send,
IC: InboundContext,
R: Send,
F: FnMut(
Result<&dyn InboundContext<SocketAddr = IC::SocketAddr>, Error>,
) -> Result<ResponseStatus<R>, Error>
+ Send,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_options!(inner);
send_desc_passthru_payload!(inner);
send_desc_passthru_supports_option!(inner);
fn handler(&mut self, context: Result<&IC, Error>) -> Result<ResponseStatus<R>, Error> {
let inner_result = self.inner.handler(context);
let outer_result = (self.handler)(
context.map(|ic| ic as &dyn InboundContext<SocketAddr = IC::SocketAddr>),
);
if inner_result.is_err() || outer_result.is_err() {
Err(inner_result.err().or(outer_result.err()).unwrap())
} else {
outer_result
}
}
}
| Handler | identifier_name |
dissimilaroriginwindow.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::DissimilarOriginWindowBinding;
use dom::bindings::codegen::Bindings::DissimilarOriginWindowBinding::DissimilarOriginWindowMethods;
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use dom::bindings::str::DOMString;
use dom::bindings::structuredclone::StructuredCloneData;
use dom::dissimilaroriginlocation::DissimilarOriginLocation;
use dom::globalscope::GlobalScope;
use dom::windowproxy::WindowProxy;
use dom_struct::dom_struct;
use ipc_channel::ipc;
use js::jsapi::{JSContext, HandleValue};
use js::jsval::{JSVal, UndefinedValue};
use msg::constellation_msg::PipelineId;
use script_traits::ScriptMsg;
use servo_url::ImmutableOrigin;
use servo_url::MutableOrigin;
use servo_url::ServoUrl;
/// Represents a dissimilar-origin `Window` that exists in another script thread.
///
/// Since the `Window` is in a different script thread, we cannot access it
/// directly, but some of its accessors (for example `window.parent`)
/// still need to function.
///
/// In `windowproxy.rs`, we create a custom window proxy for these windows,
/// that throws security exceptions for most accessors. This is not a replacement
/// for XOWs, but provides belt-and-braces security.
#[dom_struct]
pub struct DissimilarOriginWindow {
/// The global for this window.
globalscope: GlobalScope,
/// The window proxy for this window.
window_proxy: Dom<WindowProxy>,
/// The location of this window, initialized lazily.
location: MutNullableDom<DissimilarOriginLocation>,
}
impl DissimilarOriginWindow {
#[allow(unsafe_code)]
pub fn new(
global_to_clone_from: &GlobalScope,
window_proxy: &WindowProxy,
) -> DomRoot<Self> {
let cx = global_to_clone_from.get_cx();
// Any timer events fired on this window are ignored.
let (timer_event_chan, _) = ipc::channel().unwrap();
let win = Box::new(Self {
globalscope: GlobalScope::new_inherited(
PipelineId::new(),
global_to_clone_from.devtools_chan().cloned(),
global_to_clone_from.mem_profiler_chan().clone(),
global_to_clone_from.time_profiler_chan().clone(),
global_to_clone_from.script_to_constellation_chan().clone(),
global_to_clone_from.scheduler_chan().clone(),
global_to_clone_from.resource_threads().clone(),
timer_event_chan,
global_to_clone_from.origin().clone(),
// FIXME(nox): The microtask queue is probably not important
// here, but this whole DOM interface is a hack anyway.
global_to_clone_from.microtask_queue().clone(),
),
window_proxy: Dom::from_ref(window_proxy),
location: Default::default(),
});
unsafe { DissimilarOriginWindowBinding::Wrap(cx, win) }
}
pub fn origin(&self) -> &MutableOrigin |
}
impl DissimilarOriginWindowMethods for DissimilarOriginWindow {
// https://html.spec.whatwg.org/multipage/#dom-window
fn Window(&self) -> DomRoot<WindowProxy> {
DomRoot::from_ref(&*self.window_proxy)
}
// https://html.spec.whatwg.org/multipage/#dom-self
fn Self_(&self) -> DomRoot<WindowProxy> {
DomRoot::from_ref(&*self.window_proxy)
}
// https://html.spec.whatwg.org/multipage/#dom-frames
fn Frames(&self) -> DomRoot<WindowProxy> {
DomRoot::from_ref(&*self.window_proxy)
}
// https://html.spec.whatwg.org/multipage/#dom-parent
fn GetParent(&self) -> Option<DomRoot<WindowProxy>> {
// Steps 1-3.
if self.window_proxy.is_browsing_context_discarded() {
return None;
}
// Step 4.
if let Some(parent) = self.window_proxy.parent() {
return Some(DomRoot::from_ref(parent));
}
// Step 5.
Some(DomRoot::from_ref(&*self.window_proxy))
}
// https://html.spec.whatwg.org/multipage/#dom-top
fn GetTop(&self) -> Option<DomRoot<WindowProxy>> {
// Steps 1-3.
if self.window_proxy.is_browsing_context_discarded() {
return None;
}
// Steps 4-5.
Some(DomRoot::from_ref(self.window_proxy.top()))
}
// https://html.spec.whatwg.org/multipage/#dom-length
fn Length(&self) -> u32 {
// TODO: Implement x-origin length
0
}
// https://html.spec.whatwg.org/multipage/#dom-window-close
fn Close(&self) {
// TODO: Implement x-origin close
}
// https://html.spec.whatwg.org/multipage/#dom-window-closed
fn Closed(&self) -> bool {
// TODO: Implement x-origin close
false
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-window-postmessage
unsafe fn PostMessage(&self, cx: *mut JSContext, message: HandleValue, origin: DOMString) -> ErrorResult {
// Step 3-5.
let origin = match &origin[..] {
"*" => None,
"/" => {
// TODO: Should be the origin of the incumbent settings object.
None
},
url => match ServoUrl::parse(&url) {
Ok(url) => Some(url.origin()),
Err(_) => return Err(Error::Syntax),
}
};
// Step 1-2, 6-8.
// TODO(#12717): Should implement the `transfer` argument.
let data = StructuredCloneData::write(cx, message)?;
// Step 9.
self.post_message(origin, data);
Ok(())
}
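// Editor's note (added commentary, not part of the original source): per the match
// above, `otherWindow.postMessage(data, "*")` skips the target-origin check entirely
// (origin == None), "/" is treated the same way until the incumbent-settings TODO is
// resolved, and any string that fails `ServoUrl::parse` is rejected with a
// SyntaxError before the message is serialized.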
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-opener
unsafe fn Opener(&self, _: *mut JSContext) -> JSVal {
// TODO: Implement x-origin opener
UndefinedValue()
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-opener
unsafe fn SetOpener(&self, _: *mut JSContext, _: HandleValue) {
// TODO: Implement x-origin opener
}
// https://html.spec.whatwg.org/multipage/#dom-window-blur
fn Blur(&self) {
// TODO: Implement x-origin blur
}
// https://html.spec.whatwg.org/multipage/#dom-focus
fn Focus(&self) {
// TODO: Implement x-origin focus
}
// https://html.spec.whatwg.org/multipage/#dom-location
fn Location(&self) -> DomRoot<DissimilarOriginLocation> {
self.location.or_init(|| DissimilarOriginLocation::new(self))
}
}
impl DissimilarOriginWindow {
pub fn post_message(&self, origin: Option<ImmutableOrigin>, data: StructuredCloneData) {
let msg = ScriptMsg::PostMessage(self.window_proxy.browsing_context_id(),
origin,
data.move_to_arraybuffer());
let _ = self.upcast::<GlobalScope>().script_to_constellation_chan().send(msg);
}
}
| {
self.upcast::<GlobalScope>().origin()
} | identifier_body |
dissimilaroriginwindow.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::DissimilarOriginWindowBinding;
use dom::bindings::codegen::Bindings::DissimilarOriginWindowBinding::DissimilarOriginWindowMethods;
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use dom::bindings::str::DOMString;
use dom::bindings::structuredclone::StructuredCloneData;
use dom::dissimilaroriginlocation::DissimilarOriginLocation;
use dom::globalscope::GlobalScope;
use dom::windowproxy::WindowProxy;
use dom_struct::dom_struct;
use ipc_channel::ipc;
use js::jsapi::{JSContext, HandleValue};
use js::jsval::{JSVal, UndefinedValue};
use msg::constellation_msg::PipelineId;
use script_traits::ScriptMsg;
use servo_url::ImmutableOrigin;
use servo_url::MutableOrigin;
use servo_url::ServoUrl;
/// Represents a dissimilar-origin `Window` that exists in another script thread.
///
/// Since the `Window` is in a different script thread, we cannot access it
/// directly, but some of its accessors (for example `window.parent`)
/// still need to function.
///
/// In `windowproxy.rs`, we create a custom window proxy for these windows,
/// that throws security exceptions for most accessors. This is not a replacement
/// for XOWs, but provides belt-and-braces security.
#[dom_struct]
pub struct DissimilarOriginWindow {
/// The global for this window.
globalscope: GlobalScope,
/// The window proxy for this window.
window_proxy: Dom<WindowProxy>,
/// The location of this window, initialized lazily.
location: MutNullableDom<DissimilarOriginLocation>,
}
impl DissimilarOriginWindow {
#[allow(unsafe_code)]
pub fn new(
global_to_clone_from: &GlobalScope,
window_proxy: &WindowProxy,
) -> DomRoot<Self> {
let cx = global_to_clone_from.get_cx();
// Any timer events fired on this window are ignored.
let (timer_event_chan, _) = ipc::channel().unwrap();
let win = Box::new(Self {
globalscope: GlobalScope::new_inherited(
PipelineId::new(),
global_to_clone_from.devtools_chan().cloned(),
global_to_clone_from.mem_profiler_chan().clone(),
global_to_clone_from.time_profiler_chan().clone(),
global_to_clone_from.script_to_constellation_chan().clone(),
global_to_clone_from.scheduler_chan().clone(),
global_to_clone_from.resource_threads().clone(),
timer_event_chan,
global_to_clone_from.origin().clone(),
// FIXME(nox): The microtask queue is probably not important
// here, but this whole DOM interface is a hack anyway.
global_to_clone_from.microtask_queue().clone(),
),
window_proxy: Dom::from_ref(window_proxy),
location: Default::default(),
});
unsafe { DissimilarOriginWindowBinding::Wrap(cx, win) }
}
pub fn origin(&self) -> &MutableOrigin {
self.upcast::<GlobalScope>().origin()
}
}
impl DissimilarOriginWindowMethods for DissimilarOriginWindow {
// https://html.spec.whatwg.org/multipage/#dom-window
fn Window(&self) -> DomRoot<WindowProxy> {
DomRoot::from_ref(&*self.window_proxy)
}
// https://html.spec.whatwg.org/multipage/#dom-self
fn Self_(&self) -> DomRoot<WindowProxy> {
DomRoot::from_ref(&*self.window_proxy)
}
// https://html.spec.whatwg.org/multipage/#dom-frames
fn Frames(&self) -> DomRoot<WindowProxy> {
DomRoot::from_ref(&*self.window_proxy)
}
// https://html.spec.whatwg.org/multipage/#dom-parent
fn GetParent(&self) -> Option<DomRoot<WindowProxy>> {
// Steps 1-3.
if self.window_proxy.is_browsing_context_discarded() {
return None;
}
// Step 4.
if let Some(parent) = self.window_proxy.parent() {
return Some(DomRoot::from_ref(parent));
}
// Step 5.
Some(DomRoot::from_ref(&*self.window_proxy))
}
// https://html.spec.whatwg.org/multipage/#dom-top
fn GetTop(&self) -> Option<DomRoot<WindowProxy>> {
// Steps 1-3.
if self.window_proxy.is_browsing_context_discarded() {
return None;
}
// Steps 4-5.
Some(DomRoot::from_ref(self.window_proxy.top()))
}
// https://html.spec.whatwg.org/multipage/#dom-length
fn Length(&self) -> u32 {
// TODO: Implement x-origin length
0
}
// https://html.spec.whatwg.org/multipage/#dom-window-close
fn Close(&self) {
// TODO: Implement x-origin close
}
// https://html.spec.whatwg.org/multipage/#dom-window-closed
fn Closed(&self) -> bool {
// TODO: Implement x-origin close
false
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-window-postmessage
unsafe fn PostMessage(&self, cx: *mut JSContext, message: HandleValue, origin: DOMString) -> ErrorResult {
// Step 3-5.
let origin = match &origin[..] {
"*" => None,
"/" => {
// TODO: Should be the origin of the incumbent settings object.
None
},
url => match ServoUrl::parse(&url) {
Ok(url) => Some(url.origin()),
Err(_) => return Err(Error::Syntax),
}
};
// Step 1-2, 6-8.
// TODO(#12717): Should implement the `transfer` argument.
let data = StructuredCloneData::write(cx, message)?;
// Step 9.
self.post_message(origin, data);
Ok(())
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-opener
unsafe fn Opener(&self, _: *mut JSContext) -> JSVal {
// TODO: Implement x-origin opener
UndefinedValue()
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-opener
unsafe fn SetOpener(&self, _: *mut JSContext, _: HandleValue) {
// TODO: Implement x-origin opener
}
// https://html.spec.whatwg.org/multipage/#dom-window-blur
fn | (&self) {
// TODO: Implement x-origin blur
}
// https://html.spec.whatwg.org/multipage/#dom-focus
fn Focus(&self) {
// TODO: Implement x-origin focus
}
// https://html.spec.whatwg.org/multipage/#dom-location
fn Location(&self) -> DomRoot<DissimilarOriginLocation> {
self.location.or_init(|| DissimilarOriginLocation::new(self))
}
}
impl DissimilarOriginWindow {
pub fn post_message(&self, origin: Option<ImmutableOrigin>, data: StructuredCloneData) {
let msg = ScriptMsg::PostMessage(self.window_proxy.browsing_context_id(),
origin,
data.move_to_arraybuffer());
let _ = self.upcast::<GlobalScope>().script_to_constellation_chan().send(msg);
}
}
| Blur | identifier_name |
dissimilaroriginwindow.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::DissimilarOriginWindowBinding;
use dom::bindings::codegen::Bindings::DissimilarOriginWindowBinding::DissimilarOriginWindowMethods;
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use dom::bindings::str::DOMString;
use dom::bindings::structuredclone::StructuredCloneData;
use dom::dissimilaroriginlocation::DissimilarOriginLocation;
use dom::globalscope::GlobalScope;
use dom::windowproxy::WindowProxy;
use dom_struct::dom_struct;
use ipc_channel::ipc;
use js::jsapi::{JSContext, HandleValue};
use js::jsval::{JSVal, UndefinedValue};
use msg::constellation_msg::PipelineId;
use script_traits::ScriptMsg;
use servo_url::ImmutableOrigin;
use servo_url::MutableOrigin;
use servo_url::ServoUrl;
/// Represents a dissimilar-origin `Window` that exists in another script thread.
///
/// Since the `Window` is in a different script thread, we cannot access it
/// directly, but some of its accessors (for example `window.parent`)
/// still need to function.
///
/// In `windowproxy.rs`, we create a custom window proxy for these windows,
/// that throws security exceptions for most accessors. This is not a replacement
/// for XOWs, but provides belt-and-braces security.
#[dom_struct]
pub struct DissimilarOriginWindow {
/// The global for this window.
globalscope: GlobalScope,
/// The window proxy for this window.
window_proxy: Dom<WindowProxy>,
/// The location of this window, initialized lazily.
location: MutNullableDom<DissimilarOriginLocation>,
}
impl DissimilarOriginWindow {
#[allow(unsafe_code)]
pub fn new(
global_to_clone_from: &GlobalScope,
window_proxy: &WindowProxy,
) -> DomRoot<Self> {
let cx = global_to_clone_from.get_cx();
// Any timer events fired on this window are ignored.
let (timer_event_chan, _) = ipc::channel().unwrap();
let win = Box::new(Self {
globalscope: GlobalScope::new_inherited(
PipelineId::new(),
global_to_clone_from.devtools_chan().cloned(),
global_to_clone_from.mem_profiler_chan().clone(),
global_to_clone_from.time_profiler_chan().clone(),
global_to_clone_from.script_to_constellation_chan().clone(),
global_to_clone_from.scheduler_chan().clone(),
global_to_clone_from.resource_threads().clone(),
timer_event_chan,
global_to_clone_from.origin().clone(),
// FIXME(nox): The microtask queue is probably not important
// here, but this whole DOM interface is a hack anyway.
global_to_clone_from.microtask_queue().clone(),
),
window_proxy: Dom::from_ref(window_proxy),
location: Default::default(),
});
unsafe { DissimilarOriginWindowBinding::Wrap(cx, win) }
}
pub fn origin(&self) -> &MutableOrigin {
self.upcast::<GlobalScope>().origin()
}
}
impl DissimilarOriginWindowMethods for DissimilarOriginWindow {
// https://html.spec.whatwg.org/multipage/#dom-window
fn Window(&self) -> DomRoot<WindowProxy> {
DomRoot::from_ref(&*self.window_proxy)
}
// https://html.spec.whatwg.org/multipage/#dom-self
fn Self_(&self) -> DomRoot<WindowProxy> {
DomRoot::from_ref(&*self.window_proxy)
}
// https://html.spec.whatwg.org/multipage/#dom-frames
fn Frames(&self) -> DomRoot<WindowProxy> {
DomRoot::from_ref(&*self.window_proxy)
}
// https://html.spec.whatwg.org/multipage/#dom-parent
fn GetParent(&self) -> Option<DomRoot<WindowProxy>> {
// Steps 1-3.
if self.window_proxy.is_browsing_context_discarded() {
return None;
}
// Step 4.
if let Some(parent) = self.window_proxy.parent() {
return Some(DomRoot::from_ref(parent));
}
// Step 5.
Some(DomRoot::from_ref(&*self.window_proxy))
}
// https://html.spec.whatwg.org/multipage/#dom-top
fn GetTop(&self) -> Option<DomRoot<WindowProxy>> {
// Steps 1-3.
if self.window_proxy.is_browsing_context_discarded() {
return None;
}
// Steps 4-5.
Some(DomRoot::from_ref(self.window_proxy.top()))
}
// https://html.spec.whatwg.org/multipage/#dom-length
fn Length(&self) -> u32 {
// TODO: Implement x-origin length
0
}
// https://html.spec.whatwg.org/multipage/#dom-window-close
fn Close(&self) {
// TODO: Implement x-origin close
}
// https://html.spec.whatwg.org/multipage/#dom-window-closed
fn Closed(&self) -> bool {
// TODO: Implement x-origin close
false
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-window-postmessage
unsafe fn PostMessage(&self, cx: *mut JSContext, message: HandleValue, origin: DOMString) -> ErrorResult {
// Step 3-5.
let origin = match &origin[..] {
"*" => None,
"/" => {
// TODO: Should be the origin of the incumbent settings object.
None
},
url => match ServoUrl::parse(&url) {
Ok(url) => Some(url.origin()),
Err(_) => return Err(Error::Syntax),
}
};
// Step 1-2, 6-8.
// TODO(#12717): Should implement the `transfer` argument.
let data = StructuredCloneData::write(cx, message)?;
// Step 9.
self.post_message(origin, data);
Ok(())
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-opener
unsafe fn Opener(&self, _: *mut JSContext) -> JSVal {
// TODO: Implement x-origin opener
UndefinedValue()
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-opener
unsafe fn SetOpener(&self, _: *mut JSContext, _: HandleValue) {
// TODO: Implement x-origin opener
}
// https://html.spec.whatwg.org/multipage/#dom-window-blur | // https://html.spec.whatwg.org/multipage/#dom-focus
fn Focus(&self) {
// TODO: Implement x-origin focus
}
// https://html.spec.whatwg.org/multipage/#dom-location
fn Location(&self) -> DomRoot<DissimilarOriginLocation> {
self.location.or_init(|| DissimilarOriginLocation::new(self))
}
}
impl DissimilarOriginWindow {
pub fn post_message(&self, origin: Option<ImmutableOrigin>, data: StructuredCloneData) {
let msg = ScriptMsg::PostMessage(self.window_proxy.browsing_context_id(),
origin,
data.move_to_arraybuffer());
let _ = self.upcast::<GlobalScope>().script_to_constellation_chan().send(msg);
}
} | fn Blur(&self) {
// TODO: Implement x-origin blur
}
| random_line_split |
api.rs | use std::collections::BTreeMap;
use anyhow::Result;
use lazy_static::lazy_static;
use super::WindowsEmulator;
pub enum CallingConvention {
Stdcall,
Cdecl,
}
pub struct ArgumentDescriptor {
pub ty: String,
pub name: String,
}
pub struct FunctionDescriptor {
pub calling_convention: CallingConvention,
pub return_type: String,
pub arguments: Vec<ArgumentDescriptor>,
}
type Hook = Box<dyn Fn(&mut dyn WindowsEmulator, &FunctionDescriptor) -> Result<()> + Send + Sync>;
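// Editor's sketch (added commentary, not part of the original source): further exports
// would be described the same way inside the `API` initializer below. The signature
// strings here are illustrative assumptions, not taken from the winmd metadata:
//
//     m.insert(
//         String::from("kernel32.dll!Sleep"),
//         FunctionDescriptor {
//             calling_convention: CallingConvention::Stdcall,
//             return_type: String::from("void"),
//             arguments: vec![ArgumentDescriptor {
//                 ty: String::from("DWORD"),
//                 name: String::from("dwMilliseconds"),
//             }],
//         },
//     );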
lazy_static! {
pub static ref API: BTreeMap<String, FunctionDescriptor> = {
let mut m = BTreeMap::new();
// populate from: https://github.com/microsoft/windows-rs/blob/master/.windows/winmd/Windows.Win32.winmd
// alternative source: https://github.com/vivisect/vivisect/blob/master/vivisect/impapi/windows/i386.py
// alternative source: https://github.com/fireeye/speakeasy/blob/88502c6eb99dd21ca6ebdcba3edff42c9c2c1bf8/speakeasy/winenv/api/usermode/kernel32.py#L1192
m.insert(
String::from("kernel32.dll!GetVersionExA"),
FunctionDescriptor {
calling_convention: CallingConvention::Stdcall,
return_type: String::from("bool"),
arguments: vec![
ArgumentDescriptor {
ty: String::from("LPOSVERSIONINFOA"), |
m
};
pub static ref HOOKS: BTreeMap<String, Hook> = {
let mut m = BTreeMap::new();
m.insert(
String::from("kernel32.dll!GetVersionExA"),
Box::new(
move |emu: &mut dyn WindowsEmulator, desc: &FunctionDescriptor| -> Result<()> {
let ra = emu.pop()?;
emu.set_pc(ra);
// this is 32-bit land
if let CallingConvention::Stdcall = desc.calling_convention {
for _ in 0..desc.arguments.len() {
let _ = emu.pop()?;
}
}
// TODO:
// this is 64-bit
// emu.inner.set_rax(0);
//emu.handle_return(0, desc)?;
Ok(())
}
) as Hook
);
m
};
} | name: String::from("lpVersionInformation"),
}
]
}
); | random_line_split |
api.rs | use std::collections::BTreeMap;
use anyhow::Result;
use lazy_static::lazy_static;
use super::WindowsEmulator;
pub enum CallingConvention {
Stdcall,
Cdecl,
}
pub struct ArgumentDescriptor {
pub ty: String,
pub name: String,
}
pub struct | {
pub calling_convention: CallingConvention,
pub return_type: String,
pub arguments: Vec<ArgumentDescriptor>,
}
type Hook = Box<dyn Fn(&mut dyn WindowsEmulator, &FunctionDescriptor) -> Result<()> + Send + Sync>;
lazy_static! {
pub static ref API: BTreeMap<String, FunctionDescriptor> = {
let mut m = BTreeMap::new();
// populate from: https://github.com/microsoft/windows-rs/blob/master/.windows/winmd/Windows.Win32.winmd
// alternative source: https://github.com/vivisect/vivisect/blob/master/vivisect/impapi/windows/i386.py
// alternative source: https://github.com/fireeye/speakeasy/blob/88502c6eb99dd21ca6ebdcba3edff42c9c2c1bf8/speakeasy/winenv/api/usermode/kernel32.py#L1192
m.insert(
String::from("kernel32.dll!GetVersionExA"),
FunctionDescriptor {
calling_convention: CallingConvention::Stdcall,
return_type: String::from("bool"),
arguments: vec![
ArgumentDescriptor {
ty: String::from("LPOSVERSIONINFOA"),
name: String::from("lpVersionInformation"),
}
]
}
);
m
};
pub static ref HOOKS: BTreeMap<String, Hook> = {
let mut m = BTreeMap::new();
m.insert(
String::from("kernel32.dll!GetVersionExA"),
Box::new(
move |emu: &mut dyn WindowsEmulator, desc: &FunctionDescriptor| -> Result<()> {
let ra = emu.pop()?;
emu.set_pc(ra);
// this is 32-bit land
if let CallingConvention::Stdcall = desc.calling_convention {
for _ in 0..desc.arguments.len() {
let _ = emu.pop()?;
}
}
// TODO:
// this is 64-bit
// emu.inner.set_rax(0);
//emu.handle_return(0, desc)?;
Ok(())
}
) as Hook
);
m
};
}
| FunctionDescriptor | identifier_name |
borrowck-rvalues-mutable.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
struct Counter {
value: usize
}
impl Counter {
fn new(v: usize) -> Counter {
Counter {value: v}
}
fn inc<'a>(&'a mut self) -> &'a mut Counter {
self.value += 1;
self
}
fn get(&self) -> usize {
self.value
}
fn | (&mut self) -> usize {
let v = self.value;
self.value += 1;
v
}
}
pub fn main() {
let v = Counter::new(22).get_and_inc();
assert_eq!(v, 22);
let v = Counter::new(22).inc().inc().get();
assert_eq!(v, 24);;
}
| get_and_inc | identifier_name |
borrowck-rvalues-mutable.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
struct Counter {
value: usize
}
impl Counter {
fn new(v: usize) -> Counter {
Counter {value: v}
}
fn inc<'a>(&'a mut self) -> &'a mut Counter |
fn get(&self) -> usize {
self.value
}
fn get_and_inc(&mut self) -> usize {
let v = self.value;
self.value += 1;
v
}
}
pub fn main() {
let v = Counter::new(22).get_and_inc();
assert_eq!(v, 22);
let v = Counter::new(22).inc().inc().get();
assert_eq!(v, 24);;
}
| {
self.value += 1;
self
} | identifier_body |
borrowck-rvalues-mutable.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
struct Counter {
value: usize
}
impl Counter {
fn new(v: usize) -> Counter {
Counter {value: v}
}
fn inc<'a>(&'a mut self) -> &'a mut Counter {
self.value += 1;
self
}
fn get(&self) -> usize {
self.value
}
fn get_and_inc(&mut self) -> usize {
let v = self.value;
self.value += 1;
v
}
}
pub fn main() {
let v = Counter::new(22).get_and_inc();
assert_eq!(v, 22); |
let v = Counter::new(22).inc().inc().get();
assert_eq!(v, 24);;
} | random_line_split |
|
format.rs | #[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct Format(pub ChannelFormat, pub NumericFormat);
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ChannelFormat {
R4G4,
R4G4B4A4,
R5G6B5,
B5G6R5,
R5G5B5A1,
R8,
R8G8,
R8G8B8A8,
B8G8R8A8, | R11G11B10,
R10G10B10A2,
R16,
R16G16,
R16G16B16A16,
R32,
R32G32,
R32G32B32,
R32G32B32A32,
R16G8,
R32G8,
R9G9B9E5
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum NumericFormat {
UnsignedNormalizedInteger,
SignedNormalizedInteger,
UnsignedInteger,
SignedInteger,
Float
} | R10G11B11, | random_line_split |
format.rs | #[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct Format(pub ChannelFormat, pub NumericFormat);
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ChannelFormat {
R4G4,
R4G4B4A4,
R5G6B5,
B5G6R5,
R5G5B5A1,
R8,
R8G8,
R8G8B8A8,
B8G8R8A8,
R10G11B11,
R11G11B10,
R10G10B10A2,
R16,
R16G16,
R16G16B16A16,
R32,
R32G32,
R32G32B32,
R32G32B32A32,
R16G8,
R32G8,
R9G9B9E5
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum | {
UnsignedNormalizedInteger,
SignedNormalizedInteger,
UnsignedInteger,
SignedInteger,
Float
} | NumericFormat | identifier_name |
cleanup.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[doc(hidden)];
use libc::{c_char, c_void, intptr_t, uintptr_t};
use ptr::mut_null;
use repr::BoxRepr;
use sys::TypeDesc;
use cast::transmute;
#[cfg(notest)] use ptr::to_unsafe_ptr;
/**
* Runtime structures
*
* NB: These must match the representation in the C++ runtime.
*/
type DropGlue<'self> = &'self fn(**TypeDesc, *c_void);
type FreeGlue<'self> = &'self fn(**TypeDesc, *c_void);
type TaskID = uintptr_t;
struct StackSegment { priv opaque: () }
struct Scheduler { priv opaque: () }
struct SchedulerLoop { priv opaque: () }
struct Kernel { priv opaque: () }
struct Env { priv opaque: () }
struct AllocHeader { priv opaque: () }
struct MemoryRegion { priv opaque: () }
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
struct Registers {
data: [u32, ..16]
}
#[cfg(target_arch="mips")]
struct Registers {
data: [u32, ..32]
}
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
#[cfg(target_arch="mips")]
struct Context {
regs: Registers,
next: *Context,
pad: [u32, ..3]
}
#[cfg(target_arch="x86_64")]
struct Registers {
data: [u64, ..22]
}
#[cfg(target_arch="x86_64")]
struct Context {
regs: Registers,
next: *Context,
pad: uintptr_t
}
struct BoxedRegion {
env: *Env,
backing_region: *MemoryRegion,
live_allocs: *BoxRepr
}
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
#[cfg(target_arch="mips")]
struct Task {
// Public fields
refcount: intptr_t, // 0
id: TaskID, // 4
pad: [u32, ..2], // 8
ctx: Context, // 16
stack_segment: *StackSegment, // 96
runtime_sp: uintptr_t, // 100
scheduler: *Scheduler, // 104
scheduler_loop: *SchedulerLoop, // 108
// Fields known only to the runtime
kernel: *Kernel, // 112
name: *c_char, // 116
list_index: i32, // 120
boxed_region: BoxedRegion // 128
}
#[cfg(target_arch="x86_64")]
struct Task {
// Public fields
refcount: intptr_t,
id: TaskID,
ctx: Context,
stack_segment: *StackSegment,
runtime_sp: uintptr_t,
scheduler: *Scheduler,
scheduler_loop: *SchedulerLoop,
// Fields known only to the runtime
kernel: *Kernel,
name: *c_char,
list_index: i32,
boxed_region: BoxedRegion
}
/*
* Box annihilation
*
* This runs at task death to free all boxes.
*/
struct AnnihilateStats {
n_total_boxes: uint,
n_unique_boxes: uint,
n_bytes_freed: uint
}
unsafe fn each_live_alloc(f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) {
use managed;
let task: *Task = transmute(rustrt::rust_get_task());
let box = (*task).boxed_region.live_allocs;
let mut box: *mut BoxRepr = transmute(copy box);
while box != mut_null() {
let next = transmute(copy (*box).header.next);
let uniq =
(*box).header.ref_count == managed::raw::RC_MANAGED_UNIQUE;
if !f(box, uniq) {
break
}
box = next
}
}
#[cfg(unix)]
fn debug_mem() -> bool {
::rt::env::get().debug_mem
}
#[cfg(windows)]
fn debug_mem() -> bool {
false
}
/// Destroys all managed memory (i.e. @ boxes) held by the current task.
#[cfg(notest)]
#[lang="annihilate"]
pub unsafe fn annihilate() {
use unstable::lang::local_free;
use io::WriterUtil;
use io;
use libc;
use sys;
use managed;
let mut stats = AnnihilateStats {
n_total_boxes: 0,
n_unique_boxes: 0,
n_bytes_freed: 0
};
// Pass 1: Make all boxes immortal.
for each_live_alloc |box, uniq| {
stats.n_total_boxes += 1;
if uniq {
stats.n_unique_boxes += 1;
} else {
(*box).header.ref_count = managed::raw::RC_IMMORTAL;
}
}
// Pass 2: Drop all boxes.
for each_live_alloc |box, uniq| {
if !uniq {
let tydesc: *TypeDesc = transmute(copy (*box).header.type_desc);
let drop_glue: DropGlue = transmute(((*tydesc).drop_glue, 0));
drop_glue(to_unsafe_ptr(&tydesc), transmute(&(*box).data));
}
}
// Pass 3: Free all boxes.
for each_live_alloc |box, uniq| {
if !uniq {
stats.n_bytes_freed +=
(*((*box).header.type_desc)).size
+ sys::size_of::<BoxRepr>();
local_free(transmute(box));
}
}
if debug_mem() {
// We do logging here w/o allocation.
let dbg = libc::STDERR_FILENO as io::fd_t; | dbg.write_uint(stats.n_unique_boxes);
dbg.write_str("\n bytes_freed: ");
dbg.write_uint(stats.n_bytes_freed);
dbg.write_str("\n");
}
}
/// Bindings to the runtime
pub mod rustrt {
use libc::c_void;
#[link_name = "rustrt"]
pub extern {
#[rust_stack]
// FIXME (#4386): Unable to make following method private.
pub unsafe fn rust_get_task() -> *c_void;
}
} | dbg.write_str("annihilator stats:");
dbg.write_str("\n total_boxes: ");
dbg.write_uint(stats.n_total_boxes);
dbg.write_str("\n unique_boxes: "); | random_line_split |
cleanup.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[doc(hidden)];
use libc::{c_char, c_void, intptr_t, uintptr_t};
use ptr::mut_null;
use repr::BoxRepr;
use sys::TypeDesc;
use cast::transmute;
#[cfg(notest)] use ptr::to_unsafe_ptr;
/**
* Runtime structures
*
* NB: These must match the representation in the C++ runtime.
*/
type DropGlue<'self> = &'self fn(**TypeDesc, *c_void);
type FreeGlue<'self> = &'self fn(**TypeDesc, *c_void);
type TaskID = uintptr_t;
struct StackSegment { priv opaque: () }
struct Scheduler { priv opaque: () }
struct SchedulerLoop { priv opaque: () }
struct Kernel { priv opaque: () }
struct Env { priv opaque: () }
struct AllocHeader { priv opaque: () }
struct MemoryRegion { priv opaque: () }
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
struct Registers {
data: [u32,..16]
}
#[cfg(target_arch="mips")]
struct Registers {
data: [u32,..32]
}
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
#[cfg(target_arch="mips")]
struct Context {
regs: Registers,
next: *Context,
pad: [u32,..3]
}
#[cfg(target_arch="x86_64")]
struct Registers {
data: [u64,..22]
}
#[cfg(target_arch="x86_64")]
struct Context {
regs: Registers,
next: *Context,
pad: uintptr_t
}
struct BoxedRegion {
env: *Env,
backing_region: *MemoryRegion,
live_allocs: *BoxRepr
}
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
#[cfg(target_arch="mips")]
struct Task {
// Public fields
refcount: intptr_t, // 0
id: TaskID, // 4
pad: [u32,..2], // 8
ctx: Context, // 16
stack_segment: *StackSegment, // 96
runtime_sp: uintptr_t, // 100
scheduler: *Scheduler, // 104
scheduler_loop: *SchedulerLoop, // 108
// Fields known only to the runtime
kernel: *Kernel, // 112
name: *c_char, // 116
list_index: i32, // 120
boxed_region: BoxedRegion // 128
}
#[cfg(target_arch="x86_64")]
struct Task {
// Public fields
refcount: intptr_t,
id: TaskID,
ctx: Context,
stack_segment: *StackSegment,
runtime_sp: uintptr_t,
scheduler: *Scheduler,
scheduler_loop: *SchedulerLoop,
// Fields known only to the runtime
kernel: *Kernel,
name: *c_char,
list_index: i32,
boxed_region: BoxedRegion
}
/*
* Box annihilation
*
* This runs at task death to free all boxes.
*/
struct AnnihilateStats {
n_total_boxes: uint,
n_unique_boxes: uint,
n_bytes_freed: uint
}
unsafe fn each_live_alloc(f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) {
use managed;
let task: *Task = transmute(rustrt::rust_get_task());
let box = (*task).boxed_region.live_allocs;
let mut box: *mut BoxRepr = transmute(copy box);
while box != mut_null() {
let next = transmute(copy (*box).header.next);
let uniq =
(*box).header.ref_count == managed::raw::RC_MANAGED_UNIQUE;
if !f(box, uniq) {
break
}
box = next
}
}
#[cfg(unix)]
fn debug_mem() -> bool {
::rt::env::get().debug_mem
}
#[cfg(windows)]
fn debug_mem() -> bool {
false
}
/// Destroys all managed memory (i.e. @ boxes) held by the current task.
#[cfg(notest)]
#[lang="annihilate"]
pub unsafe fn annihilate() {
use unstable::lang::local_free;
use io::WriterUtil;
use io;
use libc;
use sys;
use managed;
let mut stats = AnnihilateStats {
n_total_boxes: 0,
n_unique_boxes: 0,
n_bytes_freed: 0
};
// Pass 1: Make all boxes immortal.
for each_live_alloc |box, uniq| {
stats.n_total_boxes += 1;
if uniq {
stats.n_unique_boxes += 1;
} else {
(*box).header.ref_count = managed::raw::RC_IMMORTAL;
}
}
// Pass 2: Drop all boxes.
for each_live_alloc |box, uniq| {
if !uniq {
let tydesc: *TypeDesc = transmute(copy (*box).header.type_desc);
let drop_glue: DropGlue = transmute(((*tydesc).drop_glue, 0));
drop_glue(to_unsafe_ptr(&tydesc), transmute(&(*box).data));
}
}
// Pass 3: Free all boxes.
for each_live_alloc |box, uniq| {
if !uniq {
stats.n_bytes_freed +=
(*((*box).header.type_desc)).size
+ sys::size_of::<BoxRepr>();
local_free(transmute(box));
}
}
if debug_mem() |
}
/// Bindings to the runtime
pub mod rustrt {
use libc::c_void;
#[link_name = "rustrt"]
pub extern {
#[rust_stack]
// FIXME (#4386): Unable to make following method private.
pub unsafe fn rust_get_task() -> *c_void;
}
}
| {
// We do logging here w/o allocation.
let dbg = libc::STDERR_FILENO as io::fd_t;
dbg.write_str("annihilator stats:");
dbg.write_str("\n total_boxes: ");
dbg.write_uint(stats.n_total_boxes);
dbg.write_str("\n unique_boxes: ");
dbg.write_uint(stats.n_unique_boxes);
dbg.write_str("\n bytes_freed: ");
dbg.write_uint(stats.n_bytes_freed);
dbg.write_str("\n");
} | conditional_block |
cleanup.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[doc(hidden)];
use libc::{c_char, c_void, intptr_t, uintptr_t};
use ptr::mut_null;
use repr::BoxRepr;
use sys::TypeDesc;
use cast::transmute;
#[cfg(notest)] use ptr::to_unsafe_ptr;
/**
* Runtime structures
*
* NB: These must match the representation in the C++ runtime.
*/
type DropGlue<'self> = &'self fn(**TypeDesc, *c_void);
type FreeGlue<'self> = &'self fn(**TypeDesc, *c_void);
type TaskID = uintptr_t;
struct StackSegment { priv opaque: () }
struct Scheduler { priv opaque: () }
struct SchedulerLoop { priv opaque: () }
struct Kernel { priv opaque: () }
struct Env { priv opaque: () }
struct AllocHeader { priv opaque: () }
struct MemoryRegion { priv opaque: () }
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
struct Registers {
data: [u32,..16]
}
#[cfg(target_arch="mips")]
struct Registers {
data: [u32,..32]
}
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
#[cfg(target_arch="mips")]
struct Context {
regs: Registers,
next: *Context,
pad: [u32,..3]
}
#[cfg(target_arch="x86_64")]
struct Registers {
data: [u64,..22]
}
#[cfg(target_arch="x86_64")]
struct Context {
regs: Registers,
next: *Context,
pad: uintptr_t
}
struct BoxedRegion {
env: *Env,
backing_region: *MemoryRegion,
live_allocs: *BoxRepr
}
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
#[cfg(target_arch="mips")]
struct Task {
// Public fields
refcount: intptr_t, // 0
id: TaskID, // 4
pad: [u32,..2], // 8
ctx: Context, // 16
stack_segment: *StackSegment, // 96
runtime_sp: uintptr_t, // 100
scheduler: *Scheduler, // 104
scheduler_loop: *SchedulerLoop, // 108
// Fields known only to the runtime
kernel: *Kernel, // 112
name: *c_char, // 116
list_index: i32, // 120
boxed_region: BoxedRegion // 128
}
#[cfg(target_arch="x86_64")]
struct Task {
// Public fields
refcount: intptr_t,
id: TaskID,
ctx: Context,
stack_segment: *StackSegment,
runtime_sp: uintptr_t,
scheduler: *Scheduler,
scheduler_loop: *SchedulerLoop,
// Fields known only to the runtime
kernel: *Kernel,
name: *c_char,
list_index: i32,
boxed_region: BoxedRegion
}
/*
* Box annihilation
*
* This runs at task death to free all boxes.
*/
struct AnnihilateStats {
n_total_boxes: uint,
n_unique_boxes: uint,
n_bytes_freed: uint
}
unsafe fn each_live_alloc(f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) {
use managed;
let task: *Task = transmute(rustrt::rust_get_task());
let box = (*task).boxed_region.live_allocs;
let mut box: *mut BoxRepr = transmute(copy box);
while box != mut_null() {
let next = transmute(copy (*box).header.next);
let uniq =
(*box).header.ref_count == managed::raw::RC_MANAGED_UNIQUE;
if !f(box, uniq) {
break
}
box = next
}
}
#[cfg(unix)]
fn debug_mem() -> bool {
::rt::env::get().debug_mem
}
#[cfg(windows)]
fn debug_mem() -> bool {
false
}
/// Destroys all managed memory (i.e. @ boxes) held by the current task.
#[cfg(notest)]
#[lang="annihilate"]
pub unsafe fn | () {
use unstable::lang::local_free;
use io::WriterUtil;
use io;
use libc;
use sys;
use managed;
let mut stats = AnnihilateStats {
n_total_boxes: 0,
n_unique_boxes: 0,
n_bytes_freed: 0
};
// Pass 1: Make all boxes immortal.
for each_live_alloc |box, uniq| {
stats.n_total_boxes += 1;
if uniq {
stats.n_unique_boxes += 1;
} else {
(*box).header.ref_count = managed::raw::RC_IMMORTAL;
}
}
// Pass 2: Drop all boxes.
for each_live_alloc |box, uniq| {
if !uniq {
let tydesc: *TypeDesc = transmute(copy (*box).header.type_desc);
let drop_glue: DropGlue = transmute(((*tydesc).drop_glue, 0));
drop_glue(to_unsafe_ptr(&tydesc), transmute(&(*box).data));
}
}
// Pass 3: Free all boxes.
for each_live_alloc |box, uniq| {
if !uniq {
stats.n_bytes_freed +=
(*((*box).header.type_desc)).size
+ sys::size_of::<BoxRepr>();
local_free(transmute(box));
}
}
if debug_mem() {
// We do logging here w/o allocation.
let dbg = libc::STDERR_FILENO as io::fd_t;
dbg.write_str("annihilator stats:");
dbg.write_str("\n total_boxes: ");
dbg.write_uint(stats.n_total_boxes);
dbg.write_str("\n unique_boxes: ");
dbg.write_uint(stats.n_unique_boxes);
dbg.write_str("\n bytes_freed: ");
dbg.write_uint(stats.n_bytes_freed);
dbg.write_str("\n");
}
}
/// Bindings to the runtime
pub mod rustrt {
use libc::c_void;
#[link_name = "rustrt"]
pub extern {
#[rust_stack]
// FIXME (#4386): Unable to make following method private.
pub unsafe fn rust_get_task() -> *c_void;
}
}
| annihilate | identifier_name |
stylearc.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Fork of Arc for the style system. This has the following advantages over std::Arc:
//! * We don't waste storage on the weak reference count.
//! * We don't do extra RMU operations to handle the possibility of weak references.
//! * We can experiment with arena allocation (todo).
//! * We can add methods to support our custom use cases [1].
//!
//! [1] https://bugzilla.mozilla.org/show_bug.cgi?id=1360883
// The semantics of Arc are already documented in the Rust docs, so we don't
// duplicate those here.
#![allow(missing_docs)]
#[cfg(feature = "servo")]
use heapsize::HeapSizeOf;
#[cfg(feature = "servo")]
use serde::{Deserialize, Serialize};
use std::{isize, usize};
use std::borrow;
use std::cmp::Ordering;
use std::convert::From;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::sync::atomic;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
// Private macro to get the offset of a struct field in bytes from the address of the struct.
macro_rules! offset_of {
($container:path, $field:ident) => {{
// Make sure the field actually exists. This line ensures that a compile-time error is
// generated if $field is accessed through a Deref impl.
let $container { $field: _,.. };
// Create an (invalid) instance of the container and calculate the offset to its
// field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to
// be nullptr deref.
let invalid: $container = ::std::mem::uninitialized();
let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize;
// Do not run destructors on the made up invalid instance.
::std::mem::forget(invalid);
offset as isize
}};
}
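// --- Illustrative sketch (not part of the original stylearc.rs) ---
// What `offset_of!` evaluates to for a concrete layout; `Demo` and
// `offset_of_demo` are made-up names. The macro is only needed below so that
// `Arc::from_raw` can recover the `ArcInner<T>` pointer from the `*const T`
// produced by `Arc::into_raw`.
#[allow(dead_code)]
struct Demo {
    a: u8,
    b: u32,
}
#[allow(dead_code)]
fn offset_of_demo() -> isize {
    // `mem::uninitialized` inside the macro is unsafe, hence the unsafe block.
    // The exact value depends on padding and field layout.
    unsafe { offset_of!(Demo, b) }
}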
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
pub struct Arc<T:?Sized> {
// FIXME(bholley): When NonZero/Shared/Unique are stabilized, we should use
// Shared here to get the NonZero optimization. Gankro is working on this.
//
// If we need a compact Option<Arc<T>> beforehand, we can make a helper
// class that wraps the result of Arc::into_raw.
//
// https://github.com/rust-lang/rust/issues/27730
ptr: *mut ArcInner<T>,
}
/// An Arc that is known to be uniquely owned
///
/// This lets us build arcs that we can mutate before
/// freezing, without needing to change the allocation
pub struct UniqueArc<T:?Sized>(Arc<T>);
impl<T> UniqueArc<T> {
#[inline]
/// Construct a new UniqueArc
pub fn new(data: T) -> Self {
UniqueArc(Arc::new(data))
}
#[inline]
/// Convert to a shareable Arc<T> once we're done using it
pub fn shareable(self) -> Arc<T> {
self.0
}
}
impl<T> Deref for UniqueArc<T> {
type Target = T;
fn deref(&self) -> &T {
&*self.0
}
}
impl<T> DerefMut for UniqueArc<T> {
fn deref_mut(&mut self) -> &mut T {
// We know this to be uniquely owned
unsafe { &mut (*self.0.ptr).data }
}
}
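// --- Illustrative sketch (not part of the original stylearc.rs) ---
// Typical use of `UniqueArc`: build a value in place through `DerefMut`, then
// freeze it into an ordinary shareable `Arc`. The function name is made up
// for illustration.
#[allow(dead_code)]
fn unique_arc_demo() {
    let mut u = UniqueArc::new(String::new());
    u.push_str("built in place");            // mutation is fine while uniquely owned
    let shared: Arc<String> = u.shareable(); // frozen: now shared like any other Arc
    assert_eq!(*shared, "built in place");
}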
unsafe impl<T:?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T:?Sized + Sync + Send> Sync for Arc<T> {}
struct ArcInner<T:?Sized> {
count: atomic::AtomicUsize,
data: T,
}
unsafe impl<T:?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T:?Sized + Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
#[inline]
pub fn new(data: T) -> Self {
let x = Box::new(ArcInner {
count: atomic::AtomicUsize::new(1),
data: data,
});
Arc { ptr: Box::into_raw(x) }
}
pub fn into_raw(this: Self) -> *const T {
let ptr = unsafe { &((*this.ptr).data) as *const _ };
mem::forget(this);
ptr
}
pub unsafe fn from_raw(ptr: *const T) -> Self {
// To find the corresponding pointer to the `ArcInner` we need
// to subtract the offset of the `data` field from the pointer.
let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
Arc {
ptr: ptr as *mut ArcInner<T>,
}
}
}
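// --- Illustrative sketch (not part of the original stylearc.rs) ---
// The intended round trip through the raw-pointer API above: `into_raw` hands
// out a `*const T` pointing at the `data` field, and `from_raw` uses
// `offset_of!` to step back to the enclosing `ArcInner<T>`. The function name
// is made up for illustration.
#[allow(dead_code)]
fn raw_round_trip_demo() {
    let x = Arc::new(42u32);
    let p = Arc::into_raw(x);             // ownership is parked in the raw pointer
    let x2 = unsafe { Arc::from_raw(p) }; // re-attach ownership; freed when x2 drops
    assert_eq!(*x2, 42);
}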
impl<T:?Sized> Arc<T> {
#[inline]
fn inner(&self) -> &ArcInner<T> {
// This unsafety is ok because while this arc is alive we're guaranteed
// that the inner pointer is valid. Furthermore, we know that the
// `ArcInner` structure itself is `Sync` because the inner data is
// `Sync` as well, so we're ok loaning out an immutable pointer to these
// contents.
unsafe { &*self.ptr }
}
// Non-inlined part of `drop`. Just invokes the destructor.
#[inline(never)]
unsafe fn drop_slow(&mut self) {
let _ = Box::from_raw(self.ptr);
}
#[inline]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.ptr == other.ptr
}
}
impl<T:?Sized> Clone for Arc<T> {
#[inline]
fn clone(&self) -> Self {
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// Note: std::process::abort is stable in 1.17, which we don't yet
// require for Gecko. Panic is good enough in practice here (it will
// trigger an abort at least in Gecko, and this case is degenerate
// enough that Servo shouldn't have code that triggers it).
//
// We should fix this when we require 1.17.
panic!();
}
Arc { ptr: self.ptr }
}
}
impl<T:?Sized> Deref for Arc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.inner().data
}
}
impl<T: Clone> Arc<T> {
#[inline]
pub fn make_mut(this: &mut Self) -> &mut T {
if !this.is_unique() {
// Another pointer exists; clone
*this = Arc::new((**this).clone());
}
unsafe {
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the Arc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
&mut (*this.ptr).data
}
}
}
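// --- Illustrative sketch (not part of the original stylearc.rs) ---
// Copy-on-write behaviour of `make_mut`: while another handle shares the
// allocation it clones, once unique it mutates in place. The name and data
// are arbitrary.
#[allow(dead_code)]
fn make_mut_demo() {
    let mut a = Arc::new(vec![1, 2, 3]);
    let b = a.clone();
    Arc::make_mut(&mut a).push(4);  // `b` still shares the old data, so this clones
    assert_eq!(*b, vec![1, 2, 3]);
    assert_eq!(*a, vec![1, 2, 3, 4]);
    drop(b);
    Arc::make_mut(&mut a).push(5);  // now unique: mutated in place, no clone
    assert_eq!(*a, vec![1, 2, 3, 4, 5]);
}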
impl<T:?Sized> Arc<T> {
#[inline]
pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if this.is_unique() {
unsafe {
// See make_mut() for documentation of the threadsafety here.
Some(&mut (*this.ptr).data)
}
} else {
None
}
}
#[inline]
fn is_unique(&self) -> bool {
// We can use Relaxed here, but the justification is a bit subtle.
//
// The reason to use Acquire would be to synchronize with other threads
// that are modifying the refcount with Release, i.e. to ensure that
// their writes to memory guarded by this refcount are flushed. However,
// we know that threads only modify the contents of the Arc when they
// observe the refcount to be 1, and no other thread could observe that
// because we're holding one strong reference here.
self.inner().count.load(Relaxed) == 1
}
}
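// --- Illustrative sketch (not part of the original stylearc.rs) ---
// `get_mut` only succeeds while the reference count observed by `is_unique`
// is exactly one. The function name is made up for illustration.
#[allow(dead_code)]
fn get_mut_demo() {
    let mut a = Arc::new(1u32);
    assert!(Arc::get_mut(&mut a).is_some()); // uniquely owned
    let b = a.clone();
    assert!(Arc::get_mut(&mut a).is_none()); // shared with `b`
    drop(b);
    *Arc::get_mut(&mut a).unwrap() = 2;      // unique again
    assert_eq!(*a, 2);
}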
impl<T:?Sized> Drop for Arc<T> {
#[inline]
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object.
if self.inner().count.fetch_sub(1, Release)!= 1 |
// FIXME(bholley): Use the updated comment when [2] is merged.
//
// This load is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` load. This
// means that use of the data happens before decreasing the reference
// count, which happens before this load, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
// [2]: https://github.com/rust-lang/rust/pull/41714
self.inner().count.load(Acquire);
unsafe {
self.drop_slow();
}
}
}
impl<T:?Sized + PartialEq> PartialEq for Arc<T> {
fn eq(&self, other: &Arc<T>) -> bool {
*(*self) == *(*other)
}
fn ne(&self, other: &Arc<T>) -> bool {
*(*self) != *(*other)
}
}
impl<T:?Sized + PartialOrd> PartialOrd for Arc<T> {
fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
fn lt(&self, other: &Arc<T>) -> bool {
*(*self) < *(*other)
}
fn le(&self, other: &Arc<T>) -> bool {
*(*self) <= *(*other)
}
fn gt(&self, other: &Arc<T>) -> bool {
*(*self) > *(*other)
}
fn ge(&self, other: &Arc<T>) -> bool {
*(*self) >= *(*other)
}
}
impl<T:?Sized + Ord> Ord for Arc<T> {
fn cmp(&self, other: &Arc<T>) -> Ordering {
(**self).cmp(&**other)
}
}
impl<T:?Sized + Eq> Eq for Arc<T> {}
impl<T:?Sized + fmt::Display> fmt::Display for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<T:?Sized + fmt::Debug> fmt::Debug for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<T:?Sized> fmt::Pointer for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&self.ptr, f)
}
}
impl<T: Default> Default for Arc<T> {
fn default() -> Arc<T> {
Arc::new(Default::default())
}
}
impl<T:?Sized + Hash> Hash for Arc<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state)
}
}
impl<T> From<T> for Arc<T> {
fn from(t: T) -> Self {
Arc::new(t)
}
}
impl<T:?Sized> borrow::Borrow<T> for Arc<T> {
fn borrow(&self) -> &T {
&**self
}
}
impl<T:?Sized> AsRef<T> for Arc<T> {
fn as_ref(&self) -> &T {
&**self
}
}
// This is what the HeapSize crate does for regular arc, but is questionably
// sound. See https://github.com/servo/heapsize/issues/37
#[cfg(feature = "servo")]
impl<T: HeapSizeOf> HeapSizeOf for Arc<T> {
fn heap_size_of_children(&self) -> usize {
(**self).heap_size_of_children()
}
}
#[cfg(feature = "servo")]
impl<T: Deserialize> Deserialize for Arc<T>
{
fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
where
D: ::serde::de::Deserializer,
{
T::deserialize(deserializer).map(Arc::new)
}
}
#[cfg(feature = "servo")]
impl<T: Serialize> Serialize for Arc<T>
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: ::serde::ser::Serializer,
{
(**self).serialize(serializer)
}
}
| {
return;
} | conditional_block |
stylearc.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Fork of Arc for the style system. This has the following advantages over std::Arc:
//! * We don't waste storage on the weak reference count.
//! * We don't do extra RMU operations to handle the possibility of weak references.
//! * We can experiment with arena allocation (todo).
//! * We can add methods to support our custom use cases [1].
//!
//! [1] https://bugzilla.mozilla.org/show_bug.cgi?id=1360883
// The semantics of Arc are already documented in the Rust docs, so we don't
// duplicate those here.
#![allow(missing_docs)]
#[cfg(feature = "servo")]
use heapsize::HeapSizeOf;
#[cfg(feature = "servo")]
use serde::{Deserialize, Serialize};
use std::{isize, usize};
use std::borrow;
use std::cmp::Ordering;
use std::convert::From;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::sync::atomic;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
// Private macro to get the offset of a struct field in bytes from the address of the struct.
macro_rules! offset_of {
($container:path, $field:ident) => {{
// Make sure the field actually exists. This line ensures that a compile-time error is
// generated if $field is accessed through a Deref impl.
let $container { $field: _,.. };
// Create an (invalid) instance of the container and calculate the offset to its
// field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to
// be nullptr deref.
let invalid: $container = ::std::mem::uninitialized();
let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize;
// Do not run destructors on the made up invalid instance.
::std::mem::forget(invalid);
offset as isize
}};
}
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
pub struct Arc<T:?Sized> {
// FIXME(bholley): When NonZero/Shared/Unique are stabilized, we should use
// Shared here to get the NonZero optimization. Gankro is working on this.
//
// If we need a compact Option<Arc<T>> beforehand, we can make a helper
// class that wraps the result of Arc::into_raw.
//
// https://github.com/rust-lang/rust/issues/27730
ptr: *mut ArcInner<T>,
}
/// An Arc that is known to be uniquely owned
///
/// This lets us build arcs that we can mutate before
/// freezing, without needing to change the allocation
pub struct UniqueArc<T:?Sized>(Arc<T>);
impl<T> UniqueArc<T> {
#[inline]
/// Construct a new UniqueArc
pub fn new(data: T) -> Self {
UniqueArc(Arc::new(data))
}
#[inline]
/// Convert to a shareable Arc<T> once we're done using it
pub fn shareable(self) -> Arc<T> {
self.0
}
}
impl<T> Deref for UniqueArc<T> {
type Target = T;
fn deref(&self) -> &T {
&*self.0
}
}
impl<T> DerefMut for UniqueArc<T> {
fn deref_mut(&mut self) -> &mut T {
// We know this to be uniquely owned
unsafe { &mut (*self.0.ptr).data }
}
}
unsafe impl<T:?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T:?Sized + Sync + Send> Sync for Arc<T> {}
struct ArcInner<T:?Sized> {
count: atomic::AtomicUsize,
data: T,
}
unsafe impl<T:?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T:?Sized + Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
#[inline]
pub fn new(data: T) -> Self {
let x = Box::new(ArcInner {
count: atomic::AtomicUsize::new(1),
data: data,
});
Arc { ptr: Box::into_raw(x) }
}
pub fn into_raw(this: Self) -> *const T {
let ptr = unsafe { &((*this.ptr).data) as *const _ };
mem::forget(this);
ptr
}
pub unsafe fn from_raw(ptr: *const T) -> Self {
// To find the corresponding pointer to the `ArcInner` we need
// to subtract the offset of the `data` field from the pointer.
let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
Arc {
ptr: ptr as *mut ArcInner<T>,
}
}
}
impl<T:?Sized> Arc<T> {
#[inline]
fn inner(&self) -> &ArcInner<T> {
// This unsafety is ok because while this arc is alive we're guaranteed
// that the inner pointer is valid. Furthermore, we know that the
// `ArcInner` structure itself is `Sync` because the inner data is
// `Sync` as well, so we're ok loaning out an immutable pointer to these
// contents.
unsafe { &*self.ptr }
}
// Non-inlined part of `drop`. Just invokes the destructor.
#[inline(never)]
unsafe fn drop_slow(&mut self) |
#[inline]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.ptr == other.ptr
}
}
impl<T:?Sized> Clone for Arc<T> {
#[inline]
fn clone(&self) -> Self {
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// Note: std::process::abort is stable in 1.17, which we don't yet
// require for Gecko. Panic is good enough in practice here (it will
// trigger an abort at least in Gecko, and this case is degenerate
// enough that Servo shouldn't have code that triggers it).
//
// We should fix this when we require 1.17.
panic!();
}
Arc { ptr: self.ptr }
}
}
impl<T:?Sized> Deref for Arc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.inner().data
}
}
impl<T: Clone> Arc<T> {
#[inline]
pub fn make_mut(this: &mut Self) -> &mut T {
if !this.is_unique() {
// Another pointer exists; clone
*this = Arc::new((**this).clone());
}
unsafe {
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the Arc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
&mut (*this.ptr).data
}
}
}
impl<T:?Sized> Arc<T> {
#[inline]
pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if this.is_unique() {
unsafe {
// See make_mut() for documentation of the threadsafety here.
Some(&mut (*this.ptr).data)
}
} else {
None
}
}
#[inline]
fn is_unique(&self) -> bool {
// We can use Relaxed here, but the justification is a bit subtle.
//
// The reason to use Acquire would be to synchronize with other threads
// that are modifying the refcount with Release, i.e. to ensure that
// their writes to memory guarded by this refcount are flushed. However,
// we know that threads only modify the contents of the Arc when they
// observe the refcount to be 1, and no other thread could observe that
// because we're holding one strong reference here.
self.inner().count.load(Relaxed) == 1
}
}
impl<T:?Sized> Drop for Arc<T> {
#[inline]
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object.
if self.inner().count.fetch_sub(1, Release) != 1 {
return;
}
// FIXME(bholley): Use the updated comment when [2] is merged.
//
// This load is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` load. This
// means that use of the data happens before decreasing the reference
// count, which happens before this load, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
// [2]: https://github.com/rust-lang/rust/pull/41714
self.inner().count.load(Acquire);
unsafe {
self.drop_slow();
}
}
}
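// --- Illustrative sketch (not part of the original stylearc.rs) ---
// What the `Drop` impl above does over the lifetime of two handles: only the
// drop that takes the count from 1 to 0 runs `drop_slow` and frees the
// allocation. The function name is made up for illustration.
#[allow(dead_code)]
fn drop_demo() {
    let a = Arc::new(String::from("shared"));
    let b = a.clone();          // count: 2
    drop(a);                    // count: 1 -- early return, nothing freed
    assert_eq!(*b, "shared");
    drop(b);                    // count: 0 -- the ArcInner is deallocated here
}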
impl<T:?Sized + PartialEq> PartialEq for Arc<T> {
fn eq(&self, other: &Arc<T>) -> bool {
*(*self) == *(*other)
}
fn ne(&self, other: &Arc<T>) -> bool {
*(*self) != *(*other)
}
}
impl<T:?Sized + PartialOrd> PartialOrd for Arc<T> {
fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
fn lt(&self, other: &Arc<T>) -> bool {
*(*self) < *(*other)
}
fn le(&self, other: &Arc<T>) -> bool {
*(*self) <= *(*other)
}
fn gt(&self, other: &Arc<T>) -> bool {
*(*self) > *(*other)
}
fn ge(&self, other: &Arc<T>) -> bool {
*(*self) >= *(*other)
}
}
impl<T:?Sized + Ord> Ord for Arc<T> {
fn cmp(&self, other: &Arc<T>) -> Ordering {
(**self).cmp(&**other)
}
}
impl<T:?Sized + Eq> Eq for Arc<T> {}
impl<T:?Sized + fmt::Display> fmt::Display for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<T:?Sized + fmt::Debug> fmt::Debug for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<T:?Sized> fmt::Pointer for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&self.ptr, f)
}
}
impl<T: Default> Default for Arc<T> {
fn default() -> Arc<T> {
Arc::new(Default::default())
}
}
impl<T:?Sized + Hash> Hash for Arc<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state)
}
}
impl<T> From<T> for Arc<T> {
fn from(t: T) -> Self {
Arc::new(t)
}
}
impl<T:?Sized> borrow::Borrow<T> for Arc<T> {
fn borrow(&self) -> &T {
&**self
}
}
impl<T:?Sized> AsRef<T> for Arc<T> {
fn as_ref(&self) -> &T {
&**self
}
}
// This is what the HeapSize crate does for regular arc, but is questionably
// sound. See https://github.com/servo/heapsize/issues/37
#[cfg(feature = "servo")]
impl<T: HeapSizeOf> HeapSizeOf for Arc<T> {
fn heap_size_of_children(&self) -> usize {
(**self).heap_size_of_children()
}
}
#[cfg(feature = "servo")]
impl<T: Deserialize> Deserialize for Arc<T>
{
fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
where
D: ::serde::de::Deserializer,
{
T::deserialize(deserializer).map(Arc::new)
}
}
#[cfg(feature = "servo")]
impl<T: Serialize> Serialize for Arc<T>
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: ::serde::ser::Serializer,
{
(**self).serialize(serializer)
}
}
| {
let _ = Box::from_raw(self.ptr);
} | identifier_body |
stylearc.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Fork of Arc for the style system. This has the following advantages over std::Arc:
//! * We don't waste storage on the weak reference count.
//! * We don't do extra RMU operations to handle the possibility of weak references.
//! * We can experiment with arena allocation (todo).
//! * We can add methods to support our custom use cases [1].
//!
//! [1] https://bugzilla.mozilla.org/show_bug.cgi?id=1360883
// The semantics of Arc are already documented in the Rust docs, so we don't
// duplicate those here.
#![allow(missing_docs)]
#[cfg(feature = "servo")]
use heapsize::HeapSizeOf;
#[cfg(feature = "servo")]
use serde::{Deserialize, Serialize};
use std::{isize, usize};
use std::borrow;
use std::cmp::Ordering;
use std::convert::From;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::sync::atomic;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
// Private macro to get the offset of a struct field in bytes from the address of the struct.
macro_rules! offset_of {
($container:path, $field:ident) => {{
// Make sure the field actually exists. This line ensures that a compile-time error is
// generated if $field is accessed through a Deref impl.
let $container { $field: _,.. };
// Create an (invalid) instance of the container and calculate the offset to its
// field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to
// be nullptr deref.
let invalid: $container = ::std::mem::uninitialized();
let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize;
// Do not run destructors on the made up invalid instance.
::std::mem::forget(invalid);
offset as isize
}};
}
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
pub struct Arc<T:?Sized> {
// FIXME(bholley): When NonZero/Shared/Unique are stabilized, we should use
// Shared here to get the NonZero optimization. Gankro is working on this.
//
// If we need a compact Option<Arc<T>> beforehand, we can make a helper
// class that wraps the result of Arc::into_raw.
//
// https://github.com/rust-lang/rust/issues/27730
ptr: *mut ArcInner<T>,
}
/// An Arc that is known to be uniquely owned
///
/// This lets us build arcs that we can mutate before
/// freezing, without needing to change the allocation
pub struct UniqueArc<T:?Sized>(Arc<T>);
impl<T> UniqueArc<T> {
#[inline]
/// Construct a new UniqueArc
pub fn new(data: T) -> Self {
UniqueArc(Arc::new(data))
}
#[inline]
/// Convert to a shareable Arc<T> once we're done using it
pub fn shareable(self) -> Arc<T> {
self.0
}
}
impl<T> Deref for UniqueArc<T> {
type Target = T;
fn deref(&self) -> &T {
&*self.0
}
}
impl<T> DerefMut for UniqueArc<T> {
fn deref_mut(&mut self) -> &mut T {
// We know this to be uniquely owned
unsafe { &mut (*self.0.ptr).data }
}
}
unsafe impl<T:?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T:?Sized + Sync + Send> Sync for Arc<T> {}
struct ArcInner<T:?Sized> {
count: atomic::AtomicUsize,
data: T,
}
unsafe impl<T:?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T:?Sized + Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
#[inline]
pub fn | (data: T) -> Self {
let x = Box::new(ArcInner {
count: atomic::AtomicUsize::new(1),
data: data,
});
Arc { ptr: Box::into_raw(x) }
}
pub fn into_raw(this: Self) -> *const T {
let ptr = unsafe { &((*this.ptr).data) as *const _ };
mem::forget(this);
ptr
}
pub unsafe fn from_raw(ptr: *const T) -> Self {
// To find the corresponding pointer to the `ArcInner` we need
// to subtract the offset of the `data` field from the pointer.
let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
Arc {
ptr: ptr as *mut ArcInner<T>,
}
}
}
impl<T:?Sized> Arc<T> {
#[inline]
fn inner(&self) -> &ArcInner<T> {
// This unsafety is ok because while this arc is alive we're guaranteed
// that the inner pointer is valid. Furthermore, we know that the
// `ArcInner` structure itself is `Sync` because the inner data is
// `Sync` as well, so we're ok loaning out an immutable pointer to these
// contents.
unsafe { &*self.ptr }
}
// Non-inlined part of `drop`. Just invokes the destructor.
#[inline(never)]
unsafe fn drop_slow(&mut self) {
let _ = Box::from_raw(self.ptr);
}
#[inline]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.ptr == other.ptr
}
}
impl<T:?Sized> Clone for Arc<T> {
#[inline]
fn clone(&self) -> Self {
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// Note: std::process::abort is stable in 1.17, which we don't yet
// require for Gecko. Panic is good enough in practice here (it will
// trigger an abort at least in Gecko, and this case is degenerate
// enough that Servo shouldn't have code that triggers it).
//
// We should fix this when we require 1.17.
panic!();
}
Arc { ptr: self.ptr }
}
}
impl<T:?Sized> Deref for Arc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.inner().data
}
}
impl<T: Clone> Arc<T> {
#[inline]
pub fn make_mut(this: &mut Self) -> &mut T {
if !this.is_unique() {
// Another pointer exists; clone
*this = Arc::new((**this).clone());
}
unsafe {
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the Arc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
&mut (*this.ptr).data
}
}
}
impl<T:?Sized> Arc<T> {
#[inline]
pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if this.is_unique() {
unsafe {
// See make_mut() for documentation of the threadsafety here.
Some(&mut (*this.ptr).data)
}
} else {
None
}
}
#[inline]
fn is_unique(&self) -> bool {
// We can use Relaxed here, but the justification is a bit subtle.
//
// The reason to use Acquire would be to synchronize with other threads
// that are modifying the refcount with Release, i.e. to ensure that
// their writes to memory guarded by this refcount are flushed. However,
// we know that threads only modify the contents of the Arc when they
// observe the refcount to be 1, and no other thread could observe that
// because we're holding one strong reference here.
self.inner().count.load(Relaxed) == 1
}
}
impl<T:?Sized> Drop for Arc<T> {
#[inline]
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object.
if self.inner().count.fetch_sub(1, Release) != 1 {
return;
}
// FIXME(bholley): Use the updated comment when [2] is merged.
//
// This load is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` load. This
// means that use of the data happens before decreasing the reference
// count, which happens before this load, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
// [2]: https://github.com/rust-lang/rust/pull/41714
self.inner().count.load(Acquire);
unsafe {
self.drop_slow();
}
}
}
impl<T:?Sized + PartialEq> PartialEq for Arc<T> {
fn eq(&self, other: &Arc<T>) -> bool {
*(*self) == *(*other)
}
fn ne(&self, other: &Arc<T>) -> bool {
*(*self) != *(*other)
}
}
impl<T:?Sized + PartialOrd> PartialOrd for Arc<T> {
fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
fn lt(&self, other: &Arc<T>) -> bool {
*(*self) < *(*other)
}
fn le(&self, other: &Arc<T>) -> bool {
*(*self) <= *(*other)
}
fn gt(&self, other: &Arc<T>) -> bool {
*(*self) > *(*other)
}
fn ge(&self, other: &Arc<T>) -> bool {
*(*self) >= *(*other)
}
}
impl<T:?Sized + Ord> Ord for Arc<T> {
fn cmp(&self, other: &Arc<T>) -> Ordering {
(**self).cmp(&**other)
}
}
impl<T:?Sized + Eq> Eq for Arc<T> {}
impl<T:?Sized + fmt::Display> fmt::Display for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<T:?Sized + fmt::Debug> fmt::Debug for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<T:?Sized> fmt::Pointer for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&self.ptr, f)
}
}
impl<T: Default> Default for Arc<T> {
fn default() -> Arc<T> {
Arc::new(Default::default())
}
}
impl<T:?Sized + Hash> Hash for Arc<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state)
}
}
impl<T> From<T> for Arc<T> {
fn from(t: T) -> Self {
Arc::new(t)
}
}
impl<T:?Sized> borrow::Borrow<T> for Arc<T> {
fn borrow(&self) -> &T {
&**self
}
}
impl<T:?Sized> AsRef<T> for Arc<T> {
fn as_ref(&self) -> &T {
&**self
}
}
// This is what the HeapSize crate does for regular arc, but is questionably
// sound. See https://github.com/servo/heapsize/issues/37
#[cfg(feature = "servo")]
impl<T: HeapSizeOf> HeapSizeOf for Arc<T> {
fn heap_size_of_children(&self) -> usize {
(**self).heap_size_of_children()
}
}
#[cfg(feature = "servo")]
impl<T: Deserialize> Deserialize for Arc<T>
{
fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
where
D: ::serde::de::Deserializer,
{
T::deserialize(deserializer).map(Arc::new)
}
}
#[cfg(feature = "servo")]
impl<T: Serialize> Serialize for Arc<T>
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: ::serde::ser::Serializer,
{
(**self).serialize(serializer)
}
}
| new | identifier_name |
stylearc.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Fork of Arc for the style system. This has the following advantages over std::Arc:
//! * We don't waste storage on the weak reference count.
//! * We don't do extra RMU operations to handle the possibility of weak references.
//! * We can experiment with arena allocation (todo).
//! * We can add methods to support our custom use cases [1].
//!
//! [1] https://bugzilla.mozilla.org/show_bug.cgi?id=1360883
// The semantics of Arc are already documented in the Rust docs, so we don't
// duplicate those here.
#![allow(missing_docs)]
#[cfg(feature = "servo")]
use heapsize::HeapSizeOf;
#[cfg(feature = "servo")]
use serde::{Deserialize, Serialize};
use std::{isize, usize};
use std::borrow;
use std::cmp::Ordering;
use std::convert::From;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::sync::atomic;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
// Private macro to get the offset of a struct field in bytes from the address of the struct.
macro_rules! offset_of {
($container:path, $field:ident) => {{
// Make sure the field actually exists. This line ensures that a compile-time error is
// generated if $field is accessed through a Deref impl.
let $container { $field: _,.. };
// Create an (invalid) instance of the container and calculate the offset to its
// field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to
// be nullptr deref.
let invalid: $container = ::std::mem::uninitialized();
let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize;
// Do not run destructors on the made up invalid instance.
::std::mem::forget(invalid);
offset as isize
}};
}
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
pub struct Arc<T:?Sized> {
// FIXME(bholley): When NonZero/Shared/Unique are stabilized, we should use
// Shared here to get the NonZero optimization. Gankro is working on this.
//
// If we need a compact Option<Arc<T>> beforehand, we can make a helper
// class that wraps the result of Arc::into_raw.
//
// https://github.com/rust-lang/rust/issues/27730
ptr: *mut ArcInner<T>,
}
/// An Arc that is known to be uniquely owned
///
/// This lets us build arcs that we can mutate before
/// freezing, without needing to change the allocation
pub struct UniqueArc<T:?Sized>(Arc<T>);
impl<T> UniqueArc<T> {
#[inline]
/// Construct a new UniqueArc
pub fn new(data: T) -> Self {
UniqueArc(Arc::new(data))
}
#[inline]
/// Convert to a shareable Arc<T> once we're done using it
pub fn shareable(self) -> Arc<T> {
self.0
}
}
impl<T> Deref for UniqueArc<T> {
type Target = T;
fn deref(&self) -> &T {
&*self.0
}
}
impl<T> DerefMut for UniqueArc<T> {
fn deref_mut(&mut self) -> &mut T {
// We know this to be uniquely owned
unsafe { &mut (*self.0.ptr).data }
}
}
unsafe impl<T:?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T:?Sized + Sync + Send> Sync for Arc<T> {}
struct ArcInner<T:?Sized> {
count: atomic::AtomicUsize,
data: T,
}
unsafe impl<T:?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T:?Sized + Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
#[inline]
pub fn new(data: T) -> Self {
let x = Box::new(ArcInner {
count: atomic::AtomicUsize::new(1),
data: data,
});
Arc { ptr: Box::into_raw(x) }
}
pub fn into_raw(this: Self) -> *const T {
let ptr = unsafe { &((*this.ptr).data) as *const _ };
mem::forget(this);
ptr
}
pub unsafe fn from_raw(ptr: *const T) -> Self {
// To find the corresponding pointer to the `ArcInner` we need
// to subtract the offset of the `data` field from the pointer.
let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
Arc {
ptr: ptr as *mut ArcInner<T>,
}
}
}
impl<T:?Sized> Arc<T> {
#[inline]
fn inner(&self) -> &ArcInner<T> {
// This unsafety is ok because while this arc is alive we're guaranteed
// that the inner pointer is valid. Furthermore, we know that the
// `ArcInner` structure itself is `Sync` because the inner data is
// `Sync` as well, so we're ok loaning out an immutable pointer to these
// contents.
unsafe { &*self.ptr }
}
// Non-inlined part of `drop`. Just invokes the destructor.
#[inline(never)]
unsafe fn drop_slow(&mut self) {
let _ = Box::from_raw(self.ptr);
}
#[inline]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.ptr == other.ptr
}
}
impl<T:?Sized> Clone for Arc<T> {
#[inline]
fn clone(&self) -> Self {
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// Note: std::process::abort is stable in 1.17, which we don't yet
// require for Gecko. Panic is good enough in practice here (it will
// trigger an abort at least in Gecko, and this case is degenerate
// enough that Servo shouldn't have code that triggers it).
//
// We should fix this when we require 1.17.
panic!();
}
Arc { ptr: self.ptr }
}
}
impl<T:?Sized> Deref for Arc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.inner().data
}
}
impl<T: Clone> Arc<T> {
#[inline]
pub fn make_mut(this: &mut Self) -> &mut T {
if !this.is_unique() {
// Another pointer exists; clone
*this = Arc::new((**this).clone());
}
unsafe {
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the Arc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
&mut (*this.ptr).data
}
}
}
impl<T:?Sized> Arc<T> {
#[inline]
pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if this.is_unique() {
unsafe {
// See make_mut() for documentation of the threadsafety here.
Some(&mut (*this.ptr).data)
}
} else {
None
}
}
#[inline]
fn is_unique(&self) -> bool {
// We can use Relaxed here, but the justification is a bit subtle.
//
// The reason to use Acquire would be to synchronize with other threads
// that are modifying the refcount with Release, i.e. to ensure that
// their writes to memory guarded by this refcount are flushed. However,
// we know that threads only modify the contents of the Arc when they
// observe the refcount to be 1, and no other thread could observe that
// because we're holding one strong reference here.
self.inner().count.load(Relaxed) == 1
}
}
impl<T:?Sized> Drop for Arc<T> {
#[inline]
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object.
if self.inner().count.fetch_sub(1, Release) != 1 {
return;
}
// FIXME(bholley): Use the updated comment when [2] is merged.
//
// This load is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` load. This
// means that use of the data happens before decreasing the reference
// count, which happens before this load, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
// [2]: https://github.com/rust-lang/rust/pull/41714
self.inner().count.load(Acquire);
unsafe {
self.drop_slow();
}
}
}
impl<T:?Sized + PartialEq> PartialEq for Arc<T> {
fn eq(&self, other: &Arc<T>) -> bool {
*(*self) == *(*other)
}
fn ne(&self, other: &Arc<T>) -> bool {
*(*self) != *(*other)
}
}
impl<T:?Sized + PartialOrd> PartialOrd for Arc<T> {
fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
fn lt(&self, other: &Arc<T>) -> bool {
*(*self) < *(*other)
}
fn le(&self, other: &Arc<T>) -> bool {
*(*self) <= *(*other)
}
fn gt(&self, other: &Arc<T>) -> bool {
*(*self) > *(*other)
}
fn ge(&self, other: &Arc<T>) -> bool {
*(*self) >= *(*other)
}
}
impl<T:?Sized + Ord> Ord for Arc<T> {
fn cmp(&self, other: &Arc<T>) -> Ordering {
(**self).cmp(&**other)
}
}
impl<T:?Sized + Eq> Eq for Arc<T> {}
impl<T:?Sized + fmt::Display> fmt::Display for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
} | impl<T:?Sized + fmt::Debug> fmt::Debug for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<T:?Sized> fmt::Pointer for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&self.ptr, f)
}
}
impl<T: Default> Default for Arc<T> {
fn default() -> Arc<T> {
Arc::new(Default::default())
}
}
impl<T:?Sized + Hash> Hash for Arc<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state)
}
}
impl<T> From<T> for Arc<T> {
fn from(t: T) -> Self {
Arc::new(t)
}
}
impl<T:?Sized> borrow::Borrow<T> for Arc<T> {
fn borrow(&self) -> &T {
&**self
}
}
impl<T:?Sized> AsRef<T> for Arc<T> {
fn as_ref(&self) -> &T {
&**self
}
}
// This is what the HeapSize crate does for regular arc, but is questionably
// sound. See https://github.com/servo/heapsize/issues/37
#[cfg(feature = "servo")]
impl<T: HeapSizeOf> HeapSizeOf for Arc<T> {
fn heap_size_of_children(&self) -> usize {
(**self).heap_size_of_children()
}
}
#[cfg(feature = "servo")]
impl<T: Deserialize> Deserialize for Arc<T>
{
fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
where
D: ::serde::de::Deserializer,
{
T::deserialize(deserializer).map(Arc::new)
}
}
#[cfg(feature = "servo")]
impl<T: Serialize> Serialize for Arc<T>
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: ::serde::ser::Serializer,
{
(**self).serialize(serializer)
}
} | random_line_split |
|
closures.rs | /*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
fn main() | {
// closures are anonymous functions.
let sum = |i: f64, j: f64| -> f64 { i + j };
let print_hi = || println!("Hi");
let i = 4.0;
let j = 3.0;
println!("{}", sum(i, j));
print_hi();
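// An illustrative addition (not in the original snippet): closures can also
// capture variables from the enclosing scope.
let offset = 10.0;
let add_offset = |x: f64| x + offset;
println!("{}", add_offset(i)); // prints 14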
} | identifier_body |
|
closures.rs | /*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
fn main() {
// closures are anonymous functions.
let sum = |i: f64, j: f64| -> f64 { i + j };
let print_hi = || println!("Hi");
let i = 4.0;
let j = 3.0;
| print_hi();
} | println!("{}", sum(i, j)); | random_line_split |
closures.rs | /*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
fn | () {
// closures are anonymous functions.
let sum = |i: f64, j: f64| -> f64 { i + j };
let print_hi = || println!("Hi");
let i = 4.0;
let j = 3.0;
println!("{}", sum(i, j));
print_hi();
} | main | identifier_name |
tokeniser.rs | //! The _Tokeniser_ class.
#![experimental]
use std::char::is_whitespace;
use escape_scheme::EscapeScheme;
/// A tokeniser object.
///
/// A Tokeniser can be fed characters from an iterator, string, or individually.
/// It is an _immutable_ object: actions on a Tokeniser consume the Tokeniser,
/// and produce a fresh copy of the Tokeniser.
///
/// At any stage, a Tokeniser can be consumed to produce the vector of words
/// it has read, using the `into_strings` method. This method may fail if the
/// Tokeniser ended in a bad state (in the middle of a quoted string, or in
/// the middle of an escape sequence).
#[deriving(Clone)]
pub struct Tokeniser<Q, E, S> {
/// The current vector of parsed words.
vec: Vec<String>,
/// The current tokeniser error, if any.
/// An error ‘poisons’ the tokeniser, causing it to ignore any further
/// input.
error: Option<Error>,
/// Whether or not we are currently in a word.
in_word: bool,
/// The current closing quote character and quote mode, if any.
quote: Option<( char, QuoteMode )>,
/// The current escape scheme in use, if any.
escape: Option<S>,
/// Maps from quote openers to quote closers.
quote_map: Q,
/// Map from escape leader characters to their schemes.
escape_map: E,
}
/// A quote mode.
#[deriving(Clone)]
pub enum QuoteMode {
/// All characters except the closing character have their literal value.
/// This is equivalent to single-quoting in POSIX shell.
IgnoreEscapes,
/// All characters except the closing character and escape sequences
/// have their literal value. This is roughly equivalent to
/// double-quoting in POSIX shell.
ParseEscapes
}
/// A tokeniser error.
///
/// A Tokeniser's `into_strings` method can fail with one of the following
/// errors if called while the Tokeniser is in an unfinished state.
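///
/// # Example
///
/// An illustrative sketch (added for clarity) of how an unfinished quotation
/// surfaces from `into_strings`. It reuses the map style of the
/// `Tokeniser::new` example further down, and assumes the error and quote-mode
/// variants plus `SimpleEscapeScheme` are re-exported from the crate root as
/// the other examples assume:
///
/// ```rust
/// use std::collections::hashmap::HashMap;
/// use russet::{ Tokeniser, QuoteMode, IgnoreEscapes, UnmatchedQuote };
/// use russet::SimpleEscapeScheme;
///
/// let quote_map: HashMap<char, ( char, QuoteMode )> =
///     vec![ ( '\'', ( '\'', IgnoreEscapes ) ) ].move_iter().collect();
/// let escape_map: HashMap<char, SimpleEscapeScheme<HashMap<char, char>>> =
///     HashMap::new();
///
/// // The opening quote is never closed, so the tokeniser ends mid-quote.
/// let tok = Tokeniser::new(quote_map, escape_map).add_string("'oops");
/// assert_eq!(tok.into_strings(), Err(UnmatchedQuote));
/// ```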
#[deriving(Clone, Eq, PartialEq, Show)]
pub enum Error {
/// A quotation was opened, but not closed.
UnmatchedQuote,
/// An escape sequence was started, but not finished.
UnfinishedEscape,
/// An unknown escape sequence was encountered.
BadEscape
}
impl<Q, E, S> Tokeniser<Q, E, S>
where Q: Map<char, ( char, QuoteMode )>,
E: Map<char, S>,
S: EscapeScheme,
Q: Clone,
E: Clone,
S: Clone,
Q: Collection {
/// Creates a new, blank Tokeniser.
///
/// # Arguments
///
/// * `quote_map` - A map, mapping characters that serve as opening quotes
/// to their closing quotes and quote modes.
/// * `escape_map` - A map, mapping escape leader characters to their escape
/// schemes. An empty map disables escapes.
///
/// # Return value
///
/// A new Tokeniser, with an empty state. Attempting to take the
/// string vector of the Tokeniser yields the empty vector.
///
/// # Example
///
/// ```rust
/// use std::collections::hashmap::HashMap;
/// use russet::{ Tokeniser, ParseEscapes, QuoteMode };
/// use russet::{ MapEscape, SimpleEscapeScheme };
///
/// let quote_map: HashMap<char, ( char, QuoteMode )> =
/// vec![ ( '\"', ( '\"', ParseEscapes ) ) ].move_iter().collect();
/// let escape_pairs: HashMap<char, char> =
/// vec![ ( 'n', '\n' ) ].move_iter().collect();
/// let escape_map: HashMap<char, SimpleEscapeScheme<HashMap<char, char>>> =
/// vec![ ( '\\', MapEscape(escape_pairs) )].move_iter().collect();
/// let tok = Tokeniser::new(quote_map, escape_map);
/// assert_eq!(tok.into_strings(), Ok(vec![]));
/// ```
pub fn new(quote_map: Q, escape_map: E) -> Tokeniser<Q, E, S> {
Tokeniser {
vec: vec![ String::new() ],
error: None,
in_word: false,
quote: None,
escape: None,
quote_map: quote_map,
escape_map: escape_map
}
}
/// Feeds a single character `chr` to a Tokeniser.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming `chr`.
///
/// # Example
///
/// ```rust
/// use russet::whitespace_split_tokeniser;
///
/// let tok = whitespace_split_tokeniser();
/// let tok2 = tok.add_char('a').add_char('b').add_char('c');
/// assert_eq!(tok2.into_strings(), Ok(vec![ "abc".into_string() ]));
/// ```
pub fn add_char(self, chr: char) -> Tokeniser<Q, E, S> {
let mut new = self.clone();
match (chr, self) {
// ERROR
// Found an error
// -> Ignore input
( _, Tokeniser { error: Some(_),.. } ) => (),
// ESCAPE SEQUENCES
// Currently escaping
// -> Escape via escape scheme.
( c, Tokeniser { escape: Some(s),.. } ) => match s.escape(c) {
Some(cc) => new.emit(cc),
None => { new.error = Some(BadEscape); }
},
// ESCAPE LEADER
// Escape leader, not in quotes
// -> Begin escape (and word if not in one already)
( c, Tokeniser { escape: None,
quote: None,
escape_map: ref e,.. } ) if e.contains_key(&c) =>
new.start_escaping(c),
// Escape leader, in escape-permitting quotes
// -> Begin escape (and word if not in one already)
( c, Tokeniser { escape: None,
quote: Some(( _, ParseEscapes )),
escape_map: ref e,.. } ) if e.contains_key(&c) =>
new.start_escaping(c),
// QUOTE OPENING
// Quote opening character, not currently in quoted word
// -> Start quoting
( c, Tokeniser { escape: None, quote: None, quote_map: ref q,.. } )
if q.contains_key(&c) => {
new.quote = Some(q.find(&c).unwrap().clone());
new.in_word = true;
},
// QUOTE CLOSING
// Quote closing character, in quoted word, quotes ok
// -> Stop quoting
( c, Tokeniser { escape: None, quote: Some(( cc, _ )),.. } )
if c == cc => {
new.quote = None;
new.in_word = true;
},
// UNESCAPED WHITESPACE
// Unescaped whitespace, while not in a word
// -> Ignore
( a, Tokeniser { escape: None, in_word: false,.. } )
if is_whitespace(a) => (),
// Unescaped whitespace, while in a non-quoted word
// -> End word
( a, Tokeniser { escape: None, in_word: true, quote: None,.. } )
if is_whitespace(a) => {
new.in_word = false;
new.vec.push(String::new());
},
// DEFAULT
// Anything else
// -> Echo
( a, _ ) => new.emit(a)
}
new
}
/// Feeds an Iterator of chars, `it`, into the Tokeniser.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming the characters in `it`.
pub fn add_iter<I: Iterator<char>>(self, mut it: I) -> Tokeniser<Q, E, S> {
it.fold(self, |s, chr| s.add_char(chr))
}
/// Feeds a string, `string`, into the Tokeniser.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming `string`.
pub fn add_string(self, string: &str) -> Tokeniser<Q, E, S> {
self.add_iter(string.chars())
}
/// Feeds a line, `line`, into the Tokeniser.
/// This differs from `add_string` in that the line is whitespace-trimmed
/// before adding.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming `line`.
pub fn add_line(self, line: &str) -> Tokeniser<Q, E, S> {
self.add_string(line.trim())
}
/// Destroys the tokeniser, extracting the string vector.
///
/// # Return value
///
/// A Result, containing the tokenised string vector if the Tokeniser
/// was in a valid ending state, and an Error otherwise.
pub fn into_strings(mut self) -> Result<Vec<String>, Error> {
if self.error.is_some() {
Err(self.error.unwrap())
} else if self.in_word && self.quote.is_some() {
Err(UnmatchedQuote)
} else if self.escape.is_some() {
Err(UnfinishedEscape)
} else {
self.drop_empty_current_string();
Ok(self.vec)
}
}
/// Adds a character into a Tokeniser's current string.
/// This automatically sets the Tokeniser's state to be in a word,
/// and clears any escape sequence flag.
fn emit(&mut self, c: char) {
self.in_word = true;
self.escape = None;
self.vec.mut_last().mutate(|s| { s.push_char(c); s });
}
/// Switches on escape mode.
/// This automatically sets the Tokeniser to be in a word, if it isn't
/// already.
fn start_escaping(&mut self, c: char) {
| /// Drops the current working string, if it is empty.
fn drop_empty_current_string(&mut self) {
if self.vec.last().map(|s| s.is_empty()).unwrap_or(false) {
self.vec.pop();
}
}
}
| self.escape = self.escape_map.find(&c).map(|a| a.clone());
self.in_word = true;
}
| identifier_body |
tokeniser.rs | //! The _Tokeniser_ class.
#![experimental]
use std::char::is_whitespace;
use escape_scheme::EscapeScheme;
/// A tokeniser object.
///
/// A Tokeniser can be fed characters from an iterator, string, or individually.
/// It is an _immutable_ object: actions on a Tokeniser consume the Tokeniser,
/// and produce a fresh copy of the Tokeniser.
///
/// At any stage, a Tokeniser can be consumed to produce the vector of words
/// it has read, using the `into_strings` method. This method may fail if the
/// Tokeniser ended in a bad state (in the middle of a quoted string, or in
/// the middle of an escape sequence).
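///
/// For example (an illustrative sketch added here; it assumes the
/// `whitespace_split_tokeniser` convenience constructor that the method
/// examples below also use):
///
/// ```rust
/// use russet::whitespace_split_tokeniser;
///
/// let tok = whitespace_split_tokeniser().add_line("  ab cd  ");
/// assert_eq!(tok.into_strings(),
///            Ok(vec![ "ab".into_string(), "cd".into_string() ]));
/// ```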
#[deriving(Clone)]
pub struct Tokeniser<Q, E, S> {
/// The current vector of parsed words.
vec: Vec<String>,
/// The current tokeniser error, if any.
/// An error ‘poisons’ the tokeniser, causing it to ignore any further
/// input.
error: Option<Error>,
/// Whether or not we are currently in a word.
in_word: bool,
/// The current closing quote character and quote mode, if any.
quote: Option<( char, QuoteMode )>,
/// The current escape scheme in use, if any.
escape: Option<S>,
/// Maps from quote openers to quote closers.
quote_map: Q,
/// Map from escape leader characters to their schemes.
escape_map: E,
}
/// A quote mode.
#[deriving(Clone)]
pub enum QuoteMode {
/// All characters except the closing character have their literal value.
/// This is equivalent to single-quoting in POSIX shell.
IgnoreEscapes,
/// All characters except the closing character and escape sequences
/// have their literal value. This is roughly equivalent to
/// double-quoting in POSIX shell.
ParseEscapes
}
/// A tokeniser error.
///
/// A Tokeniser's `into_strings` method can fail with one of the following
/// errors if called while the Tokeniser is in an unfinished state.
#[deriving(Clone, Eq, PartialEq, Show)]
pub enum Error {
/// A quotation was opened, but not closed.
UnmatchedQuote,
/// An escape sequence was started, but not finished.
UnfinishedEscape,
/// An unknown escape sequence was encountered.
BadEscape
}
impl<Q, E, S> Tokeniser<Q, E, S>
where Q: Map<char, ( char, QuoteMode )>,
E: Map<char, S>,
S: EscapeScheme,
Q: Clone,
E: Clone,
S: Clone,
Q: Collection {
/// Creates a new, blank Tokeniser.
///
/// # Arguments
///
/// * `quote_map` - A map, mapping characters that serve as opening quotes
/// to their closing quotes and quote modes.
/// * `escape_map` - A map, mapping escape leader characters to their escape
/// schemes. An empty map disables escapes.
///
/// # Return value
///
/// A new Tokeniser, with an empty state. Attempting to take the
/// string vector of the Tokeniser yields the empty vector.
///
/// # Example
///
/// ```rust
/// use std::collections::hashmap::HashMap;
/// use russet::{ Tokeniser, ParseEscapes, QuoteMode };
/// use russet::{ MapEscape, SimpleEscapeScheme };
///
/// let quote_map: HashMap<char, ( char, QuoteMode )> =
/// vec![ ( '\"', ( '\"', ParseEscapes ) ) ].move_iter().collect();
/// let escape_pairs: HashMap<char, char> =
/// vec![ ( 'n', '\n' ) ].move_iter().collect();
/// let escape_map: HashMap<char, SimpleEscapeScheme<HashMap<char, char>>> =
/// vec![ ( '\\', MapEscape(escape_pairs) )].move_iter().collect();
/// let tok = Tokeniser::new(quote_map, escape_map);
/// assert_eq!(tok.into_strings(), Ok(vec![]));
/// ```
pub fn new(quote_map: Q, escape_map: E) -> Tokeniser<Q, E, S> {
Tokeniser {
vec: vec![ String::new() ],
error: None,
in_word: false,
quote: None,
escape: None,
quote_map: quote_map,
escape_map: escape_map
}
}
/// Feeds a single character `chr` to a Tokeniser.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming `chr`.
///
/// # Example
///
/// ```rust
/// use russet::whitespace_split_tokeniser;
///
/// let tok = whitespace_split_tokeniser();
/// let tok2 = tok.add_char('a').add_char('b').add_char('c');
/// assert_eq!(tok2.into_strings(), Ok(vec![ "abc".into_string() ]));
/// ```
pub fn add_char(self, chr: char) -> Tokeniser<Q, E, S> {
let mut new = self.clone();
match (chr, self) {
// ERROR
// Found an error
// -> Ignore input
( _, Tokeniser { error: Some(_),.. } ) => (),
// ESCAPE SEQUENCES
// Currently escaping
// -> Escape via escape scheme.
( c, Tokeniser { escape: Some(s),.. } ) => match s.escape(c) {
Some(cc) => new.emit(cc),
None => { new.error = Some(BadEscape); }
},
| // -> Begin escape (and word if not in one already)
( c, Tokeniser { escape: None,
quote: None,
escape_map: ref e,.. } ) if e.contains_key(&c) =>
new.start_escaping(c),
// Escape leader, in escape-permitting quotes
// -> Begin escape (and word if not in one already)
( c, Tokeniser { escape: None,
quote: Some(( _, ParseEscapes )),
escape_map: ref e,.. } ) if e.contains_key(&c) =>
new.start_escaping(c),
// QUOTE OPENING
// Quote opening character, not currently in quoted word
// -> Start quoting
( c, Tokeniser { escape: None, quote: None, quote_map: ref q,.. } )
if q.contains_key(&c) => {
new.quote = Some(q.find(&c).unwrap().clone());
new.in_word = true;
},
// QUOTE CLOSING
// Quote closing character, in quoted word, quotes ok
// -> Stop quoting
( c, Tokeniser { escape: None, quote: Some(( cc, _ )),.. } )
if c == cc => {
new.quote = None;
new.in_word = true;
},
// UNESCAPED WHITESPACE
// Unescaped whitespace, while not in a word
// -> Ignore
( a, Tokeniser { escape: None, in_word: false,.. } )
if is_whitespace(a) => (),
// Unescaped whitespace, while in a non-quoted word
// -> End word
( a, Tokeniser { escape: None, in_word: true, quote: None,.. } )
if is_whitespace(a) => {
new.in_word = false;
new.vec.push(String::new());
},
// DEFAULT
// Anything else
// -> Echo
( a, _ ) => new.emit(a)
}
new
}
/// Feeds an Iterator of chars, `it`, into the Tokeniser.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming the characters in `it`.
pub fn add_iter<I: Iterator<char>>(self, mut it: I) -> Tokeniser<Q, E, S> {
it.fold(self, |s, chr| s.add_char(chr))
}
/// Feeds a string, `string`, into the Tokeniser.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming `string`.
pub fn add_string(self, string: &str) -> Tokeniser<Q, E, S> {
self.add_iter(string.chars())
}
/// Feeds a line, `line`, into the Tokeniser.
/// This differs from `add_string` in that the line is whitespace-trimmed
/// before adding.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming `line`.
pub fn add_line(self, line: &str) -> Tokeniser<Q, E, S> {
self.add_string(line.trim())
}
/// Destroys the tokeniser, extracting the string vector.
///
/// # Return value
///
/// A Result, containing the tokenised string vector if the Tokeniser
/// was in a valid ending state, and an Error otherwise.
pub fn into_strings(mut self) -> Result<Vec<String>, Error> {
if self.error.is_some() {
Err(self.error.unwrap())
} else if self.in_word && self.quote.is_some() {
Err(UnmatchedQuote)
} else if self.escape.is_some() {
Err(UnfinishedEscape)
} else {
self.drop_empty_current_string();
Ok(self.vec)
}
}
/// Adds a character into a Tokeniser's current string.
/// This automatically sets the Tokeniser's state to be in a word,
/// and clears any escape sequence flag.
fn emit(&mut self, c: char) {
self.in_word = true;
self.escape = None;
self.vec.mut_last().mutate(|s| { s.push_char(c); s });
}
/// Switches on escape mode.
/// This automatically sets the Tokeniser to be in a word, if it isn't
/// already.
fn start_escaping(&mut self, c: char) {
self.escape = self.escape_map.find(&c).map(|a| a.clone());
self.in_word = true;
}
/// Drops the current working string, if it is empty.
fn drop_empty_current_string(&mut self) {
if self.vec.last().map(|s| s.is_empty()).unwrap_or(false) {
self.vec.pop();
}
}
} | // ESCAPE LEADER
// Escape leader, not in quotes | random_line_split |
tokeniser.rs | //! The _Tokeniser_ class.
#![experimental]
use std::char::is_whitespace;
use escape_scheme::EscapeScheme;
/// A tokeniser object.
///
/// A Tokeniser can be fed characters from an iterator, string, or individually.
/// It is an _immutable_ object: actions on a Tokeniser consume the Tokeniser,
/// and produce a fresh copy of the Tokeniser.
///
/// At any stage, a Tokeniser can be consumed to produce the vector of words
/// it has read, using the `into_strings` method. This method may fail if the
/// Tokeniser ended in a bad state (in the middle of a quoted string, or in
/// the middle of an escape sequence).
#[deriving(Clone)]
pub struct Tokeniser<Q, E, S> {
/// The current vector of parsed words.
vec: Vec<String>,
/// The current tokeniser error, if any.
/// An error ‘poisons’ the tokeniser, causing it to ignore any further
/// input.
error: Option<Error>,
/// Whether or not we are currently in a word.
in_word: bool,
/// The current closing quote character and quote mode, if any.
quote: Option<( char, QuoteMode )>,
/// The current escape scheme in use, if any.
escape: Option<S>,
/// Maps from quote openers to quote closers.
quote_map: Q,
/// Map from escape leader characters to their schemes.
escape_map: E,
}
/// A quote mode.
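///
/// An illustrative sketch (added here; it mirrors the maps of the
/// `Tokeniser::new` example below and assumes the quote-mode variants,
/// `MapEscape` and `SimpleEscapeScheme` are re-exported from the crate root)
/// of how the two modes differ:
///
/// ```rust
/// use std::collections::hashmap::HashMap;
/// use russet::{ Tokeniser, QuoteMode, IgnoreEscapes, ParseEscapes };
/// use russet::{ MapEscape, SimpleEscapeScheme };
///
/// let quote_map: HashMap<char, ( char, QuoteMode )> =
///     vec![ ( '\'', ( '\'', IgnoreEscapes ) ),
///           ( '\"', ( '\"', ParseEscapes ) ) ].move_iter().collect();
/// let escape_pairs: HashMap<char, char> =
///     vec![ ( 'n', '\n' ) ].move_iter().collect();
/// let escape_map: HashMap<char, SimpleEscapeScheme<HashMap<char, char>>> =
///     vec![ ( '\\', MapEscape(escape_pairs) ) ].move_iter().collect();
///
/// // `\n` stays literal inside single quotes, but becomes a newline inside
/// // double quotes.
/// let tok = Tokeniser::new(quote_map, escape_map).add_string("'a\\n' \"a\\n\"");
/// assert_eq!(tok.into_strings(),
///            Ok(vec![ "a\\n".into_string(), "a\n".into_string() ]));
/// ```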
#[deriving(Clone)]
pub enum QuoteMode {
/// All characters except the closing character have their literal value.
/// This is equivalent to single-quoting in POSIX shell.
IgnoreEscapes,
/// All characters except the closing character and escape sequences
/// have their literal value. This is roughly equivalent to
/// double-quoting in POSIX shell.
ParseEscapes
}
/// A tokeniser error.
///
/// A Tokeniser's `into_strings` method can fail with one of the following
/// errors if called while the Tokeniser is in an unfinished state.
#[deriving(Clone, Eq, PartialEq, Show)]
pub enum Error {
/// A quotation was opened, but not closed.
UnmatchedQuote,
/// An escape sequence was started, but not finished.
UnfinishedEscape,
/// An unknown escape sequence was encountered.
BadEscape
}
impl<Q, E, S> Tokeniser<Q, E, S>
where Q: Map<char, ( char, QuoteMode )>,
E: Map<char, S>,
S: EscapeScheme,
Q: Clone,
E: Clone,
S: Clone,
Q: Collection {
/// Creates a new, blank Tokeniser.
///
/// # Arguments
///
/// * `quote_map` - A map, mapping characters that serve as opening quotes
/// to their closing quotes and quote modes.
/// * `escape_map` - A map, mapping escape leader characters to their escape
/// schemes. An empty map disables escapes.
///
/// # Return value
///
/// A new Tokeniser, with an empty state. Attempting to take the
/// string vector of the Tokeniser yields the empty vector.
///
/// # Example
///
/// ```rust
/// use std::collections::hashmap::HashMap;
/// use russet::{ Tokeniser, ParseEscapes, QuoteMode };
/// use russet::{ MapEscape, SimpleEscapeScheme };
///
/// let quote_map: HashMap<char, ( char, QuoteMode )> =
/// vec![ ( '\"', ( '\"', ParseEscapes ) ) ].move_iter().collect();
/// let escape_pairs: HashMap<char, char> =
/// vec![ ( 'n', '\n' ) ].move_iter().collect();
/// let escape_map: HashMap<char, SimpleEscapeScheme<HashMap<char, char>>> =
/// vec![ ( '\\', MapEscape(escape_pairs) )].move_iter().collect();
/// let tok = Tokeniser::new(quote_map, escape_map);
/// assert_eq!(tok.into_strings(), Ok(vec![]));
/// ```
pub fn new(quote_map: Q, escape_map: E) -> Tokeniser<Q, E, S> {
Tokeniser {
vec: vec![ String::new() ],
error: None,
in_word: false,
quote: None,
escape: None,
quote_map: quote_map,
escape_map: escape_map
}
}
/// Feeds a single character `chr` to a Tokeniser.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming `chr`.
///
/// # Example
///
/// ```rust
/// use russet::whitespace_split_tokeniser;
///
/// let tok = whitespace_split_tokeniser();
/// let tok2 = tok.add_char('a').add_char('b').add_char('c');
/// assert_eq!(tok2.into_strings(), Ok(vec![ "abc".into_string() ]));
/// ```
pub fn add_ | f, chr: char) -> Tokeniser<Q, E, S> {
let mut new = self.clone();
match (chr, self) {
// ERROR
// Found an error
// -> Ignore input
( _, Tokeniser { error: Some(_),.. } ) => (),
// ESCAPE SEQUENCES
// Currently escaping
// -> Escape via escape scheme.
( c, Tokeniser { escape: Some(s),.. } ) => match s.escape(c) {
Some(cc) => new.emit(cc),
None => { new.error = Some(BadEscape); }
},
// ESCAPE LEADER
// Escape leader, not in quotes
// -> Begin escape (and word if not in one already)
( c, Tokeniser { escape: None,
quote: None,
escape_map: ref e,.. } ) if e.contains_key(&c) =>
new.start_escaping(c),
// Escape leader, in escape-permitting quotes
// -> Begin escape (and word if not in one already)
( c, Tokeniser { escape: None,
quote: Some(( _, ParseEscapes )),
escape_map: ref e,.. } ) if e.contains_key(&c) =>
new.start_escaping(c),
// QUOTE OPENING
// Quote opening character, not currently in quoted word
// -> Start quoting
( c, Tokeniser { escape: None, quote: None, quote_map: ref q,.. } )
if q.contains_key(&c) => {
new.quote = Some(q.find(&c).unwrap().clone());
new.in_word = true;
},
// QUOTE CLOSING
// Quote closing character, in quoted word, quotes ok
// -> Stop quoting
( c, Tokeniser { escape: None, quote: Some(( cc, _ )),.. } )
if c == cc => {
new.quote = None;
new.in_word = true;
},
// UNESCAPED WHITESPACE
// Unescaped whitespace, while not in a word
// -> Ignore
( a, Tokeniser { escape: None, in_word: false,.. } )
if is_whitespace(a) => (),
// Unescaped whitespace, while in a non-quoted word
// -> End word
( a, Tokeniser { escape: None, in_word: true, quote: None,.. } )
if is_whitespace(a) => {
new.in_word = false;
new.vec.push(String::new());
},
// DEFAULT
// Anything else
// -> Echo
( a, _ ) => new.emit(a)
}
new
}
/// Feeds an Iterator of chars, `it`, into the Tokeniser.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming the characters in `it`.
pub fn add_iter<I: Iterator<char>>(self, mut it: I) -> Tokeniser<Q, E, S> {
it.fold(self, |s, chr| s.add_char(chr))
}
/// Feeds a string, `string`, into the Tokeniser.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming `string`.
pub fn add_string(self, string: &str) -> Tokeniser<Q, E, S> {
self.add_iter(string.chars())
}
/// Feeds a line, `line`, into the Tokeniser.
/// This differs from `add_string` in that the line is whitespace-trimmed
/// before adding.
///
/// # Return value
///
/// A new Tokeniser, representing the state of the Tokeniser after
/// consuming `line`.
pub fn add_line(self, line: &str) -> Tokeniser<Q, E, S> {
self.add_string(line.trim())
}
/// Destroys the tokeniser, extracting the string vector.
///
/// # Return value
///
/// A Result, containing the tokenised string vector if the Tokeniser
/// was in a valid ending state, and an Error otherwise.
pub fn into_strings(mut self) -> Result<Vec<String>, Error> {
if self.error.is_some() {
Err(self.error.unwrap())
} else if self.in_word && self.quote.is_some() {
Err(UnmatchedQuote)
} else if self.escape.is_some() {
Err(UnfinishedEscape)
} else {
self.drop_empty_current_string();
Ok(self.vec)
}
}
/// Adds a character into a Tokeniser's current string.
/// This automatically sets the Tokeniser's state to be in a word,
/// and clears any escape sequence flag.
fn emit(&mut self, c: char) {
self.in_word = true;
self.escape = None;
self.vec.mut_last().mutate(|s| { s.push_char(c); s });
}
/// Switches on escape mode.
/// This automatically sets the Tokeniser to be in a word, if it isn't
/// already.
fn start_escaping(&mut self, c: char) {
self.escape = self.escape_map.find(&c).map(|a| a.clone());
self.in_word = true;
}
/// Drops the current working string, if it is empty.
fn drop_empty_current_string(&mut self) {
if self.vec.last().map(|s| s.is_empty()).unwrap_or(false) {
self.vec.pop();
}
}
}
| char(sel | identifier_name |
vec.rs | //! Vector primitive.
use prelude::*;
use core::{mem, ops, ptr, slice};
use leak::Leak;
/// A low-level vector primitive.
///
/// This does not perform allocation or reallocation; these have to be done manually.
/// Moreover, no destructors are called, making it possible to leak memory.
pub struct Vec<T: Leak> {
/// A pointer to the start of the buffer.
ptr: Pointer<T>,
/// The capacity of the buffer.
///
/// This is the number of elements the buffer can hold before reallocation is necessary.
cap: usize,
/// The length of the vector.
///
/// This is the number of elements from the start that are initialized and can be read safely.
len: usize,
}
impl<T: Leak> Vec<T> {
/// Create a vector from a block.
///
/// # Safety
///
/// This is unsafe, since it won't initialize the buffer in any way, possibly breaking type
/// safety, memory safety, and so on. Thus, care must be taken upon usage.
#[inline]
pub unsafe fn from_raw_parts(block: Block, len: usize) -> Vec<T> {
Vec {
len: len,
cap: block.size() / mem::size_of::<T>(),
ptr: Pointer::from(block).cast(),
}
}
/// Replace the inner buffer with a new one, and return the old.
///
/// This will memcpy the vectors buffer to the new block, and update the pointer and capacity
/// to match the given block.
///
/// # Panics
///
/// This panics if the vector is bigger than the block.
pub fn refill(&mut self, block: Block) -> Block {
log!(INTERNAL, "Refilling vector...");
// Calculate the new capacity.
let new_cap = block.size() / mem::size_of::<T>();
// Make some assertions.
assert!(
self.len <= new_cap,
"Block not large enough to cover the vector."
);
assert!(block.aligned_to(mem::align_of::<T>()), "Block not aligned.");
let old = mem::replace(self, Vec::default());
// Update the fields of `self`.
self.cap = new_cap;
self.ptr = Pointer::from(block).cast();
self.len = old.len;
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// Due to the invariants of `Block`, this copy is safe (the pointer is valid and
// unaliased).
ptr::copy_nonoverlapping(old.ptr.get(), self.ptr.get(), old.len);
}
Block::from(old)
}
/// Get the capacity of this vector.
#[inline]
pub fn capacity(&self) -> usize {
self.cap
}
/// Push an element to the end of this vector.
///
/// On success, return `Ok(())`. On failure (not enough capacity), return `Err(())`.
#[inline]
pub fn push(&mut self, elem: T) -> Result<(), ()> {
if self.len == self.cap {
Err(())
} else {
// Place the element in the end of the vector.
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// By the invariants of this type (the size is bounded by the address space), this
// conversion isn't overflowing.
ptr::write((self.ptr.get()).offset(self.len as isize), elem);
}
// Increment the length.
self.len += 1;
Ok(())
}
}
/// Pop an element from the vector.
///
/// If the vector is empty, `None` is returned.
#[inline]
pub fn | (&mut self) -> Option<T> {
if self.len == 0 {
None
} else {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// Decrement the length. This won't underflow due to the conditional above.
self.len -= 1;
// We use `ptr::read` since the element is inaccessible due to the decrease in the
// length.
Some(ptr::read(self.get_unchecked(self.len)))
}
}
}
/// Truncate this vector.
///
/// This is O(1).
///
/// # Panics
///
/// Panics on out-of-bound.
pub fn truncate(&mut self, len: usize) {
// Bound check.
assert!(len <= self.len, "Out of bound.");
self.len = len;
}
/// Yield an iterator popping from the vector.
pub fn pop_iter(&mut self) -> PopIter<T> {
PopIter { vec: self }
}
}
/// An iterator popping elements off the end of a vector.
pub struct PopIter<'a, T: 'a + Leak> {
vec: &'a mut Vec<T>,
}
impl<'a, T: Leak> Iterator for PopIter<'a, T> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
self.vec.pop()
}
}
// TODO: Remove this in favour of `derive` when rust-lang/rust#35263 is fixed.
impl<T: Leak> Default for Vec<T> {
fn default() -> Vec<T> {
Vec {
ptr: Pointer::empty(),
cap: 0,
len: 0,
}
}
}
/// Cast this vector to the respective block.
impl<T: Leak> From<Vec<T>> for Block {
fn from(from: Vec<T>) -> Block {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
Block::from_raw_parts(from.ptr.cast(), from.cap * mem::size_of::<T>())
}
}
}
impl<T: Leak> ops::Deref for Vec<T> {
type Target = [T];
#[inline]
fn deref(&self) -> &[T] {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
slice::from_raw_parts(self.ptr.get() as *const T, self.len)
}
}
}
impl<T: Leak> ops::DerefMut for Vec<T> {
#[inline]
fn deref_mut(&mut self) -> &mut [T] {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
slice::from_raw_parts_mut(self.ptr.get() as *mut T, self.len)
}
}
}
#[cfg(test)]
mod test {
use prelude::*;
#[test]
fn test_vec() {
let mut buffer = [b'a'; 32];
let mut vec = unsafe {
Vec::from_raw_parts(
Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32),
16,
)
};
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaa");
vec.push(b'b').unwrap();
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaab");
vec.push(b'c').unwrap();
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaabc");
vec[0] = b'.';
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
unsafe {
assert_eq!(
vec.refill(Block::from_raw_parts(
Pointer::new(&mut buffer[0] as *mut u8),
32
)).size(),
32
);
}
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
for _ in 0..14 {
vec.push(b'_').unwrap();
}
assert_eq!(vec.pop().unwrap(), b'_');
vec.push(b'@').unwrap();
vec.push(b'!').unwrap_err();
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc_____________@");
assert_eq!(vec.capacity(), 32);
for _ in 0..32 {
vec.pop().unwrap();
}
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
}
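// An illustrative extra test (an addition, written in the same style as
// `test_vec` above) covering `truncate` and `pop_iter`, which the original
// test does not exercise.
#[test]
fn test_truncate_and_pop_iter() {
    let mut buffer = [b'x'; 8];
    let mut vec = unsafe {
        Vec::from_raw_parts(
            Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 8),
            4,
        )
    };
    assert_eq!(&*vec, b"xxxx");
    // Truncation only moves the length; the capacity is untouched.
    vec.truncate(2);
    assert_eq!(&*vec, b"xx");
    assert_eq!(vec.capacity(), 8);
    vec.push(b'y').unwrap();
    // `pop_iter` drains the vector from the back.
    {
        let mut drained = vec.pop_iter();
        assert_eq!(drained.next(), Some(b'y'));
        assert_eq!(drained.next(), Some(b'x'));
        assert_eq!(drained.next(), Some(b'x'));
        assert_eq!(drained.next(), None);
    }
    assert_eq!(vec.len(), 0);
}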
}
| pop | identifier_name |
vec.rs | //! Vector primitive.
use prelude::*;
use core::{mem, ops, ptr, slice};
use leak::Leak;
/// A low-level vector primitive.
///
/// This does not perform allocation or reallocation; these have to be done manually.
/// Moreover, no destructors are called, making it possible to leak memory.
pub struct Vec<T: Leak> {
/// A pointer to the start of the buffer.
ptr: Pointer<T>,
/// The capacity of the buffer.
///
/// This is the number of elements the buffer can hold before reallocation is necessary.
cap: usize,
/// The length of the vector.
///
/// This is the number of elements from the start that are initialized and can be read safely.
len: usize,
}
impl<T: Leak> Vec<T> {
/// Create a vector from a block.
///
/// # Safety
///
/// This is unsafe, since it won't initialize the buffer in any way, possibly breaking type
/// safety, memory safety, and so on. Thus, care must be taken upon usage.
#[inline]
pub unsafe fn from_raw_parts(block: Block, len: usize) -> Vec<T> {
Vec {
len: len,
cap: block.size() / mem::size_of::<T>(),
ptr: Pointer::from(block).cast(),
}
}
/// Replace the inner buffer with a new one, and return the old.
///
/// This will memcpy the vectors buffer to the new block, and update the pointer and capacity
/// to match the given block.
///
/// # Panics
///
/// This panics if the vector is bigger than the block.
pub fn refill(&mut self, block: Block) -> Block {
log!(INTERNAL, "Refilling vector...");
// Calculate the new capacity.
let new_cap = block.size() / mem::size_of::<T>();
// Make some assertions.
assert!(
self.len <= new_cap,
"Block not large enough to cover the vector."
);
assert!(block.aligned_to(mem::align_of::<T>()), "Block not aligned.");
let old = mem::replace(self, Vec::default());
// Update the fields of `self`.
self.cap = new_cap;
self.ptr = Pointer::from(block).cast();
self.len = old.len;
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// Due to the invariants of `Block`, this copy is safe (the pointer is valid and
// unaliased).
ptr::copy_nonoverlapping(old.ptr.get(), self.ptr.get(), old.len);
}
Block::from(old)
}
/// Get the capacity of this vector.
#[inline]
pub fn capacity(&self) -> usize {
self.cap
}
/// Push an element to the end of this vector.
///
/// On success, return `Ok(())`. On failure (not enough capacity), return `Err(())`.
#[inline]
pub fn push(&mut self, elem: T) -> Result<(), ()> {
if self.len == self.cap | else {
// Place the element in the end of the vector.
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// By the invariants of this type (the size is bounded by the address space), this
// conversion isn't overflowing.
ptr::write((self.ptr.get()).offset(self.len as isize), elem);
}
// Increment the length.
self.len += 1;
Ok(())
}
}
/// Pop an element from the vector.
///
/// If the vector is empty, `None` is returned.
#[inline]
pub fn pop(&mut self) -> Option<T> {
if self.len == 0 {
None
} else {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// Decrement the length. This won't underflow due to the conditional above.
self.len -= 1;
// We use `ptr::read` since the element is inaccessible due to the decrease in the
// length.
Some(ptr::read(self.get_unchecked(self.len)))
}
}
}
/// Truncate this vector.
///
/// This is O(1).
///
/// # Panics
///
/// Panics on out-of-bound.
pub fn truncate(&mut self, len: usize) {
// Bound check.
assert!(len <= self.len, "Out of bound.");
self.len = len;
}
/// Yield an iterator popping from the vector.
pub fn pop_iter(&mut self) -> PopIter<T> {
PopIter { vec: self }
}
}
/// An iterator popping elements off the end of a vector.
pub struct PopIter<'a, T: 'a + Leak> {
vec: &'a mut Vec<T>,
}
impl<'a, T: Leak> Iterator for PopIter<'a, T> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
self.vec.pop()
}
}
// TODO: Remove this in favour of `derive` when rust-lang/rust#35263 is fixed.
impl<T: Leak> Default for Vec<T> {
fn default() -> Vec<T> {
Vec {
ptr: Pointer::empty(),
cap: 0,
len: 0,
}
}
}
/// Cast this vector to the respective block.
impl<T: Leak> From<Vec<T>> for Block {
fn from(from: Vec<T>) -> Block {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
Block::from_raw_parts(from.ptr.cast(), from.cap * mem::size_of::<T>())
}
}
}
impl<T: Leak> ops::Deref for Vec<T> {
type Target = [T];
#[inline]
fn deref(&self) -> &[T] {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
slice::from_raw_parts(self.ptr.get() as *const T, self.len)
}
}
}
impl<T: Leak> ops::DerefMut for Vec<T> {
#[inline]
fn deref_mut(&mut self) -> &mut [T] {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
slice::from_raw_parts_mut(self.ptr.get() as *mut T, self.len)
}
}
}
#[cfg(test)]
mod test {
use prelude::*;
#[test]
fn test_vec() {
let mut buffer = [b'a'; 32];
let mut vec = unsafe {
Vec::from_raw_parts(
Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32),
16,
)
};
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaa");
vec.push(b'b').unwrap();
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaab");
vec.push(b'c').unwrap();
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaabc");
vec[0] = b'.';
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
unsafe {
assert_eq!(
vec.refill(Block::from_raw_parts(
Pointer::new(&mut buffer[0] as *mut u8),
32
)).size(),
32
);
}
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
for _ in 0..14 {
vec.push(b'_').unwrap();
}
assert_eq!(vec.pop().unwrap(), b'_');
vec.push(b'@').unwrap();
vec.push(b'!').unwrap_err();
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc_____________@");
assert_eq!(vec.capacity(), 32);
for _ in 0..32 {
vec.pop().unwrap();
}
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
}
}
| {
Err(())
} | conditional_block |
vec.rs | //! Vector primitive.
use prelude::*;
use core::{mem, ops, ptr, slice};
use leak::Leak;
/// A low-level vector primitive.
///
/// This does not perform allocation or reallocation; these have to be done manually.
/// Moreover, no destructors are called, making it possible to leak memory.
pub struct Vec<T: Leak> {
/// A pointer to the start of the buffer.
ptr: Pointer<T>,
/// The capacity of the buffer.
///
/// This is the number of elements the buffer can hold before reallocation is necessary.
cap: usize,
/// The length of the vector.
///
/// This is the number of elements from the start that are initialized and can be read safely.
len: usize,
}
impl<T: Leak> Vec<T> {
/// Create a vector from a block.
///
/// # Safety
///
/// This is unsafe, since it won't initialize the buffer in any way, possibly breaking type
/// safety, memory safety, and so on. Thus, care must be taken upon usage.
#[inline]
pub unsafe fn from_raw_parts(block: Block, len: usize) -> Vec<T> {
Vec {
len: len,
cap: block.size() / mem::size_of::<T>(),
ptr: Pointer::from(block).cast(),
}
}
/// Replace the inner buffer with a new one, and return the old.
///
/// This will memcpy the vectors buffer to the new block, and update the pointer and capacity
/// to match the given block.
///
/// # Panics
///
/// This panics if the vector is bigger than the block.
pub fn refill(&mut self, block: Block) -> Block | // LAST AUDIT: 2016-08-21 (Ticki).
// Due to the invariants of `Block`, this copy is safe (the pointer is valid and
// unaliased).
ptr::copy_nonoverlapping(old.ptr.get(), self.ptr.get(), old.len);
}
Block::from(old)
}
/// Get the capacity of this vector.
#[inline]
pub fn capacity(&self) -> usize {
self.cap
}
/// Push an element to the end of this vector.
///
/// On success, return `Ok(())`. On failure (not enough capacity), return `Err(())`.
#[inline]
pub fn push(&mut self, elem: T) -> Result<(), ()> {
if self.len == self.cap {
Err(())
} else {
// Place the element in the end of the vector.
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// By the invariants of this type (the size is bounded by the address space), this
// conversion isn't overflowing.
ptr::write((self.ptr.get()).offset(self.len as isize), elem);
}
// Increment the length.
self.len += 1;
Ok(())
}
}
/// Pop an element from the vector.
///
/// If the vector is empty, `None` is returned.
#[inline]
pub fn pop(&mut self) -> Option<T> {
if self.len == 0 {
None
} else {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// Decrement the length. This won't underflow due to the conditional above.
self.len -= 1;
// We use `ptr::read` since the element is inaccessible due to the decrease in the
// length.
Some(ptr::read(self.get_unchecked(self.len)))
}
}
}
/// Truncate this vector.
///
/// This is O(1).
///
/// # Panics
///
/// Panics on out-of-bound.
pub fn truncate(&mut self, len: usize) {
// Bound check.
assert!(len <= self.len, "Out of bound.");
self.len = len;
}
/// Yield an iterator popping from the vector.
pub fn pop_iter(&mut self) -> PopIter<T> {
PopIter { vec: self }
}
}
/// An iterator popping elements off the end of a vector.
pub struct PopIter<'a, T: 'a + Leak> {
vec: &'a mut Vec<T>,
}
impl<'a, T: Leak> Iterator for PopIter<'a, T> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
self.vec.pop()
}
}
// TODO: Remove this in favour of `derive` when rust-lang/rust#35263 is fixed.
impl<T: Leak> Default for Vec<T> {
fn default() -> Vec<T> {
Vec {
ptr: Pointer::empty(),
cap: 0,
len: 0,
}
}
}
/// Cast this vector to the respective block.
impl<T: Leak> From<Vec<T>> for Block {
fn from(from: Vec<T>) -> Block {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
Block::from_raw_parts(from.ptr.cast(), from.cap * mem::size_of::<T>())
}
}
}
impl<T: Leak> ops::Deref for Vec<T> {
type Target = [T];
#[inline]
fn deref(&self) -> &[T] {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
slice::from_raw_parts(self.ptr.get() as *const T, self.len)
}
}
}
impl<T: Leak> ops::DerefMut for Vec<T> {
#[inline]
fn deref_mut(&mut self) -> &mut [T] {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
slice::from_raw_parts_mut(self.ptr.get() as *mut T, self.len)
}
}
}
#[cfg(test)]
mod test {
use prelude::*;
#[test]
fn test_vec() {
let mut buffer = [b'a'; 32];
let mut vec = unsafe {
Vec::from_raw_parts(
Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32),
16,
)
};
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaa");
vec.push(b'b').unwrap();
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaab");
vec.push(b'c').unwrap();
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaabc");
vec[0] = b'.';
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
unsafe {
assert_eq!(
vec.refill(Block::from_raw_parts(
Pointer::new(&mut buffer[0] as *mut u8),
32
)).size(),
32
);
}
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
for _ in 0..14 {
vec.push(b'_').unwrap();
}
assert_eq!(vec.pop().unwrap(), b'_');
vec.push(b'@').unwrap();
vec.push(b'!').unwrap_err();
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc_____________@");
assert_eq!(vec.capacity(), 32);
for _ in 0..32 {
vec.pop().unwrap();
}
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
}
}
| {
log!(INTERNAL, "Refilling vector...");
// Calculate the new capacity.
let new_cap = block.size() / mem::size_of::<T>();
// Make some assertions.
assert!(
self.len <= new_cap,
"Block not large enough to cover the vector."
);
assert!(block.aligned_to(mem::align_of::<T>()), "Block not aligned.");
let old = mem::replace(self, Vec::default());
// Update the fields of `self`.
self.cap = new_cap;
self.ptr = Pointer::from(block).cast();
self.len = old.len;
unsafe { | identifier_body |
vec.rs | //! Vector primitive.
use prelude::*;
use core::{mem, ops, ptr, slice};
use leak::Leak;
/// A low-level vector primitive.
///
/// This does not perform allocation or reallocation; these have to be done manually.
/// Moreover, no destructors are called, making it possible to leak memory.
pub struct Vec<T: Leak> {
/// A pointer to the start of the buffer.
ptr: Pointer<T>,
/// The capacity of the buffer.
///
/// This is the number of elements the buffer can hold before reallocation is necessary.
cap: usize,
/// The length of the vector.
///
/// This is the number of elements from the start that are initialized and can be read safely.
len: usize,
}
impl<T: Leak> Vec<T> {
/// Create a vector from a block.
///
/// # Safety
///
/// This is unsafe, since it won't initialize the buffer in any way, possibly breaking type
/// safety, memory safety, and so on. Thus, care must be taken upon usage.
#[inline]
pub unsafe fn from_raw_parts(block: Block, len: usize) -> Vec<T> {
Vec {
len: len,
cap: block.size() / mem::size_of::<T>(),
ptr: Pointer::from(block).cast(),
}
}
/// Replace the inner buffer with a new one, and return the old.
///
/// This will memcpy the vectors buffer to the new block, and update the pointer and capacity
/// to match the given block.
///
/// # Panics
///
/// This panics if the vector is bigger than the block.
pub fn refill(&mut self, block: Block) -> Block {
log!(INTERNAL, "Refilling vector...");
// Calculate the new capacity.
let new_cap = block.size() / mem::size_of::<T>();
// Make some assertions.
assert!(
self.len <= new_cap,
"Block not large enough to cover the vector."
);
assert!(block.aligned_to(mem::align_of::<T>()), "Block not aligned.");
let old = mem::replace(self, Vec::default());
// Update the fields of `self`.
self.cap = new_cap;
self.ptr = Pointer::from(block).cast();
self.len = old.len;
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// Due to the invariants of `Block`, this copy is safe (the pointer is valid and
// unaliased).
ptr::copy_nonoverlapping(old.ptr.get(), self.ptr.get(), old.len);
}
Block::from(old)
}
/// Get the capacity of this vector.
#[inline]
pub fn capacity(&self) -> usize {
self.cap
}
/// Push an element to the end of this vector.
///
/// On success, return `Ok(())`. On failure (not enough capacity), return `Err(())`.
#[inline]
pub fn push(&mut self, elem: T) -> Result<(), ()> {
if self.len == self.cap {
Err(())
} else {
// Place the element in the end of the vector.
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
| // By the invariants of this type (the size is bounded by the address space), this
// conversion isn't overflowing.
ptr::write((self.ptr.get()).offset(self.len as isize), elem);
}
// Increment the length.
self.len += 1;
Ok(())
}
}
/// Pop an element from the vector.
///
/// If the vector is empty, `None` is returned.
#[inline]
pub fn pop(&mut self) -> Option<T> {
if self.len == 0 {
None
} else {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// Decrement the length. This won't underflow due to the conditional above.
self.len -= 1;
// We use `ptr::read` since the element is inaccessible due to the decrease in the
// length.
Some(ptr::read(self.get_unchecked(self.len)))
}
}
}
/// Truncate this vector.
///
/// This is O(1).
///
/// # Panics
///
/// Panics on out-of-bound.
pub fn truncate(&mut self, len: usize) {
// Bound check.
assert!(len <= self.len, "Out of bound.");
self.len = len;
}
/// Yield an iterator popping from the vector.
pub fn pop_iter(&mut self) -> PopIter<T> {
PopIter { vec: self }
}
}
/// An iterator popping elements off the end of a vector.
pub struct PopIter<'a, T: 'a + Leak> {
vec: &'a mut Vec<T>,
}
impl<'a, T: Leak> Iterator for PopIter<'a, T> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
self.vec.pop()
}
}
// TODO: Remove this in favour of `derive` when rust-lang/rust#35263 is fixed.
impl<T: Leak> Default for Vec<T> {
fn default() -> Vec<T> {
Vec {
ptr: Pointer::empty(),
cap: 0,
len: 0,
}
}
}
/// Cast this vector to the respective block.
impl<T: Leak> From<Vec<T>> for Block {
fn from(from: Vec<T>) -> Block {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
Block::from_raw_parts(from.ptr.cast(), from.cap * mem::size_of::<T>())
}
}
}
impl<T: Leak> ops::Deref for Vec<T> {
type Target = [T];
#[inline]
fn deref(&self) -> &[T] {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
slice::from_raw_parts(self.ptr.get() as *const T, self.len)
}
}
}
impl<T: Leak> ops::DerefMut for Vec<T> {
#[inline]
fn deref_mut(&mut self) -> &mut [T] {
unsafe {
// LAST AUDIT: 2016-08-21 (Ticki).
// The invariants maintains safety.
slice::from_raw_parts_mut(self.ptr.get() as *mut T, self.len)
}
}
}
#[cfg(test)]
mod test {
use prelude::*;
#[test]
fn test_vec() {
let mut buffer = [b'a'; 32];
let mut vec = unsafe {
Vec::from_raw_parts(
Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32),
16,
)
};
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaa");
vec.push(b'b').unwrap();
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaab");
vec.push(b'c').unwrap();
assert_eq!(&*vec, b"aaaaaaaaaaaaaaaabc");
vec[0] = b'.';
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
unsafe {
assert_eq!(
vec.refill(Block::from_raw_parts(
Pointer::new(&mut buffer[0] as *mut u8),
32
)).size(),
32
);
}
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
for _ in 0..14 {
vec.push(b'_').unwrap();
}
assert_eq!(vec.pop().unwrap(), b'_');
vec.push(b'@').unwrap();
vec.push(b'!').unwrap_err();
assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc_____________@");
assert_eq!(vec.capacity(), 32);
for _ in 0..32 {
vec.pop().unwrap();
}
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
assert!(vec.pop().is_none());
}
} | random_line_split |
|
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well as maintaining the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
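//!
//! A hedged sketch of that embedding flow (the `app::Window` type, its `new`
//! constructor and its `wait_events` pump are assumptions standing in for a
//! real `WindowMethods` implementation):
//!
//! ```ignore
//! let window = std::rc::Rc::new(app::Window::new());
//! let mut browser = Browser::new(window.clone());
//! loop {
//!     // Collect platform events and hand them to Servo; the returned flag
//!     // reports whether the compositor wants to keep running.
//!     let events = window.wait_events();
//!     if !browser.handle_events(events) {
//!         break;
//!     }
//! }
//! ```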
#[cfg(not(target_os = "windows"))]
extern crate gaol;
#[macro_use]
extern crate gleam;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate style;
pub extern crate url;
pub extern crate util;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
extern crate webrender_traits;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use compositing::{CompositorProxy, IOCompositor};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use net::bluetooth_thread::BluetoothThreadFactory;
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use net_traits::bluetooth_thread::BluetoothMethodMsg;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::ConstellationMsg;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use util::resource_files::resources_dir_path;
use util::{opts, prefs};
pub use gleam::gl;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods + 'static> {
compositor: IOCompositor<Window>,
}
impl<Window> Browser<Window> where Window: WindowMethods + 'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
script::init();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let (webrender, webrender_api_sender) = if opts::get().use_webrender {
let mut resource_path = resources_dir_path();
resource_path.push("shaders");
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.scale_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let (webrender, webrender_sender) =
webrender::Renderer::new(webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_path: resource_path,
enable_aa: opts.enable_text_antialiasing,
enable_msaa: opts.use_msaa,
enable_profiler: opts.webrender_stats,
});
(Some(webrender), Some(webrender_sender))
} else {
(None, None)
};
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let constellation_chan = create_constellation(opts.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
devtools_chan,
supports_clipboard,
webrender_api_sender.clone());
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) |
}
fn create_constellation(opts: opts::Opts,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender_api_sender: Option<webrender_traits::RenderApiSender>) -> Sender<ConstellationMsg> {
let bluetooth_thread: IpcSender<BluetoothMethodMsg> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(opts.user_agent.clone(),
devtools_chan.clone(),
time_profiler_chan.clone());
let image_cache_thread = new_image_cache_thread(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let constellation_chan =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
// Send the URL command to the constellation.
match opts.url {
Some(url) => {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
},
None => ()
};
constellation_chan
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
prefs::extend_prefs(unprivileged_content.prefs());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
script::init();
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
}
| {
self.compositor.title_for_main_frame()
} | identifier_body |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well maintains the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
#[cfg(not(target_os = "windows"))]
extern crate gaol;
#[macro_use]
extern crate gleam;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate style;
pub extern crate url;
pub extern crate util;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
extern crate webrender_traits;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use compositing::{CompositorProxy, IOCompositor};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use net::bluetooth_thread::BluetoothThreadFactory;
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use net_traits::bluetooth_thread::BluetoothMethodMsg;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::ConstellationMsg;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use util::resource_files::resources_dir_path;
use util::{opts, prefs};
pub use gleam::gl;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods + 'static> {
compositor: IOCompositor<Window>,
}
impl<Window> Browser<Window> where Window: WindowMethods + 'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
script::init();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let (webrender, webrender_api_sender) = if opts::get().use_webrender {
let mut resource_path = resources_dir_path();
resource_path.push("shaders");
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.scale_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let (webrender, webrender_sender) =
webrender::Renderer::new(webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_path: resource_path,
enable_aa: opts.enable_text_antialiasing,
enable_msaa: opts.use_msaa,
enable_profiler: opts.webrender_stats,
});
(Some(webrender), Some(webrender_sender))
} else {
(None, None)
};
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let constellation_chan = create_constellation(opts.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
devtools_chan,
supports_clipboard,
webrender_api_sender.clone());
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
}
fn create_constellation(opts: opts::Opts,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender_api_sender: Option<webrender_traits::RenderApiSender>) -> Sender<ConstellationMsg> {
let bluetooth_thread: IpcSender<BluetoothMethodMsg> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(opts.user_agent.clone(),
devtools_chan.clone(),
time_profiler_chan.clone());
let image_cache_thread = new_image_cache_thread(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let constellation_chan =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
// Send the URL command to the constellation.
match opts.url {
Some(url) => {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
},
None => ()
};
constellation_chan
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
prefs::extend_prefs(unprivileged_content.prefs());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
script::init();
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn | () {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
}
| create_sandbox | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a | //! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well maintains the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
#[cfg(not(target_os = "windows"))]
extern crate gaol;
#[macro_use]
extern crate gleam;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate style;
pub extern crate url;
pub extern crate util;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
extern crate webrender_traits;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use compositing::{CompositorProxy, IOCompositor};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use net::bluetooth_thread::BluetoothThreadFactory;
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use net_traits::bluetooth_thread::BluetoothMethodMsg;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::ConstellationMsg;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use util::resource_files::resources_dir_path;
use util::{opts, prefs};
pub use gleam::gl;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods + 'static> {
compositor: IOCompositor<Window>,
}
impl<Window> Browser<Window> where Window: WindowMethods + 'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
script::init();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let (webrender, webrender_api_sender) = if opts::get().use_webrender {
let mut resource_path = resources_dir_path();
resource_path.push("shaders");
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.scale_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let (webrender, webrender_sender) =
webrender::Renderer::new(webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_path: resource_path,
enable_aa: opts.enable_text_antialiasing,
enable_msaa: opts.use_msaa,
enable_profiler: opts.webrender_stats,
});
(Some(webrender), Some(webrender_sender))
} else {
(None, None)
};
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let constellation_chan = create_constellation(opts.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
devtools_chan,
supports_clipboard,
webrender_api_sender.clone());
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
}
fn create_constellation(opts: opts::Opts,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender_api_sender: Option<webrender_traits::RenderApiSender>) -> Sender<ConstellationMsg> {
let bluetooth_thread: IpcSender<BluetoothMethodMsg> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(opts.user_agent.clone(),
devtools_chan.clone(),
time_profiler_chan.clone());
let image_cache_thread = new_image_cache_thread(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let constellation_chan =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
// Send the URL command to the constellation.
match opts.url {
Some(url) => {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
},
None => ()
};
constellation_chan
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
prefs::extend_prefs(unprivileged_content.prefs());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
script::init();
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
} | //! `Constellation`, which does the heavy lifting of coordinating all | random_line_split |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well maintains the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
#[cfg(not(target_os = "windows"))]
extern crate gaol;
#[macro_use]
extern crate gleam;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate style;
pub extern crate url;
pub extern crate util;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
extern crate webrender_traits;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use compositing::{CompositorProxy, IOCompositor};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use net::bluetooth_thread::BluetoothThreadFactory;
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use net_traits::bluetooth_thread::BluetoothMethodMsg;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::ConstellationMsg;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use util::resource_files::resources_dir_path;
use util::{opts, prefs};
pub use gleam::gl;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods + 'static> {
compositor: IOCompositor<Window>,
}
impl<Window> Browser<Window> where Window: WindowMethods + 'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
script::init();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let (webrender, webrender_api_sender) = if opts::get().use_webrender {
let mut resource_path = resources_dir_path();
resource_path.push("shaders");
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.scale_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let (webrender, webrender_sender) =
webrender::Renderer::new(webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_path: resource_path,
enable_aa: opts.enable_text_antialiasing,
enable_msaa: opts.use_msaa,
enable_profiler: opts.webrender_stats,
});
(Some(webrender), Some(webrender_sender))
} else {
(None, None)
};
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let constellation_chan = create_constellation(opts.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
devtools_chan,
supports_clipboard,
webrender_api_sender.clone());
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port |
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
}
fn create_constellation(opts: opts::Opts,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender_api_sender: Option<webrender_traits::RenderApiSender>) -> Sender<ConstellationMsg> {
let bluetooth_thread: IpcSender<BluetoothMethodMsg> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(opts.user_agent.clone(),
devtools_chan.clone(),
time_profiler_chan.clone());
let image_cache_thread = new_image_cache_thread(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
webrender_api_sender.as_ref().map(|wr| wr.create_api()));
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let constellation_chan =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
// Send the URL command to the constellation.
match opts.url {
Some(url) => {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
},
None => ()
};
constellation_chan
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
prefs::extend_prefs(unprivileged_content.prefs());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
script::init();
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
}
| {
webdriver(port, constellation_chan.clone());
} | conditional_block |
size.rs | use std::{io, mem};
use super::cvt;
use super::libc::{c_ushort, ioctl, STDOUT_FILENO, TIOCGWINSZ};
#[repr(C)]
struct TermSize { | y: c_ushort,
}
/// Get the size of the terminal.
pub fn terminal_size() -> io::Result<(u16, u16)> {
unsafe {
let mut size: TermSize = mem::zeroed();
cvt(ioctl(STDOUT_FILENO, TIOCGWINSZ.into(), &mut size as *mut _))?;
Ok((size.col as u16, size.row as u16))
}
}
/// Get the size of the terminal, in pixels
pub fn terminal_size_pixels() -> io::Result<(u16, u16)> {
unsafe {
let mut size: TermSize = mem::zeroed();
cvt(ioctl(STDOUT_FILENO, TIOCGWINSZ.into(), &mut size as *mut _))?;
Ok((size.x as u16, size.y as u16))
}
} | row: c_ushort,
col: c_ushort,
x: c_ushort, | random_line_split |
size.rs | use std::{io, mem};
use super::cvt;
use super::libc::{c_ushort, ioctl, STDOUT_FILENO, TIOCGWINSZ};
#[repr(C)]
struct TermSize {
row: c_ushort,
col: c_ushort,
x: c_ushort,
y: c_ushort,
}
/// Get the size of the terminal.
pub fn terminal_size() -> io::Result<(u16, u16)> |
/// Get the size of the terminal, in pixels
pub fn terminal_size_pixels() -> io::Result<(u16, u16)> {
unsafe {
let mut size: TermSize = mem::zeroed();
cvt(ioctl(STDOUT_FILENO, TIOCGWINSZ.into(), &mut size as *mut _))?;
Ok((size.x as u16, size.y as u16))
}
}
| {
unsafe {
let mut size: TermSize = mem::zeroed();
cvt(ioctl(STDOUT_FILENO, TIOCGWINSZ.into(), &mut size as *mut _))?;
Ok((size.col as u16, size.row as u16))
}
} | identifier_body |
size.rs | use std::{io, mem};
use super::cvt;
use super::libc::{c_ushort, ioctl, STDOUT_FILENO, TIOCGWINSZ};
#[repr(C)]
struct | {
row: c_ushort,
col: c_ushort,
x: c_ushort,
y: c_ushort,
}
/// Get the size of the terminal.
pub fn terminal_size() -> io::Result<(u16, u16)> {
unsafe {
let mut size: TermSize = mem::zeroed();
cvt(ioctl(STDOUT_FILENO, TIOCGWINSZ.into(), &mut size as *mut _))?;
Ok((size.col as u16, size.row as u16))
}
}
/// Get the size of the terminal, in pixels
pub fn terminal_size_pixels() -> io::Result<(u16, u16)> {
unsafe {
let mut size: TermSize = mem::zeroed();
cvt(ioctl(STDOUT_FILENO, TIOCGWINSZ.into(), &mut size as *mut _))?;
Ok((size.x as u16, size.y as u16))
}
}
| TermSize | identifier_name |
smallintmap.rs | use std::collections::SmallIntMap;
struct Tenant<'a> {
name: &'a str,
phone: &'a str,
}
fn main() | apartments.pop(&1);
match apartments.find_mut(&3) {
Some(henrietta) => henrietta.name = "David and Henrietta Smith",
_ => println!("Oh no! Where did David and Henrietta go?"),
}
apartments.insert(0, Tenant {
name: "Phillip Davis",
phone: "5555-7869",
});
for (key, tenant) in apartments.iter() {
println!("{}: {} ({})", key, tenant.name, tenant.phone);
}
}
| {
// Start with 5 apartments
let mut apartments = SmallIntMap::with_capacity(5);
// The compiler infers 1 as uint
apartments.insert(1, Tenant {
name: "John Smith",
phone: "555-1234",
});
apartments.insert(3, Tenant {
name: "Henrietta George",
phone: "555-2314",
});
apartments.insert(5, Tenant {
name: "David Rogers",
phone: "555-5467",
});
| identifier_body |
smallintmap.rs | use std::collections::SmallIntMap;
struct Tenant<'a> {
name: &'a str,
phone: &'a str,
}
fn main() {
// Start with 5 apartments
let mut apartments = SmallIntMap::with_capacity(5);
// The compiler infers 1 as uint
apartments.insert(1, Tenant {
name: "John Smith",
phone: "555-1234",
});
apartments.insert(3, Tenant {
name: "Henrietta George",
phone: "555-2314", | name: "David Rogers",
phone: "555-5467",
});
apartments.pop(&1);
match apartments.find_mut(&3) {
Some(henrietta) => henrietta.name = "David and Henrietta Smith",
_ => println!("Oh no! Where did David and Henrietta go?"),
}
apartments.insert(0, Tenant {
name: "Phillip Davis",
phone: "5555-7869",
});
for (key, tenant) in apartments.iter() {
println!("{}: {} ({})", key, tenant.name, tenant.phone);
}
} | });
apartments.insert(5, Tenant { | random_line_split |
smallintmap.rs | use std::collections::SmallIntMap;
struct Tenant<'a> {
name: &'a str,
phone: &'a str,
}
fn | () {
// Start with 5 apartments
let mut apartments = SmallIntMap::with_capacity(5);
// The compiler infers 1 as uint
apartments.insert(1, Tenant {
name: "John Smith",
phone: "555-1234",
});
apartments.insert(3, Tenant {
name: "Henrietta George",
phone: "555-2314",
});
apartments.insert(5, Tenant {
name: "David Rogers",
phone: "555-5467",
});
apartments.pop(&1);
match apartments.find_mut(&3) {
Some(henrietta) => henrietta.name = "David and Henrietta Smith",
_ => println!("Oh no! Where did David and Henrietta go?"),
}
apartments.insert(0, Tenant {
name: "Phillip Davis",
phone: "5555-7869",
});
for (key, tenant) in apartments.iter() {
println!("{}: {} ({})", key, tenant.name, tenant.phone);
}
}
| main | identifier_name |
server.rs | use container::{Container, ControlDispatcher};
use config::Config;
use lssa::control::Control;
use lssa::manager::AppManager;
use futures;
use futures::Future;
use futures::Stream;
use futures::Sink;
//use futures::{StreamExt, FutureExt};
pub struct Server {
container: Container
}
impl Server {
pub fn new(config: Config) -> Server {
Server {
container: Container::new(config)
}
}
fn | (container: Container) -> futures::sync::mpsc::Sender<Control> {
let (tx, rx) = futures::sync::mpsc::channel(4096);
::std::thread::spawn(move || {
::tokio::executor::current_thread::block_on_all(
futures::future::ok(()).map(move |_| {
let mut manager = AppManager::new(container.clone());
load_apps_from_config(
&mut manager,
&container.config_state.read().unwrap().config
);
manager
}).map(move |mut manager| {
rx.for_each(move |c| {
manager.dispatch_control(c);
Ok(())
})
}).flatten().map_err(|_: ()| ())
).unwrap();
});
tx
}
pub fn run_apps(&self) -> impl Future<Item = (), Error = ()> {
let (tx, rx) = futures::sync::mpsc::channel::<Control>(4096);
self.container.set_control_dispatcher(ControlDispatcher::new(tx));
let container = self.container.clone();
let mut control_sender = Self::launch_manager(container);
futures::future::ok(()).then(move |_: Result<(), ()>| {
rx.for_each(move |c| {
control_sender.start_send(c).unwrap();
Ok(())
}).map(|_| ()).map_err(|_| ())
})
}
}
fn load_apps_from_config(manager: &mut AppManager, config: &Config) {
use std::fs::File;
use std::io::Read;
for (i, app) in config.applications.iter().enumerate() {
let mut code_file = match File::open(
&::std::path::Path::new(
&app.path
).join(&app.metadata.bin).to_str().unwrap()
) {
Ok(v) => v,
Err(e) => {
dwarning!(
logger!("load_apps_from_config"),
"Unable to load app `{}`: {:?}",
app.name,
e
);
continue;
}
};
let mut code: Vec<u8> = Vec::new();
code_file.read_to_end(&mut code).unwrap();
manager.load(&code, i, app.clone());
}
}
| launch_manager | identifier_name |
server.rs | use container::{Container, ControlDispatcher};
use config::Config;
use lssa::control::Control;
use lssa::manager::AppManager;
use futures;
use futures::Future;
use futures::Stream;
use futures::Sink;
//use futures::{StreamExt, FutureExt};
pub struct Server {
container: Container
}
impl Server {
pub fn new(config: Config) -> Server {
Server {
container: Container::new(config)
}
}
fn launch_manager(container: Container) -> futures::sync::mpsc::Sender<Control> {
let (tx, rx) = futures::sync::mpsc::channel(4096);
::std::thread::spawn(move || {
::tokio::executor::current_thread::block_on_all(
futures::future::ok(()).map(move |_| {
let mut manager = AppManager::new(container.clone());
load_apps_from_config(
&mut manager,
&container.config_state.read().unwrap().config
);
manager
}).map(move |mut manager| {
rx.for_each(move |c| {
manager.dispatch_control(c);
Ok(())
})
}).flatten().map_err(|_: ()| ())
).unwrap();
}); | tx
}
pub fn run_apps(&self) -> impl Future<Item = (), Error = ()> {
let (tx, rx) = futures::sync::mpsc::channel::<Control>(4096);
self.container.set_control_dispatcher(ControlDispatcher::new(tx));
let container = self.container.clone();
let mut control_sender = Self::launch_manager(container);
futures::future::ok(()).then(move |_: Result<(), ()>| {
rx.for_each(move |c| {
control_sender.start_send(c).unwrap();
Ok(())
}).map(|_| ()).map_err(|_| ())
})
}
}
fn load_apps_from_config(manager: &mut AppManager, config: &Config) {
use std::fs::File;
use std::io::Read;
for (i, app) in config.applications.iter().enumerate() {
let mut code_file = match File::open(
&::std::path::Path::new(
&app.path
).join(&app.metadata.bin).to_str().unwrap()
) {
Ok(v) => v,
Err(e) => {
dwarning!(
logger!("load_apps_from_config"),
"Unable to load app `{}`: {:?}",
app.name,
e
);
continue;
}
};
let mut code: Vec<u8> = Vec::new();
code_file.read_to_end(&mut code).unwrap();
manager.load(&code, i, app.clone());
}
} | random_line_split |
|
server.rs | use container::{Container, ControlDispatcher};
use config::Config;
use lssa::control::Control;
use lssa::manager::AppManager;
use futures;
use futures::Future;
use futures::Stream;
use futures::Sink;
//use futures::{StreamExt, FutureExt};
pub struct Server {
container: Container
}
impl Server {
pub fn new(config: Config) -> Server |
fn launch_manager(container: Container) -> futures::sync::mpsc::Sender<Control> {
let (tx, rx) = futures::sync::mpsc::channel(4096);
::std::thread::spawn(move || {
::tokio::executor::current_thread::block_on_all(
futures::future::ok(()).map(move |_| {
let mut manager = AppManager::new(container.clone());
load_apps_from_config(
&mut manager,
&container.config_state.read().unwrap().config
);
manager
}).map(move |mut manager| {
rx.for_each(move |c| {
manager.dispatch_control(c);
Ok(())
})
}).flatten().map_err(|_: ()| ())
).unwrap();
});
tx
}
pub fn run_apps(&self) -> impl Future<Item = (), Error = ()> {
let (tx, rx) = futures::sync::mpsc::channel::<Control>(4096);
self.container.set_control_dispatcher(ControlDispatcher::new(tx));
let container = self.container.clone();
let mut control_sender = Self::launch_manager(container);
futures::future::ok(()).then(move |_: Result<(), ()>| {
rx.for_each(move |c| {
control_sender.start_send(c).unwrap();
Ok(())
}).map(|_| ()).map_err(|_| ())
})
}
}
fn load_apps_from_config(manager: &mut AppManager, config: &Config) {
use std::fs::File;
use std::io::Read;
for (i, app) in config.applications.iter().enumerate() {
let mut code_file = match File::open(
&::std::path::Path::new(
&app.path
).join(&app.metadata.bin).to_str().unwrap()
) {
Ok(v) => v,
Err(e) => {
dwarning!(
logger!("load_apps_from_config"),
"Unable to load app `{}`: {:?}",
app.name,
e
);
continue;
}
};
let mut code: Vec<u8> = Vec::new();
code_file.read_to_end(&mut code).unwrap();
manager.load(&code, i, app.clone());
}
}
| {
Server {
container: Container::new(config)
}
} | identifier_body |
surface_state.rs | //! TODO Documentation
use std::marker::PhantomData;
use crate::libc::c_int;
use wlroots_sys::{wl_output_transform, wl_resource, wlr_surface_state};
use crate::{render::PixmanRegion, surface::Surface};
#[derive(Debug)]
#[repr(u32)]
/// Represents a change in the pending state.
///
/// When a particular bit is set, it means the field corresponding to it
/// will be updated for the current state on the next commit.
///
/// # Pending vs Current state
/// When this is set on the pending state, it means this field will be updated
/// on the next commit.
///
/// When it is set on the current state, it indicates what fields have changed
/// since the last commit.
pub enum InvalidState {
Buffer = 1,
SurfaceDamage = 2,
BufferDamage = 4,
OpaqueRegion = 8,
InputRegion = 16,
Transform = 32,
Scale = 64,
SubsurfacePosition = 128,
FrameCallbackList = 256
}
/// Surface state as reported by wlroots.
#[derive(Debug)]
pub struct State<'surface> {
state: wlr_surface_state,
phantom: PhantomData<&'surface Surface>
}
impl<'surface> State<'surface> {
/// Create a new subsurface from the given surface.
/// | State {
state,
phantom: PhantomData
}
}
/// Gets the state of the sub surface.
///
/// # Panics
/// If the invalid state is in an undefined state, this will panic.
pub fn committed(&self) -> InvalidState {
use self::InvalidState::*;
unsafe {
match self.state.committed {
1 => Buffer,
2 => SurfaceDamage,
4 => BufferDamage,
8 => OpaqueRegion,
16 => InputRegion,
32 => Transform,
64 => Scale,
128 => SubsurfacePosition,
256 => FrameCallbackList,
invalid => {
wlr_log!(WLR_ERROR, "Invalid invalid state {}", invalid);
panic!("Invalid invalid state in wlr_surface_state")
}
}
}
}
/// Get the position of the surface relative to the previous position.
///
/// Return value is in (dx, dy) format.
pub fn position(&self) -> (i32, i32) {
unsafe { (self.state.dx, self.state.dy) }
}
/// Get the size of the sub surface.
///
/// Return value is in (width, height) format.
pub fn size(&self) -> (c_int, c_int) {
unsafe { (self.state.width, self.state.height) }
}
/// Get the size of the buffer.
///
/// Return value is iw (width, height) format.
pub fn buffer_size(&self) -> (c_int, c_int) {
unsafe { (self.state.buffer_width, self.state.buffer_height) }
}
/// Get the scale applied to the surface.
pub fn scale(&self) -> i32 {
unsafe { self.state.scale }
}
/// Get the output transform applied to the surface.
pub fn transform(&self) -> wl_output_transform {
unsafe { self.state.transform }
}
/// Gets the buffer of the surface.
pub unsafe fn buffer(&self) -> *mut wl_resource {
self.state.buffer_resource
}
pub unsafe fn surface_damage(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.surface_damage
}
}
pub unsafe fn buffer_damage(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.buffer_damage
}
}
pub unsafe fn opaque(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.opaque
}
}
pub unsafe fn input(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.input
}
}
} | /// # Safety
/// Since we rely on the surface providing a valid surface state,
/// this function is marked unsafe.
pub(crate) unsafe fn new(state: wlr_surface_state) -> State<'surface> { | random_line_split |
surface_state.rs | //! TODO Documentation
use std::marker::PhantomData;
use crate::libc::c_int;
use wlroots_sys::{wl_output_transform, wl_resource, wlr_surface_state};
use crate::{render::PixmanRegion, surface::Surface};
#[derive(Debug)]
#[repr(u32)]
/// Represents a change in the pending state.
///
/// When a particular bit is set, it means the field corresponding to it
/// will be updated for the current state on the next commit.
///
/// # Pending vs Current state
/// When this is set on the pending state, it means this field will be updated
/// on the next commit.
///
/// When it is set on the current state, it indicates what fields have changed
/// since the last commit.
pub enum InvalidState {
Buffer = 1,
SurfaceDamage = 2,
BufferDamage = 4,
OpaqueRegion = 8,
InputRegion = 16,
Transform = 32,
Scale = 64,
SubsurfacePosition = 128,
FrameCallbackList = 256
}
/// Surface state as reported by wlroots.
#[derive(Debug)]
pub struct State<'surface> {
state: wlr_surface_state,
phantom: PhantomData<&'surface Surface>
}
impl<'surface> State<'surface> {
/// Create a new subsurface from the given surface.
///
/// # Safety
/// Since we rely on the surface providing a valid surface state,
/// this function is marked unsafe.
pub(crate) unsafe fn new(state: wlr_surface_state) -> State<'surface> {
State {
state,
phantom: PhantomData
}
}
/// Gets the state of the sub surface.
///
/// # Panics
/// If the invalid state is in an undefined state, this will panic.
pub fn committed(&self) -> InvalidState {
use self::InvalidState::*;
unsafe {
match self.state.committed {
1 => Buffer,
2 => SurfaceDamage,
4 => BufferDamage,
8 => OpaqueRegion,
16 => InputRegion,
32 => Transform,
64 => Scale,
128 => SubsurfacePosition,
256 => FrameCallbackList,
invalid => |
}
}
}
/// Get the position of the surface relative to the previous position.
///
/// Return value is in (dx, dy) format.
pub fn position(&self) -> (i32, i32) {
unsafe { (self.state.dx, self.state.dy) }
}
/// Get the size of the sub surface.
///
/// Return value is in (width, height) format.
pub fn size(&self) -> (c_int, c_int) {
unsafe { (self.state.width, self.state.height) }
}
/// Get the size of the buffer.
///
/// Return value is iw (width, height) format.
pub fn buffer_size(&self) -> (c_int, c_int) {
unsafe { (self.state.buffer_width, self.state.buffer_height) }
}
/// Get the scale applied to the surface.
pub fn scale(&self) -> i32 {
unsafe { self.state.scale }
}
/// Get the output transform applied to the surface.
pub fn transform(&self) -> wl_output_transform {
unsafe { self.state.transform }
}
/// Gets the buffer of the surface.
pub unsafe fn buffer(&self) -> *mut wl_resource {
self.state.buffer_resource
}
pub unsafe fn surface_damage(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.surface_damage
}
}
pub unsafe fn buffer_damage(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.buffer_damage
}
}
pub unsafe fn opaque(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.opaque
}
}
pub unsafe fn input(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.input
}
}
}
| {
wlr_log!(WLR_ERROR, "Invalid invalid state {}", invalid);
panic!("Invalid invalid state in wlr_surface_state")
} | conditional_block |
surface_state.rs | //! TODO Documentation
use std::marker::PhantomData;
use crate::libc::c_int;
use wlroots_sys::{wl_output_transform, wl_resource, wlr_surface_state};
use crate::{render::PixmanRegion, surface::Surface};
#[derive(Debug)]
#[repr(u32)]
/// Represents a change in the pending state.
///
/// When a particular bit is set, it means the field corresponding to it
/// will be updated for the current state on the next commit.
///
/// # Pending vs Current state
/// When this is set on the pending state, it means this field will be updated
/// on the next commit.
///
/// When it is set on the current state, it indicates what fields have changed
/// since the last commit.
pub enum InvalidState {
Buffer = 1,
SurfaceDamage = 2,
BufferDamage = 4,
OpaqueRegion = 8,
InputRegion = 16,
Transform = 32,
Scale = 64,
SubsurfacePosition = 128,
FrameCallbackList = 256
}
/// Surface state as reported by wlroots.
#[derive(Debug)]
pub struct State<'surface> {
state: wlr_surface_state,
phantom: PhantomData<&'surface Surface>
}
impl<'surface> State<'surface> {
/// Create a new subsurface from the given surface.
///
/// # Safety
/// Since we rely on the surface providing a valid surface state,
/// this function is marked unsafe.
pub(crate) unsafe fn new(state: wlr_surface_state) -> State<'surface> {
State {
state,
phantom: PhantomData
}
}
/// Gets the state of the sub surface.
///
/// # Panics
/// If the invalid state is in an undefined state, this will panic.
pub fn committed(&self) -> InvalidState {
use self::InvalidState::*;
unsafe {
match self.state.committed {
1 => Buffer,
2 => SurfaceDamage,
4 => BufferDamage,
8 => OpaqueRegion,
16 => InputRegion,
32 => Transform,
64 => Scale,
128 => SubsurfacePosition,
256 => FrameCallbackList,
invalid => {
wlr_log!(WLR_ERROR, "Invalid invalid state {}", invalid);
panic!("Invalid invalid state in wlr_surface_state")
}
}
}
}
/// Get the position of the surface relative to the previous position.
///
/// Return value is in (dx, dy) format.
pub fn position(&self) -> (i32, i32) {
unsafe { (self.state.dx, self.state.dy) }
}
/// Get the size of the sub surface.
///
/// Return value is in (width, height) format.
pub fn size(&self) -> (c_int, c_int) {
unsafe { (self.state.width, self.state.height) }
}
/// Get the size of the buffer.
///
/// Return value is iw (width, height) format.
pub fn buffer_size(&self) -> (c_int, c_int) {
unsafe { (self.state.buffer_width, self.state.buffer_height) }
}
/// Get the scale applied to the surface.
pub fn | (&self) -> i32 {
unsafe { self.state.scale }
}
/// Get the output transform applied to the surface.
pub fn transform(&self) -> wl_output_transform {
unsafe { self.state.transform }
}
/// Gets the buffer of the surface.
pub unsafe fn buffer(&self) -> *mut wl_resource {
self.state.buffer_resource
}
pub unsafe fn surface_damage(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.surface_damage
}
}
pub unsafe fn buffer_damage(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.buffer_damage
}
}
pub unsafe fn opaque(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.opaque
}
}
pub unsafe fn input(&self) -> PixmanRegion {
PixmanRegion {
region: self.state.input
}
}
}
| scale | identifier_name |
build.rs | use std::env;
use std::process::Command;
use std::str::{self, FromStr};
// The rustc-cfg strings below are *not* public API. Please let us know by
// opening a GitHub issue if your build environment requires some way to enable
// these cfgs other than by executing our build script.
fn main() {
let minor = match rustc_minor_version() {
Some(minor) => minor,
None => return,
};
let target = env::var("TARGET").unwrap();
let emscripten = target == "asmjs-unknown-emscripten" || target == "wasm32-unknown-emscripten";
// CString::into_boxed_c_str stabilized in Rust 1.20:
// https://doc.rust-lang.org/std/ffi/struct.CString.html#method.into_boxed_c_str
if minor >= 20 {
println!("cargo:rustc-cfg=de_boxed_c_str");
}
// From<Box<T>> for Rc<T> / Arc<T> stabilized in Rust 1.21:
// https://doc.rust-lang.org/std/rc/struct.Rc.html#impl-From<Box<T>>
// https://doc.rust-lang.org/std/sync/struct.Arc.html#impl-From<Box<T>>
if minor >= 21 {
println!("cargo:rustc-cfg=de_rc_dst");
}
// Duration available in core since Rust 1.25:
// https://blog.rust-lang.org/2018/03/29/Rust-1.25.html#library-stabilizations
if minor >= 25 {
println!("cargo:rustc-cfg=core_duration");
}
// 128-bit integers stabilized in Rust 1.26:
// https://blog.rust-lang.org/2018/05/10/Rust-1.26.html
//
// Disabled on Emscripten targets as Emscripten doesn't
// currently support integers larger than 64 bits.
if minor >= 26 && !emscripten {
println!("cargo:rustc-cfg=integer128");
}
// Inclusive ranges methods stabilized in Rust 1.27:
// https://github.com/rust-lang/rust/pull/50758
if minor >= 27 {
println!("cargo:rustc-cfg=range_inclusive");
}
// Non-zero integers stabilized in Rust 1.28:
// https://github.com/rust-lang/rust/pull/50808
if minor >= 28 {
println!("cargo:rustc-cfg=num_nonzero");
}
}
fn rustc_minor_version() -> Option<u32> {
let rustc = match env::var_os("RUSTC") {
Some(rustc) => rustc,
None => return None,
};
let output = match Command::new(rustc).arg("--version").output() {
Ok(output) => output,
Err(_) => return None, | };
let version = match str::from_utf8(&output.stdout) {
Ok(version) => version,
Err(_) => return None,
};
let mut pieces = version.split('.');
if pieces.next() != Some("rustc 1") {
return None;
}
let next = match pieces.next() {
Some(next) => next,
None => return None,
};
u32::from_str(next).ok()
} | random_line_split |
|
build.rs | use std::env;
use std::process::Command;
use std::str::{self, FromStr};
// The rustc-cfg strings below are *not* public API. Please let us know by
// opening a GitHub issue if your build environment requires some way to enable
// these cfgs other than by executing our build script.
fn | () {
let minor = match rustc_minor_version() {
Some(minor) => minor,
None => return,
};
let target = env::var("TARGET").unwrap();
let emscripten = target == "asmjs-unknown-emscripten" || target == "wasm32-unknown-emscripten";
// CString::into_boxed_c_str stabilized in Rust 1.20:
// https://doc.rust-lang.org/std/ffi/struct.CString.html#method.into_boxed_c_str
if minor >= 20 {
println!("cargo:rustc-cfg=de_boxed_c_str");
}
// From<Box<T>> for Rc<T> / Arc<T> stabilized in Rust 1.21:
// https://doc.rust-lang.org/std/rc/struct.Rc.html#impl-From<Box<T>>
// https://doc.rust-lang.org/std/sync/struct.Arc.html#impl-From<Box<T>>
if minor >= 21 {
println!("cargo:rustc-cfg=de_rc_dst");
}
// Duration available in core since Rust 1.25:
// https://blog.rust-lang.org/2018/03/29/Rust-1.25.html#library-stabilizations
if minor >= 25 {
println!("cargo:rustc-cfg=core_duration");
}
// 128-bit integers stabilized in Rust 1.26:
// https://blog.rust-lang.org/2018/05/10/Rust-1.26.html
//
// Disabled on Emscripten targets as Emscripten doesn't
// currently support integers larger than 64 bits.
if minor >= 26 && !emscripten {
println!("cargo:rustc-cfg=integer128");
}
// Inclusive ranges methods stabilized in Rust 1.27:
// https://github.com/rust-lang/rust/pull/50758
if minor >= 27 {
println!("cargo:rustc-cfg=range_inclusive");
}
// Non-zero integers stabilized in Rust 1.28:
// https://github.com/rust-lang/rust/pull/50808
if minor >= 28 {
println!("cargo:rustc-cfg=num_nonzero");
}
}
fn rustc_minor_version() -> Option<u32> {
let rustc = match env::var_os("RUSTC") {
Some(rustc) => rustc,
None => return None,
};
let output = match Command::new(rustc).arg("--version").output() {
Ok(output) => output,
Err(_) => return None,
};
let version = match str::from_utf8(&output.stdout) {
Ok(version) => version,
Err(_) => return None,
};
let mut pieces = version.split('.');
if pieces.next() != Some("rustc 1") {
return None;
}
let next = match pieces.next() {
Some(next) => next,
None => return None,
};
u32::from_str(next).ok()
}
| main | identifier_name |
build.rs | use std::env;
use std::process::Command;
use std::str::{self, FromStr};
// The rustc-cfg strings below are *not* public API. Please let us know by
// opening a GitHub issue if your build environment requires some way to enable
// these cfgs other than by executing our build script.
fn main() | }
// Duration available in core since Rust 1.25:
// https://blog.rust-lang.org/2018/03/29/Rust-1.25.html#library-stabilizations
if minor >= 25 {
println!("cargo:rustc-cfg=core_duration");
}
// 128-bit integers stabilized in Rust 1.26:
// https://blog.rust-lang.org/2018/05/10/Rust-1.26.html
//
// Disabled on Emscripten targets as Emscripten doesn't
// currently support integers larger than 64 bits.
if minor >= 26 && !emscripten {
println!("cargo:rustc-cfg=integer128");
}
// Inclusive ranges methods stabilized in Rust 1.27:
// https://github.com/rust-lang/rust/pull/50758
if minor >= 27 {
println!("cargo:rustc-cfg=range_inclusive");
}
// Non-zero integers stabilized in Rust 1.28:
// https://github.com/rust-lang/rust/pull/50808
if minor >= 28 {
println!("cargo:rustc-cfg=num_nonzero");
}
}
fn rustc_minor_version() -> Option<u32> {
let rustc = match env::var_os("RUSTC") {
Some(rustc) => rustc,
None => return None,
};
let output = match Command::new(rustc).arg("--version").output() {
Ok(output) => output,
Err(_) => return None,
};
let version = match str::from_utf8(&output.stdout) {
Ok(version) => version,
Err(_) => return None,
};
let mut pieces = version.split('.');
if pieces.next() != Some("rustc 1") {
return None;
}
let next = match pieces.next() {
Some(next) => next,
None => return None,
};
u32::from_str(next).ok()
}
| {
let minor = match rustc_minor_version() {
Some(minor) => minor,
None => return,
};
let target = env::var("TARGET").unwrap();
let emscripten = target == "asmjs-unknown-emscripten" || target == "wasm32-unknown-emscripten";
// CString::into_boxed_c_str stabilized in Rust 1.20:
// https://doc.rust-lang.org/std/ffi/struct.CString.html#method.into_boxed_c_str
if minor >= 20 {
println!("cargo:rustc-cfg=de_boxed_c_str");
}
// From<Box<T>> for Rc<T> / Arc<T> stabilized in Rust 1.21:
// https://doc.rust-lang.org/std/rc/struct.Rc.html#impl-From<Box<T>>
// https://doc.rust-lang.org/std/sync/struct.Arc.html#impl-From<Box<T>>
if minor >= 21 {
println!("cargo:rustc-cfg=de_rc_dst"); | identifier_body |
msg_pong.rs | use std;
use ::serialize::{self, Serializable};
use super::PingMessage;
use super::BIP0031_VERSION;
#[derive(Debug,Default,Clone)]
pub struct PongMessage
{
pub nonce: u64,
}
impl PongMessage {
pub fn new(ping:&PingMessage) -> PongMessage {
PongMessage{ nonce: ping.nonce }
}
}
impl super::Message for PongMessage {
fn get_command(&self) -> [u8; super::message_header::COMMAND_SIZE] {
super::message_header::COMMAND_PONG
}
}
impl std::fmt::Display for PongMessage {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Pong(nonce={})", self.nonce)
}
}
impl Serializable for PongMessage {
fn get_serialize_size(&self, ser:&serialize::SerializeParam) -> usize {
if BIP0031_VERSION < ser.version {
self.nonce.get_serialize_size(ser)
} else {
0usize
}
}
fn serialize(&self, io:&mut std::io::Write, ser:&serialize::SerializeParam) -> serialize::Result |
fn deserialize(&mut self, io:&mut std::io::Read, ser:&serialize::SerializeParam) -> serialize::Result {
if BIP0031_VERSION < ser.version {
self.nonce.deserialize(io, ser)
} else {
Ok(0usize)
}
}
}
| {
if BIP0031_VERSION < ser.version {
self.nonce.serialize(io, ser)
} else {
Ok(0usize)
}
} | identifier_body |