file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
lib.rs
|
#[macro_use]
extern crate lazy_static;
extern crate rustc_serialize;
use rustc_serialize::json::Json;
use std::error::Error;
use std::path::Path;
use std::fs::File;
use std::io::Read;
pub mod common;
pub mod install;
pub mod scanner;
pub mod connection;
pub mod message;
pub mod runner;
pub fn plugin_json() -> Json {
let file_path = Path::new("rust.json");
let mut file = match File::open(&file_path) {
Err(why) => panic!("Couldn't open plugin meta file {}: {}", file_path.display(), Error::description(&why)),
Ok(file) => file,
};
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => panic!("Couldn't read plugin meta file {}: {}", file_path.display(), Error::description(&why)),
Ok(content) => content,
};
match Json::from_str(&s) {
Err(why) => panic!("Couldn't parse plugin JSON: {}", Error::description(&why)),
Ok(jsoncontent) => jsoncontent,
}
}
pub fn version() -> String
|
{
let json = plugin_json();
let ver = json.find_path(&["version"]).unwrap();
ver.to_string()
}
|
identifier_body
|
|
lib.rs
|
#[macro_use]
extern crate lazy_static;
extern crate rustc_serialize;
use rustc_serialize::json::Json;
use std::error::Error;
use std::path::Path;
use std::fs::File;
use std::io::Read;
pub mod common;
pub mod install;
pub mod scanner;
pub mod connection;
pub mod message;
pub mod runner;
pub fn plugin_json() -> Json {
let file_path = Path::new("rust.json");
let mut file = match File::open(&file_path) {
Err(why) => panic!("Couldn't open plugin meta file {}: {}", file_path.display(), Error::description(&why)),
Ok(file) => file,
};
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => panic!("Couldn't read plugin meta file {}: {}", file_path.display(), Error::description(&why)),
Ok(content) => content,
};
match Json::from_str(&s) {
Err(why) => panic!("Couldn't parse plugin JSON: {}", Error::description(&why)),
Ok(jsoncontent) => jsoncontent,
}
}
pub fn
|
() -> String {
let json = plugin_json();
let ver = json.find_path(&["version"]).unwrap();
ver.to_string()
}
|
version
|
identifier_name
|
lib.rs
|
#[macro_use]
extern crate lazy_static;
extern crate rustc_serialize;
use rustc_serialize::json::Json;
use std::error::Error;
use std::path::Path;
use std::fs::File;
use std::io::Read;
pub mod common;
pub mod install;
pub mod scanner;
pub mod connection;
pub mod message;
pub mod runner;
pub fn plugin_json() -> Json {
let file_path = Path::new("rust.json");
let mut file = match File::open(&file_path) {
Err(why) => panic!("Couldn't open plugin meta file {}: {}", file_path.display(), Error::description(&why)),
Ok(file) => file,
};
let mut s = String::new();
match file.read_to_string(&mut s) {
|
match Json::from_str(&s) {
Err(why) => panic!("Couldn't parse plugin JSON: {}", Error::description(&why)),
Ok(jsoncontent) => jsoncontent,
}
}
pub fn version() -> String {
let json = plugin_json();
let ver = json.find_path(&["version"]).unwrap();
ver.to_string()
}
|
Err(why) => panic!("Couldn't read plugin meta file {}: {}", file_path.display(), Error::description(&why)),
Ok(content) => content,
};
|
random_line_split
|
twitter_eggmode.rs
|
extern crate egg_mode;
extern crate tokio;
use super::notifier::Error;
use super::NotifierStrategy;
use super::Message;
use self::tokio::runtime::current_thread::block_on_all;
use std::string::ToString;
pub struct TwitterEggMode {
token: egg_mode::Token,
}
impl TwitterEggMode {
pub fn new(consumer_key: &str, consumer_secret: &str, access_key: &str, access_secret: &str) -> Self {
// create twitter client
let consumer = egg_mode::KeyPair::new(consumer_key.to_owned(), consumer_secret.to_owned());
let access = egg_mode::KeyPair::new(access_key.to_owned(), access_secret.to_owned());
let token = egg_mode::Token::Access { consumer, access };
Self { token }
}
}
impl NotifierStrategy for TwitterEggMode {
fn notify(&self, message: &Message) -> Result<(), Error> {
let truncated = message.truncate(140);
block_on_all(
egg_mode::tweet::DraftTweet::new(truncated.body())
.send(&self.token)
).map_err(|e| Error::FailedToPostMessage(e.to_string()))?;
|
}
}
|
Ok(())
|
random_line_split
|
twitter_eggmode.rs
|
extern crate egg_mode;
extern crate tokio;
use super::notifier::Error;
use super::NotifierStrategy;
use super::Message;
use self::tokio::runtime::current_thread::block_on_all;
use std::string::ToString;
pub struct TwitterEggMode {
token: egg_mode::Token,
}
impl TwitterEggMode {
pub fn new(consumer_key: &str, consumer_secret: &str, access_key: &str, access_secret: &str) -> Self {
// create twitter client
let consumer = egg_mode::KeyPair::new(consumer_key.to_owned(), consumer_secret.to_owned());
let access = egg_mode::KeyPair::new(access_key.to_owned(), access_secret.to_owned());
let token = egg_mode::Token::Access { consumer, access };
Self { token }
}
}
impl NotifierStrategy for TwitterEggMode {
fn notify(&self, message: &Message) -> Result<(), Error>
|
}
|
{
let truncated = message.truncate(140);
block_on_all(
egg_mode::tweet::DraftTweet::new(truncated.body())
.send(&self.token)
).map_err(|e| Error::FailedToPostMessage(e.to_string()))?;
Ok(())
}
|
identifier_body
|
twitter_eggmode.rs
|
extern crate egg_mode;
extern crate tokio;
use super::notifier::Error;
use super::NotifierStrategy;
use super::Message;
use self::tokio::runtime::current_thread::block_on_all;
use std::string::ToString;
pub struct TwitterEggMode {
token: egg_mode::Token,
}
impl TwitterEggMode {
pub fn
|
(consumer_key: &str, consumer_secret: &str, access_key: &str, access_secret: &str) -> Self {
// create twitter client
let consumer = egg_mode::KeyPair::new(consumer_key.to_owned(), consumer_secret.to_owned());
let access = egg_mode::KeyPair::new(access_key.to_owned(), access_secret.to_owned());
let token = egg_mode::Token::Access { consumer, access };
Self { token }
}
}
impl NotifierStrategy for TwitterEggMode {
fn notify(&self, message: &Message) -> Result<(), Error> {
let truncated = message.truncate(140);
block_on_all(
egg_mode::tweet::DraftTweet::new(truncated.body())
.send(&self.token)
).map_err(|e| Error::FailedToPostMessage(e.to_string()))?;
Ok(())
}
}
|
new
|
identifier_name
|
dict.rs
|
use std::rc::Rc;
/// Структура описывающая граммему.
///
/// Список всех граммем можно [посмотреть](http://opencorpora.org/dict.php?act=gram)
/// на сайте [opencorpora.org](http://opencorpora.org).
#[derive(Clone, Default, Debug)]
pub struct Grammeme {
/// Имя родительской граммемы (значение `None` индицирует, что это граммема верхнего уровня)
pub parent: Option<String>,
/// Имя граммемы на латинице
pub name: String,
/// Имя граммемы на кириллице
pub alias: String,
/// Подробное описание граммемы
pub description: String,
}
/// Тип ограничения на использование граммемы.
#[derive(Clone, Copy, Debug)]
pub enum RestrictionKind {
/// Необязательный
Maybe,
/// Обязательный
Obligatory,
/// Запрещающий
Forbidden,
}
/// Область ограничения на использование граммемы.
#[derive(Clone, Copy, Debug)]
pub enum RestrictionScope {
/// Лексема
Lemma,
/// Словоформа лексемы
Form,
}
/// Правило ограничивающие применение граммемы.
///
/// Более подробное описание [приведено](http://opencorpora.org/dict.php?act=gram_restr)
/// на сайте [opencorpora.org](http://opencorpora.org).
#[derive(Clone, Debug)]
pub struct Restriction {
/// Тип ограничения (см. [документацию](enum.RestrictionKind.html) типа `RestrictionKind`)
pub kind: RestrictionKind,
/// Приоритет (?)
pub auto: usize,
/// Область применения слева
pub left_scope: RestrictionScope,
/// Граммема слева.
/// Для некоторых правил значение справа может отсутствовать.
pub left_grammeme: Option<Rc<Grammeme>>,
/// Область применения справа
pub right_scope: RestrictionScope,
/// Граммема справа.
/// Для некоторых правил значение справа может отсутствовать.
pub right_grammeme: Option<Rc<Grammeme>>,
}
impl Default for Restriction {
fn default() -> Self {
Restriction{
kind: RestrictionKind::Maybe,
auto: 0,
left_scope: RestrictionScope::Lemma,
left_grammeme: None,
right_scope: RestrictionScope::Lemma,
right_grammeme: None,
}
}
}
/// Структура словоформы лексемы.
#[derive(Clone, Default, Debug)]
pub struct Form {
/// Текстовое представление словоформы
pub word: String,
/// Множество граммем описывающих словоформу
pub grammemes: Vec<Rc<Grammeme>>,
}
/// Структура описывающая лексему.
#[derive(Clone, Default, Debug)]
pub struct Lemma {
/// Числовой идентификатор лексемы
pub id: usize,
/// Номер ревизии
pub revision: usize,
/// Текстовое представление исходный словоформы лексемы
pub word: String,
/// Множество граммем описывающих лексему
pub grammemes: Vec<Rc<Grammeme>>,
/// Множество словоформ входящих в данную лексему
pub forms: Vec<Form>,
}
/// Тип связи между лексемами.
#[derive(Clone, Default, Debug)]
pub struct LinkKind {
/// Числовой идентификатор типа связи.
/// Используется в типе `Link`.
pub id: usize,
/// Имя типа связи
pub name: String,
}
/// Структура хранящая связь между двумя лексемами.
#[derive(Clone, Default, Debug)]
pub struct Link {
/// Числовой идентификатор связи
pub id: usize,
/// Лексема с исходной стороны связи
pub from: Rc<Lemma>,
|
pub to: Rc<Lemma>,
/// Типа связи
pub kind: Rc<LinkKind>,
}
/// Структура содержащая данные словаря.
#[derive(Default, Debug)]
pub struct Dict {
/// Версия словаря
pub version: String,
/// Номер ревизии
pub revision: usize,
/// Множество граммем
pub grammemes: Vec<Rc<Grammeme>>,
/// Множество правил-ограничений на использование граммем
pub restrictions: Vec<Restriction>,
/// Массив лексем
pub lemmata: Vec<Rc<Lemma>>,
/// Множество типов связей между лексемами
pub link_kinds: Vec<Rc<LinkKind>>,
/// Множество связей между лексемами
pub links: Vec<Link>,
}
|
/// Лексема с конечной стороны связи
|
random_line_split
|
dict.rs
|
use std::rc::Rc;
/// Структура описывающая граммему.
///
/// Список всех граммем можно [посмотреть](http://opencorpora.org/dict.php?act=gram)
/// на сайте [opencorpora.org](http://opencorpora.org).
#[derive(Clone, Default, Debug)]
pub struct Grammeme {
/// Имя родительской граммемы (значение `None` индиц
|
то это граммема верхнего уровня)
pub parent: Option<String>,
/// Имя граммемы на латинице
pub name: String,
/// Имя граммемы на кириллице
pub alias: String,
/// Подробное описание граммемы
pub description: String,
}
/// Тип ограничения на использование граммемы.
#[derive(Clone, Copy, Debug)]
pub enum RestrictionKind {
/// Необязательный
Maybe,
/// Обязательный
Obligatory,
/// Запрещающий
Forbidden,
}
/// Область ограничения на использование граммемы.
#[derive(Clone, Copy, Debug)]
pub enum RestrictionScope {
/// Лексема
Lemma,
/// Словоформа лексемы
Form,
}
/// Правило ограничивающие применение граммемы.
///
/// Более подробное описание [приведено](http://opencorpora.org/dict.php?act=gram_restr)
/// на сайте [opencorpora.org](http://opencorpora.org).
#[derive(Clone, Debug)]
pub struct Restriction {
/// Тип ограничения (см. [документацию](enum.RestrictionKind.html) типа `RestrictionKind`)
pub kind: RestrictionKind,
/// Приоритет (?)
pub auto: usize,
/// Область применения слева
pub left_scope: RestrictionScope,
/// Граммема слева.
/// Для некоторых правил значение справа может отсутствовать.
pub left_grammeme: Option<Rc<Grammeme>>,
/// Область применения справа
pub right_scope: RestrictionScope,
/// Граммема справа.
/// Для некоторых правил значение справа может отсутствовать.
pub right_grammeme: Option<Rc<Grammeme>>,
}
impl Default for Restriction {
fn default() -> Self {
Restriction{
kind: RestrictionKind::Maybe,
auto: 0,
left_scope: RestrictionScope::Lemma,
left_grammeme: None,
right_scope: RestrictionScope::Lemma,
right_grammeme: None,
}
}
}
/// Структура словоформы лексемы.
#[derive(Clone, Default, Debug)]
pub struct Form {
/// Текстовое представление словоформы
pub word: String,
/// Множество граммем описывающих словоформу
pub grammemes: Vec<Rc<Grammeme>>,
}
/// Структура описывающая лексему.
#[derive(Clone, Default, Debug)]
pub struct Lemma {
/// Числовой идентификатор лексемы
pub id: usize,
/// Номер ревизии
pub revision: usize,
/// Текстовое представление исходный словоформы лексемы
pub word: String,
/// Множество граммем описывающих лексему
pub grammemes: Vec<Rc<Grammeme>>,
/// Множество словоформ входящих в данную лексему
pub forms: Vec<Form>,
}
/// Тип связи между лексемами.
#[derive(Clone, Default, Debug)]
pub struct LinkKind {
/// Числовой идентификатор типа связи.
/// Используется в типе `Link`.
pub id: usize,
/// Имя типа связи
pub name: String,
}
/// Структура хранящая связь между двумя лексемами.
#[derive(Clone, Default, Debug)]
pub struct Link {
/// Числовой идентификатор связи
pub id: usize,
/// Лексема с исходной стороны связи
pub from: Rc<Lemma>,
/// Лексема с конечной стороны связи
pub to: Rc<Lemma>,
/// Типа связи
pub kind: Rc<LinkKind>,
}
/// Структура содержащая данные словаря.
#[derive(Default, Debug)]
pub struct Dict {
/// Версия словаря
pub version: String,
/// Номер ревизии
pub revision: usize,
/// Множество граммем
pub grammemes: Vec<Rc<Grammeme>>,
/// Множество правил-ограничений на использование граммем
pub restrictions: Vec<Restriction>,
/// Массив лексем
pub lemmata: Vec<Rc<Lemma>>,
/// Множество типов связей между лексемами
pub link_kinds: Vec<Rc<LinkKind>>,
/// Множество связей между лексемами
pub links: Vec<Link>,
}
|
ирует, ч
|
identifier_name
|
test_thread.rs
|
use rustler::thread;
use rustler::types::atom;
use rustler::{Atom, Encoder, Env};
#[rustler::nif]
pub fn threaded_fac(env: Env, n: u64) -> Atom {
// Multiply two numbers; panic on overflow. In Rust, the `*` operator wraps (rather than
// panicking) in release builds. A test depends on this panicking, so we make sure it panics in
// all builds. The test also checks the panic message.
fn mul(a: u64, b: u64) -> u64 {
a.checked_mul(b).expect("threaded_fac: integer overflow")
}
|
let result = (1..=n).fold(1, mul);
result.encode(thread_env)
});
atom::ok()
}
#[rustler::nif]
pub fn threaded_sleep(env: Env, msec: u64) -> Atom {
let q = msec / 1000;
let r = (msec % 1000) as u32;
thread::spawn::<thread::ThreadSpawner, _>(env, move |thread_env| {
std::thread::sleep(std::time::Duration::new(q as u64, r * 1_000_000));
msec.encode(thread_env)
});
atom::ok()
}
|
thread::spawn::<thread::ThreadSpawner, _>(env, move |thread_env| {
|
random_line_split
|
test_thread.rs
|
use rustler::thread;
use rustler::types::atom;
use rustler::{Atom, Encoder, Env};
#[rustler::nif]
pub fn threaded_fac(env: Env, n: u64) -> Atom
|
#[rustler::nif]
pub fn threaded_sleep(env: Env, msec: u64) -> Atom {
let q = msec / 1000;
let r = (msec % 1000) as u32;
thread::spawn::<thread::ThreadSpawner, _>(env, move |thread_env| {
std::thread::sleep(std::time::Duration::new(q as u64, r * 1_000_000));
msec.encode(thread_env)
});
atom::ok()
}
|
{
// Multiply two numbers; panic on overflow. In Rust, the `*` operator wraps (rather than
// panicking) in release builds. A test depends on this panicking, so we make sure it panics in
// all builds. The test also checks the panic message.
fn mul(a: u64, b: u64) -> u64 {
a.checked_mul(b).expect("threaded_fac: integer overflow")
}
thread::spawn::<thread::ThreadSpawner, _>(env, move |thread_env| {
let result = (1..=n).fold(1, mul);
result.encode(thread_env)
});
atom::ok()
}
|
identifier_body
|
test_thread.rs
|
use rustler::thread;
use rustler::types::atom;
use rustler::{Atom, Encoder, Env};
#[rustler::nif]
pub fn threaded_fac(env: Env, n: u64) -> Atom {
// Multiply two numbers; panic on overflow. In Rust, the `*` operator wraps (rather than
// panicking) in release builds. A test depends on this panicking, so we make sure it panics in
// all builds. The test also checks the panic message.
fn mul(a: u64, b: u64) -> u64 {
a.checked_mul(b).expect("threaded_fac: integer overflow")
}
thread::spawn::<thread::ThreadSpawner, _>(env, move |thread_env| {
let result = (1..=n).fold(1, mul);
result.encode(thread_env)
});
atom::ok()
}
#[rustler::nif]
pub fn
|
(env: Env, msec: u64) -> Atom {
let q = msec / 1000;
let r = (msec % 1000) as u32;
thread::spawn::<thread::ThreadSpawner, _>(env, move |thread_env| {
std::thread::sleep(std::time::Duration::new(q as u64, r * 1_000_000));
msec.encode(thread_env)
});
atom::ok()
}
|
threaded_sleep
|
identifier_name
|
net.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Net rpc interface.
use jsonrpc_core::Error;
build_rpc_trait! {
/// Net rpc interface.
pub trait Net {
/// Returns protocol version.
#[rpc(name = "net_version")]
fn version(&self) -> Result<String, Error>;
|
fn peer_count(&self) -> Result<String, Error>;
/// Returns true if client is actively listening for network connections.
/// Otherwise false.
#[rpc(name = "net_listening")]
fn is_listening(&self) -> Result<bool, Error>;
}
}
|
/// Returns number of peers connected to node.
#[rpc(name = "net_peerCount")]
|
random_line_split
|
cleanup.rs
|
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use crate::storage::mvcc::{
metrics::{MVCC_CONFLICT_COUNTER, MVCC_DUPLICATE_CMD_COUNTER_VEC},
ErrorInner, Key, MvccTxn, ReleasedLock, Result as MvccResult, SnapshotReader, TimeStamp,
};
use crate::storage::txn::actions::check_txn_status::{
check_txn_status_missing_lock, rollback_lock, MissingLockAction,
};
use crate::storage::{Snapshot, TxnStatus};
/// Cleanup the lock if it's TTL has expired, comparing with `current_ts`. If `current_ts` is 0,
/// cleanup the lock without checking TTL. If the lock is the primary lock of a pessimistic
/// transaction, the rollback record is protected from being collapsed.
///
/// Returns the released lock. Returns error if the key is locked or has already been
/// committed.
pub fn cleanup<S: Snapshot>(
txn: &mut MvccTxn,
reader: &mut SnapshotReader<S>,
key: Key,
current_ts: TimeStamp,
protect_rollback: bool,
) -> MvccResult<Option<ReleasedLock>> {
fail_point!("cleanup", |err| Err(
crate::storage::mvcc::txn::make_txn_error(err, &key, reader.start_ts).into()
));
match reader.load_lock(&key)? {
Some(ref lock) if lock.ts == reader.start_ts => {
// If current_ts is not 0, check the Lock's TTL.
// If the lock is not expired, do not rollback it but report key is locked.
if!current_ts.is_zero() && lock.ts.physical() + lock.ttl >= current_ts.physical() {
return Err(
ErrorInner::KeyIsLocked(lock.clone().into_lock_info(key.into_raw()?)).into(),
);
}
let is_pessimistic_txn =!lock.for_update_ts.is_zero();
rollback_lock(
txn,
reader,
key,
lock,
is_pessimistic_txn,
!protect_rollback,
)
}
l => match check_txn_status_missing_lock(
txn,
reader,
key.clone(),
l,
MissingLockAction::rollback_protect(protect_rollback),
false,
)? {
TxnStatus::Committed { commit_ts } => {
MVCC_CONFLICT_COUNTER.rollback_committed.inc();
Err(ErrorInner::Committed {
start_ts: reader.start_ts,
commit_ts,
key: key.into_raw()?,
}
.into())
}
TxnStatus::RolledBack => {
// Return Ok on Rollback already exist.
MVCC_DUPLICATE_CMD_COUNTER_VEC.rollback.inc();
Ok(None)
}
TxnStatus::LockNotExist => Ok(None),
_ => unreachable!(),
},
}
}
pub mod tests {
use super::*;
use crate::storage::mvcc::tests::{must_have_write, must_not_have_write, write};
use crate::storage::mvcc::{Error as MvccError, WriteType};
use crate::storage::txn::tests::{must_commit, must_prewrite_put};
use crate::storage::Engine;
use concurrency_manager::ConcurrencyManager;
use engine_traits::CF_WRITE;
use kvproto::kvrpcpb::Context;
use txn_types::TimeStamp;
#[cfg(test)]
use crate::storage::{
mvcc::tests::{
must_get_rollback_protected, must_get_rollback_ts, must_locked, must_unlocked,
must_written,
},
txn::commands::txn_heart_beat,
txn::tests::{must_acquire_pessimistic_lock, must_pessimistic_prewrite_put},
TestEngineBuilder,
};
pub fn
|
<E: Engine>(
engine: &E,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
) {
let ctx = Context::default();
let snapshot = engine.snapshot(Default::default()).unwrap();
let current_ts = current_ts.into();
let cm = ConcurrencyManager::new(current_ts);
let start_ts = start_ts.into();
let mut txn = MvccTxn::new(start_ts, cm);
let mut reader = SnapshotReader::new(start_ts, snapshot, true);
cleanup(&mut txn, &mut reader, Key::from_raw(key), current_ts, true).unwrap();
write(engine, &ctx, txn.into_modifies());
}
pub fn must_err<E: Engine>(
engine: &E,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
) -> MvccError {
let snapshot = engine.snapshot(Default::default()).unwrap();
let current_ts = current_ts.into();
let cm = ConcurrencyManager::new(current_ts);
let start_ts = start_ts.into();
let mut txn = MvccTxn::new(start_ts, cm);
let mut reader = SnapshotReader::new(start_ts, snapshot, true);
cleanup(&mut txn, &mut reader, Key::from_raw(key), current_ts, true).unwrap_err()
}
pub fn must_cleanup_with_gc_fence<E: Engine>(
engine: &E,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
gc_fence: impl Into<TimeStamp>,
without_target_write: bool,
) {
let ctx = Context::default();
let gc_fence = gc_fence.into();
let start_ts = start_ts.into();
let current_ts = current_ts.into();
if!gc_fence.is_zero() && without_target_write {
// Put a dummy record and remove it after doing cleanup.
must_not_have_write(engine, key, gc_fence);
must_prewrite_put(engine, key, b"dummy_value", key, gc_fence.prev());
must_commit(engine, key, gc_fence.prev(), gc_fence);
}
let cm = ConcurrencyManager::new(current_ts);
let snapshot = engine.snapshot(Default::default()).unwrap();
let mut txn = MvccTxn::new(start_ts, cm);
let mut reader = SnapshotReader::new(start_ts, snapshot, true);
cleanup(&mut txn, &mut reader, Key::from_raw(key), current_ts, true).unwrap();
write(engine, &ctx, txn.into_modifies());
let w = must_have_write(engine, key, start_ts);
assert_ne!(w.start_ts, start_ts, "no overlapping write record");
assert!(
w.write_type!= WriteType::Rollback && w.write_type!= WriteType::Lock,
"unexpected write type {:?}",
w.write_type
);
if!gc_fence.is_zero() && without_target_write {
engine
.delete_cf(&ctx, CF_WRITE, Key::from_raw(key).append_ts(gc_fence))
.unwrap();
must_not_have_write(engine, key, gc_fence);
}
}
#[test]
fn test_must_cleanup_with_gc_fence() {
// Tests the test util
let engine = TestEngineBuilder::new().build().unwrap();
must_prewrite_put(&engine, b"k", b"v", b"k", 10);
must_commit(&engine, b"k", 10, 20);
must_cleanup_with_gc_fence(&engine, b"k", 20, 0, 30, true);
let w = must_written(&engine, b"k", 10, 20, WriteType::Put);
assert!(w.has_overlapped_rollback);
assert_eq!(w.gc_fence.unwrap(), 30.into());
}
#[test]
fn test_cleanup() {
// Cleanup's logic is mostly similar to rollback, except the TTL check. Tests that not
// related to TTL check should be covered by other test cases.
let engine = TestEngineBuilder::new().build().unwrap();
// Shorthand for composing ts.
let ts = TimeStamp::compose;
let (k, v) = (b"k", b"v");
must_prewrite_put(&engine, k, v, k, ts(10, 0));
must_locked(&engine, k, ts(10, 0));
txn_heart_beat::tests::must_success(&engine, k, ts(10, 0), 100, 100);
// Check the last txn_heart_beat has set the lock's TTL to 100.
txn_heart_beat::tests::must_success(&engine, k, ts(10, 0), 90, 100);
// TTL not expired. Do nothing but returns an error.
must_err(&engine, k, ts(10, 0), ts(20, 0));
must_locked(&engine, k, ts(10, 0));
// Try to cleanup another transaction's lock. Does nothing.
must_succeed(&engine, k, ts(10, 1), ts(120, 0));
// If there is no exisiting lock when cleanup, it may be a pessimistic transaction,
// so the rollback should be protected.
must_get_rollback_protected(&engine, k, ts(10, 1), true);
must_locked(&engine, k, ts(10, 0));
// TTL expired. The lock should be removed.
must_succeed(&engine, k, ts(10, 0), ts(120, 0));
must_unlocked(&engine, k);
// Rollbacks of optimistic transactions needn't be protected
must_get_rollback_protected(&engine, k, ts(10, 0), false);
must_get_rollback_ts(&engine, k, ts(10, 0));
// Rollbacks of primary keys in pessimistic transactions should be protected
must_acquire_pessimistic_lock(&engine, k, k, ts(11, 1), ts(12, 1));
must_succeed(&engine, k, ts(11, 1), ts(120, 0));
must_get_rollback_protected(&engine, k, ts(11, 1), true);
must_acquire_pessimistic_lock(&engine, k, k, ts(13, 1), ts(14, 1));
must_pessimistic_prewrite_put(&engine, k, v, k, ts(13, 1), ts(14, 1), true);
must_succeed(&engine, k, ts(13, 1), ts(120, 0));
must_get_rollback_protected(&engine, k, ts(13, 1), true);
}
}
|
must_succeed
|
identifier_name
|
cleanup.rs
|
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use crate::storage::mvcc::{
metrics::{MVCC_CONFLICT_COUNTER, MVCC_DUPLICATE_CMD_COUNTER_VEC},
ErrorInner, Key, MvccTxn, ReleasedLock, Result as MvccResult, SnapshotReader, TimeStamp,
};
use crate::storage::txn::actions::check_txn_status::{
check_txn_status_missing_lock, rollback_lock, MissingLockAction,
};
use crate::storage::{Snapshot, TxnStatus};
/// Cleanup the lock if it's TTL has expired, comparing with `current_ts`. If `current_ts` is 0,
/// cleanup the lock without checking TTL. If the lock is the primary lock of a pessimistic
/// transaction, the rollback record is protected from being collapsed.
///
/// Returns the released lock. Returns error if the key is locked or has already been
/// committed.
pub fn cleanup<S: Snapshot>(
txn: &mut MvccTxn,
reader: &mut SnapshotReader<S>,
key: Key,
current_ts: TimeStamp,
protect_rollback: bool,
) -> MvccResult<Option<ReleasedLock>> {
fail_point!("cleanup", |err| Err(
crate::storage::mvcc::txn::make_txn_error(err, &key, reader.start_ts).into()
));
match reader.load_lock(&key)? {
Some(ref lock) if lock.ts == reader.start_ts => {
// If current_ts is not 0, check the Lock's TTL.
// If the lock is not expired, do not rollback it but report key is locked.
if!current_ts.is_zero() && lock.ts.physical() + lock.ttl >= current_ts.physical() {
return Err(
ErrorInner::KeyIsLocked(lock.clone().into_lock_info(key.into_raw()?)).into(),
);
}
let is_pessimistic_txn =!lock.for_update_ts.is_zero();
rollback_lock(
txn,
reader,
key,
lock,
is_pessimistic_txn,
!protect_rollback,
)
}
l => match check_txn_status_missing_lock(
txn,
reader,
key.clone(),
l,
MissingLockAction::rollback_protect(protect_rollback),
false,
)? {
TxnStatus::Committed { commit_ts } => {
MVCC_CONFLICT_COUNTER.rollback_committed.inc();
Err(ErrorInner::Committed {
start_ts: reader.start_ts,
commit_ts,
key: key.into_raw()?,
}
.into())
}
TxnStatus::RolledBack => {
// Return Ok on Rollback already exist.
MVCC_DUPLICATE_CMD_COUNTER_VEC.rollback.inc();
Ok(None)
}
TxnStatus::LockNotExist => Ok(None),
_ => unreachable!(),
},
}
}
pub mod tests {
use super::*;
use crate::storage::mvcc::tests::{must_have_write, must_not_have_write, write};
use crate::storage::mvcc::{Error as MvccError, WriteType};
use crate::storage::txn::tests::{must_commit, must_prewrite_put};
use crate::storage::Engine;
use concurrency_manager::ConcurrencyManager;
use engine_traits::CF_WRITE;
use kvproto::kvrpcpb::Context;
use txn_types::TimeStamp;
#[cfg(test)]
use crate::storage::{
mvcc::tests::{
must_get_rollback_protected, must_get_rollback_ts, must_locked, must_unlocked,
must_written,
},
txn::commands::txn_heart_beat,
txn::tests::{must_acquire_pessimistic_lock, must_pessimistic_prewrite_put},
TestEngineBuilder,
};
pub fn must_succeed<E: Engine>(
engine: &E,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
) {
let ctx = Context::default();
let snapshot = engine.snapshot(Default::default()).unwrap();
let current_ts = current_ts.into();
let cm = ConcurrencyManager::new(current_ts);
let start_ts = start_ts.into();
let mut txn = MvccTxn::new(start_ts, cm);
let mut reader = SnapshotReader::new(start_ts, snapshot, true);
cleanup(&mut txn, &mut reader, Key::from_raw(key), current_ts, true).unwrap();
write(engine, &ctx, txn.into_modifies());
}
pub fn must_err<E: Engine>(
engine: &E,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
) -> MvccError {
let snapshot = engine.snapshot(Default::default()).unwrap();
let current_ts = current_ts.into();
let cm = ConcurrencyManager::new(current_ts);
let start_ts = start_ts.into();
let mut txn = MvccTxn::new(start_ts, cm);
let mut reader = SnapshotReader::new(start_ts, snapshot, true);
cleanup(&mut txn, &mut reader, Key::from_raw(key), current_ts, true).unwrap_err()
}
pub fn must_cleanup_with_gc_fence<E: Engine>(
engine: &E,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
gc_fence: impl Into<TimeStamp>,
without_target_write: bool,
) {
let ctx = Context::default();
let gc_fence = gc_fence.into();
let start_ts = start_ts.into();
let current_ts = current_ts.into();
if!gc_fence.is_zero() && without_target_write {
// Put a dummy record and remove it after doing cleanup.
must_not_have_write(engine, key, gc_fence);
must_prewrite_put(engine, key, b"dummy_value", key, gc_fence.prev());
must_commit(engine, key, gc_fence.prev(), gc_fence);
}
let cm = ConcurrencyManager::new(current_ts);
let snapshot = engine.snapshot(Default::default()).unwrap();
let mut txn = MvccTxn::new(start_ts, cm);
let mut reader = SnapshotReader::new(start_ts, snapshot, true);
cleanup(&mut txn, &mut reader, Key::from_raw(key), current_ts, true).unwrap();
write(engine, &ctx, txn.into_modifies());
let w = must_have_write(engine, key, start_ts);
assert_ne!(w.start_ts, start_ts, "no overlapping write record");
assert!(
w.write_type!= WriteType::Rollback && w.write_type!= WriteType::Lock,
"unexpected write type {:?}",
w.write_type
);
if!gc_fence.is_zero() && without_target_write {
engine
.delete_cf(&ctx, CF_WRITE, Key::from_raw(key).append_ts(gc_fence))
.unwrap();
must_not_have_write(engine, key, gc_fence);
}
}
#[test]
fn test_must_cleanup_with_gc_fence() {
// Tests the test util
let engine = TestEngineBuilder::new().build().unwrap();
must_prewrite_put(&engine, b"k", b"v", b"k", 10);
must_commit(&engine, b"k", 10, 20);
must_cleanup_with_gc_fence(&engine, b"k", 20, 0, 30, true);
let w = must_written(&engine, b"k", 10, 20, WriteType::Put);
assert!(w.has_overlapped_rollback);
assert_eq!(w.gc_fence.unwrap(), 30.into());
}
#[test]
fn test_cleanup() {
// Cleanup's logic is mostly similar to rollback, except the TTL check. Tests that not
// related to TTL check should be covered by other test cases.
let engine = TestEngineBuilder::new().build().unwrap();
// Shorthand for composing ts.
let ts = TimeStamp::compose;
let (k, v) = (b"k", b"v");
must_prewrite_put(&engine, k, v, k, ts(10, 0));
must_locked(&engine, k, ts(10, 0));
txn_heart_beat::tests::must_success(&engine, k, ts(10, 0), 100, 100);
// Check the last txn_heart_beat has set the lock's TTL to 100.
txn_heart_beat::tests::must_success(&engine, k, ts(10, 0), 90, 100);
// TTL not expired. Do nothing but returns an error.
must_err(&engine, k, ts(10, 0), ts(20, 0));
must_locked(&engine, k, ts(10, 0));
// Try to cleanup another transaction's lock. Does nothing.
must_succeed(&engine, k, ts(10, 1), ts(120, 0));
// If there is no exisiting lock when cleanup, it may be a pessimistic transaction,
// so the rollback should be protected.
must_get_rollback_protected(&engine, k, ts(10, 1), true);
must_locked(&engine, k, ts(10, 0));
// TTL expired. The lock should be removed.
must_succeed(&engine, k, ts(10, 0), ts(120, 0));
must_unlocked(&engine, k);
// Rollbacks of optimistic transactions needn't be protected
must_get_rollback_protected(&engine, k, ts(10, 0), false);
must_get_rollback_ts(&engine, k, ts(10, 0));
// Rollbacks of primary keys in pessimistic transactions should be protected
must_acquire_pessimistic_lock(&engine, k, k, ts(11, 1), ts(12, 1));
|
must_succeed(&engine, k, ts(13, 1), ts(120, 0));
must_get_rollback_protected(&engine, k, ts(13, 1), true);
}
}
|
must_succeed(&engine, k, ts(11, 1), ts(120, 0));
must_get_rollback_protected(&engine, k, ts(11, 1), true);
must_acquire_pessimistic_lock(&engine, k, k, ts(13, 1), ts(14, 1));
must_pessimistic_prewrite_put(&engine, k, v, k, ts(13, 1), ts(14, 1), true);
|
random_line_split
|
cleanup.rs
|
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use crate::storage::mvcc::{
metrics::{MVCC_CONFLICT_COUNTER, MVCC_DUPLICATE_CMD_COUNTER_VEC},
ErrorInner, Key, MvccTxn, ReleasedLock, Result as MvccResult, SnapshotReader, TimeStamp,
};
use crate::storage::txn::actions::check_txn_status::{
check_txn_status_missing_lock, rollback_lock, MissingLockAction,
};
use crate::storage::{Snapshot, TxnStatus};
/// Cleanup the lock if it's TTL has expired, comparing with `current_ts`. If `current_ts` is 0,
/// cleanup the lock without checking TTL. If the lock is the primary lock of a pessimistic
/// transaction, the rollback record is protected from being collapsed.
///
/// Returns the released lock. Returns error if the key is locked or has already been
/// committed.
pub fn cleanup<S: Snapshot>(
txn: &mut MvccTxn,
reader: &mut SnapshotReader<S>,
key: Key,
current_ts: TimeStamp,
protect_rollback: bool,
) -> MvccResult<Option<ReleasedLock>>
|
lock,
is_pessimistic_txn,
!protect_rollback,
)
}
l => match check_txn_status_missing_lock(
txn,
reader,
key.clone(),
l,
MissingLockAction::rollback_protect(protect_rollback),
false,
)? {
TxnStatus::Committed { commit_ts } => {
MVCC_CONFLICT_COUNTER.rollback_committed.inc();
Err(ErrorInner::Committed {
start_ts: reader.start_ts,
commit_ts,
key: key.into_raw()?,
}
.into())
}
TxnStatus::RolledBack => {
// Return Ok on Rollback already exist.
MVCC_DUPLICATE_CMD_COUNTER_VEC.rollback.inc();
Ok(None)
}
TxnStatus::LockNotExist => Ok(None),
_ => unreachable!(),
},
}
}
pub mod tests {
use super::*;
use crate::storage::mvcc::tests::{must_have_write, must_not_have_write, write};
use crate::storage::mvcc::{Error as MvccError, WriteType};
use crate::storage::txn::tests::{must_commit, must_prewrite_put};
use crate::storage::Engine;
use concurrency_manager::ConcurrencyManager;
use engine_traits::CF_WRITE;
use kvproto::kvrpcpb::Context;
use txn_types::TimeStamp;
#[cfg(test)]
use crate::storage::{
mvcc::tests::{
must_get_rollback_protected, must_get_rollback_ts, must_locked, must_unlocked,
must_written,
},
txn::commands::txn_heart_beat,
txn::tests::{must_acquire_pessimistic_lock, must_pessimistic_prewrite_put},
TestEngineBuilder,
};
pub fn must_succeed<E: Engine>(
engine: &E,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
) {
let ctx = Context::default();
let snapshot = engine.snapshot(Default::default()).unwrap();
let current_ts = current_ts.into();
let cm = ConcurrencyManager::new(current_ts);
let start_ts = start_ts.into();
let mut txn = MvccTxn::new(start_ts, cm);
let mut reader = SnapshotReader::new(start_ts, snapshot, true);
cleanup(&mut txn, &mut reader, Key::from_raw(key), current_ts, true).unwrap();
write(engine, &ctx, txn.into_modifies());
}
pub fn must_err<E: Engine>(
engine: &E,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
) -> MvccError {
let snapshot = engine.snapshot(Default::default()).unwrap();
let current_ts = current_ts.into();
let cm = ConcurrencyManager::new(current_ts);
let start_ts = start_ts.into();
let mut txn = MvccTxn::new(start_ts, cm);
let mut reader = SnapshotReader::new(start_ts, snapshot, true);
cleanup(&mut txn, &mut reader, Key::from_raw(key), current_ts, true).unwrap_err()
}
pub fn must_cleanup_with_gc_fence<E: Engine>(
engine: &E,
key: &[u8],
start_ts: impl Into<TimeStamp>,
current_ts: impl Into<TimeStamp>,
gc_fence: impl Into<TimeStamp>,
without_target_write: bool,
) {
let ctx = Context::default();
let gc_fence = gc_fence.into();
let start_ts = start_ts.into();
let current_ts = current_ts.into();
if!gc_fence.is_zero() && without_target_write {
// Put a dummy record and remove it after doing cleanup.
must_not_have_write(engine, key, gc_fence);
must_prewrite_put(engine, key, b"dummy_value", key, gc_fence.prev());
must_commit(engine, key, gc_fence.prev(), gc_fence);
}
let cm = ConcurrencyManager::new(current_ts);
let snapshot = engine.snapshot(Default::default()).unwrap();
let mut txn = MvccTxn::new(start_ts, cm);
let mut reader = SnapshotReader::new(start_ts, snapshot, true);
cleanup(&mut txn, &mut reader, Key::from_raw(key), current_ts, true).unwrap();
write(engine, &ctx, txn.into_modifies());
let w = must_have_write(engine, key, start_ts);
assert_ne!(w.start_ts, start_ts, "no overlapping write record");
assert!(
w.write_type!= WriteType::Rollback && w.write_type!= WriteType::Lock,
"unexpected write type {:?}",
w.write_type
);
if!gc_fence.is_zero() && without_target_write {
engine
.delete_cf(&ctx, CF_WRITE, Key::from_raw(key).append_ts(gc_fence))
.unwrap();
must_not_have_write(engine, key, gc_fence);
}
}
#[test]
fn test_must_cleanup_with_gc_fence() {
// Tests the test util
let engine = TestEngineBuilder::new().build().unwrap();
must_prewrite_put(&engine, b"k", b"v", b"k", 10);
must_commit(&engine, b"k", 10, 20);
must_cleanup_with_gc_fence(&engine, b"k", 20, 0, 30, true);
let w = must_written(&engine, b"k", 10, 20, WriteType::Put);
assert!(w.has_overlapped_rollback);
assert_eq!(w.gc_fence.unwrap(), 30.into());
}
#[test]
fn test_cleanup() {
// Cleanup's logic is mostly similar to rollback, except the TTL check. Tests that not
// related to TTL check should be covered by other test cases.
let engine = TestEngineBuilder::new().build().unwrap();
// Shorthand for composing ts.
let ts = TimeStamp::compose;
let (k, v) = (b"k", b"v");
must_prewrite_put(&engine, k, v, k, ts(10, 0));
must_locked(&engine, k, ts(10, 0));
txn_heart_beat::tests::must_success(&engine, k, ts(10, 0), 100, 100);
// Check the last txn_heart_beat has set the lock's TTL to 100.
txn_heart_beat::tests::must_success(&engine, k, ts(10, 0), 90, 100);
// TTL not expired. Do nothing but returns an error.
must_err(&engine, k, ts(10, 0), ts(20, 0));
must_locked(&engine, k, ts(10, 0));
// Try to cleanup another transaction's lock. Does nothing.
must_succeed(&engine, k, ts(10, 1), ts(120, 0));
// If there is no exisiting lock when cleanup, it may be a pessimistic transaction,
// so the rollback should be protected.
must_get_rollback_protected(&engine, k, ts(10, 1), true);
must_locked(&engine, k, ts(10, 0));
// TTL expired. The lock should be removed.
must_succeed(&engine, k, ts(10, 0), ts(120, 0));
must_unlocked(&engine, k);
// Rollbacks of optimistic transactions needn't be protected
must_get_rollback_protected(&engine, k, ts(10, 0), false);
must_get_rollback_ts(&engine, k, ts(10, 0));
// Rollbacks of primary keys in pessimistic transactions should be protected
must_acquire_pessimistic_lock(&engine, k, k, ts(11, 1), ts(12, 1));
must_succeed(&engine, k, ts(11, 1), ts(120, 0));
must_get_rollback_protected(&engine, k, ts(11, 1), true);
must_acquire_pessimistic_lock(&engine, k, k, ts(13, 1), ts(14, 1));
must_pessimistic_prewrite_put(&engine, k, v, k, ts(13, 1), ts(14, 1), true);
must_succeed(&engine, k, ts(13, 1), ts(120, 0));
must_get_rollback_protected(&engine, k, ts(13, 1), true);
}
}
|
{
fail_point!("cleanup", |err| Err(
crate::storage::mvcc::txn::make_txn_error(err, &key, reader.start_ts).into()
));
match reader.load_lock(&key)? {
Some(ref lock) if lock.ts == reader.start_ts => {
// If current_ts is not 0, check the Lock's TTL.
// If the lock is not expired, do not rollback it but report key is locked.
if !current_ts.is_zero() && lock.ts.physical() + lock.ttl >= current_ts.physical() {
return Err(
ErrorInner::KeyIsLocked(lock.clone().into_lock_info(key.into_raw()?)).into(),
);
}
let is_pessimistic_txn = !lock.for_update_ts.is_zero();
rollback_lock(
txn,
reader,
key,
|
identifier_body
|
generic-object.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo<T> {
fn get(&self) -> T;
}
struct S {
x: int
}
impl Foo<int> for S {
fn get(&self) -> int {
self.x
}
}
pub fn
|
() {
let x = @S { x: 1 };
let y = x as @Foo<int>;
assert!(y.get() == 1);
}
|
main
|
identifier_name
|
generic-object.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo<T> {
fn get(&self) -> T;
}
struct S {
x: int
}
impl Foo<int> for S {
fn get(&self) -> int
|
}
pub fn main() {
let x = @S { x: 1 };
let y = x as @Foo<int>;
assert!(y.get() == 1);
}
|
{
self.x
}
|
identifier_body
|
macros.rs
|
/// Shortcut to generate an ESC-prefixed CSI sequence
macro_rules! csi {
($( $l:expr ),*) => {
concat!("\x1b[", $( $l ),*)
};
}
/// Generates an empty struct that prints as a CSI sequence
macro_rules! generate_csi_struct {
($name:ident, $value:expr) => {
pub struct $name;
impl Display for $name {
fn fmt(&self, f: &mut Formatter) -> Result {
write!(f, csi!($value))
}
}
};
($name:ident, $value:expr, u16) => {
pub struct $name(pub u16);
impl Display for $name {
fn fmt(&self, f: &mut Formatter) -> Result {
write!(f, csi!("{}", $value), self.0)
}
}
};
}
#[cfg(test)]
mod tests {
#[test]
fn
|
() {
assert_eq!(csi!("123"), "\x1b[123");
}
}
|
csi
|
identifier_name
|
macros.rs
|
/// Shortcut to generate an ESC-prefixed CSI sequence
macro_rules! csi {
($( $l:expr ),*) => {
concat!("\x1b[", $( $l ),*)
};
}
/// Generates an empty struct that prints as a CSI sequence
macro_rules! generate_csi_struct {
($name:ident, $value:expr) => {
pub struct $name;
impl Display for $name {
fn fmt(&self, f: &mut Formatter) -> Result {
write!(f, csi!($value))
}
}
};
($name:ident, $value:expr, u16) => {
pub struct $name(pub u16);
impl Display for $name {
fn fmt(&self, f: &mut Formatter) -> Result {
write!(f, csi!("{}", $value), self.0)
}
}
};
}
#[cfg(test)]
mod tests {
#[test]
fn csi()
|
}
|
{
assert_eq!(csi!("123"), "\x1b[123");
}
|
identifier_body
|
macros.rs
|
/// Shortcut to generate an ESC-prefixed CSI sequence
macro_rules! csi {
($( $l:expr ),*) => {
concat!("\x1b[", $( $l ),*)
};
}
/// Generates an empty struct that prints as a CSI sequence
macro_rules! generate_csi_struct {
($name:ident, $value:expr) => {
pub struct $name;
impl Display for $name {
fn fmt(&self, f: &mut Formatter) -> Result {
write!(f, csi!($value))
}
}
};
($name:ident, $value:expr, u16) => {
pub struct $name(pub u16);
impl Display for $name {
fn fmt(&self, f: &mut Formatter) -> Result {
write!(f, csi!("{}", $value), self.0)
|
#[cfg(test)]
mod tests {
#[test]
fn csi() {
assert_eq!(csi!("123"), "\x1b[123");
}
}
|
}
}
};
}
|
random_line_split
|
path.rs
|
use anyhow::Result;
use once_cell::sync::Lazy;
use std::fs;
use std::path::{Path, PathBuf};
#[derive(Copy, Clone)]
pub enum FilePath {
LocalData,
}
impl FilePath {
fn path(self) -> PathBuf {
static LOCAL_DATA_PATH: Lazy<PathBuf> = Lazy::new(|| {
let mut dir =
dirs_next::data_local_dir().unwrap_or_else(|| PathBuf::from("~/.local/share/"));
dir.push(env!("CARGO_PKG_NAME"));
dir
});
match self {
Self::LocalData => LOCAL_DATA_PATH.clone(),
}
}
pub fn
|
<P>(self, subdir: P) -> Result<PathBuf>
where
P: AsRef<Path>,
{
let mut dir = self.path();
dir.push(subdir);
validate_dir(&dir)?;
Ok(dir)
}
}
pub fn validate_dir<P>(dir: P) -> Result<()>
where
P: AsRef<Path>,
{
let dir = dir.as_ref();
if!dir.exists() {
fs::create_dir_all(&dir)?;
}
Ok(())
}
|
validated_subdir
|
identifier_name
|
path.rs
|
use anyhow::Result;
use once_cell::sync::Lazy;
use std::fs;
use std::path::{Path, PathBuf};
#[derive(Copy, Clone)]
|
fn path(self) -> PathBuf {
static LOCAL_DATA_PATH: Lazy<PathBuf> = Lazy::new(|| {
let mut dir =
dirs_next::data_local_dir().unwrap_or_else(|| PathBuf::from("~/.local/share/"));
dir.push(env!("CARGO_PKG_NAME"));
dir
});
match self {
Self::LocalData => LOCAL_DATA_PATH.clone(),
}
}
pub fn validated_subdir<P>(self, subdir: P) -> Result<PathBuf>
where
P: AsRef<Path>,
{
let mut dir = self.path();
dir.push(subdir);
validate_dir(&dir)?;
Ok(dir)
}
}
pub fn validate_dir<P>(dir: P) -> Result<()>
where
P: AsRef<Path>,
{
let dir = dir.as_ref();
if!dir.exists() {
fs::create_dir_all(&dir)?;
}
Ok(())
}
|
pub enum FilePath {
LocalData,
}
impl FilePath {
|
random_line_split
|
nul-characters.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
|
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main()
{
let all_nuls1 = "\0\x00\u{0}\u{0}";
let all_nuls2 = "\u{0}\u{0}\x00\0";
let all_nuls3 = "\u{0}\u{0}\x00\0";
let all_nuls4 = "\x00\u{0}\0\u{0}";
// sizes for two should suffice
assert_eq!(all_nuls1.len(), 4);
assert_eq!(all_nuls2.len(), 4);
// string equality should pass between the strings
assert_eq!(all_nuls1, all_nuls2);
assert_eq!(all_nuls2, all_nuls3);
assert_eq!(all_nuls3, all_nuls4);
// all extracted characters in all_nuls are equivalent to each other
for c1 in all_nuls1.chars()
{
for c2 in all_nuls1.chars()
{
assert_eq!(c1,c2);
}
}
// testing equality between explicit character literals
assert_eq!('\0', '\x00');
assert_eq!('\u{0}', '\x00');
assert_eq!('\u{0}', '\u{0}');
// NUL characters should make a difference
assert!("Hello World"!= "Hello \0World");
assert!("Hello World"!= "Hello World\0");
}
|
random_line_split
|
|
nul-characters.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main()
|
assert_eq!(c1,c2);
}
}
// testing equality between explicit character literals
assert_eq!('\0', '\x00');
assert_eq!('\u{0}', '\x00');
assert_eq!('\u{0}', '\u{0}');
// NUL characters should make a difference
assert!("Hello World"!= "Hello \0World");
assert!("Hello World"!= "Hello World\0");
}
|
{
let all_nuls1 = "\0\x00\u{0}\u{0}";
let all_nuls2 = "\u{0}\u{0}\x00\0";
let all_nuls3 = "\u{0}\u{0}\x00\0";
let all_nuls4 = "\x00\u{0}\0\u{0}";
// sizes for two should suffice
assert_eq!(all_nuls1.len(), 4);
assert_eq!(all_nuls2.len(), 4);
// string equality should pass between the strings
assert_eq!(all_nuls1, all_nuls2);
assert_eq!(all_nuls2, all_nuls3);
assert_eq!(all_nuls3, all_nuls4);
// all extracted characters in all_nuls are equivalent to each other
for c1 in all_nuls1.chars()
{
for c2 in all_nuls1.chars()
{
|
identifier_body
|
nul-characters.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn
|
()
{
let all_nuls1 = "\0\x00\u{0}\u{0}";
let all_nuls2 = "\u{0}\u{0}\x00\0";
let all_nuls3 = "\u{0}\u{0}\x00\0";
let all_nuls4 = "\x00\u{0}\0\u{0}";
// sizes for two should suffice
assert_eq!(all_nuls1.len(), 4);
assert_eq!(all_nuls2.len(), 4);
// string equality should pass between the strings
assert_eq!(all_nuls1, all_nuls2);
assert_eq!(all_nuls2, all_nuls3);
assert_eq!(all_nuls3, all_nuls4);
// all extracted characters in all_nuls are equivalent to each other
for c1 in all_nuls1.chars()
{
for c2 in all_nuls1.chars()
{
assert_eq!(c1,c2);
}
}
// testing equality between explicit character literals
assert_eq!('\0', '\x00');
assert_eq!('\u{0}', '\x00');
assert_eq!('\u{0}', '\u{0}');
// NUL characters should make a difference
assert!("Hello World"!= "Hello \0World");
assert!("Hello World"!= "Hello World\0");
}
|
main
|
identifier_name
|
win.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Windows console handling
// FIXME (#13400): this is only a tiny fraction of the win32 console api
extern crate libc;
use std::io::IoResult;
use attr;
use color;
use Terminal;
/// A Terminal implementation which uses the Win32 Console API.
pub struct WinConsole<T> {
buf: T,
foreground: color::Color,
background: color::Color,
}
#[allow(non_snake_case_functions)]
#[link(name = "kernel32")]
extern "system" {
fn SetConsoleTextAttribute(handle: libc::HANDLE, attr: libc::WORD) -> libc::BOOL;
fn GetStdHandle(which: libc::DWORD) -> libc::HANDLE;
}
fn color_to_bits(color: color::Color) -> u16 {
// magic numbers from mingw-w64's wincon.h
let bits = match color % 8 {
color::BLACK => 0,
color::BLUE => 0x1,
color::GREEN => 0x2,
color::RED => 0x4,
color::YELLOW => 0x2 | 0x4,
color::MAGENTA => 0x1 | 0x4,
color::CYAN => 0x1 | 0x2,
color::WHITE => 0x1 | 0x2 | 0x4,
_ => unreachable!()
};
if color >= 8 {
bits | 0x8
} else {
bits
|
}
impl<T: Writer> WinConsole<T> {
fn apply(&mut self) {
let _unused = self.buf.flush();
let mut accum: libc::WORD = 0;
accum |= color_to_bits(self.foreground);
accum |= color_to_bits(self.background) << 4;
unsafe {
// Magic -11 means stdout, from
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683231%28v=vs.85%29.aspx
//
// You may be wondering, "but what about stderr?", and the answer
// to that is that setting terminal attributes on the stdout
// handle also sets them for stderr, since they go to the same
// terminal! Admittedly, this is fragile, since stderr could be
// redirected to a different console. This is good enough for
// rustc though. See #13400.
let out = GetStdHandle(-11);
SetConsoleTextAttribute(out, accum);
}
}
}
impl<T: Writer> Writer for WinConsole<T> {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
self.buf.write(buf)
}
fn flush(&mut self) -> IoResult<()> {
self.buf.flush()
}
}
impl<T: Writer> Terminal<T> for WinConsole<T> {
fn new(out: T) -> Option<WinConsole<T>> {
Some(WinConsole { buf: out, foreground: color::WHITE, background: color::BLACK })
}
fn fg(&mut self, color: color::Color) -> IoResult<bool> {
self.foreground = color;
self.apply();
Ok(true)
}
fn bg(&mut self, color: color::Color) -> IoResult<bool> {
self.background = color;
self.apply();
Ok(true)
}
fn attr(&mut self, attr: attr::Attr) -> IoResult<bool> {
match attr {
attr::ForegroundColor(f) => {
self.foreground = f;
self.apply();
Ok(true)
},
attr::BackgroundColor(b) => {
self.background = b;
self.apply();
Ok(true)
},
_ => Ok(false)
}
}
fn supports_attr(&self, attr: attr::Attr) -> bool {
// it claims support for underscore and reverse video, but I can't get
// it to do anything -cmr
match attr {
attr::ForegroundColor(_) | attr::BackgroundColor(_) => true,
_ => false
}
}
fn reset(&mut self) -> IoResult<()> {
self.foreground = color::WHITE;
self.background = color::BLACK;
self.apply();
Ok(())
}
fn unwrap(self) -> T { self.buf }
fn get_ref<'a>(&'a self) -> &'a T { &self.buf }
fn get_mut<'a>(&'a mut self) -> &'a mut T { &mut self.buf }
}
|
}
|
random_line_split
|
win.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Windows console handling
// FIXME (#13400): this is only a tiny fraction of the win32 console api
extern crate libc;
use std::io::IoResult;
use attr;
use color;
use Terminal;
/// A Terminal implementation which uses the Win32 Console API.
pub struct WinConsole<T> {
buf: T,
foreground: color::Color,
background: color::Color,
}
#[allow(non_snake_case_functions)]
#[link(name = "kernel32")]
extern "system" {
fn SetConsoleTextAttribute(handle: libc::HANDLE, attr: libc::WORD) -> libc::BOOL;
fn GetStdHandle(which: libc::DWORD) -> libc::HANDLE;
}
fn color_to_bits(color: color::Color) -> u16 {
// magic numbers from mingw-w64's wincon.h
let bits = match color % 8 {
color::BLACK => 0,
color::BLUE => 0x1,
color::GREEN => 0x2,
color::RED => 0x4,
color::YELLOW => 0x2 | 0x4,
color::MAGENTA => 0x1 | 0x4,
color::CYAN => 0x1 | 0x2,
color::WHITE => 0x1 | 0x2 | 0x4,
_ => unreachable!()
};
if color >= 8 {
bits | 0x8
} else {
bits
}
}
impl<T: Writer> WinConsole<T> {
fn apply(&mut self) {
let _unused = self.buf.flush();
let mut accum: libc::WORD = 0;
accum |= color_to_bits(self.foreground);
accum |= color_to_bits(self.background) << 4;
unsafe {
// Magic -11 means stdout, from
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683231%28v=vs.85%29.aspx
//
// You may be wondering, "but what about stderr?", and the answer
// to that is that setting terminal attributes on the stdout
// handle also sets them for stderr, since they go to the same
// terminal! Admittedly, this is fragile, since stderr could be
// redirected to a different console. This is good enough for
// rustc though. See #13400.
let out = GetStdHandle(-11);
SetConsoleTextAttribute(out, accum);
}
}
}
impl<T: Writer> Writer for WinConsole<T> {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
self.buf.write(buf)
}
fn flush(&mut self) -> IoResult<()>
|
}
impl<T: Writer> Terminal<T> for WinConsole<T> {
fn new(out: T) -> Option<WinConsole<T>> {
Some(WinConsole { buf: out, foreground: color::WHITE, background: color::BLACK })
}
fn fg(&mut self, color: color::Color) -> IoResult<bool> {
self.foreground = color;
self.apply();
Ok(true)
}
fn bg(&mut self, color: color::Color) -> IoResult<bool> {
self.background = color;
self.apply();
Ok(true)
}
fn attr(&mut self, attr: attr::Attr) -> IoResult<bool> {
match attr {
attr::ForegroundColor(f) => {
self.foreground = f;
self.apply();
Ok(true)
},
attr::BackgroundColor(b) => {
self.background = b;
self.apply();
Ok(true)
},
_ => Ok(false)
}
}
fn supports_attr(&self, attr: attr::Attr) -> bool {
// it claims support for underscore and reverse video, but I can't get
// it to do anything -cmr
match attr {
attr::ForegroundColor(_) | attr::BackgroundColor(_) => true,
_ => false
}
}
fn reset(&mut self) -> IoResult<()> {
self.foreground = color::WHITE;
self.background = color::BLACK;
self.apply();
Ok(())
}
fn unwrap(self) -> T { self.buf }
fn get_ref<'a>(&'a self) -> &'a T { &self.buf }
fn get_mut<'a>(&'a mut self) -> &'a mut T { &mut self.buf }
}
|
{
self.buf.flush()
}
|
identifier_body
|
win.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Windows console handling
// FIXME (#13400): this is only a tiny fraction of the win32 console api
extern crate libc;
use std::io::IoResult;
use attr;
use color;
use Terminal;
/// A Terminal implementation which uses the Win32 Console API.
pub struct WinConsole<T> {
buf: T,
foreground: color::Color,
background: color::Color,
}
#[allow(non_snake_case_functions)]
#[link(name = "kernel32")]
extern "system" {
fn SetConsoleTextAttribute(handle: libc::HANDLE, attr: libc::WORD) -> libc::BOOL;
fn GetStdHandle(which: libc::DWORD) -> libc::HANDLE;
}
fn color_to_bits(color: color::Color) -> u16 {
// magic numbers from mingw-w64's wincon.h
let bits = match color % 8 {
color::BLACK => 0,
color::BLUE => 0x1,
color::GREEN => 0x2,
color::RED => 0x4,
color::YELLOW => 0x2 | 0x4,
color::MAGENTA => 0x1 | 0x4,
color::CYAN => 0x1 | 0x2,
color::WHITE => 0x1 | 0x2 | 0x4,
_ => unreachable!()
};
if color >= 8 {
bits | 0x8
} else {
bits
}
}
impl<T: Writer> WinConsole<T> {
fn
|
(&mut self) {
let _unused = self.buf.flush();
let mut accum: libc::WORD = 0;
accum |= color_to_bits(self.foreground);
accum |= color_to_bits(self.background) << 4;
unsafe {
// Magic -11 means stdout, from
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683231%28v=vs.85%29.aspx
//
// You may be wondering, "but what about stderr?", and the answer
// to that is that setting terminal attributes on the stdout
// handle also sets them for stderr, since they go to the same
// terminal! Admittedly, this is fragile, since stderr could be
// redirected to a different console. This is good enough for
// rustc though. See #13400.
let out = GetStdHandle(-11);
SetConsoleTextAttribute(out, accum);
}
}
}
impl<T: Writer> Writer for WinConsole<T> {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
self.buf.write(buf)
}
fn flush(&mut self) -> IoResult<()> {
self.buf.flush()
}
}
impl<T: Writer> Terminal<T> for WinConsole<T> {
fn new(out: T) -> Option<WinConsole<T>> {
Some(WinConsole { buf: out, foreground: color::WHITE, background: color::BLACK })
}
fn fg(&mut self, color: color::Color) -> IoResult<bool> {
self.foreground = color;
self.apply();
Ok(true)
}
fn bg(&mut self, color: color::Color) -> IoResult<bool> {
self.background = color;
self.apply();
Ok(true)
}
fn attr(&mut self, attr: attr::Attr) -> IoResult<bool> {
match attr {
attr::ForegroundColor(f) => {
self.foreground = f;
self.apply();
Ok(true)
},
attr::BackgroundColor(b) => {
self.background = b;
self.apply();
Ok(true)
},
_ => Ok(false)
}
}
fn supports_attr(&self, attr: attr::Attr) -> bool {
// it claims support for underscore and reverse video, but I can't get
// it to do anything -cmr
match attr {
attr::ForegroundColor(_) | attr::BackgroundColor(_) => true,
_ => false
}
}
fn reset(&mut self) -> IoResult<()> {
self.foreground = color::WHITE;
self.background = color::BLACK;
self.apply();
Ok(())
}
fn unwrap(self) -> T { self.buf }
fn get_ref<'a>(&'a self) -> &'a T { &self.buf }
fn get_mut<'a>(&'a mut self) -> &'a mut T { &mut self.buf }
}
|
apply
|
identifier_name
|
22-code.rs
|
fn
|
() {
let x = 5u8;
let y = "Cold".to_string();
call_method(x);
call_method(y);
// dynamit dispatch
let x1: u8 = 1;
let y1: String = "Cool".to_string();
let x1obj = TraitObj {
data: &x1,
vtable: &F1_for_u8_vtable
};
let y1obj = TraitObj {
data: &y1,
vtable: &F1_for_String_vtable
};
(x1obj.vtable.method)(x1.data);
(y1obj.vtable.method)(y1.data);
}
trait F1 {
fn method(&self) -> String;
}
impl F1 for u8 {
fn method(&self) -> String { format!("u8 : {}", *self) }
// * 什么意思
}
impl F1 for String {
fn method(&self) -> String { format!("string : {}", *self) }
}
fn call_method<T:F1>(x: T) {
println!("{}", x.method());
}
// dynamic dispatch
pub struct TraitObj {
// two pointer
pub data: *mut (), // addresses the data of type T
pub vtable: *mut (), // vtable(virtual method table) Foo 在 T 上的实现
// a struct of function pointers, pointing to the concrete piece of machine code for each method in the implementation
}
struct F1Vtable {
destructor: fn(*mut ()),
size: usize,
align: usize,
mtehod: fn(*const ()) -> String,
}
fn call_method_on_u8(x: *const ()) -> String {
let byte: &u8 = unsafe { &*(x as *const u8) };
byte.method()
}
static F1_for_u8_vtable: F1Vtable = F1Vtable {
destructor: /* */,
size: 1,
align: 1,
method: call_method_on_u8 as fn(*const ()) -> String,
}
fn call_method_on_String(x: *const ()) -> String {
let string: &String = unsafe { &*(x as *const String) };
string.method()
}
static F1_for_String_vtable: F1Vtable = F1Vtable {
destructor: /* */,
size: 24,
align: 8,
method: call_method_on_String as fn(*const ()) -> String,
}
|
main
|
identifier_name
|
22-code.rs
|
fn main() {
let x = 5u8;
let y = "Cold".to_string();
call_method(x);
call_method(y);
// dynamit dispatch
let x1: u8 = 1;
let y1: String = "Cool".to_string();
let x1obj = TraitObj {
data: &x1,
vtable: &F1_for_u8_vtable
};
let y1obj = TraitObj {
data: &y1,
vtable: &F1_for_String_vtable
};
(x1obj.vtable.method)(x1.data);
(y1obj.vtable.method)(y1.data);
}
trait F1 {
fn method(&self) -> String;
}
impl F1 for u8 {
fn method(&self) -> String { format!("u8 : {}", *self) }
// * 什么意思
}
impl F1 for String {
fn method(&self) -> String { format!("string : {}", *self) }
}
fn call_method<T:F1>(x: T) {
println!("{}", x.method());
}
// dynamic dispatch
pub struct TraitObj {
// two pointer
pub data: *mut (), // addresses the data of type T
pub vtable: *mut (), // vtable(virtual method table) Foo 在 T 上的实现
// a struct of function pointers, pointing to the concrete piece of machine code for each method in the implementation
}
struct F1Vtable {
destructor: fn(*mut ()),
size: usize,
align: usize,
mtehod: fn(*const ()) -> String,
}
fn call_method_on_u8(x: *const ()) -> String {
let byte: &u
|
_vtable: F1Vtable = F1Vtable {
destructor: /* */,
size: 1,
align: 1,
method: call_method_on_u8 as fn(*const ()) -> String,
}
fn call_method_on_String(x: *const ()) -> String {
let string: &String = unsafe { &*(x as *const String) };
string.method()
}
static F1_for_String_vtable: F1Vtable = F1Vtable {
destructor: /* */,
size: 24,
align: 8,
method: call_method_on_String as fn(*const ()) -> String,
}
|
8 = unsafe { &*(x as *const u8) };
byte.method()
}
static F1_for_u8
|
identifier_body
|
22-code.rs
|
fn main() {
let x = 5u8;
let y = "Cold".to_string();
call_method(x);
call_method(y);
// dynamit dispatch
let x1: u8 = 1;
let y1: String = "Cool".to_string();
let x1obj = TraitObj {
data: &x1,
vtable: &F1_for_u8_vtable
};
let y1obj = TraitObj {
data: &y1,
vtable: &F1_for_String_vtable
};
(x1obj.vtable.method)(x1.data);
(y1obj.vtable.method)(y1.data);
}
trait F1 {
fn method(&self) -> String;
}
impl F1 for u8 {
fn method(&self) -> String { format!("u8 : {}", *self) }
// * 什么意思
}
|
fn call_method<T:F1>(x: T) {
println!("{}", x.method());
}
// dynamic dispatch
pub struct TraitObj {
// two pointer
pub data: *mut (), // addresses the data of type T
pub vtable: *mut (), // vtable(virtual method table) Foo 在 T 上的实现
// a struct of function pointers, pointing to the concrete piece of machine code for each method in the implementation
}
struct F1Vtable {
destructor: fn(*mut ()),
size: usize,
align: usize,
mtehod: fn(*const ()) -> String,
}
fn call_method_on_u8(x: *const ()) -> String {
let byte: &u8 = unsafe { &*(x as *const u8) };
byte.method()
}
static F1_for_u8_vtable: F1Vtable = F1Vtable {
destructor: /* */,
size: 1,
align: 1,
method: call_method_on_u8 as fn(*const ()) -> String,
}
fn call_method_on_String(x: *const ()) -> String {
let string: &String = unsafe { &*(x as *const String) };
string.method()
}
static F1_for_String_vtable: F1Vtable = F1Vtable {
destructor: /* */,
size: 24,
align: 8,
method: call_method_on_String as fn(*const ()) -> String,
}
|
impl F1 for String {
fn method(&self) -> String { format!("string : {}", *self) }
}
|
random_line_split
|
context.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::{BTreeMap, Vec};
use spin::Mutex;
use arch;
use context::file::File;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use syscall::data::Event;
use sync::{WaitMap, WaitQueue};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Status {
Runnable,
Blocked,
Exited(usize)
}
/// A context, which identifies either a process or a thread
#[derive(Debug)]
pub struct Context {
/// The ID of this context
pub id: usize,
/// The ID of the parent context
pub ppid: usize,
/// The real user id
pub ruid: u32,
/// The real group id
pub rgid: u32,
/// The effective user id
pub euid: u32,
/// The effective group id
pub egid: u32,
/// Status of context
pub status: Status,
/// Context running or not
pub running: bool,
/// CPU ID, if locked
pub cpu_id: Option<usize>,
/// Context is halting parent
pub vfork: bool,
/// Context is being waited on
pub waitpid: Arc<WaitMap<usize, usize>>,
/// Context should wake up at specified time
pub wake: Option<(u64, u64)>,
/// The architecture specific context
pub arch: arch::context::Context,
/// Kernel FX
pub kfx: Option<Box<[u8]>>,
/// Kernel stack
pub kstack: Option<Box<[u8]>>,
/// Executable image
pub image: Vec<SharedMemory>,
/// User heap
pub heap: Option<SharedMemory>,
/// User stack
pub stack: Option<Memory>,
/// User Tls
pub tls: Option<Tls>,
/// User grants
pub grants: Arc<Mutex<Vec<Grant>>>,
/// The name of the context
pub name: Arc<Mutex<Vec<u8>>>,
/// The current working directory
pub cwd: Arc<Mutex<Vec<u8>>>,
/// Kernel events
pub events: Arc<WaitQueue<Event>>,
/// The process environment
pub env: Arc<Mutex<BTreeMap<Box<[u8]>, Arc<Mutex<Vec<u8>>>>>>,
/// The open files in the scheme
pub files: Arc<Mutex<Vec<Option<File>>>>
}
impl Context {
/// Create a new context
pub fn new(id: usize) -> Context {
Context {
id: id,
ppid: 0,
ruid: 0,
rgid: 0,
euid: 0,
egid: 0,
status: Status::Blocked,
running: false,
cpu_id: None,
vfork: false,
waitpid: Arc::new(WaitMap::new()),
wake: None,
arch: arch::context::Context::new(),
kfx: None,
kstack: None,
image: Vec::new(),
heap: None,
stack: None,
tls: None,
grants: Arc::new(Mutex::new(Vec::new())),
name: Arc::new(Mutex::new(Vec::new())),
cwd: Arc::new(Mutex::new(Vec::new())),
events: Arc::new(WaitQueue::new()),
env: Arc::new(Mutex::new(BTreeMap::new())),
files: Arc::new(Mutex::new(Vec::new()))
}
}
pub fn canonicalize(&self, path: &[u8]) -> Vec<u8> {
if path.iter().position(|&b| b == b':').is_none() {
let cwd = self.cwd.lock();
if path == b"." {
cwd.clone()
} else if path == b".." {
cwd[..cwd[..cwd.len() - 1]
.iter().rposition(|&b| b == b'/' || b == b':')
.map_or(cwd.len(), |i| i + 1)]
.to_vec()
} else if path.starts_with(b"./") {
let mut canon = cwd.clone();
if! canon.ends_with(b"/") {
canon.push(b'/');
}
canon.extend_from_slice(&path[2..]);
canon
} else if path.starts_with(b"../") {
let mut canon = cwd[..cwd[..cwd.len() - 1]
.iter().rposition(|&b| b == b'/' || b == b':')
.map_or(cwd.len(), |i| i + 1)]
.to_vec();
canon.extend_from_slice(&path[3..]);
canon
} else if path.starts_with(b"/") {
let mut canon = cwd[..cwd.iter().position(|&b| b == b':').map_or(1, |i| i + 1)].to_vec();
canon.extend_from_slice(&path);
canon
} else {
let mut canon = cwd.clone();
if! canon.ends_with(b"/") {
canon.push(b'/');
}
canon.extend_from_slice(&path);
canon
}
} else {
path.to_vec()
}
}
pub fn block(&mut self) -> bool {
if self.status == Status::Runnable {
self.status = Status::Blocked;
true
} else {
false
}
}
pub fn unblock(&mut self) -> bool {
if self.status == Status::Blocked {
self.status = Status::Runnable;
if let Some(cpu_id) = self.cpu_id {
if cpu_id!= ::cpu_id() {
// Send IPI if not on current CPU
// TODO: Make this more architecture independent
unsafe { arch::device::local_apic::LOCAL_APIC.ipi(cpu_id) };
}
}
true
} else {
false
}
}
/// Add a file to the lowest available slot.
/// Return the file descriptor number or None if no slot was found
pub fn add_file(&self, file: File) -> Option<usize> {
let mut files = self.files.lock();
for (i, mut file_option) in files.iter_mut().enumerate() {
if file_option.is_none() {
*file_option = Some(file);
return Some(i);
}
}
let len = files.len();
if len < super::CONTEXT_MAX_FILES {
files.push(Some(file));
Some(len)
} else {
None
}
}
/// Get a file
pub fn get_file(&self, i: usize) -> Option<File> {
let files = self.files.lock();
if i < files.len() {
files[i]
} else {
|
/// Remove a file
// TODO: adjust files vector to smaller size if possible
pub fn remove_file(&self, i: usize) -> Option<File> {
let mut files = self.files.lock();
if i < files.len() {
files[i].take()
} else {
None
}
}
}
|
None
}
}
|
random_line_split
|
context.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::{BTreeMap, Vec};
use spin::Mutex;
use arch;
use context::file::File;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use syscall::data::Event;
use sync::{WaitMap, WaitQueue};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum
|
{
Runnable,
Blocked,
Exited(usize)
}
/// A context, which identifies either a process or a thread
#[derive(Debug)]
pub struct Context {
/// The ID of this context
pub id: usize,
/// The ID of the parent context
pub ppid: usize,
/// The real user id
pub ruid: u32,
/// The real group id
pub rgid: u32,
/// The effective user id
pub euid: u32,
/// The effective group id
pub egid: u32,
/// Status of context
pub status: Status,
/// Context running or not
pub running: bool,
/// CPU ID, if locked
pub cpu_id: Option<usize>,
/// Context is halting parent
pub vfork: bool,
/// Context is being waited on
pub waitpid: Arc<WaitMap<usize, usize>>,
/// Context should wake up at specified time
pub wake: Option<(u64, u64)>,
/// The architecture specific context
pub arch: arch::context::Context,
/// Kernel FX
pub kfx: Option<Box<[u8]>>,
/// Kernel stack
pub kstack: Option<Box<[u8]>>,
/// Executable image
pub image: Vec<SharedMemory>,
/// User heap
pub heap: Option<SharedMemory>,
/// User stack
pub stack: Option<Memory>,
/// User Tls
pub tls: Option<Tls>,
/// User grants
pub grants: Arc<Mutex<Vec<Grant>>>,
/// The name of the context
pub name: Arc<Mutex<Vec<u8>>>,
/// The current working directory
pub cwd: Arc<Mutex<Vec<u8>>>,
/// Kernel events
pub events: Arc<WaitQueue<Event>>,
/// The process environment
pub env: Arc<Mutex<BTreeMap<Box<[u8]>, Arc<Mutex<Vec<u8>>>>>>,
/// The open files in the scheme
pub files: Arc<Mutex<Vec<Option<File>>>>
}
impl Context {
/// Create a new context
pub fn new(id: usize) -> Context {
Context {
id: id,
ppid: 0,
ruid: 0,
rgid: 0,
euid: 0,
egid: 0,
status: Status::Blocked,
running: false,
cpu_id: None,
vfork: false,
waitpid: Arc::new(WaitMap::new()),
wake: None,
arch: arch::context::Context::new(),
kfx: None,
kstack: None,
image: Vec::new(),
heap: None,
stack: None,
tls: None,
grants: Arc::new(Mutex::new(Vec::new())),
name: Arc::new(Mutex::new(Vec::new())),
cwd: Arc::new(Mutex::new(Vec::new())),
events: Arc::new(WaitQueue::new()),
env: Arc::new(Mutex::new(BTreeMap::new())),
files: Arc::new(Mutex::new(Vec::new()))
}
}
pub fn canonicalize(&self, path: &[u8]) -> Vec<u8> {
if path.iter().position(|&b| b == b':').is_none() {
let cwd = self.cwd.lock();
if path == b"." {
cwd.clone()
} else if path == b".." {
cwd[..cwd[..cwd.len() - 1]
.iter().rposition(|&b| b == b'/' || b == b':')
.map_or(cwd.len(), |i| i + 1)]
.to_vec()
} else if path.starts_with(b"./") {
let mut canon = cwd.clone();
if! canon.ends_with(b"/") {
canon.push(b'/');
}
canon.extend_from_slice(&path[2..]);
canon
} else if path.starts_with(b"../") {
let mut canon = cwd[..cwd[..cwd.len() - 1]
.iter().rposition(|&b| b == b'/' || b == b':')
.map_or(cwd.len(), |i| i + 1)]
.to_vec();
canon.extend_from_slice(&path[3..]);
canon
} else if path.starts_with(b"/") {
let mut canon = cwd[..cwd.iter().position(|&b| b == b':').map_or(1, |i| i + 1)].to_vec();
canon.extend_from_slice(&path);
canon
} else {
let mut canon = cwd.clone();
if! canon.ends_with(b"/") {
canon.push(b'/');
}
canon.extend_from_slice(&path);
canon
}
} else {
path.to_vec()
}
}
pub fn block(&mut self) -> bool {
if self.status == Status::Runnable {
self.status = Status::Blocked;
true
} else {
false
}
}
pub fn unblock(&mut self) -> bool {
if self.status == Status::Blocked {
self.status = Status::Runnable;
if let Some(cpu_id) = self.cpu_id {
if cpu_id!= ::cpu_id() {
// Send IPI if not on current CPU
// TODO: Make this more architecture independent
unsafe { arch::device::local_apic::LOCAL_APIC.ipi(cpu_id) };
}
}
true
} else {
false
}
}
/// Add a file to the lowest available slot.
/// Return the file descriptor number or None if no slot was found
pub fn add_file(&self, file: File) -> Option<usize> {
let mut files = self.files.lock();
for (i, mut file_option) in files.iter_mut().enumerate() {
if file_option.is_none() {
*file_option = Some(file);
return Some(i);
}
}
let len = files.len();
if len < super::CONTEXT_MAX_FILES {
files.push(Some(file));
Some(len)
} else {
None
}
}
/// Get a file
pub fn get_file(&self, i: usize) -> Option<File> {
let files = self.files.lock();
if i < files.len() {
files[i]
} else {
None
}
}
/// Remove a file
// TODO: adjust files vector to smaller size if possible
pub fn remove_file(&self, i: usize) -> Option<File> {
let mut files = self.files.lock();
if i < files.len() {
files[i].take()
} else {
None
}
}
}
|
Status
|
identifier_name
|
context.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::{BTreeMap, Vec};
use spin::Mutex;
use arch;
use context::file::File;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use syscall::data::Event;
use sync::{WaitMap, WaitQueue};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Status {
Runnable,
Blocked,
Exited(usize)
}
/// A context, which identifies either a process or a thread
#[derive(Debug)]
pub struct Context {
/// The ID of this context
pub id: usize,
/// The ID of the parent context
pub ppid: usize,
/// The real user id
pub ruid: u32,
/// The real group id
pub rgid: u32,
/// The effective user id
pub euid: u32,
/// The effective group id
pub egid: u32,
/// Status of context
pub status: Status,
/// Context running or not
pub running: bool,
/// CPU ID, if locked
pub cpu_id: Option<usize>,
/// Context is halting parent
pub vfork: bool,
/// Context is being waited on
pub waitpid: Arc<WaitMap<usize, usize>>,
/// Context should wake up at specified time
pub wake: Option<(u64, u64)>,
/// The architecture specific context
pub arch: arch::context::Context,
/// Kernel FX
pub kfx: Option<Box<[u8]>>,
/// Kernel stack
pub kstack: Option<Box<[u8]>>,
/// Executable image
pub image: Vec<SharedMemory>,
/// User heap
pub heap: Option<SharedMemory>,
/// User stack
pub stack: Option<Memory>,
/// User Tls
pub tls: Option<Tls>,
/// User grants
pub grants: Arc<Mutex<Vec<Grant>>>,
/// The name of the context
pub name: Arc<Mutex<Vec<u8>>>,
/// The current working directory
pub cwd: Arc<Mutex<Vec<u8>>>,
/// Kernel events
pub events: Arc<WaitQueue<Event>>,
/// The process environment
pub env: Arc<Mutex<BTreeMap<Box<[u8]>, Arc<Mutex<Vec<u8>>>>>>,
/// The open files in the scheme
pub files: Arc<Mutex<Vec<Option<File>>>>
}
impl Context {
/// Create a new context
pub fn new(id: usize) -> Context {
Context {
id: id,
ppid: 0,
ruid: 0,
rgid: 0,
euid: 0,
egid: 0,
status: Status::Blocked,
running: false,
cpu_id: None,
vfork: false,
waitpid: Arc::new(WaitMap::new()),
wake: None,
arch: arch::context::Context::new(),
kfx: None,
kstack: None,
image: Vec::new(),
heap: None,
stack: None,
tls: None,
grants: Arc::new(Mutex::new(Vec::new())),
name: Arc::new(Mutex::new(Vec::new())),
cwd: Arc::new(Mutex::new(Vec::new())),
events: Arc::new(WaitQueue::new()),
env: Arc::new(Mutex::new(BTreeMap::new())),
files: Arc::new(Mutex::new(Vec::new()))
}
}
pub fn canonicalize(&self, path: &[u8]) -> Vec<u8> {
if path.iter().position(|&b| b == b':').is_none() {
let cwd = self.cwd.lock();
if path == b"." {
cwd.clone()
} else if path == b".." {
cwd[..cwd[..cwd.len() - 1]
.iter().rposition(|&b| b == b'/' || b == b':')
.map_or(cwd.len(), |i| i + 1)]
.to_vec()
} else if path.starts_with(b"./") {
let mut canon = cwd.clone();
if! canon.ends_with(b"/") {
canon.push(b'/');
}
canon.extend_from_slice(&path[2..]);
canon
} else if path.starts_with(b"../") {
let mut canon = cwd[..cwd[..cwd.len() - 1]
.iter().rposition(|&b| b == b'/' || b == b':')
.map_or(cwd.len(), |i| i + 1)]
.to_vec();
canon.extend_from_slice(&path[3..]);
canon
} else if path.starts_with(b"/") {
let mut canon = cwd[..cwd.iter().position(|&b| b == b':').map_or(1, |i| i + 1)].to_vec();
canon.extend_from_slice(&path);
canon
} else {
let mut canon = cwd.clone();
if! canon.ends_with(b"/") {
canon.push(b'/');
}
canon.extend_from_slice(&path);
canon
}
} else {
path.to_vec()
}
}
pub fn block(&mut self) -> bool {
if self.status == Status::Runnable {
self.status = Status::Blocked;
true
} else {
false
}
}
pub fn unblock(&mut self) -> bool {
if self.status == Status::Blocked {
self.status = Status::Runnable;
if let Some(cpu_id) = self.cpu_id {
if cpu_id!= ::cpu_id() {
// Send IPI if not on current CPU
// TODO: Make this more architecture independent
unsafe { arch::device::local_apic::LOCAL_APIC.ipi(cpu_id) };
}
}
true
} else
|
}
/// Add a file to the lowest available slot.
/// Return the file descriptor number or None if no slot was found
pub fn add_file(&self, file: File) -> Option<usize> {
let mut files = self.files.lock();
for (i, mut file_option) in files.iter_mut().enumerate() {
if file_option.is_none() {
*file_option = Some(file);
return Some(i);
}
}
let len = files.len();
if len < super::CONTEXT_MAX_FILES {
files.push(Some(file));
Some(len)
} else {
None
}
}
/// Get a file
pub fn get_file(&self, i: usize) -> Option<File> {
let files = self.files.lock();
if i < files.len() {
files[i]
} else {
None
}
}
/// Remove a file
// TODO: adjust files vector to smaller size if possible
pub fn remove_file(&self, i: usize) -> Option<File> {
let mut files = self.files.lock();
if i < files.len() {
files[i].take()
} else {
None
}
}
}
|
{
false
}
|
conditional_block
|
build_gecko.rs
|
}
#[cfg(feature = "bindgen")]
mod bindings {
use bindgen::{Builder, CodegenConfig};
use bindgen::callbacks::{EnumVariantCustomBehavior, EnumVariantValue, ParseCallbacks};
use regex::{Regex, RegexSet};
use std::cmp;
use std::collections::{HashSet, HashMap};
use std::env;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, exit};
use std::slice;
use std::sync::Mutex;
use std::time::SystemTime;
use super::common::*;
use super::super::PYTHON;
use toml;
use toml::value::Table;
const STRUCTS_FILE: &'static str = "structs.rs";
const BINDINGS_FILE: &'static str = "bindings.rs";
fn read_config(path: &PathBuf) -> Table {
println!("cargo:rerun-if-changed={}", path.to_str().unwrap());
update_last_modified(&path);
let mut contents = String::new();
File::open(path).expect("Failed to open config file")
.read_to_string(&mut contents).expect("Failed to read config file");
match toml::from_str::<toml::value::Table>(&contents) {
Ok(result) => result,
Err(e) => panic!("Failed to parse config file: {}", e)
}
}
lazy_static! {
static ref CONFIG: Table = {
// Load Gecko's binding generator config from the source tree.
let path = PathBuf::from(env::var_os("MOZ_SRC").unwrap())
.join("layout/style/ServoBindings.toml");
read_config(&path)
};
static ref BUILD_CONFIG: Table = {
// Load build-specific config overrides.
// FIXME: We should merge with CONFIG above instead of
// forcing callers to do it.
let path = PathBuf::from(env::var_os("MOZ_TOPOBJDIR").unwrap())
.join("layout/style/bindgen.toml");
read_config(&path)
};
static ref TARGET_INFO: HashMap<String, String> = {
const TARGET_PREFIX: &'static str = "CARGO_CFG_TARGET_";
let mut result = HashMap::new();
for (k, v) in env::vars() {
if k.starts_with(TARGET_PREFIX) {
result.insert(k[TARGET_PREFIX.len()..].to_lowercase(), v);
}
}
result
};
static ref INCLUDE_RE: Regex = Regex::new(r#"#include\s*"(.+?)""#).unwrap();
static ref DISTDIR_PATH: PathBuf = {
let path = PathBuf::from(env::var_os("MOZ_DIST").unwrap());
if!path.is_absolute() ||!path.is_dir() {
panic!("MOZ_DIST must be an absolute directory, was: {}", path.display());
}
path
};
static ref SEARCH_PATHS: Vec<PathBuf> = vec![
DISTDIR_PATH.join("include"),
DISTDIR_PATH.join("include/nspr"),
];
static ref ADDED_PATHS: Mutex<HashSet<PathBuf>> = Mutex::new(HashSet::new());
static ref LAST_MODIFIED: Mutex<SystemTime> =
Mutex::new(get_modified_time(&env::current_exe().unwrap())
.expect("Failed to get modified time of executable"));
}
fn get_modified_time(file: &Path) -> Option<SystemTime> {
file.metadata().and_then(|m| m.modified()).ok()
}
fn update_last_modified(file: &Path) {
let modified = get_modified_time(file)
.expect("Couldn't get file modification time");
let mut last_modified = LAST_MODIFIED.lock().unwrap();
*last_modified = cmp::max(modified, *last_modified);
}
fn search_include(name: &str) -> Option<PathBuf> {
for path in SEARCH_PATHS.iter() {
let file = path.join(name);
if file.is_file() {
update_last_modified(&file);
return Some(file);
}
}
None
}
fn add_headers_recursively(path: PathBuf, added_paths: &mut HashSet<PathBuf>) {
if added_paths.contains(&path) {
return;
}
let mut file = File::open(&path).unwrap();
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
println!("cargo:rerun-if-changed={}", path.to_str().unwrap());
added_paths.insert(path);
// Find all includes and add them recursively
for cap in INCLUDE_RE.captures_iter(&content) {
if let Some(path) = search_include(cap.get(1).unwrap().as_str()) {
add_headers_recursively(path, added_paths);
}
}
}
fn add_include(name: &str) -> String {
let mut added_paths = ADDED_PATHS.lock().unwrap();
let file = search_include(name).expect("Include not found!");
let result = String::from(file.to_str().unwrap());
add_headers_recursively(file, &mut *added_paths);
result
}
trait BuilderExt {
fn get_initial_builder() -> Builder;
fn include<T: Into<String>>(self, file: T) -> Builder;
fn zero_size_type(self, ty: &str, structs_list: &HashSet<&str>) -> Builder;
fn borrowed_type(self, ty: &str) -> Builder;
fn mutable_borrowed_type(self, ty: &str) -> Builder;
}
fn add_clang_args(mut builder: Builder, config: &Table, matched_os: &mut bool) -> Builder {
fn add_args(mut builder: Builder, values: &[toml::Value]) -> Builder {
for item in values.iter() {
builder = builder.clang_arg(item.as_str().expect("Expect string in list"));
}
builder
}
for (k, v) in config.iter() {
if k == "args" {
builder = add_args(builder, v.as_array().unwrap().as_slice());
continue;
}
let equal_idx = k.find('=').expect(&format!("Invalid key: {}", k));
let (target_type, target_value) = k.split_at(equal_idx);
if TARGET_INFO[target_type]!= target_value[1..] {
continue;
}
if target_type == "os" {
*matched_os = true;
}
builder = match *v {
toml::Value::Table(ref table) => add_clang_args(builder, table, matched_os),
toml::Value::Array(ref array) => add_args(builder, array),
_ => panic!("Unknown type"),
};
}
builder
}
impl BuilderExt for Builder {
fn get_initial_builder() -> Builder {
use bindgen::RustTarget;
// Disable rust unions, because we replace some types inside of
// them.
let mut builder = Builder::default()
.rust_target(RustTarget::Stable_1_0);
let rustfmt_path = env::var_os("MOZ_AUTOMATION").and_then(|_| {
env::var_os("TOOLTOOL_DIR").or_else(|| env::var_os("MOZ_SRC"))
}).map(PathBuf::from);
builder = match rustfmt_path {
Some(path) => {
builder.with_rustfmt(path.join("rustc").join("bin").join("rustfmt"))
},
None => {
builder.rustfmt_bindings(env::var_os("STYLO_RUSTFMT_BINDINGS").is_some())
}
};
for dir in SEARCH_PATHS.iter() {
builder = builder.clang_arg("-I").clang_arg(dir.to_str().unwrap());
}
builder = builder.include(add_include("mozilla-config.h"));
if env::var("CARGO_FEATURE_GECKO_DEBUG").is_ok() {
builder = builder.clang_arg("-DDEBUG=1").clang_arg("-DJS_DEBUG=1");
}
let mut matched_os = false;
let build_config = CONFIG["build"].as_table().expect("Malformed config file");
builder = add_clang_args(builder, build_config, &mut matched_os);
let build_config = BUILD_CONFIG["build"].as_table().expect("Malformed config file");
builder = add_clang_args(builder, build_config, &mut matched_os);
if!matched_os {
panic!("Unknown platform");
}
builder
}
fn include<T: Into<String>>(self, file: T) -> Builder {
self.clang_arg("-include").clang_arg(file)
}
// This makes an FFI-safe void type that can't be matched on
// &VoidType is UB to have, because you can match on it
// to produce a reachable unreachable. If it's wrapped in
// a struct as a private field it becomes okay again
//
// Not 100% sure of how safe this is, but it's what we're using
// in the XPCOM ffi too
// https://github.com/nikomatsakis/rust-memory-model/issues/2
fn zero_size_type(self, ty: &str, structs_list: &HashSet<&str>) -> Builder {
if!structs_list.contains(ty) {
self.blacklist_type(ty)
.raw_line(format!("enum {}Void {{ }}", ty))
.raw_line(format!("pub struct {0}({0}Void);", ty))
} else {
self
}
}
fn borrowed_type(self, ty: &str) -> Builder {
self.blacklist_type(format!("{}Borrowed", ty))
.raw_line(format!("pub type {0}Borrowed<'a> = &'a {0};", ty))
.blacklist_type(format!("{}BorrowedOrNull", ty))
.raw_line(format!("pub type {0}BorrowedOrNull<'a> = Option<&'a {0}>;", ty))
}
fn mutable_borrowed_type(self, ty: &str) -> Builder {
self.borrowed_type(ty)
.blacklist_type(format!("{}BorrowedMut", ty))
.raw_line(format!("pub type {0}BorrowedMut<'a> = &'a mut {0};", ty))
.blacklist_type(format!("{}BorrowedMutOrNull", ty))
.raw_line(format!("pub type {0}BorrowedMutOrNull<'a> = Option<&'a mut {0}>;", ty))
}
}
struct Fixup {
pat: String,
rep: String
}
fn write_binding_file(builder: Builder, file: &str, fixups: &[Fixup]) {
let out_file = OUTDIR_PATH.join(file);
if let Some(modified) = get_modified_time(&out_file) {
// Don't generate the file if nothing it depends on was modified.
let last_modified = LAST_MODIFIED.lock().unwrap();
if *last_modified <= modified {
return;
}
}
let command_line_opts = builder.command_line_flags();
let result = builder.generate();
let mut result = match result {
Ok(bindings) => bindings.to_string(),
Err(_) => {
panic!("Failed to generate bindings, flags: {:?}", command_line_opts);
},
};
for fixup in fixups.iter() {
result = Regex::new(&fixup.pat).unwrap().replace_all(&result, &*fixup.rep)
.into_owned().into();
}
let bytes = result.into_bytes();
File::create(&out_file).unwrap().write_all(&bytes).expect("Unable to write output");
}
fn get_arc_types() -> Vec<String> {
// Read the file
let mut list_file = File::open(DISTDIR_PATH.join("include/mozilla/ServoArcTypeList.h"))
.expect("Unable to open ServoArcTypeList.h");
let mut content = String::new();
list_file.read_to_string(&mut content).expect("Fail to read ServoArcTypeList.h");
// Remove comments
let block_comment_re = Regex::new(r#"(?s)/\*.*?\*/"#).unwrap();
let content = block_comment_re.replace_all(&content, "");
// Extract the list
let re = Regex::new(r#"^SERVO_ARC_TYPE\(\w+,\s*(\w+)\)$"#).unwrap();
content.lines().map(|line| line.trim()).filter(|line|!line.is_empty())
.map(|line| re.captures(&line)
.expect(&format!("Unrecognized line in ServoArcTypeList.h: '{}'", line))
.get(1).unwrap().as_str().to_string())
.collect()
}
struct BuilderWithConfig<'a> {
builder: Builder,
config: &'a Table,
used_keys: HashSet<&'static str>,
}
impl<'a> BuilderWithConfig<'a> {
fn new(builder: Builder, config: &'a Table) -> Self {
BuilderWithConfig {
builder, config,
used_keys: HashSet::new(),
}
}
fn handle_list<F>(self, key: &'static str, func: F) -> BuilderWithConfig<'a>
where F: FnOnce(Builder, slice::Iter<'a, toml::Value>) -> Builder {
let mut builder = self.builder;
let config = self.config;
let mut used_keys = self.used_keys;
if let Some(list) = config.get(key) {
used_keys.insert(key);
builder = func(builder, list.as_array().unwrap().as_slice().iter());
}
BuilderWithConfig { builder, config, used_keys }
}
fn handle_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
where F: FnMut(Builder, &'a toml::Value) -> Builder {
self.handle_list(key, |b, iter| iter.fold(b, |b, item| func(b, item)))
}
fn handle_str_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
where F: FnMut(Builder, &'a str) -> Builder {
self.handle_items(key, |b, item| func(b, item.as_str().unwrap()))
}
fn handle_table_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
where
F: FnMut(Builder, &'a Table) -> Builder
{
self.handle_items(key, |b, item| func(b, item.as_table().unwrap()))
}
fn handle_common(self, fixups: &mut Vec<Fixup>) -> BuilderWithConfig<'a> {
self.handle_str_items("headers", |b, item| b.header(add_include(item)))
.handle_str_items("raw-lines", |b, item| b.raw_line(item))
.handle_str_items("hide-types", |b, item| b.blacklist_type(item))
.handle_table_items("fixups", |builder, item| {
fixups.push(Fixup {
pat: item["pat"].as_str().unwrap().into(),
rep: item["rep"].as_str().unwrap().into(),
});
builder
})
}
fn get_builder(self) -> Builder {
for key in self.config.keys() {
if!self.used_keys.contains(key.as_str()) {
panic!(format!("Unknown key: {}", key));
}
}
self.builder
}
}
fn generate_structs() {
#[derive(Debug)]
struct Callbacks(HashMap<String, RegexSet>);
impl ParseCallbacks for Callbacks {
fn enum_variant_behavior(&self,
enum_name: Option<&str>,
variant_name: &str,
_variant_value: EnumVariantValue)
-> Option<EnumVariantCustomBehavior> {
enum_name.and_then(|enum_name| self.0.get(enum_name))
.and_then(|regex| if regex.is_match(variant_name) {
Some(EnumVariantCustomBehavior::Constify)
} else {
None
})
}
}
let builder = Builder::get_initial_builder()
.enable_cxx_namespaces()
.with_codegen_config(CodegenConfig {
types: true,
vars: true,
..CodegenConfig::nothing()
});
let mut fixups = vec![];
let builder = BuilderWithConfig::new(builder, CONFIG["structs"].as_table().unwrap())
.handle_common(&mut fixups)
.handle_str_items("bitfield-enums", |b, item| b.bitfield_enum(item))
.handle_str_items("rusty-enums", |b, item| b.rustified_enum(item))
.handle_str_items("whitelist-vars", |b, item| b.whitelist_var(item))
.handle_str_items("whitelist-types", |b, item| b.whitelist_type(item))
.handle_str_items("opaque-types", |b, item| b.opaque_type(item))
.handle_list("constified-enum-variants", |builder, iter| {
let mut map = HashMap::new();
for item in iter {
let item = item.as_table().unwrap();
let name = item["enum"].as_str().unwrap();
let variants = item["variants"].as_array().unwrap().as_slice().iter()
.map(|item| item.as_str().unwrap());
map.insert(name.into(), RegexSet::new(variants).unwrap());
}
builder.parse_callbacks(Box::new(Callbacks(map)))
})
.handle_table_items("mapped-generic-types", |builder, item| {
let generic = item["generic"].as_bool().unwrap();
let gecko = item["gecko"].as_str().unwrap();
let servo = item["servo"].as_str().unwrap();
let gecko_name = gecko.rsplit("::").next().unwrap();
let
|
{
let to = to.as_ref();
for entry in from.as_ref().read_dir()? {
let entry = entry?;
let path = entry.path();
callback(&path);
fs::copy(&path, to.join(entry.file_name()))?;
}
Ok(())
}
|
identifier_body
|
|
build_gecko.rs
|
}
};
for dir in SEARCH_PATHS.iter() {
builder = builder.clang_arg("-I").clang_arg(dir.to_str().unwrap());
}
builder = builder.include(add_include("mozilla-config.h"));
if env::var("CARGO_FEATURE_GECKO_DEBUG").is_ok() {
builder = builder.clang_arg("-DDEBUG=1").clang_arg("-DJS_DEBUG=1");
}
let mut matched_os = false;
let build_config = CONFIG["build"].as_table().expect("Malformed config file");
builder = add_clang_args(builder, build_config, &mut matched_os);
let build_config = BUILD_CONFIG["build"].as_table().expect("Malformed config file");
builder = add_clang_args(builder, build_config, &mut matched_os);
if!matched_os {
panic!("Unknown platform");
}
builder
}
fn include<T: Into<String>>(self, file: T) -> Builder {
self.clang_arg("-include").clang_arg(file)
}
// This makes an FFI-safe void type that can't be matched on
// &VoidType is UB to have, because you can match on it
// to produce a reachable unreachable. If it's wrapped in
// a struct as a private field it becomes okay again
//
// Not 100% sure of how safe this is, but it's what we're using
// in the XPCOM ffi too
// https://github.com/nikomatsakis/rust-memory-model/issues/2
fn zero_size_type(self, ty: &str, structs_list: &HashSet<&str>) -> Builder {
if!structs_list.contains(ty) {
self.blacklist_type(ty)
.raw_line(format!("enum {}Void {{ }}", ty))
.raw_line(format!("pub struct {0}({0}Void);", ty))
} else {
self
}
}
fn borrowed_type(self, ty: &str) -> Builder {
self.blacklist_type(format!("{}Borrowed", ty))
.raw_line(format!("pub type {0}Borrowed<'a> = &'a {0};", ty))
.blacklist_type(format!("{}BorrowedOrNull", ty))
.raw_line(format!("pub type {0}BorrowedOrNull<'a> = Option<&'a {0}>;", ty))
}
fn mutable_borrowed_type(self, ty: &str) -> Builder {
self.borrowed_type(ty)
.blacklist_type(format!("{}BorrowedMut", ty))
.raw_line(format!("pub type {0}BorrowedMut<'a> = &'a mut {0};", ty))
.blacklist_type(format!("{}BorrowedMutOrNull", ty))
.raw_line(format!("pub type {0}BorrowedMutOrNull<'a> = Option<&'a mut {0}>;", ty))
}
}
struct Fixup {
pat: String,
rep: String
}
fn write_binding_file(builder: Builder, file: &str, fixups: &[Fixup]) {
let out_file = OUTDIR_PATH.join(file);
if let Some(modified) = get_modified_time(&out_file) {
// Don't generate the file if nothing it depends on was modified.
let last_modified = LAST_MODIFIED.lock().unwrap();
if *last_modified <= modified {
return;
}
}
let command_line_opts = builder.command_line_flags();
let result = builder.generate();
let mut result = match result {
Ok(bindings) => bindings.to_string(),
Err(_) => {
panic!("Failed to generate bindings, flags: {:?}", command_line_opts);
},
};
for fixup in fixups.iter() {
result = Regex::new(&fixup.pat).unwrap().replace_all(&result, &*fixup.rep)
.into_owned().into();
}
let bytes = result.into_bytes();
File::create(&out_file).unwrap().write_all(&bytes).expect("Unable to write output");
}
fn get_arc_types() -> Vec<String> {
// Read the file
let mut list_file = File::open(DISTDIR_PATH.join("include/mozilla/ServoArcTypeList.h"))
.expect("Unable to open ServoArcTypeList.h");
let mut content = String::new();
list_file.read_to_string(&mut content).expect("Fail to read ServoArcTypeList.h");
// Remove comments
let block_comment_re = Regex::new(r#"(?s)/\*.*?\*/"#).unwrap();
let content = block_comment_re.replace_all(&content, "");
// Extract the list
let re = Regex::new(r#"^SERVO_ARC_TYPE\(\w+,\s*(\w+)\)$"#).unwrap();
content.lines().map(|line| line.trim()).filter(|line|!line.is_empty())
.map(|line| re.captures(&line)
.expect(&format!("Unrecognized line in ServoArcTypeList.h: '{}'", line))
.get(1).unwrap().as_str().to_string())
.collect()
}
struct BuilderWithConfig<'a> {
builder: Builder,
config: &'a Table,
used_keys: HashSet<&'static str>,
}
impl<'a> BuilderWithConfig<'a> {
    /// Creates a wrapper over `builder` driven by the given TOML `config`.
    fn new(builder: Builder, config: &'a Table) -> Self {
        BuilderWithConfig {
            builder, config,
            used_keys: HashSet::new(),
        }
    }
    /// If `key` is present in the config, marks it as consumed and runs
    /// `func` over the iterator of the array stored under it.
    fn handle_list<F>(self, key: &'static str, func: F) -> BuilderWithConfig<'a>
    where F: FnOnce(Builder, slice::Iter<'a, toml::Value>) -> Builder {
        let mut builder = self.builder;
        let config = self.config;
        let mut used_keys = self.used_keys;
        if let Some(list) = config.get(key) {
            used_keys.insert(key);
            builder = func(builder, list.as_array().unwrap().as_slice().iter());
        }
        BuilderWithConfig { builder, config, used_keys }
    }
    /// Applies `func` to each item of the array stored under `key`.
    fn handle_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
    where F: FnMut(Builder, &'a toml::Value) -> Builder {
        self.handle_list(key, |b, iter| iter.fold(b, |b, item| func(b, item)))
    }
    /// Like `handle_items`, but each item must be a TOML string.
    fn handle_str_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
    where F: FnMut(Builder, &'a str) -> Builder {
        self.handle_items(key, |b, item| func(b, item.as_str().unwrap()))
    }
    /// Like `handle_items`, but each item must be a TOML table.
    fn handle_table_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
    where
        F: FnMut(Builder, &'a Table) -> Builder
    {
        self.handle_items(key, |b, item| func(b, item.as_table().unwrap()))
    }
    /// Handles the config keys shared by the structs and bindings sections:
    /// extra headers, raw output lines, hidden types, and post-generation
    /// regex fixups (which are appended to `fixups`).
    fn handle_common(self, fixups: &mut Vec<Fixup>) -> BuilderWithConfig<'a> {
        self.handle_str_items("headers", |b, item| b.header(add_include(item)))
            .handle_str_items("raw-lines", |b, item| b.raw_line(item))
            .handle_str_items("hide-types", |b, item| b.blacklist_type(item))
            .handle_table_items("fixups", |builder, item| {
                fixups.push(Fixup {
                    pat: item["pat"].as_str().unwrap().into(),
                    rep: item["rep"].as_str().unwrap().into(),
                });
                builder
            })
    }
    /// Returns the configured builder, panicking if the config table contains
    /// any key that no handle_* call consumed (i.e. a typo in the config).
    fn get_builder(self) -> Builder {
        for key in self.config.keys() {
            if !self.used_keys.contains(key.as_str()) {
                // Pass format args to panic! directly; `panic!(format!(...))`
                // is non-idiomatic and is rejected by the 2021 edition.
                panic!("Unknown key: {}", key);
            }
        }
        self.builder
    }
}
/// Generates the gecko structs bindings: configures a bindgen builder from the
/// "structs" section of the TOML config and writes the result via
/// `write_binding_file`.
fn generate_structs() {
#[derive(Debug)]
// Maps enum names to regex sets of variant names that should be emitted as
// constants instead of enum variants.
struct Callbacks(HashMap<String, RegexSet>);
impl ParseCallbacks for Callbacks {
fn enum_variant_behavior(&self,
enum_name: Option<&str>,
variant_name: &str,
_variant_value: EnumVariantValue)
-> Option<EnumVariantCustomBehavior> {
// Constify only variants whose enum is listed and whose name matches.
enum_name.and_then(|enum_name| self.0.get(enum_name))
.and_then(|regex| if regex.is_match(variant_name) {
Some(EnumVariantCustomBehavior::Constify)
} else {
None
})
}
}
// Only generate types and variables (no functions) for the structs file.
let builder = Builder::get_initial_builder()
.enable_cxx_namespaces()
.with_codegen_config(CodegenConfig {
types: true,
vars: true,
..CodegenConfig::nothing()
});
let mut fixups = vec![];
// Drive the builder from the [structs] section of the config.
let builder = BuilderWithConfig::new(builder, CONFIG["structs"].as_table().unwrap())
.handle_common(&mut fixups)
.handle_str_items("bitfield-enums", |b, item| b.bitfield_enum(item))
.handle_str_items("rusty-enums", |b, item| b.rustified_enum(item))
.handle_str_items("whitelist-vars", |b, item| b.whitelist_var(item))
.handle_str_items("whitelist-types", |b, item| b.whitelist_type(item))
.handle_str_items("opaque-types", |b, item| b.opaque_type(item))
.handle_list("constified-enum-variants", |builder, iter| {
// Build the enum-name -> variant-regex map consumed by Callbacks.
let mut map = HashMap::new();
for item in iter {
let item = item.as_table().unwrap();
let name = item["enum"].as_str().unwrap();
let variants = item["variants"].as_array().unwrap().as_slice().iter()
.map(|item| item.as_str().unwrap());
map.insert(name.into(), RegexSet::new(variants).unwrap());
}
builder.parse_callbacks(Box::new(Callbacks(map)))
})
.handle_table_items("mapped-generic-types", |builder, item| {
// Replace a Gecko type with a Servo-side (possibly generic) alias,
// and add a regex fixup rewriting references in the generated code.
let generic = item["generic"].as_bool().unwrap();
let gecko = item["gecko"].as_str().unwrap();
let servo = item["servo"].as_str().unwrap();
let gecko_name = gecko.rsplit("::").next().unwrap();
// Allow arbitrary whitespace around each path segment in the fixup.
let gecko = gecko.split("::")
.map(|s| format!("\\s*{}\\s*", s))
.collect::<Vec<_>>()
.join("::");
fixups.push(Fixup {
pat: format!("\\broot\\s*::\\s*{}\\b", gecko),
rep: format!("::gecko_bindings::structs::{}", gecko_name)
});
builder.blacklist_type(gecko)
.raw_line(format!("pub type {0}{2} = {1}{2};", gecko_name, servo,
if generic { "<T>" } else { "" }))
})
.get_builder();
write_binding_file(builder, STRUCTS_FILE, &fixups);
}
/// Installs a file-backed logger when STYLO_BUILD_LOG is set; returns whether
/// logging was enabled. STYLO_BUILD_FILTER (default "bindgen") restricts which
/// log targets are written.
fn setup_logging() -> bool {
use log;
// Logger writing matching records to the file given by STYLO_BUILD_LOG.
struct BuildLogger {
// None if the log file could not be created; disables logging.
file: Option<Mutex<fs::File>>,
// Substring that a record's target must contain to be logged.
filter: String,
}
impl log::Log for BuildLogger {
fn enabled(&self, meta: &log::LogMetadata) -> bool {
self.file.is_some() && meta.target().contains(&self.filter)
}
fn log(&self, record: &log::LogRecord) {
if!self.enabled(record.metadata()) {
return;
}
let mut file = self.file.as_ref().unwrap().lock().unwrap();
// Write errors are deliberately ignored; logging is best-effort.
let _ =
writeln!(file, "{} - {} - {} @ {}:{}",
record.level(),
record.target(),
record.args(),
record.location().file(),
record.location().line());
}
}
if let Some(path) = env::var_os("STYLO_BUILD_LOG") {
log::set_logger(|log_level| {
log_level.set(log::LogLevelFilter::Debug);
Box::new(BuildLogger {
// File creation failure leaves `file` as None (logger disabled).
file: fs::File::create(path).ok().map(Mutex::new),
filter: env::var("STYLO_BUILD_FILTER").ok()
.unwrap_or_else(|| "bindgen".to_owned()),
})
})
.expect("Failed to set logger.");
true
} else {
false
}
}
/// Generates the gecko function bindings: configures a bindgen builder from
/// the "bindings" section of the TOML config, adds Strong/Borrowed/Owned
/// wrapper aliases for the Arc types, and writes the result via
/// `write_binding_file`.
fn generate_bindings() {
// Only generate functions here; types come from the structs file.
let builder = Builder::get_initial_builder()
.disable_name_namespacing()
.with_codegen_config(CodegenConfig {
functions: true,
..CodegenConfig::nothing()
});
let config = CONFIG["bindings"].as_table().unwrap();
// Names of types imported from the structs module; consulted by
// zero_size_type to avoid re-declaring them as opaque.
let mut structs_types = HashSet::new();
let mut fixups = vec![];
let mut builder = BuilderWithConfig::new(builder, config)
.handle_common(&mut fixups)
.handle_str_items("whitelist-functions", |b, item| b.whitelist_function(item))
.handle_str_items("structs-types", |mut builder, ty| {
// Re-export these types from the structs module instead of
// generating them again.
builder = builder.blacklist_type(ty)
.raw_line(format!("use gecko_bindings::structs::{};", ty))
;
structs_types.insert(ty);
// TODO this is hacky, figure out a better way to do it without
// hardcoding everything...
if ty.starts_with("nsStyle") {
builder = builder
.raw_line(format!("unsafe impl Send for {} {{}}", ty))
.raw_line(format!("unsafe impl Sync for {} {{}}", ty));
}
builder
})
// TODO This was added due to servo/rust-bindgen#75, but
// that has been fixed in clang 4.0+. When we switch people
// to libclang 4.0, we can remove this.
.handle_table_items("array-types", |builder, item| {
let cpp_type = item["cpp-type"].as_str().unwrap();
let rust_type = item["rust-type"].as_str().unwrap();
builder
.raw_line(format!(concat!("pub type nsTArrayBorrowed_{}<'a> = ",
"&'a mut ::gecko_bindings::structs::nsTArray<{}>;"),
cpp_type, rust_type))
})
.handle_table_items("servo-owned-types", |mut builder, item| {
// Emit Owned/OwnedOrNull aliases plus mutable borrow aliases for
// Servo-owned types; "opaque" entries also get a zero-size stand-in.
let name = item["name"].as_str().unwrap();
builder = builder.blacklist_type(format!("{}Owned", name))
.raw_line(format!("pub type {0}Owned = ::gecko_bindings::sugar::ownership::Owned<{0}>;", name))
.blacklist_type(format!("{}OwnedOrNull", name))
.raw_line(format!(concat!("pub type {0}OwnedOrNull = ",
"::gecko_bindings::sugar::ownership::OwnedOrNull<{0}>;"), name))
.mutable_borrowed_type(name);
if item["opaque"].as_bool().unwrap() {
builder = builder.zero_size_type(name, &structs_types);
}
builder
})
.handle_str_items("servo-immutable-borrow-types", |b, ty| b.borrowed_type(ty))
// Right now the only immutable borrow types are ones which we import
// from the |structs| module. As such, we don't need to create an opaque
// type with zero_size_type. If we ever introduce immutable borrow types
// which _do_ need to be opaque, we'll need a separate mode.
.handle_str_items("servo-borrow-types", |b, ty| b.mutable_borrowed_type(ty))
.get_builder();
// Arc types (from ServoArcTypeList.h) get Strong aliases, borrow aliases,
// and an opaque zero-size definition when not imported from structs.
for ty in get_arc_types().iter() {
builder = builder
.blacklist_type(format!("{}Strong", ty))
.raw_line(format!("pub type {0}Strong = ::gecko_bindings::sugar::ownership::Strong<{0}>;", ty))
.borrowed_type(ty)
.zero_size_type(ty, &structs_types);
}
write_binding_file(builder, BINDINGS_FILE, &fixups);
}
/// Regenerates the Gecko atom tables by running gecko/regen_atoms.py with the
/// dist and output directories; terminates the build with a failure exit code
/// if the script does not succeed.
fn generate_atoms() {
    let script_path = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap())
        .join("gecko")
        .join("regen_atoms.py");
    // Re-run the build script whenever the generator script changes.
    println!("cargo:rerun-if-changed={}", script_path.display());
    let status = Command::new(&*PYTHON)
        .arg(&script_path)
        .arg(DISTDIR_PATH.as_os_str())
        .arg(OUTDIR_PATH.as_os_str())
        .status()
        .unwrap();
    if status.success() {
        return;
    }
    exit(1);
}
pub fn generate() {
|
use std::thread;
macro_rules! run_tasks {
($($task:expr,)+) => {
|
random_line_split
|
|
build_gecko.rs
|
);
}
}
result
};
static ref INCLUDE_RE: Regex = Regex::new(r#"#include\s*"(.+?)""#).unwrap();
static ref DISTDIR_PATH: PathBuf = {
let path = PathBuf::from(env::var_os("MOZ_DIST").unwrap());
if!path.is_absolute() ||!path.is_dir() {
panic!("MOZ_DIST must be an absolute directory, was: {}", path.display());
}
path
};
static ref SEARCH_PATHS: Vec<PathBuf> = vec![
DISTDIR_PATH.join("include"),
DISTDIR_PATH.join("include/nspr"),
];
static ref ADDED_PATHS: Mutex<HashSet<PathBuf>> = Mutex::new(HashSet::new());
static ref LAST_MODIFIED: Mutex<SystemTime> =
Mutex::new(get_modified_time(&env::current_exe().unwrap())
.expect("Failed to get modified time of executable"));
}
/// Returns the last-modification time of `file`, or `None` when the metadata
/// (or the mtime itself) is unavailable.
fn get_modified_time(file: &Path) -> Option<SystemTime> {
    match file.metadata() {
        Ok(meta) => meta.modified().ok(),
        Err(_) => None,
    }
}
fn update_last_modified(file: &Path) {
let modified = get_modified_time(file)
.expect("Couldn't get file modification time");
let mut last_modified = LAST_MODIFIED.lock().unwrap();
*last_modified = cmp::max(modified, *last_modified);
}
/// Looks up `name` in each of SEARCH_PATHS and returns the first existing
/// file, recording its mtime in the global LAST_MODIFIED watermark.
fn search_include(name: &str) -> Option<PathBuf> {
    let found = SEARCH_PATHS
        .iter()
        .map(|dir| dir.join(name))
        .find(|candidate| candidate.is_file())?;
    update_last_modified(&found);
    Some(found)
}
/// Registers `path` as a cargo rerun-if-changed dependency and recursively
/// does the same for every header it #include-s that can be resolved via
/// `search_include`. `added_paths` de-duplicates across the recursion.
fn add_headers_recursively(path: PathBuf, added_paths: &mut HashSet<PathBuf>) {
if added_paths.contains(&path) {
return;
}
let mut file = File::open(&path).unwrap();
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
// Make cargo re-run the build script when this header changes.
println!("cargo:rerun-if-changed={}", path.to_str().unwrap())
;
added_paths.insert(path);
// Find all includes and add them recursively
for cap in INCLUDE_RE.captures_iter(&content) {
if let Some(path) = search_include(cap.get(1).unwrap().as_str()) {
add_headers_recursively(path, added_paths);
}
}
}
/// Resolves header `name` against the include search paths, registers it and
/// its transitive includes as build dependencies, and returns its full path.
/// Panics if the header cannot be found.
fn add_include(name: &str) -> String {
    // Hold the global set lock for the whole registration, as the original
    // did, so concurrent callers don't interleave.
    let mut added = ADDED_PATHS.lock().unwrap();
    let header = search_include(name).expect("Include not found!");
    let header_str = String::from(header.to_str().unwrap());
    add_headers_recursively(header, &mut *added);
    header_str
}
/// Project-specific helpers layered on top of the bindgen `Builder`.
trait BuilderExt {
// Builds the base Builder with clang args, rustfmt setup, and platform
// config applied.
fn get_initial_builder() -> Builder;
// Adds a forced `-include <file>` clang argument.
fn include<T: Into<String>>(self, file: T) -> Builder;
// Replaces `ty` with an opaque zero-size stand-in unless it is already
// imported from the structs module (see impl for the safety discussion).
fn zero_size_type(self, ty: &str, structs_list: &HashSet<&str>) -> Builder;
// Emits `{ty}Borrowed`/`{ty}BorrowedOrNull` reference aliases.
fn borrowed_type(self, ty: &str) -> Builder;
// Like borrowed_type, plus the mutable `BorrowedMut` variants.
fn mutable_borrowed_type(self, ty: &str) -> Builder;
}
/// Adds clang arguments from a (possibly nested) TOML table. Plain "args"
/// lists apply unconditionally; any other key must look like "kind=value"
/// (e.g. "os=linux") and is applied only when it matches TARGET_INFO.
/// Sets `*matched_os` when an "os=..." key matched, so the caller can detect
/// unsupported platforms.
fn add_clang_args(mut builder: Builder, config: &Table, matched_os: &mut bool) -> Builder {
// Appends each string in `values` as a clang argument.
fn add_args(mut builder: Builder, values: &[toml::Value]) -> Builder {
for item in values.iter() {
builder = builder.clang_arg(item.as_str().expect("Expect string in list"));
}
builder
}
for (k, v) in config.iter() {
if k == "args" {
builder = add_args(builder, v.as_array().unwrap().as_slice());
continue;
}
// Split "kind=value" and skip entries that don't match this target.
let equal_idx = k.find('=').expect(&format!("Invalid key: {}", k));
let (target_type, target_value) = k.split_at(equal_idx);
if TARGET_INFO[target_type]!= target_value[1..] {
continue;
}
if target_type == "os" {
*matched_os = true;
}
// Nested tables recurse; arrays are plain argument lists.
builder = match *v {
toml::Value::Table(ref table) => add_clang_args(builder, table, matched_os),
toml::Value::Array(ref array) => add_args(builder, array),
_ => panic!("Unknown type"),
};
}
builder
}
impl BuilderExt for Builder {
/// Builds the shared base Builder: Rust 1.0 target (no unions), rustfmt
/// configuration, include search paths, mozilla-config.h, debug defines,
/// and per-platform clang args from both config tables. Panics if no
/// "os=..." entry matched the current platform.
fn get_initial_builder() -> Builder {
use bindgen::RustTarget;
// Disable rust unions, because we replace some types inside of
// them.
let mut builder = Builder::default()
.rust_target(RustTarget::Stable_1_0);
// In automation, rustfmt is taken from TOOLTOOL_DIR or MOZ_SRC;
// otherwise formatting is opt-in via STYLO_RUSTFMT_BINDINGS.
let rustfmt_path = env::var_os("MOZ_AUTOMATION").and_then(|_| {
env::var_os("TOOLTOOL_DIR").or_else(|| env::var_os("MOZ_SRC"))
}).map(PathBuf::from);
builder = match rustfmt_path {
Some(path) => {
builder.with_rustfmt(path.join("rustc").join("bin").join("rustfmt"))
},
None => {
builder.rustfmt_bindings(env::var_os("STYLO_RUSTFMT_BINDINGS").is_some())
}
};
for dir in SEARCH_PATHS.iter() {
builder = builder.clang_arg("-I").clang_arg(dir.to_str().unwrap());
}
builder = builder.include(add_include("mozilla-config.h"));
if env::var("CARGO_FEATURE_GECKO_DEBUG").is_ok() {
builder = builder.clang_arg("-DDEBUG=1").clang_arg("-DJS_DEBUG=1");
}
let mut matched_os = false;
let build_config = CONFIG["build"].as_table().expect("Malformed config file");
builder = add_clang_args(builder, build_config, &mut matched_os);
let build_config = BUILD_CONFIG["build"].as_table().expect("Malformed config file");
builder = add_clang_args(builder, build_config, &mut matched_os);
if!matched_os {
panic!("Unknown platform");
}
builder
}
/// Forces `file` to be included first via clang's `-include` flag.
fn include<T: Into<String>>(self, file: T) -> Builder {
self.clang_arg("-include").clang_arg(file)
}
// This makes an FFI-safe void type that can't be matched on
// &VoidType is UB to have, because you can match on it
// to produce a reachable unreachable. If it's wrapped in
// a struct as a private field it becomes okay again
//
// Not 100% sure of how safe this is, but it's what we're using
// in the XPCOM ffi too
// https://github.com/nikomatsakis/rust-memory-model/issues/2
fn zero_size_type(self, ty: &str, structs_list: &HashSet<&str>) -> Builder {
// Types already imported from structs keep their real definition.
if!structs_list.contains(ty) {
self.blacklist_type(ty)
.raw_line(format!("enum {}Void {{ }}", ty))
.raw_line(format!("pub struct {0}({0}Void);", ty))
} else {
self
}
}
/// Replaces the FFI `{ty}Borrowed`/`{ty}BorrowedOrNull` types with plain
/// reference / Option-of-reference aliases.
fn borrowed_type(self, ty: &str) -> Builder {
self.blacklist_type(format!("{}Borrowed", ty))
.raw_line(format!("pub type {0}Borrowed<'a> = &'a {0};", ty))
.blacklist_type(format!("{}BorrowedOrNull", ty))
.raw_line(format!("pub type {0}BorrowedOrNull<'a> = Option<&'a {0}>;", ty))
}
/// borrowed_type plus the mutable `BorrowedMut`/`BorrowedMutOrNull` aliases.
fn mutable_borrowed_type(self, ty: &str) -> Builder {
self.borrowed_type(ty)
.blacklist_type(format!("{}BorrowedMut", ty))
.raw_line(format!("pub type {0}BorrowedMut<'a> = &'a mut {0};", ty))
.blacklist_type(format!("{}BorrowedMutOrNull", ty))
.raw_line(format!("pub type {0}BorrowedMutOrNull<'a> = Option<&'a mut {0}>;", ty))
}
}
/// A regex rewrite applied to the generated bindings text after bindgen runs:
/// every match of `pat` is replaced with `rep` (see `write_binding_file`).
struct Fixup {
// Regex pattern to search for in the generated output.
pat: String,
// Replacement text (may use capture-group references).
rep: String
}
fn write_binding_file(builder: Builder, file: &str, fixups: &[Fixup]) {
let out_file = OUTDIR_PATH.join(file);
if let Some(modified) = get_modified_time(&out_file) {
// Don't generate the file if nothing it depends on was modified.
let last_modified = LAST_MODIFIED.lock().unwrap();
if *last_modified <= modified {
return;
}
}
let command_line_opts = builder.command_line_flags();
let result = builder.generate();
let mut result = match result {
Ok(bindings) => bindings.to_string(),
Err(_) =>
|
,
};
for fixup in fixups.iter() {
result = Regex::new(&fixup.pat).unwrap().replace_all(&result, &*fixup.rep)
.into_owned().into();
}
let bytes = result.into_bytes();
File::create(&out_file).unwrap().write_all(&bytes).expect("Unable to write output");
}
fn get_arc_types() -> Vec<String> {
// Read the file
let mut list_file = File::open(DISTDIR_PATH.join("include/mozilla/ServoArcTypeList.h"))
.expect("Unable to open ServoArcTypeList.h");
let mut content = String::new();
list_file.read_to_string(&mut content).expect("Fail to read ServoArcTypeList.h");
// Remove comments
let block_comment_re = Regex::new(r#"(?s)/\*.*?\*/"#).unwrap();
let content = block_comment_re.replace_all(&content, "");
// Extract the list
let re = Regex::new(r#"^SERVO_ARC_TYPE\(\w+,\s*(\w+)\)$"#).unwrap();
content.lines().map(|line| line.trim()).filter(|line|!line.is_empty())
.map(|line| re.captures(&line)
.expect(&format!("Unrecognized line in ServoArcTypeList.h: '{}'", line))
.get(1).unwrap().as_str().to_string())
.collect()
}
struct BuilderWithConfig<'a> {
builder: Builder,
config: &'a Table,
used_keys: HashSet<&'static str>,
}
impl<'a> BuilderWithConfig<'a> {
fn new(builder: Builder, config: &'a Table) -> Self {
BuilderWithConfig {
builder, config,
used_keys: HashSet::new(),
}
}
fn handle_list<F>(self, key: &'static str, func: F) -> BuilderWithConfig<'a>
where F: FnOnce(Builder, slice::Iter<'a, toml::Value>) -> Builder {
let mut builder = self.builder;
let config = self.config;
let mut used_keys = self.used_keys;
if let Some(list) = config.get(key) {
used_keys.insert(key);
builder = func(builder, list.as_array().unwrap().as_slice().iter());
}
BuilderWithConfig { builder, config, used_keys }
}
fn handle_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
where F: FnMut(Builder, &'a toml::Value) -> Builder {
self.handle_list(key, |b, iter| iter.fold(b, |b, item| func(b, item)))
}
fn handle_str_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
where F: FnMut(Builder, &'a str) -> Builder {
self.handle_items(key, |b, item| func(b, item.as_str().unwrap()))
}
fn handle_table_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
where
F: FnMut(Builder, &'a Table) -> Builder
{
self.handle_items(key, |b, item| func(b, item.as_table().unwrap()))
}
fn handle_common(self, fixups: &mut Vec<Fixup>) -> BuilderWithConfig<'a> {
self.handle_str_items("headers", |b, item| b.header(add_include(item)))
.handle_str_items("raw-lines", |b, item| b.raw_line(item))
.handle_str_items("hide-types", |b, item| b.blacklist_type(item))
.handle_table_items("fixups", |builder, item| {
fixups.push(Fixup {
pat: item["pat"].as_str().unwrap().into(),
rep: item["rep"].as_str().unwrap().into(),
});
builder
})
}
fn get_builder(self) -> Builder {
for key in self.config.keys() {
if!self.used_keys.contains(key.as_str()) {
panic!(format!("Unknown key: {}", key));
}
}
self.builder
}
}
fn generate_structs() {
#[derive(Debug)]
struct Callbacks(HashMap<String, RegexSet>);
impl ParseCallbacks for Callbacks {
fn enum_variant_behavior(&self,
enum_name: Option<&str>,
variant_name: &str,
_variant_value: EnumVariantValue)
-> Option<EnumVariantCustomBehavior> {
enum_name.and_then(|enum_name| self.0.get(enum_name))
.and_then(|regex| if regex.is_match(variant_name) {
Some(EnumVariantCustomBehavior::Constify)
} else {
None
})
}
}
let builder = Builder::get_initial_builder()
.enable_cxx_namespaces()
.with_codegen_config(CodegenConfig {
types: true,
vars: true,
..CodegenConfig::nothing()
});
let mut fixups = vec![];
let builder = BuilderWithConfig::new(builder, CONFIG["structs"].as_table().unwrap())
.handle_common(&mut fixups)
.handle_str_items("bitfield-enums", |b, item| b.bitfield_enum(item))
.handle_str_items("rusty-enums", |b, item| b.rustified_enum(item))
.handle_str_items("whitelist-vars", |b, item| b.whitelist_var(item))
.handle_str_items("whitelist-types", |b, item| b.whitelist_type(item))
.handle_str_items("opaque-types", |b, item| b.opaque_type(item))
.handle_list("constified-enum-variants", |builder, iter| {
let mut map = HashMap::new();
for item in iter {
let item = item.as_table().unwrap();
let name = item["enum"].as_str().unwrap();
let variants = item["variants"].as_array().unwrap().as_slice().iter()
.map(|item| item.as_str().unwrap());
map.insert(name.into(), RegexSet::new(variants).unwrap());
}
builder.parse_callbacks(Box::new(Callbacks(map)))
})
.handle_table_items("mapped-generic-types", |builder, item| {
let generic = item["generic"].as_bool().unwrap();
let gecko = item["gecko"].as_str().unwrap();
let servo = item["servo"].as_str().unwrap();
let gecko_name = gecko.rsplit("::").next().unwrap();
let gecko = gecko.split("::")
.map(|s| format!("\\s*{}\\s*", s))
.collect::<Vec<_>>()
.join("::");
fixups.push(Fixup {
pat: format!("\\broot\\s*::\\s*{}\\b", gecko),
rep: format!("::gecko_bindings::structs::{}", gecko_name)
});
builder.blacklist_type(gecko)
.raw_line(format!("pub type {0}{2} = {1}{2};", gecko_name, servo,
if generic { "<T>" } else { "" }))
})
.get_builder();
write_binding_file(builder, STRUCTS_FILE, &fixups);
}
fn setup_logging() -> bool {
use log;
struct BuildLogger {
file: Option<Mutex<fs::File>>,
filter: String,
}
impl log::Log for BuildLogger {
fn enabled(&self, meta: &log::LogMetadata) -> bool {
self.file.is_some() && meta.target().contains(&self.filter)
}
fn log(&self, record: &log::LogRecord) {
if!self.enabled(record.metadata()) {
return;
}
let mut file = self.file.as_ref().unwrap().lock().unwrap();
let _ =
writeln!(file, "{} - {} - {} @ {}:{}",
record.level(),
record.target(),
record.args(),
record.location().file(),
record.location().line());
}
}
if let Some(path) = env::var_os("STYLO_BUILD_LOG") {
log::set_logger(|log_level| {
log_level.set(log::LogLevelFilter::Debug);
Box::new(BuildLogger {
|
{
panic!("Failed to generate bindings, flags: {:?}", command_line_opts);
}
|
conditional_block
|
build_gecko.rs
|
builder.rustfmt_bindings(env::var_os("STYLO_RUSTFMT_BINDINGS").is_some())
}
};
for dir in SEARCH_PATHS.iter() {
builder = builder.clang_arg("-I").clang_arg(dir.to_str().unwrap());
}
builder = builder.include(add_include("mozilla-config.h"));
if env::var("CARGO_FEATURE_GECKO_DEBUG").is_ok() {
builder = builder.clang_arg("-DDEBUG=1").clang_arg("-DJS_DEBUG=1");
}
let mut matched_os = false;
let build_config = CONFIG["build"].as_table().expect("Malformed config file");
builder = add_clang_args(builder, build_config, &mut matched_os);
let build_config = BUILD_CONFIG["build"].as_table().expect("Malformed config file");
builder = add_clang_args(builder, build_config, &mut matched_os);
if!matched_os {
panic!("Unknown platform");
}
builder
}
fn include<T: Into<String>>(self, file: T) -> Builder {
self.clang_arg("-include").clang_arg(file)
}
// This makes an FFI-safe void type that can't be matched on
// &VoidType is UB to have, because you can match on it
// to produce a reachable unreachable. If it's wrapped in
// a struct as a private field it becomes okay again
//
// Not 100% sure of how safe this is, but it's what we're using
// in the XPCOM ffi too
// https://github.com/nikomatsakis/rust-memory-model/issues/2
fn zero_size_type(self, ty: &str, structs_list: &HashSet<&str>) -> Builder {
if!structs_list.contains(ty) {
self.blacklist_type(ty)
.raw_line(format!("enum {}Void {{ }}", ty))
.raw_line(format!("pub struct {0}({0}Void);", ty))
} else {
self
}
}
fn borrowed_type(self, ty: &str) -> Builder {
self.blacklist_type(format!("{}Borrowed", ty))
.raw_line(format!("pub type {0}Borrowed<'a> = &'a {0};", ty))
.blacklist_type(format!("{}BorrowedOrNull", ty))
.raw_line(format!("pub type {0}BorrowedOrNull<'a> = Option<&'a {0}>;", ty))
}
fn mutable_borrowed_type(self, ty: &str) -> Builder {
self.borrowed_type(ty)
.blacklist_type(format!("{}BorrowedMut", ty))
.raw_line(format!("pub type {0}BorrowedMut<'a> = &'a mut {0};", ty))
.blacklist_type(format!("{}BorrowedMutOrNull", ty))
.raw_line(format!("pub type {0}BorrowedMutOrNull<'a> = Option<&'a mut {0}>;", ty))
}
}
struct Fixup {
pat: String,
rep: String
}
/// Runs bindgen with `builder`, applies the regex `fixups` to the generated
/// text, and writes it to `file` under OUTDIR_PATH. Skips regeneration when
/// the existing output is newer than every tracked input (LAST_MODIFIED).
fn write_binding_file(builder: Builder, file: &str, fixups: &[Fixup]) {
let out_file = OUTDIR_PATH.join(file);
if let Some(modified) = get_modified_time(&out_file) {
// Don't generate the file if nothing it depends on was modified.
let last_modified = LAST_MODIFIED.lock().unwrap();
if *last_modified <= modified {
return;
}
}
// Capture the flags before `generate()` consumes the builder, so the
// panic message can report them on failure.
let command_line_opts = builder.command_line_flags();
let result = builder.generate();
let mut result = match result {
Ok(bindings) => bindings.to_string(),
Err(_) => {
panic!("Failed to generate bindings, flags: {:?}", command_line_opts);
},
};
// Post-process the generated text with each configured regex fixup.
for fixup in fixups.iter() {
result = Regex::new(&fixup.pat).unwrap().replace_all(&result, &*fixup.rep)
.into_owned().into();
}
let bytes = result.into_bytes();
File::create(&out_file).unwrap().write_all(&bytes).expect("Unable to write output");
}
fn get_arc_types() -> Vec<String> {
// Read the file
let mut list_file = File::open(DISTDIR_PATH.join("include/mozilla/ServoArcTypeList.h"))
.expect("Unable to open ServoArcTypeList.h");
let mut content = String::new();
list_file.read_to_string(&mut content).expect("Fail to read ServoArcTypeList.h");
// Remove comments
let block_comment_re = Regex::new(r#"(?s)/\*.*?\*/"#).unwrap();
let content = block_comment_re.replace_all(&content, "");
// Extract the list
let re = Regex::new(r#"^SERVO_ARC_TYPE\(\w+,\s*(\w+)\)$"#).unwrap();
content.lines().map(|line| line.trim()).filter(|line|!line.is_empty())
.map(|line| re.captures(&line)
.expect(&format!("Unrecognized line in ServoArcTypeList.h: '{}'", line))
.get(1).unwrap().as_str().to_string())
.collect()
}
struct BuilderWithConfig<'a> {
builder: Builder,
config: &'a Table,
used_keys: HashSet<&'static str>,
}
impl<'a> BuilderWithConfig<'a> {
fn new(builder: Builder, config: &'a Table) -> Self {
BuilderWithConfig {
builder, config,
used_keys: HashSet::new(),
}
}
fn handle_list<F>(self, key: &'static str, func: F) -> BuilderWithConfig<'a>
where F: FnOnce(Builder, slice::Iter<'a, toml::Value>) -> Builder {
let mut builder = self.builder;
let config = self.config;
let mut used_keys = self.used_keys;
if let Some(list) = config.get(key) {
used_keys.insert(key);
builder = func(builder, list.as_array().unwrap().as_slice().iter());
}
BuilderWithConfig { builder, config, used_keys }
}
fn handle_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
where F: FnMut(Builder, &'a toml::Value) -> Builder {
self.handle_list(key, |b, iter| iter.fold(b, |b, item| func(b, item)))
}
fn handle_str_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
where F: FnMut(Builder, &'a str) -> Builder {
self.handle_items(key, |b, item| func(b, item.as_str().unwrap()))
}
fn handle_table_items<F>(self, key: &'static str, mut func: F) -> BuilderWithConfig<'a>
where
F: FnMut(Builder, &'a Table) -> Builder
{
self.handle_items(key, |b, item| func(b, item.as_table().unwrap()))
}
fn handle_common(self, fixups: &mut Vec<Fixup>) -> BuilderWithConfig<'a> {
self.handle_str_items("headers", |b, item| b.header(add_include(item)))
.handle_str_items("raw-lines", |b, item| b.raw_line(item))
.handle_str_items("hide-types", |b, item| b.blacklist_type(item))
.handle_table_items("fixups", |builder, item| {
fixups.push(Fixup {
pat: item["pat"].as_str().unwrap().into(),
rep: item["rep"].as_str().unwrap().into(),
});
builder
})
}
fn get_builder(self) -> Builder {
for key in self.config.keys() {
if!self.used_keys.contains(key.as_str()) {
panic!(format!("Unknown key: {}", key));
}
}
self.builder
}
}
fn generate_structs() {
#[derive(Debug)]
struct Callbacks(HashMap<String, RegexSet>);
impl ParseCallbacks for Callbacks {
fn enum_variant_behavior(&self,
enum_name: Option<&str>,
variant_name: &str,
_variant_value: EnumVariantValue)
-> Option<EnumVariantCustomBehavior> {
enum_name.and_then(|enum_name| self.0.get(enum_name))
.and_then(|regex| if regex.is_match(variant_name) {
Some(EnumVariantCustomBehavior::Constify)
} else {
None
})
}
}
let builder = Builder::get_initial_builder()
.enable_cxx_namespaces()
.with_codegen_config(CodegenConfig {
types: true,
vars: true,
..CodegenConfig::nothing()
});
let mut fixups = vec![];
let builder = BuilderWithConfig::new(builder, CONFIG["structs"].as_table().unwrap())
.handle_common(&mut fixups)
.handle_str_items("bitfield-enums", |b, item| b.bitfield_enum(item))
.handle_str_items("rusty-enums", |b, item| b.rustified_enum(item))
.handle_str_items("whitelist-vars", |b, item| b.whitelist_var(item))
.handle_str_items("whitelist-types", |b, item| b.whitelist_type(item))
.handle_str_items("opaque-types", |b, item| b.opaque_type(item))
.handle_list("constified-enum-variants", |builder, iter| {
let mut map = HashMap::new();
for item in iter {
let item = item.as_table().unwrap();
let name = item["enum"].as_str().unwrap();
let variants = item["variants"].as_array().unwrap().as_slice().iter()
.map(|item| item.as_str().unwrap());
map.insert(name.into(), RegexSet::new(variants).unwrap());
}
builder.parse_callbacks(Box::new(Callbacks(map)))
})
.handle_table_items("mapped-generic-types", |builder, item| {
let generic = item["generic"].as_bool().unwrap();
let gecko = item["gecko"].as_str().unwrap();
let servo = item["servo"].as_str().unwrap();
let gecko_name = gecko.rsplit("::").next().unwrap();
let gecko = gecko.split("::")
.map(|s| format!("\\s*{}\\s*", s))
.collect::<Vec<_>>()
.join("::");
fixups.push(Fixup {
pat: format!("\\broot\\s*::\\s*{}\\b", gecko),
rep: format!("::gecko_bindings::structs::{}", gecko_name)
});
builder.blacklist_type(gecko)
.raw_line(format!("pub type {0}{2} = {1}{2};", gecko_name, servo,
if generic { "<T>" } else { "" }))
})
.get_builder();
write_binding_file(builder, STRUCTS_FILE, &fixups);
}
fn setup_logging() -> bool {
use log;
struct BuildLogger {
file: Option<Mutex<fs::File>>,
filter: String,
}
impl log::Log for BuildLogger {
fn enabled(&self, meta: &log::LogMetadata) -> bool {
self.file.is_some() && meta.target().contains(&self.filter)
}
fn log(&self, record: &log::LogRecord) {
if!self.enabled(record.metadata()) {
return;
}
let mut file = self.file.as_ref().unwrap().lock().unwrap();
let _ =
writeln!(file, "{} - {} - {} @ {}:{}",
record.level(),
record.target(),
record.args(),
record.location().file(),
record.location().line());
}
}
if let Some(path) = env::var_os("STYLO_BUILD_LOG") {
log::set_logger(|log_level| {
log_level.set(log::LogLevelFilter::Debug);
Box::new(BuildLogger {
file: fs::File::create(path).ok().map(Mutex::new),
filter: env::var("STYLO_BUILD_FILTER").ok()
.unwrap_or_else(|| "bindgen".to_owned()),
})
})
.expect("Failed to set logger.");
true
} else {
false
}
}
fn generate_bindings() {
let builder = Builder::get_initial_builder()
.disable_name_namespacing()
.with_codegen_config(CodegenConfig {
functions: true,
..CodegenConfig::nothing()
});
let config = CONFIG["bindings"].as_table().unwrap();
let mut structs_types = HashSet::new();
let mut fixups = vec![];
let mut builder = BuilderWithConfig::new(builder, config)
.handle_common(&mut fixups)
.handle_str_items("whitelist-functions", |b, item| b.whitelist_function(item))
.handle_str_items("structs-types", |mut builder, ty| {
builder = builder.blacklist_type(ty)
.raw_line(format!("use gecko_bindings::structs::{};", ty));
structs_types.insert(ty);
// TODO this is hacky, figure out a better way to do it without
// hardcoding everything...
if ty.starts_with("nsStyle") {
builder = builder
.raw_line(format!("unsafe impl Send for {} {{}}", ty))
.raw_line(format!("unsafe impl Sync for {} {{}}", ty));
}
builder
})
// TODO This was added due to servo/rust-bindgen#75, but
// that has been fixed in clang 4.0+. When we switch people
// to libclang 4.0, we can remove this.
.handle_table_items("array-types", |builder, item| {
let cpp_type = item["cpp-type"].as_str().unwrap();
let rust_type = item["rust-type"].as_str().unwrap();
builder
.raw_line(format!(concat!("pub type nsTArrayBorrowed_{}<'a> = ",
"&'a mut ::gecko_bindings::structs::nsTArray<{}>;"),
cpp_type, rust_type))
})
.handle_table_items("servo-owned-types", |mut builder, item| {
let name = item["name"].as_str().unwrap();
builder = builder.blacklist_type(format!("{}Owned", name))
.raw_line(format!("pub type {0}Owned = ::gecko_bindings::sugar::ownership::Owned<{0}>;", name))
.blacklist_type(format!("{}OwnedOrNull", name))
.raw_line(format!(concat!("pub type {0}OwnedOrNull = ",
"::gecko_bindings::sugar::ownership::OwnedOrNull<{0}>;"), name))
.mutable_borrowed_type(name);
if item["opaque"].as_bool().unwrap() {
builder = builder.zero_size_type(name, &structs_types);
}
builder
})
.handle_str_items("servo-immutable-borrow-types", |b, ty| b.borrowed_type(ty))
// Right now the only immutable borrow types are ones which we import
// from the |structs| module. As such, we don't need to create an opaque
// type with zero_size_type. If we ever introduce immutable borrow types
// which _do_ need to be opaque, we'll need a separate mode.
.handle_str_items("servo-borrow-types", |b, ty| b.mutable_borrowed_type(ty))
.get_builder();
for ty in get_arc_types().iter() {
builder = builder
.blacklist_type(format!("{}Strong", ty))
.raw_line(format!("pub type {0}Strong = ::gecko_bindings::sugar::ownership::Strong<{0}>;", ty))
.borrowed_type(ty)
.zero_size_type(ty, &structs_types);
}
write_binding_file(builder, BINDINGS_FILE, &fixups);
}
fn generate_atoms() {
let script = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap())
.join("gecko").join("regen_atoms.py");
println!("cargo:rerun-if-changed={}", script.display());
let status = Command::new(&*PYTHON)
.arg(&script)
.arg(DISTDIR_PATH.as_os_str())
.arg(OUTDIR_PATH.as_os_str())
.status()
.unwrap();
if!status.success() {
exit(1);
}
}
pub fn
|
generate
|
identifier_name
|
|
root.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation](http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/root.js).
/// Connection point for all new remote devtools interactions, providing lists of know actors
/// that perform more specific actions (tabs, addons, browser chrome, etc.)
use actor::{Actor, ActorRegistry};
use actors::tab::{TabActor, TabActorMsg};
use protocol::JsonPacketStream;
use serialize::json;
use std::io::TcpStream;
// Capability flags advertised by the root actor in the devtools protocol
// handshake. (Pre-1.0 Rust: #[deriving] is the old spelling of #[derive].)
#[deriving(Encodable)]
struct ActorTraits {
sources: bool,
highlightable: bool,
customHighlighters: Vec<String>,
}
#[deriving(Encodable)]
struct ErrorReply {
from: String,
error: String,
message: String,
}
#[deriving(Encodable)]
struct ListTabsReply {
from: String,
selected: uint,
tabs: Vec<TabActorMsg>,
}
#[deriving(Encodable)]
struct
|
{
from: String,
applicationType: String,
traits: ActorTraits,
}
pub struct RootActor {
pub tabs: Vec<String>,
}
impl Actor for RootActor {
fn name(&self) -> String {
"root".to_string()
}
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &String,
_msg: &json::Object,
stream: &mut TcpStream) -> Result<bool, ()> {
Ok(match msg_type.as_slice() {
"listAddons" => {
let actor = ErrorReply {
from: "root".to_string(),
error: "noAddons".to_string(),
message: "This root actor has no browser addons.".to_string(),
};
stream.write_json_packet(&actor);
true
}
//https://wiki.mozilla.org/Remote_Debugging_Protocol#Listing_Browser_Tabs
"listTabs" => {
let actor = ListTabsReply {
from: "root".to_string(),
selected: 0,
tabs: self.tabs.iter().map(|tab| {
registry.find::<TabActor>(tab.as_slice()).encodable()
}).collect()
};
stream.write_json_packet(&actor);
true
}
_ => false
})
}
}
impl RootActor {
pub fn encodable(&self) -> RootActorMsg {
RootActorMsg {
from: "root".to_string(),
applicationType: "browser".to_string(),
traits: ActorTraits {
sources: true,
highlightable: true,
customHighlighters: vec!("BoxModelHighlighter".to_string()),
},
}
}
}
|
RootActorMsg
|
identifier_name
|
root.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation](http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/root.js).
|
/// that perform more specific actions (tabs, addons, browser chrome, etc.)
use actor::{Actor, ActorRegistry};
use actors::tab::{TabActor, TabActorMsg};
use protocol::JsonPacketStream;
use serialize::json;
use std::io::TcpStream;
#[deriving(Encodable)]
struct ActorTraits {
sources: bool,
highlightable: bool,
customHighlighters: Vec<String>,
}
#[deriving(Encodable)]
struct ErrorReply {
from: String,
error: String,
message: String,
}
#[deriving(Encodable)]
struct ListTabsReply {
from: String,
selected: uint,
tabs: Vec<TabActorMsg>,
}
#[deriving(Encodable)]
struct RootActorMsg {
from: String,
applicationType: String,
traits: ActorTraits,
}
pub struct RootActor {
pub tabs: Vec<String>,
}
impl Actor for RootActor {
fn name(&self) -> String {
"root".to_string()
}
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &String,
_msg: &json::Object,
stream: &mut TcpStream) -> Result<bool, ()> {
Ok(match msg_type.as_slice() {
"listAddons" => {
let actor = ErrorReply {
from: "root".to_string(),
error: "noAddons".to_string(),
message: "This root actor has no browser addons.".to_string(),
};
stream.write_json_packet(&actor);
true
}
//https://wiki.mozilla.org/Remote_Debugging_Protocol#Listing_Browser_Tabs
"listTabs" => {
let actor = ListTabsReply {
from: "root".to_string(),
selected: 0,
tabs: self.tabs.iter().map(|tab| {
registry.find::<TabActor>(tab.as_slice()).encodable()
}).collect()
};
stream.write_json_packet(&actor);
true
}
_ => false
})
}
}
impl RootActor {
pub fn encodable(&self) -> RootActorMsg {
RootActorMsg {
from: "root".to_string(),
applicationType: "browser".to_string(),
traits: ActorTraits {
sources: true,
highlightable: true,
customHighlighters: vec!("BoxModelHighlighter".to_string()),
},
}
}
}
|
/// Connection point for all new remote devtools interactions, providing lists of know actors
|
random_line_split
|
trait-inheritance-num0.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Extending Num and using inherited static methods
use std::num::NumCast;
trait Num {
fn from_int(i: int) -> Self;
fn gt(&self, other: &Self) -> bool;
}
pub trait NumExt: Num + NumCast { }
fn greater_than_one<T:NumExt>(n: &T) -> bool {
n.gt(&NumCast::from(1i).unwrap())
}
pub fn
|
() {}
|
main
|
identifier_name
|
trait-inheritance-num0.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Extending Num and using inherited static methods
use std::num::NumCast;
trait Num {
fn from_int(i: int) -> Self;
fn gt(&self, other: &Self) -> bool;
}
pub trait NumExt: Num + NumCast { }
fn greater_than_one<T:NumExt>(n: &T) -> bool {
n.gt(&NumCast::from(1i).unwrap())
}
pub fn main() {}
|
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
trait-inheritance-num0.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Extending Num and using inherited static methods
use std::num::NumCast;
trait Num {
fn from_int(i: int) -> Self;
fn gt(&self, other: &Self) -> bool;
}
pub trait NumExt: Num + NumCast { }
fn greater_than_one<T:NumExt>(n: &T) -> bool
|
pub fn main() {}
|
{
n.gt(&NumCast::from(1i).unwrap())
}
|
identifier_body
|
struct_visibility.rs
|
/*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
mod my {
#[derive(Debug)]
pub struct
|
{ // pub for made the struct accessible
pub i: u8, // pub for made the field accessible
j: u8, // inaccessible
}
impl Test {
pub fn new() -> Test { // accessible
Test {
i: 0,
j: 0,
}
}
fn test() { // inaccessible
}
}
}
use my::Test; // removes the necessite to call Test like my::Test
fn main() {
let test: Test = Test::new();
}
|
Test
|
identifier_name
|
struct_visibility.rs
|
/*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
mod my {
#[derive(Debug)]
pub struct Test { // pub for made the struct accessible
pub i: u8, // pub for made the field accessible
j: u8, // inaccessible
}
impl Test {
pub fn new() -> Test
|
fn test() { // inaccessible
}
}
}
use my::Test; // removes the necessite to call Test like my::Test
fn main() {
let test: Test = Test::new();
}
|
{ // accessible
Test {
i: 0,
j: 0,
}
}
|
identifier_body
|
struct_visibility.rs
|
/*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
mod my {
#[derive(Debug)]
pub struct Test { // pub for made the struct accessible
pub i: u8, // pub for made the field accessible
j: u8, // inaccessible
}
impl Test {
|
}
}
fn test() { // inaccessible
}
}
}
use my::Test; // removes the necessite to call Test like my::Test
fn main() {
let test: Test = Test::new();
}
|
pub fn new() -> Test { // accessible
Test {
i: 0,
j: 0,
|
random_line_split
|
mod.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Trait Resolution. See the Book for more.
pub use self::SelectionError::*;
pub use self::FulfillmentErrorCode::*;
pub use self::Vtable::*;
pub use self::ObligationCauseCode::*;
use middle::free_region::FreeRegionMap;
use middle::subst;
use middle::ty::{self, HasTypeFlags, Ty};
use middle::ty_fold::TypeFoldable;
use middle::infer::{self, fixup_err_to_string, InferCtxt};
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::{Span, DUMMY_SP};
pub use self::error_reporting::report_fulfillment_errors;
pub use self::error_reporting::report_overflow_error;
pub use self::error_reporting::report_selection_error;
pub use self::error_reporting::suggest_new_overflow_limit;
pub use self::coherence::orphan_check;
pub use self::coherence::overlapping_impls;
pub use self::coherence::OrphanCheckErr;
pub use self::fulfill::{FulfillmentContext, FulfilledPredicates, RegionObligation};
pub use self::project::MismatchedProjectionTypes;
pub use self::project::normalize;
pub use self::project::Normalized;
pub use self::object_safety::is_object_safe;
pub use self::object_safety::object_safety_violations;
pub use self::object_safety::ObjectSafetyViolation;
pub use self::object_safety::MethodViolationCode;
pub use self::object_safety::is_vtable_safe_method;
pub use self::select::SelectionContext;
pub use self::select::SelectionCache;
pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch};
pub use self::select::{MethodMatchedData}; // intentionally don't export variants
pub use self::util::elaborate_predicates;
pub use self::util::get_vtable_index_of_object_method;
pub use self::util::trait_ref_for_builtin_bound;
pub use self::util::predicate_for_trait_def;
pub use self::util::supertraits;
pub use self::util::Supertraits;
pub use self::util::supertrait_def_ids;
pub use self::util::SupertraitDefIds;
pub use self::util::transitive_bounds;
pub use self::util::upcast;
mod coherence;
mod error_reporting;
mod fulfill;
mod project;
mod object_safety;
mod select;
mod util;
/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for
/// which the vtable must be found. The process of finding a vtable is
/// called "resolving" the `Obligation`. This process consists of
/// either identifying an `impl` (e.g., `impl Eq for int`) that
/// provides the required vtable, or else finding a bound that is in
/// scope. The eventual result is usually a `Selection` (defined below).
#[derive(Clone, PartialEq, Eq)]
pub struct Obligation<'tcx, T> {
pub cause: ObligationCause<'tcx>,
pub recursion_depth: usize,
pub predicate: T,
}
pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>;
pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>;
/// Why did we incur this obligation? Used for error reporting.
#[derive(Clone, PartialEq, Eq)]
pub struct ObligationCause<'tcx> {
pub span: Span,
// The id of the fn body that triggered this obligation. This is
// used for region obligations to determine the precise
// environment in which the region obligation should be evaluated
// (in particular, closures can add new assumptions). See the
// field `region_obligations` of the `FulfillmentContext` for more
// information.
pub body_id: ast::NodeId,
pub code: ObligationCauseCode<'tcx>
}
#[derive(Clone, PartialEq, Eq)]
pub enum ObligationCauseCode<'tcx> {
/// Not well classified or should be obvious from span.
MiscObligation,
/// In an impl of trait X for type Y, type Y must
/// also implement all supertraits of X.
ItemObligation(ast::DefId),
/// Obligation incurred due to an object cast.
ObjectCastObligation(/* Object type */ Ty<'tcx>),
/// Various cases where expressions must be sized/copy/etc:
AssignmentLhsSized, // L = X implies that L is Sized
StructInitializerSized, // S {... } must be Sized
VariableType(ast::NodeId), // Type of each variable must be Sized
ReturnType, // Return type must be Sized
RepeatVec, // [T,..n] --> T must be Copy
// Captures of variable the given id by a closure (span is the
// span of the closure)
ClosureCapture(ast::NodeId, Span, ty::BuiltinBound),
// Types of fields (other than the last) in a struct must be sized.
FieldSized,
// static items must have `Sync` type
SharedStatic,
BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
ImplDerivedObligation(DerivedObligationCause<'tcx>),
CompareImplMethodObligation,
}
#[derive(Clone, PartialEq, Eq)]
pub struct DerivedObligationCause<'tcx> {
/// The trait reference of the parent obligation that led to the
/// current obligation. Note that only trait obligations lead to
/// derived obligations, so we just store the trait reference here
/// directly.
parent_trait_ref: ty::PolyTraitRef<'tcx>,
/// The parent trait had this cause
parent_code: Rc<ObligationCauseCode<'tcx>>
}
pub type Obligations<'tcx, O> = Vec<Obligation<'tcx, O>>;
pub type PredicateObligations<'tcx> = Vec<PredicateObligation<'tcx>>;
pub type TraitObligations<'tcx> = Vec<TraitObligation<'tcx>>;
pub type Selection<'tcx> = Vtable<'tcx, PredicateObligation<'tcx>>;
#[derive(Clone,Debug)]
pub enum SelectionError<'tcx> {
Unimplemented,
OutputTypeParameterMismatch(ty::PolyTraitRef<'tcx>,
ty::PolyTraitRef<'tcx>,
ty::TypeError<'tcx>),
TraitNotObjectSafe(ast::DefId),
}
pub struct FulfillmentError<'tcx> {
pub obligation: PredicateObligation<'tcx>,
pub code: FulfillmentErrorCode<'tcx>
}
#[derive(Clone)]
pub enum FulfillmentErrorCode<'tcx> {
CodeSelectionError(SelectionError<'tcx>),
CodeProjectionError(MismatchedProjectionTypes<'tcx>),
CodeAmbiguity,
}
/// When performing resolution, it is typically the case that there
/// can be one of three outcomes:
///
/// - `Ok(Some(r))`: success occurred with result `r`
/// - `Ok(None)`: could not definitely determine anything, usually due
/// to inconclusive type inference.
/// - `Err(e)`: error `e` occurred
pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
/// Given the successful resolution of an obligation, the `Vtable`
/// indicates where the vtable comes from. Note that while we call this
/// a "vtable", it does not necessarily indicate dynamic dispatch at
/// runtime. `Vtable` instances just tell the compiler where to find
/// methods, but in generic code those methods are typically statically
/// dispatched -- only when an object is constructed is a `Vtable`
/// instance reified into an actual vtable.
///
/// For example, the vtable may be tied to a specific impl (case A),
/// or it may be relative to some bound that is in scope (case B).
///
///
/// ```
/// impl<T:Clone> Clone<T> for Option<T> {... } // Impl_1
/// impl<T:Clone> Clone<T> for Box<T> {... } // Impl_2
/// impl Clone for int {... } // Impl_3
///
/// fn foo<T:Clone>(concrete: Option<Box<int>>,
/// param: T,
/// mixed: Option<T>) {
///
/// // Case A: Vtable points at a specific impl. Only possible when
/// // type is concretely known. If the impl itself has bounded
/// // type parameters, Vtable will carry resolutions for those as well:
/// concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
///
/// // Case B: Vtable must be provided by caller. This applies when
/// // type is a type parameter.
/// param.clone(); // VtableParam
///
/// // Case C: A mix of cases A and B.
/// mixed.clone(); // Vtable(Impl_1, [VtableParam])
/// }
/// ```
///
/// ### The type parameter `N`
///
/// See explanation on `VtableImplData`.
#[derive(Clone)]
pub enum Vtable<'tcx, N> {
/// Vtable identifying a particular impl.
VtableImpl(VtableImplData<'tcx, N>),
/// Vtable for default trait implementations
/// This carries the information and nested obligations with regards
/// to a default implementation for a trait `Trait`. The nested obligations
/// ensure the trait implementation holds for all the constituent types.
VtableDefaultImpl(VtableDefaultImplData<N>),
/// Successful resolution to an obligation provided by the caller
/// for some type parameter. The `Vec<N>` represents the
/// obligations incurred from normalizing the where-clause (if
/// any).
VtableParam(Vec<N>),
/// Virtual calls through an object
VtableObject(VtableObjectData<'tcx>),
/// Successful resolution for a builtin trait.
VtableBuiltin(VtableBuiltinData<N>),
/// Vtable automatically generated for a closure. The def ID is the ID
/// of the closure expression. This is a `VtableImpl` in spirit, but the
/// impl is generated by the compiler and does not appear in the source.
VtableClosure(VtableClosureData<'tcx, N>),
/// Same as above, but for a fn pointer type with the given signature.
VtableFnPointer(ty::Ty<'tcx>),
}
/// Identifies a particular impl in the source, along with a set of
/// substitutions from the impl's type/lifetime parameters. The
/// `nested` vector corresponds to the nested obligations attached to
/// the impl's type parameters.
///
/// The type parameter `N` indicates the type used for "nested
/// obligations" that are required by the impl. During type check, this
/// is `Obligation`, as one might expect. During trans, however, this
/// is `()`, because trans only requires a shallow resolution of an
/// impl, and nested obligations are satisfied later.
#[derive(Clone, PartialEq, Eq)]
pub struct VtableImplData<'tcx, N> {
pub impl_def_id: ast::DefId,
pub substs: subst::Substs<'tcx>,
pub nested: Vec<N>
}
#[derive(Clone, PartialEq, Eq)]
pub struct VtableClosureData<'tcx, N> {
pub closure_def_id: ast::DefId,
pub substs: subst::Substs<'tcx>,
/// Nested obligations. This can be non-empty if the closure
/// signature contains associated types.
pub nested: Vec<N>
}
#[derive(Clone)]
pub struct VtableDefaultImplData<N> {
pub trait_def_id: ast::DefId,
pub nested: Vec<N>
}
#[derive(Clone)]
pub struct VtableBuiltinData<N> {
pub nested: Vec<N>
}
/// A vtable for some object-safe trait `Foo` automatically derived
/// for the object type `Foo`.
#[derive(PartialEq,Eq,Clone)]
pub struct VtableObjectData<'tcx> {
/// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
/// The vtable is formed by concatenating together the method lists of
/// the base object trait and all supertraits; this is the start of
/// `upcast_trait_ref`'s methods in that vtable.
pub vtable_base: usize
}
/// Creates predicate obligations from the generic bounds.
pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>,
generic_bounds: &ty::InstantiatedPredicates<'tcx>)
-> PredicateObligations<'tcx>
{
util::predicates_for_generics(cause, 0, generic_bounds)
}
/// Determines whether the type `ty` is known to meet `bound` and
/// returns true if so. Returns false if `ty` either does not meet
/// `bound` or is not known to meet bound (note that this is
/// conservative towards *no impl*, which is the opposite of the
/// `evaluate` methods).
pub fn type_known_to_meet_builtin_bound<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
ty: Ty<'tcx>,
bound: ty::BuiltinBound,
span: Span)
-> bool
{
debug!("type_known_to_meet_builtin_bound(ty={:?}, bound={:?})",
ty,
bound);
let mut fulfill_cx = FulfillmentContext::new(false);
// We can use a dummy node-id here because we won't pay any mind
// to region obligations that arise (there shouldn't really be any
// anyhow).
let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID);
fulfill_cx.register_builtin_bound(infcx, ty, bound, cause);
// Note: we only assume something is `Copy` if we can
// *definitively* show that it implements `Copy`. Otherwise,
// assume it is move; linear is always ok.
match fulfill_cx.select_all_or_error(infcx) {
Ok(()) => {
debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} success",
ty,
bound);
true
}
Err(e) => {
debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} errors={:?}",
ty,
bound,
e);
false
}
}
}
// FIXME: this is gonna need to be removed...
/// Normalizes the parameter environment, reporting errors if they occur.
pub fn normalize_param_env_or_error<'a,'tcx>(unnormalized_env: ty::ParameterEnvironment<'a,'tcx>,
cause: ObligationCause<'tcx>)
-> ty::ParameterEnvironment<'a,'tcx>
{
// I'm not wild about reporting errors here; I'd prefer to
// have the errors get reported at a defined place (e.g.,
// during typeck). Instead I have all parameter
// environments, in effect, going through this function
// and hence potentially reporting errors. This ensurse of
// course that we never forget to normalize (the
// alternative seemed like it would involve a lot of
// manual invocations of this fn -- and then we'd have to
// deal with the errors at each of those sites).
//
// In any case, in practice, typeck constructs all the
// parameter environments once for every fn as it goes,
// and errors will get reported then; so after typeck we
// can be sure that no errors should occur.
let tcx = unnormalized_env.tcx;
|
unnormalized_env);
let predicates: Vec<_> =
util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.clone())
.filter(|p|!p.is_global()) // (*)
.collect();
// (*) Any predicate like `i32: Trait<u32>` or whatever doesn't
// need to be in the *environment* to be proven, so screen those
// out. This is important for the soundness of inter-fn
// caching. Note though that we should probably check that these
// predicates hold at the point where the environment is
// constructed, but I am not currently doing so out of laziness.
// -nmatsakis
debug!("normalize_param_env_or_error: elaborated-predicates={:?}",
predicates);
let elaborated_env = unnormalized_env.with_caller_bounds(predicates);
let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(elaborated_env), false);
let predicates = match fully_normalize(&infcx, cause,
&infcx.parameter_environment.caller_bounds) {
Ok(predicates) => predicates,
Err(errors) => {
report_fulfillment_errors(&infcx, &errors);
return infcx.parameter_environment; // an unnormalized env is better than nothing
}
};
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, body_id);
let predicates = match infcx.fully_resolve(&predicates) {
Ok(predicates) => predicates,
Err(fixup_err) => {
// If we encounter a fixup error, it means that some type
// variable wound up unconstrained. I actually don't know
// if this can happen, and I certainly don't expect it to
// happen often, but if it did happen it probably
// represents a legitimate failure due to some kind of
// unconstrained variable, and it seems better not to ICE,
// all things considered.
let err_msg = fixup_err_to_string(fixup_err);
tcx.sess.span_err(span, &err_msg);
return infcx.parameter_environment; // an unnormalized env is better than nothing
}
};
infcx.parameter_environment.with_caller_bounds(predicates)
}
pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
cause: ObligationCause<'tcx>,
value: &T)
-> Result<T, Vec<FulfillmentError<'tcx>>>
where T : TypeFoldable<'tcx> + HasTypeFlags
{
debug!("normalize_param_env(value={:?})", value);
let mut selcx = &mut SelectionContext::new(infcx);
// FIXME (@jroesch) ISSUE 26721
// I'm not sure if this is a bug or not, needs further investigation.
// It appears that by reusing the fulfillment_cx here we incur more
// obligations and later trip an asssertion on regionck.rs line 337.
//
// The two possibilities I see is:
// - normalization is not actually fully happening and we
// have a bug else where
// - we are adding a duplicate bound into the list causing
// its size to change.
//
// I think we should probably land this refactor and then come
// back to this is a follow-up patch.
let mut fulfill_cx = FulfillmentContext::new(false);
let Normalized { value: normalized_value, obligations } =
project::normalize(selcx, cause, value);
debug!("normalize_param_env: normalized_value={:?} obligations={:?}",
normalized_value,
obligations);
for obligation in obligations {
fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation);
}
try!(fulfill_cx.select_all_or_error(infcx));
let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value);
debug!("normalize_param_env: resolved_value={:?}", resolved_value);
Ok(resolved_value)
}
impl<'tcx,O> Obligation<'tcx,O> {
pub fn new(cause: ObligationCause<'tcx>,
trait_ref: O)
-> Obligation<'tcx, O>
{
Obligation { cause: cause,
recursion_depth: 0,
predicate: trait_ref }
}
fn with_depth(cause: ObligationCause<'tcx>,
recursion_depth: usize,
trait_ref: O)
-> Obligation<'tcx, O>
{
Obligation { cause: cause,
recursion_depth: recursion_depth,
predicate: trait_ref }
}
pub fn misc(span: Span, body_id: ast::NodeId, trait_ref: O) -> Obligation<'tcx, O> {
Obligation::new(ObligationCause::misc(span, body_id), trait_ref)
}
pub fn with<P>(&self, value: P) -> Obligation<'tcx,P> {
Obligation { cause: self.cause.clone(),
recursion_depth: self.recursion_depth,
predicate: value }
}
}
impl<'tcx> ObligationCause<'tcx> {
pub fn new(span: Span,
body_id: ast::NodeId,
code: ObligationCauseCode<'tcx>)
-> ObligationCause<'tcx> {
ObligationCause { span: span, body_id: body_id, code: code }
}
pub fn misc(span: Span, body_id: ast::NodeId) -> ObligationCause<'tcx> {
ObligationCause { span: span, body_id: body_id, code: MiscObligation }
}
pub fn dummy() -> ObligationCause<'tcx> {
ObligationCause { span: DUMMY_SP, body_id: 0, code: MiscObligation }
}
}
impl<'tcx, N> Vtable<'tcx, N> {
pub fn nested_obligations(self) -> Vec<N> {
match self {
VtableImpl(i) => i.nested,
VtableParam(n) => n,
VtableBuiltin(i) => i.nested,
VtableDefaultImpl(d) => d.nested,
VtableClosure(c) => c.nested,
VtableObject(_) | VtableFnPointer(..) => vec![]
}
}
pub fn map<M, F>(self, f: F) -> Vtable<'tcx, M> where F: FnMut(N) -> M {
match self {
VtableImpl(i) => VtableImpl(VtableImplData {
impl_def_id: i.impl_def_id,
substs: i.substs,
nested: i.nested.into_iter().map(f).collect()
}),
VtableParam(n) => VtableParam(n.into_iter().map(f).collect()),
VtableBuiltin(i) => VtableBuiltin(VtableBuiltinData {
nested: i.nested.into_iter().map(f).collect()
}),
VtableObject(o) => VtableObject(o),
VtableDefaultImpl(d) => VtableDefaultImpl(VtableDefaultImplData {
trait_def_id: d.trait_def_id,
nested: d.nested.into_iter().map(f).collect()
}),
VtableFnPointer(f) => VtableFnPointer(f),
VtableClosure(c) => VtableClosure(VtableClosureData {
closure_def_id: c.closure_def_id,
substs: c.substs,
nested: c.nested.into_iter().map(f).collect()
})
}
}
}
impl<'tcx> FulfillmentError<'tcx> {
fn new(obligation: PredicateObligation<'tcx>,
code: FulfillmentErrorCode<'tcx>)
-> FulfillmentError<'tcx>
{
FulfillmentError { obligation: obligation, code: code }
}
}
impl<'tcx> TraitObligation<'tcx> {
fn self_ty(&self) -> ty::Binder<Ty<'tcx>> {
ty::Binder(self.predicate.skip_binder().self_ty())
}
}
|
let span = cause.span;
let body_id = cause.body_id;
debug!("normalize_param_env_or_error(unnormalized_env={:?})",
|
random_line_split
|
mod.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Trait Resolution. See the Book for more.
pub use self::SelectionError::*;
pub use self::FulfillmentErrorCode::*;
pub use self::Vtable::*;
pub use self::ObligationCauseCode::*;
use middle::free_region::FreeRegionMap;
use middle::subst;
use middle::ty::{self, HasTypeFlags, Ty};
use middle::ty_fold::TypeFoldable;
use middle::infer::{self, fixup_err_to_string, InferCtxt};
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::{Span, DUMMY_SP};
pub use self::error_reporting::report_fulfillment_errors;
pub use self::error_reporting::report_overflow_error;
pub use self::error_reporting::report_selection_error;
pub use self::error_reporting::suggest_new_overflow_limit;
pub use self::coherence::orphan_check;
pub use self::coherence::overlapping_impls;
pub use self::coherence::OrphanCheckErr;
pub use self::fulfill::{FulfillmentContext, FulfilledPredicates, RegionObligation};
pub use self::project::MismatchedProjectionTypes;
pub use self::project::normalize;
pub use self::project::Normalized;
pub use self::object_safety::is_object_safe;
pub use self::object_safety::object_safety_violations;
pub use self::object_safety::ObjectSafetyViolation;
pub use self::object_safety::MethodViolationCode;
pub use self::object_safety::is_vtable_safe_method;
pub use self::select::SelectionContext;
pub use self::select::SelectionCache;
pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch};
pub use self::select::{MethodMatchedData}; // intentionally don't export variants
pub use self::util::elaborate_predicates;
pub use self::util::get_vtable_index_of_object_method;
pub use self::util::trait_ref_for_builtin_bound;
pub use self::util::predicate_for_trait_def;
pub use self::util::supertraits;
pub use self::util::Supertraits;
pub use self::util::supertrait_def_ids;
pub use self::util::SupertraitDefIds;
pub use self::util::transitive_bounds;
pub use self::util::upcast;
mod coherence;
mod error_reporting;
mod fulfill;
mod project;
mod object_safety;
mod select;
mod util;
/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for
/// which the vtable must be found. The process of finding a vtable is
/// called "resolving" the `Obligation`. This process consists of
/// either identifying an `impl` (e.g., `impl Eq for int`) that
/// provides the required vtable, or else finding a bound that is in
/// scope. The eventual result is usually a `Selection` (defined below).
#[derive(Clone, PartialEq, Eq)]
pub struct Obligation<'tcx, T> {
pub cause: ObligationCause<'tcx>,
pub recursion_depth: usize,
pub predicate: T,
}
pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>;
pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>;
/// Why did we incur this obligation? Used for error reporting.
#[derive(Clone, PartialEq, Eq)]
pub struct ObligationCause<'tcx> {
    /// Source location at which the obligation was incurred.
    pub span: Span,

    /// The id of the fn body that triggered this obligation. This is
    /// used for region obligations to determine the precise
    /// environment in which the region obligation should be evaluated
    /// (in particular, closures can add new assumptions). See the
    /// field `region_obligations` of the `FulfillmentContext` for more
    /// information.
    pub body_id: ast::NodeId,

    /// Classifies why the obligation arose (see `ObligationCauseCode`).
    pub code: ObligationCauseCode<'tcx>
}
#[derive(Clone, PartialEq, Eq)]
pub enum ObligationCauseCode<'tcx> {
    /// Not well classified or should be obvious from the span.
    MiscObligation,

    /// In an impl of trait X for type Y, type Y must
    /// also implement all supertraits of X.
    ItemObligation(ast::DefId),

    /// Obligation incurred due to an object cast.
    ObjectCastObligation(/* Object type */ Ty<'tcx>),

    // Various cases where expressions must be sized/copy/etc:
    /// `L = X` implies that `L` is `Sized`.
    AssignmentLhsSized,
    /// `S { ... }` must be `Sized`.
    StructInitializerSized,
    /// The type of each variable must be `Sized`.
    VariableType(ast::NodeId),
    /// The return type must be `Sized`.
    ReturnType,
    /// `[T, ..n]` --> `T` must be `Copy`.
    RepeatVec,

    /// Capture of the variable with the given id by a closure
    /// (the span is the span of the closure).
    ClosureCapture(ast::NodeId, Span, ty::BuiltinBound),

    /// Types of fields (other than the last) in a struct must be sized.
    FieldSized,

    /// Static items must have `Sync` type.
    SharedStatic,

    /// Obligation derived while proving a builtin bound for a parent
    /// type (see `DerivedObligationCause`).
    BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
    /// Obligation derived from the bounds of an impl selected for a
    /// parent obligation (see `DerivedObligationCause`).
    ImplDerivedObligation(DerivedObligationCause<'tcx>),
    /// Incurred while checking an impl method against its trait
    /// definition.
    CompareImplMethodObligation,
}
/// Cause information for an obligation that was itself derived from a
/// parent trait obligation (see `BuiltinDerivedObligation` and
/// `ImplDerivedObligation`).
#[derive(Clone, PartialEq, Eq)]
pub struct DerivedObligationCause<'tcx> {
    /// The trait reference of the parent obligation that led to the
    /// current obligation. Note that only trait obligations lead to
    /// derived obligations, so we just store the trait reference here
    /// directly.
    parent_trait_ref: ty::PolyTraitRef<'tcx>,

    /// The parent trait had this cause. Shared via `Rc` since cause
    /// chains can be referenced from many derived obligations.
    parent_code: Rc<ObligationCauseCode<'tcx>>
}
pub type Obligations<'tcx, O> = Vec<Obligation<'tcx, O>>;
pub type PredicateObligations<'tcx> = Vec<PredicateObligation<'tcx>>;
pub type TraitObligations<'tcx> = Vec<TraitObligation<'tcx>>;
pub type Selection<'tcx> = Vtable<'tcx, PredicateObligation<'tcx>>;
/// Ways in which trait selection can fail outright.
#[derive(Clone,Debug)]
pub enum SelectionError<'tcx> {
    /// No impl or bound in scope satisfies the obligation.
    Unimplemented,
    /// An impl was found, but its output type parameters did not
    /// unify; carries the two trait refs that failed to unify and the
    /// underlying type error.
    OutputTypeParameterMismatch(ty::PolyTraitRef<'tcx>,
                                ty::PolyTraitRef<'tcx>,
                                ty::TypeError<'tcx>),
    /// The trait with the given def-id is not object safe.
    TraitNotObjectSafe(ast::DefId),
}
/// An obligation that could not be fulfilled, paired with the reason
/// it failed (or remained ambiguous).
pub struct FulfillmentError<'tcx> {
    /// The obligation that failed.
    pub obligation: PredicateObligation<'tcx>,
    /// Why it failed (see `FulfillmentErrorCode`).
    pub code: FulfillmentErrorCode<'tcx>
}
#[derive(Clone)]
pub enum FulfillmentErrorCode<'tcx> {
    /// Trait selection failed (see `SelectionError`).
    CodeSelectionError(SelectionError<'tcx>),
    /// An associated-type projection produced a mismatched type.
    CodeProjectionError(MismatchedProjectionTypes<'tcx>),
    /// Not enough type information to decide either way.
    CodeAmbiguity,
}
/// When performing resolution, it is typically the case that there
/// can be one of three outcomes:
///
/// - `Ok(Some(r))`: success occurred with result `r`
/// - `Ok(None)`: could not definitely determine anything, usually due
/// to inconclusive type inference.
/// - `Err(e)`: error `e` occurred
pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
/// Given the successful resolution of an obligation, the `Vtable`
/// indicates where the vtable comes from. Note that while we call this
/// a "vtable", it does not necessarily indicate dynamic dispatch at
/// runtime. `Vtable` instances just tell the compiler where to find
/// methods, but in generic code those methods are typically statically
/// dispatched -- only when an object is constructed is a `Vtable`
/// instance reified into an actual vtable.
///
/// For example, the vtable may be tied to a specific impl (case A),
/// or it may be relative to some bound that is in scope (case B).
///
///
/// ```
/// impl<T:Clone> Clone<T> for Option<T> {... } // Impl_1
/// impl<T:Clone> Clone<T> for Box<T> {... } // Impl_2
/// impl Clone for int {... } // Impl_3
///
/// fn foo<T:Clone>(concrete: Option<Box<int>>,
/// param: T,
/// mixed: Option<T>) {
///
/// // Case A: Vtable points at a specific impl. Only possible when
/// // type is concretely known. If the impl itself has bounded
/// // type parameters, Vtable will carry resolutions for those as well:
/// concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
///
/// // Case B: Vtable must be provided by caller. This applies when
/// // type is a type parameter.
/// param.clone(); // VtableParam
///
/// // Case C: A mix of cases A and B.
/// mixed.clone(); // Vtable(Impl_1, [VtableParam])
/// }
/// ```
///
/// ### The type parameter `N`
///
/// See explanation on `VtableImplData`.
#[derive(Clone)]
pub enum Vtable<'tcx, N> {
    /// Vtable identifying a particular impl.
    VtableImpl(VtableImplData<'tcx, N>),

    /// Vtable for default trait implementations.
    /// This carries the information and nested obligations with regards
    /// to a default implementation for a trait `Trait`. The nested obligations
    /// ensure the trait implementation holds for all the constituent types.
    VtableDefaultImpl(VtableDefaultImplData<N>),

    /// Successful resolution to an obligation provided by the caller
    /// for some type parameter. The `Vec<N>` represents the
    /// obligations incurred from normalizing the where-clause (if
    /// any).
    VtableParam(Vec<N>),

    /// Virtual calls through an object.
    VtableObject(VtableObjectData<'tcx>),

    /// Successful resolution for a builtin trait.
    VtableBuiltin(VtableBuiltinData<N>),

    /// Vtable automatically generated for a closure. The def ID is the ID
    /// of the closure expression. This is a `VtableImpl` in spirit, but the
    /// impl is generated by the compiler and does not appear in the source.
    VtableClosure(VtableClosureData<'tcx, N>),

    /// Same as above, but for a fn pointer type with the given signature.
    VtableFnPointer(ty::Ty<'tcx>),
}
/// Identifies a particular impl in the source, along with a set of
/// substitutions from the impl's type/lifetime parameters. The
/// `nested` vector corresponds to the nested obligations attached to
/// the impl's type parameters.
///
/// The type parameter `N` indicates the type used for "nested
/// obligations" that are required by the impl. During type check, this
/// is `Obligation`, as one might expect. During trans, however, this
/// is `()`, because trans only requires a shallow resolution of an
/// impl, and nested obligations are satisfied later.
#[derive(Clone, PartialEq, Eq)]
pub struct VtableImplData<'tcx, N> {
    /// The impl chosen to satisfy the obligation.
    pub impl_def_id: ast::DefId,
    /// Substitutions for the impl's type/lifetime parameters.
    pub substs: subst::Substs<'tcx>,
    /// Nested obligations attached to the impl's type parameters.
    pub nested: Vec<N>
}
#[derive(Clone, PartialEq, Eq)]
pub struct VtableClosureData<'tcx, N> {
    /// Def ID of the closure expression.
    pub closure_def_id: ast::DefId,
    /// Substitutions in scope for the closure.
    pub substs: subst::Substs<'tcx>,
    /// Nested obligations. This can be non-empty if the closure
    /// signature contains associated types.
    pub nested: Vec<N>
}
#[derive(Clone)]
pub struct VtableDefaultImplData<N> {
    /// The trait whose default implementation is being used.
    pub trait_def_id: ast::DefId,
    /// Nested obligations ensuring the trait holds for the
    /// constituent types (see `VtableDefaultImpl`).
    pub nested: Vec<N>
}
/// Resolution data for a builtin trait: just the nested obligations,
/// since no impl or substitutions are involved.
#[derive(Clone)]
pub struct VtableBuiltinData<N> {
    pub nested: Vec<N>
}
/// A vtable for some object-safe trait `Foo` automatically derived
/// for the object type `Foo`.
#[derive(PartialEq,Eq,Clone)]
pub struct VtableObjectData<'tcx> {
    /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
    pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,

    /// The vtable is formed by concatenating together the method lists of
    /// the base object trait and all supertraits; this is the start of
    /// `upcast_trait_ref`'s methods in that vtable.
    pub vtable_base: usize
}
/// Creates predicate obligations from the generic bounds.
///
/// Thin wrapper around `util::predicates_for_generics`, fixing the
/// initial recursion depth at 0 (these are root obligations).
pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>,
                                     generic_bounds: &ty::InstantiatedPredicates<'tcx>)
                                     -> PredicateObligations<'tcx>
{
    util::predicates_for_generics(cause, 0, generic_bounds)
}
/// Determines whether the type `ty` is known to meet `bound` and
/// returns true if so. Returns false if `ty` either does not meet
/// `bound` or is not known to meet bound (note that this is
/// conservative towards *no impl*, which is the opposite of the
/// `evaluate` methods).
pub fn type_known_to_meet_builtin_bound<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
                                                 ty: Ty<'tcx>,
                                                 bound: ty::BuiltinBound,
                                                 span: Span)
                                                 -> bool
{
    debug!("type_known_to_meet_builtin_bound(ty={:?}, bound={:?})",
           ty,
           bound);

    // A fresh fulfillment context: we only care about this one bound.
    let mut fulfill_cx = FulfillmentContext::new(false);

    // We can use a dummy node-id here because we won't pay any mind
    // to region obligations that arise (there shouldn't really be any
    // anyhow).
    let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID);

    fulfill_cx.register_builtin_bound(infcx, ty, bound, cause);

    // Note: we only assume something is `Copy` if we can
    // *definitively* show that it implements `Copy`. Otherwise,
    // assume it is move; linear is always ok.
    match fulfill_cx.select_all_or_error(infcx) {
        Ok(()) => {
            debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} success",
                   ty,
                   bound);
            true
        }
        Err(e) => {
            debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} errors={:?}",
                   ty,
                   bound,
                   e);
            false
        }
    }
}
// FIXME: this is gonna need to be removed...
/// Normalizes the parameter environment, reporting errors if they occur.
///
/// Elaborates the caller bounds, drops global predicates (which need
/// not live in the *environment* to be proven), fully normalizes the
/// rest, and resolves any inference variables created along the way.
/// On any failure the errors are reported via the session and the
/// unnormalized environment is returned as a best effort.
pub fn normalize_param_env_or_error<'a,'tcx>(unnormalized_env: ty::ParameterEnvironment<'a,'tcx>,
                                             cause: ObligationCause<'tcx>)
                                             -> ty::ParameterEnvironment<'a,'tcx>
{
    // I'm not wild about reporting errors here; I'd prefer to
    // have the errors get reported at a defined place (e.g.,
    // during typeck). Instead I have all parameter
    // environments, in effect, going through this function
    // and hence potentially reporting errors. This ensures of
    // course that we never forget to normalize (the
    // alternative seemed like it would involve a lot of
    // manual invocations of this fn -- and then we'd have to
    // deal with the errors at each of those sites).
    //
    // In any case, in practice, typeck constructs all the
    // parameter environments once for every fn as it goes,
    // and errors will get reported then; so after typeck we
    // can be sure that no errors should occur.

    let tcx = unnormalized_env.tcx;
    let span = cause.span;
    let body_id = cause.body_id;

    debug!("normalize_param_env_or_error(unnormalized_env={:?})",
           unnormalized_env);

    let predicates: Vec<_> =
        util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.clone())
        .filter(|p| !p.is_global()) // (*)
        .collect();

    // (*) Any predicate like `i32: Trait<u32>` or whatever doesn't
    // need to be in the *environment* to be proven, so screen those
    // out. This is important for the soundness of inter-fn
    // caching. Note though that we should probably check that these
    // predicates hold at the point where the environment is
    // constructed, but I am not currently doing so out of laziness.
    // -nmatsakis

    debug!("normalize_param_env_or_error: elaborated-predicates={:?}",
           predicates);

    let elaborated_env = unnormalized_env.with_caller_bounds(predicates);
    let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(elaborated_env), false);
    let predicates = match fully_normalize(&infcx, cause,
                                           &infcx.parameter_environment.caller_bounds) {
        Ok(predicates) => predicates,
        Err(errors) => {
            report_fulfillment_errors(&infcx, &errors);
            return infcx.parameter_environment; // an unnormalized env is better than nothing
        }
    };

    // Region obligations that arose during normalization must still
    // be resolved and checked.
    let free_regions = FreeRegionMap::new();
    infcx.resolve_regions_and_report_errors(&free_regions, body_id);
    let predicates = match infcx.fully_resolve(&predicates) {
        Ok(predicates) => predicates,
        Err(fixup_err) => {
            // If we encounter a fixup error, it means that some type
            // variable wound up unconstrained. I actually don't know
            // if this can happen, and I certainly don't expect it to
            // happen often, but if it did happen it probably
            // represents a legitimate failure due to some kind of
            // unconstrained variable, and it seems better not to ICE,
            // all things considered.
            let err_msg = fixup_err_to_string(fixup_err);
            tcx.sess.span_err(span, &err_msg);
            return infcx.parameter_environment; // an unnormalized env is better than nothing
        }
    };

    infcx.parameter_environment.with_caller_bounds(predicates)
}
/// Fully normalizes `value` (resolving associated-type projections via
/// `project::normalize`), selecting any obligations that normalization
/// produces. Returns the fulfillment errors if selection fails.
pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
                                  cause: ObligationCause<'tcx>,
                                  value: &T)
                                  -> Result<T, Vec<FulfillmentError<'tcx>>>
    where T : TypeFoldable<'tcx> + HasTypeFlags
{
    debug!("normalize_param_env(value={:?})", value);

    let mut selcx = &mut SelectionContext::new(infcx);
    // FIXME (@jroesch) ISSUE 26721
    // I'm not sure if this is a bug or not, needs further investigation.
    // It appears that by reusing the fulfillment_cx here we incur more
    // obligations and later trip an assertion on regionck.rs line 337.
    //
    // The two possibilities I see is:
    //  - normalization is not actually fully happening and we
    //    have a bug else where
    //  - we are adding a duplicate bound into the list causing
    //    its size to change.
    //
    // I think we should probably land this refactor and then come
    // back to this in a follow-up patch.
    let mut fulfill_cx = FulfillmentContext::new(false);

    // Normalize, collecting the obligations that normalization produces...
    let Normalized { value: normalized_value, obligations } =
        project::normalize(selcx, cause, value);
    debug!("normalize_param_env: normalized_value={:?} obligations={:?}",
           normalized_value,
           obligations);
    // ...and register each of them with the fulfillment context.
    for obligation in obligations {
        fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation);
    }
    // Select everything; any failure is propagated to the caller.
    try!(fulfill_cx.select_all_or_error(infcx));
    // Replace inference variables introduced during normalization with
    // their resolved values where possible.
    let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value);
    debug!("normalize_param_env: resolved_value={:?}", resolved_value);
    Ok(resolved_value)
}
impl<'tcx,O> Obligation<'tcx,O> {
    /// Builds a root obligation (recursion depth zero) from a cause
    /// and a predicate.
    pub fn new(cause: ObligationCause<'tcx>,
               trait_ref: O)
               -> Obligation<'tcx, O>
    {
        Obligation::with_depth(cause, 0, trait_ref)
    }

    /// Builds an obligation at an explicit recursion depth.
    fn with_depth(cause: ObligationCause<'tcx>,
                  recursion_depth: usize,
                  trait_ref: O)
                  -> Obligation<'tcx, O>
    {
        Obligation {
            predicate: trait_ref,
            recursion_depth: recursion_depth,
            cause: cause,
        }
    }

    /// Shorthand for an obligation whose cause is `MiscObligation`.
    pub fn misc(span: Span, body_id: ast::NodeId, trait_ref: O) -> Obligation<'tcx, O> {
        let cause = ObligationCause::misc(span, body_id);
        Obligation::new(cause, trait_ref)
    }

    /// Returns a copy of this obligation's cause and depth, but with
    /// the predicate replaced by `value`.
    pub fn with<P>(&self, value: P) -> Obligation<'tcx,P> {
        Obligation {
            predicate: value,
            recursion_depth: self.recursion_depth,
            cause: self.cause.clone(),
        }
    }
}
impl<'tcx> ObligationCause<'tcx> {
pub fn new(span: Span,
body_id: ast::NodeId,
code: ObligationCauseCode<'tcx>)
-> ObligationCause<'tcx> {
ObligationCause { span: span, body_id: body_id, code: code }
}
pub fn
|
(span: Span, body_id: ast::NodeId) -> ObligationCause<'tcx> {
ObligationCause { span: span, body_id: body_id, code: MiscObligation }
}
pub fn dummy() -> ObligationCause<'tcx> {
ObligationCause { span: DUMMY_SP, body_id: 0, code: MiscObligation }
}
}
impl<'tcx, N> Vtable<'tcx, N> {
    /// Consumes the vtable, yielding whatever nested obligations it
    /// carries. `VtableObject` and `VtableFnPointer` carry none, so
    /// they yield an empty vector.
    pub fn nested_obligations(self) -> Vec<N> {
        match self {
            VtableParam(obligations) => obligations,
            VtableImpl(data) => data.nested,
            VtableClosure(data) => data.nested,
            VtableDefaultImpl(data) => data.nested,
            VtableBuiltin(data) => data.nested,
            VtableObject(_) | VtableFnPointer(..) => Vec::new()
        }
    }

    /// Transforms the nested obligations with `f` while leaving the
    /// variant and every other field untouched.
    pub fn map<M, F>(self, f: F) -> Vtable<'tcx, M> where F: FnMut(N) -> M {
        match self {
            VtableParam(obligations) =>
                VtableParam(obligations.into_iter().map(f).collect()),
            VtableImpl(data) => VtableImpl(VtableImplData {
                impl_def_id: data.impl_def_id,
                substs: data.substs,
                nested: data.nested.into_iter().map(f).collect()
            }),
            VtableClosure(data) => VtableClosure(VtableClosureData {
                closure_def_id: data.closure_def_id,
                substs: data.substs,
                nested: data.nested.into_iter().map(f).collect()
            }),
            VtableDefaultImpl(data) => VtableDefaultImpl(VtableDefaultImplData {
                trait_def_id: data.trait_def_id,
                nested: data.nested.into_iter().map(f).collect()
            }),
            VtableBuiltin(data) => VtableBuiltin(VtableBuiltinData {
                nested: data.nested.into_iter().map(f).collect()
            }),
            // No nested obligations in these variants: pass the
            // payload straight through.
            VtableObject(data) => VtableObject(data),
            VtableFnPointer(fn_ty) => VtableFnPointer(fn_ty)
        }
    }
}
impl<'tcx> FulfillmentError<'tcx> {
    /// Pairs a failed obligation with the error code explaining why it
    /// could not be fulfilled.
    fn new(obligation: PredicateObligation<'tcx>,
           code: FulfillmentErrorCode<'tcx>)
           -> FulfillmentError<'tcx>
    {
        FulfillmentError { obligation: obligation, code: code }
    }
}
impl<'tcx> TraitObligation<'tcx> {
    /// Extracts the self type of the trait predicate, re-wrapping it
    /// in the predicate's binder.
    fn self_ty(&self) -> ty::Binder<Ty<'tcx>> {
        ty::Binder(self.predicate.skip_binder().self_ty())
    }
}
|
misc
|
identifier_name
|
mod.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Trait Resolution. See the Book for more.
pub use self::SelectionError::*;
pub use self::FulfillmentErrorCode::*;
pub use self::Vtable::*;
pub use self::ObligationCauseCode::*;
use middle::free_region::FreeRegionMap;
use middle::subst;
use middle::ty::{self, HasTypeFlags, Ty};
use middle::ty_fold::TypeFoldable;
use middle::infer::{self, fixup_err_to_string, InferCtxt};
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::{Span, DUMMY_SP};
pub use self::error_reporting::report_fulfillment_errors;
pub use self::error_reporting::report_overflow_error;
pub use self::error_reporting::report_selection_error;
pub use self::error_reporting::suggest_new_overflow_limit;
pub use self::coherence::orphan_check;
pub use self::coherence::overlapping_impls;
pub use self::coherence::OrphanCheckErr;
pub use self::fulfill::{FulfillmentContext, FulfilledPredicates, RegionObligation};
pub use self::project::MismatchedProjectionTypes;
pub use self::project::normalize;
pub use self::project::Normalized;
pub use self::object_safety::is_object_safe;
pub use self::object_safety::object_safety_violations;
pub use self::object_safety::ObjectSafetyViolation;
pub use self::object_safety::MethodViolationCode;
pub use self::object_safety::is_vtable_safe_method;
pub use self::select::SelectionContext;
pub use self::select::SelectionCache;
pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch};
pub use self::select::{MethodMatchedData}; // intentionally don't export variants
pub use self::util::elaborate_predicates;
pub use self::util::get_vtable_index_of_object_method;
pub use self::util::trait_ref_for_builtin_bound;
pub use self::util::predicate_for_trait_def;
pub use self::util::supertraits;
pub use self::util::Supertraits;
pub use self::util::supertrait_def_ids;
pub use self::util::SupertraitDefIds;
pub use self::util::transitive_bounds;
pub use self::util::upcast;
mod coherence;
mod error_reporting;
mod fulfill;
mod project;
mod object_safety;
mod select;
mod util;
/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for
/// which the vtable must be found. The process of finding a vtable is
/// called "resolving" the `Obligation`. This process consists of
/// either identifying an `impl` (e.g., `impl Eq for int`) that
/// provides the required vtable, or else finding a bound that is in
/// scope. The eventual result is usually a `Selection` (defined below).
#[derive(Clone, PartialEq, Eq)]
pub struct Obligation<'tcx, T> {
pub cause: ObligationCause<'tcx>,
pub recursion_depth: usize,
pub predicate: T,
}
pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>;
pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>;
/// Why did we incur this obligation? Used for error reporting.
#[derive(Clone, PartialEq, Eq)]
pub struct ObligationCause<'tcx> {
pub span: Span,
// The id of the fn body that triggered this obligation. This is
// used for region obligations to determine the precise
// environment in which the region obligation should be evaluated
// (in particular, closures can add new assumptions). See the
// field `region_obligations` of the `FulfillmentContext` for more
// information.
pub body_id: ast::NodeId,
pub code: ObligationCauseCode<'tcx>
}
#[derive(Clone, PartialEq, Eq)]
pub enum ObligationCauseCode<'tcx> {
/// Not well classified or should be obvious from span.
MiscObligation,
/// In an impl of trait X for type Y, type Y must
/// also implement all supertraits of X.
ItemObligation(ast::DefId),
/// Obligation incurred due to an object cast.
ObjectCastObligation(/* Object type */ Ty<'tcx>),
/// Various cases where expressions must be sized/copy/etc:
AssignmentLhsSized, // L = X implies that L is Sized
StructInitializerSized, // S {... } must be Sized
VariableType(ast::NodeId), // Type of each variable must be Sized
ReturnType, // Return type must be Sized
RepeatVec, // [T,..n] --> T must be Copy
// Captures of variable the given id by a closure (span is the
// span of the closure)
ClosureCapture(ast::NodeId, Span, ty::BuiltinBound),
// Types of fields (other than the last) in a struct must be sized.
FieldSized,
// static items must have `Sync` type
SharedStatic,
BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
ImplDerivedObligation(DerivedObligationCause<'tcx>),
CompareImplMethodObligation,
}
#[derive(Clone, PartialEq, Eq)]
pub struct DerivedObligationCause<'tcx> {
/// The trait reference of the parent obligation that led to the
/// current obligation. Note that only trait obligations lead to
/// derived obligations, so we just store the trait reference here
/// directly.
parent_trait_ref: ty::PolyTraitRef<'tcx>,
/// The parent trait had this cause
parent_code: Rc<ObligationCauseCode<'tcx>>
}
pub type Obligations<'tcx, O> = Vec<Obligation<'tcx, O>>;
pub type PredicateObligations<'tcx> = Vec<PredicateObligation<'tcx>>;
pub type TraitObligations<'tcx> = Vec<TraitObligation<'tcx>>;
pub type Selection<'tcx> = Vtable<'tcx, PredicateObligation<'tcx>>;
#[derive(Clone,Debug)]
pub enum SelectionError<'tcx> {
Unimplemented,
OutputTypeParameterMismatch(ty::PolyTraitRef<'tcx>,
ty::PolyTraitRef<'tcx>,
ty::TypeError<'tcx>),
TraitNotObjectSafe(ast::DefId),
}
pub struct FulfillmentError<'tcx> {
pub obligation: PredicateObligation<'tcx>,
pub code: FulfillmentErrorCode<'tcx>
}
#[derive(Clone)]
pub enum FulfillmentErrorCode<'tcx> {
CodeSelectionError(SelectionError<'tcx>),
CodeProjectionError(MismatchedProjectionTypes<'tcx>),
CodeAmbiguity,
}
/// When performing resolution, it is typically the case that there
/// can be one of three outcomes:
///
/// - `Ok(Some(r))`: success occurred with result `r`
/// - `Ok(None)`: could not definitely determine anything, usually due
/// to inconclusive type inference.
/// - `Err(e)`: error `e` occurred
pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
/// Given the successful resolution of an obligation, the `Vtable`
/// indicates where the vtable comes from. Note that while we call this
/// a "vtable", it does not necessarily indicate dynamic dispatch at
/// runtime. `Vtable` instances just tell the compiler where to find
/// methods, but in generic code those methods are typically statically
/// dispatched -- only when an object is constructed is a `Vtable`
/// instance reified into an actual vtable.
///
/// For example, the vtable may be tied to a specific impl (case A),
/// or it may be relative to some bound that is in scope (case B).
///
///
/// ```
/// impl<T:Clone> Clone<T> for Option<T> {... } // Impl_1
/// impl<T:Clone> Clone<T> for Box<T> {... } // Impl_2
/// impl Clone for int {... } // Impl_3
///
/// fn foo<T:Clone>(concrete: Option<Box<int>>,
/// param: T,
/// mixed: Option<T>) {
///
/// // Case A: Vtable points at a specific impl. Only possible when
/// // type is concretely known. If the impl itself has bounded
/// // type parameters, Vtable will carry resolutions for those as well:
/// concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
///
/// // Case B: Vtable must be provided by caller. This applies when
/// // type is a type parameter.
/// param.clone(); // VtableParam
///
/// // Case C: A mix of cases A and B.
/// mixed.clone(); // Vtable(Impl_1, [VtableParam])
/// }
/// ```
///
/// ### The type parameter `N`
///
/// See explanation on `VtableImplData`.
#[derive(Clone)]
pub enum Vtable<'tcx, N> {
/// Vtable identifying a particular impl.
VtableImpl(VtableImplData<'tcx, N>),
/// Vtable for default trait implementations
/// This carries the information and nested obligations with regards
/// to a default implementation for a trait `Trait`. The nested obligations
/// ensure the trait implementation holds for all the constituent types.
VtableDefaultImpl(VtableDefaultImplData<N>),
/// Successful resolution to an obligation provided by the caller
/// for some type parameter. The `Vec<N>` represents the
/// obligations incurred from normalizing the where-clause (if
/// any).
VtableParam(Vec<N>),
/// Virtual calls through an object
VtableObject(VtableObjectData<'tcx>),
/// Successful resolution for a builtin trait.
VtableBuiltin(VtableBuiltinData<N>),
/// Vtable automatically generated for a closure. The def ID is the ID
/// of the closure expression. This is a `VtableImpl` in spirit, but the
/// impl is generated by the compiler and does not appear in the source.
VtableClosure(VtableClosureData<'tcx, N>),
/// Same as above, but for a fn pointer type with the given signature.
VtableFnPointer(ty::Ty<'tcx>),
}
/// Identifies a particular impl in the source, along with a set of
/// substitutions from the impl's type/lifetime parameters. The
/// `nested` vector corresponds to the nested obligations attached to
/// the impl's type parameters.
///
/// The type parameter `N` indicates the type used for "nested
/// obligations" that are required by the impl. During type check, this
/// is `Obligation`, as one might expect. During trans, however, this
/// is `()`, because trans only requires a shallow resolution of an
/// impl, and nested obligations are satisfied later.
#[derive(Clone, PartialEq, Eq)]
pub struct VtableImplData<'tcx, N> {
pub impl_def_id: ast::DefId,
pub substs: subst::Substs<'tcx>,
pub nested: Vec<N>
}
#[derive(Clone, PartialEq, Eq)]
pub struct VtableClosureData<'tcx, N> {
pub closure_def_id: ast::DefId,
pub substs: subst::Substs<'tcx>,
/// Nested obligations. This can be non-empty if the closure
/// signature contains associated types.
pub nested: Vec<N>
}
#[derive(Clone)]
pub struct VtableDefaultImplData<N> {
pub trait_def_id: ast::DefId,
pub nested: Vec<N>
}
#[derive(Clone)]
pub struct VtableBuiltinData<N> {
pub nested: Vec<N>
}
/// A vtable for some object-safe trait `Foo` automatically derived
/// for the object type `Foo`.
#[derive(PartialEq,Eq,Clone)]
pub struct VtableObjectData<'tcx> {
/// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
/// The vtable is formed by concatenating together the method lists of
/// the base object trait and all supertraits; this is the start of
/// `upcast_trait_ref`'s methods in that vtable.
pub vtable_base: usize
}
/// Creates predicate obligations from the generic bounds.
pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>,
generic_bounds: &ty::InstantiatedPredicates<'tcx>)
-> PredicateObligations<'tcx>
{
util::predicates_for_generics(cause, 0, generic_bounds)
}
/// Determines whether the type `ty` is known to meet `bound` and
/// returns true if so. Returns false if `ty` either does not meet
/// `bound` or is not known to meet bound (note that this is
/// conservative towards *no impl*, which is the opposite of the
/// `evaluate` methods).
pub fn type_known_to_meet_builtin_bound<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
ty: Ty<'tcx>,
bound: ty::BuiltinBound,
span: Span)
-> bool
|
ty,
bound);
true
}
Err(e) => {
debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} errors={:?}",
ty,
bound,
e);
false
}
}
}
// FIXME: this is gonna need to be removed...
/// Normalizes the parameter environment, reporting errors if they occur.
pub fn normalize_param_env_or_error<'a,'tcx>(unnormalized_env: ty::ParameterEnvironment<'a,'tcx>,
cause: ObligationCause<'tcx>)
-> ty::ParameterEnvironment<'a,'tcx>
{
// I'm not wild about reporting errors here; I'd prefer to
// have the errors get reported at a defined place (e.g.,
// during typeck). Instead I have all parameter
// environments, in effect, going through this function
// and hence potentially reporting errors. This ensurse of
// course that we never forget to normalize (the
// alternative seemed like it would involve a lot of
// manual invocations of this fn -- and then we'd have to
// deal with the errors at each of those sites).
//
// In any case, in practice, typeck constructs all the
// parameter environments once for every fn as it goes,
// and errors will get reported then; so after typeck we
// can be sure that no errors should occur.
let tcx = unnormalized_env.tcx;
let span = cause.span;
let body_id = cause.body_id;
debug!("normalize_param_env_or_error(unnormalized_env={:?})",
unnormalized_env);
let predicates: Vec<_> =
util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.clone())
.filter(|p|!p.is_global()) // (*)
.collect();
// (*) Any predicate like `i32: Trait<u32>` or whatever doesn't
// need to be in the *environment* to be proven, so screen those
// out. This is important for the soundness of inter-fn
// caching. Note though that we should probably check that these
// predicates hold at the point where the environment is
// constructed, but I am not currently doing so out of laziness.
// -nmatsakis
debug!("normalize_param_env_or_error: elaborated-predicates={:?}",
predicates);
let elaborated_env = unnormalized_env.with_caller_bounds(predicates);
let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(elaborated_env), false);
let predicates = match fully_normalize(&infcx, cause,
&infcx.parameter_environment.caller_bounds) {
Ok(predicates) => predicates,
Err(errors) => {
report_fulfillment_errors(&infcx, &errors);
return infcx.parameter_environment; // an unnormalized env is better than nothing
}
};
let free_regions = FreeRegionMap::new();
infcx.resolve_regions_and_report_errors(&free_regions, body_id);
let predicates = match infcx.fully_resolve(&predicates) {
Ok(predicates) => predicates,
Err(fixup_err) => {
// If we encounter a fixup error, it means that some type
// variable wound up unconstrained. I actually don't know
// if this can happen, and I certainly don't expect it to
// happen often, but if it did happen it probably
// represents a legitimate failure due to some kind of
// unconstrained variable, and it seems better not to ICE,
// all things considered.
let err_msg = fixup_err_to_string(fixup_err);
tcx.sess.span_err(span, &err_msg);
return infcx.parameter_environment; // an unnormalized env is better than nothing
}
};
infcx.parameter_environment.with_caller_bounds(predicates)
}
pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
cause: ObligationCause<'tcx>,
value: &T)
-> Result<T, Vec<FulfillmentError<'tcx>>>
where T : TypeFoldable<'tcx> + HasTypeFlags
{
debug!("normalize_param_env(value={:?})", value);
let mut selcx = &mut SelectionContext::new(infcx);
// FIXME (@jroesch) ISSUE 26721
// I'm not sure if this is a bug or not, needs further investigation.
// It appears that by reusing the fulfillment_cx here we incur more
// obligations and later trip an asssertion on regionck.rs line 337.
//
// The two possibilities I see is:
// - normalization is not actually fully happening and we
// have a bug else where
// - we are adding a duplicate bound into the list causing
// its size to change.
//
// I think we should probably land this refactor and then come
// back to this is a follow-up patch.
let mut fulfill_cx = FulfillmentContext::new(false);
let Normalized { value: normalized_value, obligations } =
project::normalize(selcx, cause, value);
debug!("normalize_param_env: normalized_value={:?} obligations={:?}",
normalized_value,
obligations);
for obligation in obligations {
fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation);
}
try!(fulfill_cx.select_all_or_error(infcx));
let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value);
debug!("normalize_param_env: resolved_value={:?}", resolved_value);
Ok(resolved_value)
}
impl<'tcx,O> Obligation<'tcx,O> {
pub fn new(cause: ObligationCause<'tcx>,
trait_ref: O)
-> Obligation<'tcx, O>
{
Obligation { cause: cause,
recursion_depth: 0,
predicate: trait_ref }
}
fn with_depth(cause: ObligationCause<'tcx>,
recursion_depth: usize,
trait_ref: O)
-> Obligation<'tcx, O>
{
Obligation { cause: cause,
recursion_depth: recursion_depth,
predicate: trait_ref }
}
pub fn misc(span: Span, body_id: ast::NodeId, trait_ref: O) -> Obligation<'tcx, O> {
Obligation::new(ObligationCause::misc(span, body_id), trait_ref)
}
pub fn with<P>(&self, value: P) -> Obligation<'tcx,P> {
Obligation { cause: self.cause.clone(),
recursion_depth: self.recursion_depth,
predicate: value }
}
}
impl<'tcx> ObligationCause<'tcx> {
pub fn new(span: Span,
body_id: ast::NodeId,
code: ObligationCauseCode<'tcx>)
-> ObligationCause<'tcx> {
ObligationCause { span: span, body_id: body_id, code: code }
}
pub fn misc(span: Span, body_id: ast::NodeId) -> ObligationCause<'tcx> {
ObligationCause { span: span, body_id: body_id, code: MiscObligation }
}
pub fn dummy() -> ObligationCause<'tcx> {
ObligationCause { span: DUMMY_SP, body_id: 0, code: MiscObligation }
}
}
impl<'tcx, N> Vtable<'tcx, N> {
pub fn nested_obligations(self) -> Vec<N> {
match self {
VtableImpl(i) => i.nested,
VtableParam(n) => n,
VtableBuiltin(i) => i.nested,
VtableDefaultImpl(d) => d.nested,
VtableClosure(c) => c.nested,
VtableObject(_) | VtableFnPointer(..) => vec![]
}
}
pub fn map<M, F>(self, f: F) -> Vtable<'tcx, M> where F: FnMut(N) -> M {
match self {
VtableImpl(i) => VtableImpl(VtableImplData {
impl_def_id: i.impl_def_id,
substs: i.substs,
nested: i.nested.into_iter().map(f).collect()
}),
VtableParam(n) => VtableParam(n.into_iter().map(f).collect()),
VtableBuiltin(i) => VtableBuiltin(VtableBuiltinData {
nested: i.nested.into_iter().map(f).collect()
}),
VtableObject(o) => VtableObject(o),
VtableDefaultImpl(d) => VtableDefaultImpl(VtableDefaultImplData {
trait_def_id: d.trait_def_id,
nested: d.nested.into_iter().map(f).collect()
}),
VtableFnPointer(f) => VtableFnPointer(f),
VtableClosure(c) => VtableClosure(VtableClosureData {
closure_def_id: c.closure_def_id,
substs: c.substs,
nested: c.nested.into_iter().map(f).collect()
})
}
}
}
impl<'tcx> FulfillmentError<'tcx> {
fn new(obligation: PredicateObligation<'tcx>,
code: FulfillmentErrorCode<'tcx>)
-> FulfillmentError<'tcx>
{
FulfillmentError { obligation: obligation, code: code }
}
}
impl<'tcx> TraitObligation<'tcx> {
fn self_ty(&self) -> ty::Binder<Ty<'tcx>> {
ty::Binder(self.predicate.skip_binder().self_ty())
}
}
|
{
debug!("type_known_to_meet_builtin_bound(ty={:?}, bound={:?})",
ty,
bound);
let mut fulfill_cx = FulfillmentContext::new(false);
// We can use a dummy node-id here because we won't pay any mind
// to region obligations that arise (there shouldn't really be any
// anyhow).
let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID);
fulfill_cx.register_builtin_bound(infcx, ty, bound, cause);
// Note: we only assume something is `Copy` if we can
// *definitively* show that it implements `Copy`. Otherwise,
// assume it is move; linear is always ok.
match fulfill_cx.select_all_or_error(infcx) {
Ok(()) => {
debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} success",
|
identifier_body
|
constellation_msg.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The high-level interface from script to constellation. Using this abstract interface helps
//! reduce coupling between these two components.
use nonzero::NonZeroU32;
use std::cell::Cell;
use std::fmt;
use webrender_api;
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum KeyState {
Pressed,
Released,
Repeated,
}
//N.B. Based on the glutin key enum
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum Key {
Space,
Apostrophe,
Comma,
Minus,
Period,
Slash,
Num0,
Num1,
Num2,
Num3,
Num4,
Num5,
Num6,
Num7,
Num8,
Num9,
Semicolon,
Equal,
A,
B,
C,
D,
E,
F,
G,
H,
I,
J,
K,
L,
M,
N,
O,
P,
Q,
R,
S,
T,
U,
V,
W,
X,
Y,
Z,
LeftBracket,
Backslash,
RightBracket,
GraveAccent,
World1,
World2,
Escape,
Enter,
Tab,
Backspace,
Insert,
Delete,
Right,
Left,
Down,
Up,
PageUp,
PageDown,
Home,
End,
CapsLock,
ScrollLock,
NumLock,
PrintScreen,
Pause,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14,
F15,
F16,
F17,
F18,
F19,
F20,
F21,
F22,
F23,
F24,
F25,
Kp0,
Kp1,
Kp2,
Kp3,
Kp4,
Kp5,
Kp6,
Kp7,
Kp8,
Kp9,
KpDecimal,
KpDivide,
KpMultiply,
KpSubtract,
KpAdd,
KpEnter,
KpEqual,
LeftShift,
LeftControl,
LeftAlt,
LeftSuper,
RightShift,
RightControl,
RightAlt,
RightSuper,
Menu,
NavigateBackward,
NavigateForward,
}
bitflags! {
#[derive(Deserialize, Serialize)]
pub struct KeyModifiers: u8 {
const NONE = 0x00;
const SHIFT = 0x01;
const CONTROL = 0x02;
const ALT = 0x04;
const SUPER = 0x08;
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum TraversalDirection {
Forward(usize),
Back(usize),
}
/// Each pipeline ID needs to be unique. However, it also needs to be possible to
/// generate the pipeline ID from an iframe element (this simplifies a lot of other
/// code that makes use of pipeline IDs).
///
/// To achieve this, each pipeline index belongs to a particular namespace. There is
/// a namespace for the constellation thread, and also one for every script thread.
/// This allows pipeline IDs to be generated by any of those threads without conflicting
/// with pipeline IDs created by other script threads or the constellation. The
/// constellation is the only code that is responsible for creating new *namespaces*.
/// This ensures that namespaces are always unique, even when using multi-process mode.
///
/// It may help conceptually to think of the namespace ID as an identifier for the
/// thread that created this pipeline ID - however this is really an implementation
/// detail so shouldn't be relied upon in code logic. It's best to think of the
/// pipeline ID as a simple unique identifier that doesn't convey any more information.
#[derive(Clone, Copy)]
pub struct PipelineNamespace {
id: PipelineNamespaceId,
index: u32,
}
impl PipelineNamespace {
pub fn install(namespace_id: PipelineNamespaceId) {
PIPELINE_NAMESPACE.with(|tls| {
assert!(tls.get().is_none());
tls.set(Some(PipelineNamespace {
id: namespace_id,
index: 0,
}));
});
}
fn next_index(&mut self) -> NonZeroU32 {
self.index += 1;
NonZeroU32::new(self.index).expect("pipeline id index wrapped!")
}
fn next_pipeline_id(&mut self) -> PipelineId {
PipelineId {
namespace_id: self.id,
index: PipelineIndex(self.next_index()),
}
}
fn next_browsing_context_id(&mut self) -> BrowsingContextId {
BrowsingContextId {
namespace_id: self.id,
index: BrowsingContextIndex(self.next_index()),
}
}
fn next_history_state_id(&mut self) -> HistoryStateId {
HistoryStateId {
namespace_id: self.id,
index: HistoryStateIndex(self.next_index()),
}
}
}
thread_local!(pub static PIPELINE_NAMESPACE: Cell<Option<PipelineNamespace>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineNamespaceId(pub u32);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineIndex(pub NonZeroU32);
malloc_size_of_is_0!(PipelineIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineId {
pub namespace_id: PipelineNamespaceId,
pub index: PipelineIndex
}
impl PipelineId {
pub fn new() -> PipelineId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_pipeline_id = namespace.next_pipeline_id();
tls.set(Some(namespace));
new_pipeline_id
})
}
pub fn to_webrender(&self) -> webrender_api::PipelineId {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
webrender_api::PipelineId(namespace_id, index.get())
}
#[allow(unsafe_code)]
pub fn from_webrender(pipeline: webrender_api::PipelineId) -> PipelineId {
let webrender_api::PipelineId(namespace_id, index) = pipeline;
unsafe {
PipelineId {
namespace_id: PipelineNamespaceId(namespace_id),
index: PipelineIndex(NonZeroU32::new_unchecked(index)),
}
}
}
pub fn root_scroll_node(&self) -> webrender_api::ClipId {
webrender_api::ClipId::root_scroll_node(self.to_webrender())
}
pub fn root_scroll_id(&self) -> webrender_api::ExternalScrollId {
webrender_api::ExternalScrollId(0, self.to_webrender())
}
pub fn root_clip_and_scroll_info(&self) -> webrender_api::ClipAndScrollInfo {
webrender_api::ClipAndScrollInfo::simple(self.root_scroll_node())
}
}
impl fmt::Display for PipelineId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextIndex(pub NonZeroU32);
malloc_size_of_is_0!(BrowsingContextIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextId {
pub namespace_id: PipelineNamespaceId,
pub index: BrowsingContextIndex,
}
impl BrowsingContextId {
pub fn new() -> BrowsingContextId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_browsing_context_id = namespace.next_browsing_context_id();
tls.set(Some(namespace));
new_browsing_context_id
})
}
}
|
write!(fmt, "({},{})", namespace_id, index.get())
}
}
thread_local!(pub static TOP_LEVEL_BROWSING_CONTEXT_ID: Cell<Option<TopLevelBrowsingContextId>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct TopLevelBrowsingContextId(BrowsingContextId);
impl TopLevelBrowsingContextId {
pub fn new() -> TopLevelBrowsingContextId {
TopLevelBrowsingContextId(BrowsingContextId::new())
}
/// Each script and layout thread should have the top-level browsing context id installed,
/// since it is used by crash reporting.
pub fn install(id: TopLevelBrowsingContextId) {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.set(Some(id)))
}
pub fn installed() -> Option<TopLevelBrowsingContextId> {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.get())
}
}
impl fmt::Display for TopLevelBrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(fmt)
}
}
impl From<TopLevelBrowsingContextId> for BrowsingContextId {
fn from(id: TopLevelBrowsingContextId) -> BrowsingContextId {
id.0
}
}
impl PartialEq<TopLevelBrowsingContextId> for BrowsingContextId {
fn eq(&self, rhs: &TopLevelBrowsingContextId) -> bool {
self.eq(&rhs.0)
}
}
impl PartialEq<BrowsingContextId> for TopLevelBrowsingContextId {
fn eq(&self, rhs: &BrowsingContextId) -> bool {
self.0.eq(rhs)
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct HistoryStateIndex(pub NonZeroU32);
malloc_size_of_is_0!(HistoryStateIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct HistoryStateId {
pub namespace_id: PipelineNamespaceId,
pub index: HistoryStateIndex,
}
impl HistoryStateId {
pub fn new() -> HistoryStateId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let next_history_state_id = namespace.next_history_state_id();
tls.set(Some(namespace));
next_history_state_id
})
}
}
impl fmt::Display for HistoryStateId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let HistoryStateIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
// We provide ids just for unit testing.
pub const TEST_NAMESPACE: PipelineNamespaceId = PipelineNamespaceId(1234);
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_INDEX: PipelineIndex = unsafe { PipelineIndex(NonZeroU32::new_unchecked(5678)) };
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_ID: PipelineId = PipelineId { namespace_id: TEST_NAMESPACE, index: TEST_PIPELINE_INDEX };
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_INDEX: BrowsingContextIndex =
unsafe { BrowsingContextIndex(NonZeroU32::new_unchecked(8765)) };
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_ID: BrowsingContextId =
BrowsingContextId { namespace_id: TEST_NAMESPACE, index: TEST_BROWSING_CONTEXT_INDEX };
// Used to specify the kind of input method editor appropriate to edit a field.
// This is a subset of htmlinputelement::InputType because some variants of InputType
// don't make sense in this context.
#[derive(Deserialize, Serialize)]
pub enum InputMethodType {
Color,
Date,
DatetimeLocal,
Email,
Month,
Number,
Password,
Search,
Tel,
Text,
Time,
Url,
Week,
}
|
impl fmt::Display for BrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let BrowsingContextIndex(index) = self.index;
|
random_line_split
|
constellation_msg.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The high-level interface from script to constellation. Using this abstract interface helps
//! reduce coupling between these two components.
use nonzero::NonZeroU32;
use std::cell::Cell;
use std::fmt;
use webrender_api;
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum KeyState {
Pressed,
Released,
Repeated,
}
//N.B. Based on the glutin key enum
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum Key {
Space,
Apostrophe,
Comma,
Minus,
Period,
Slash,
Num0,
Num1,
Num2,
Num3,
Num4,
Num5,
Num6,
Num7,
Num8,
Num9,
Semicolon,
Equal,
A,
B,
C,
D,
E,
F,
G,
H,
I,
J,
K,
L,
M,
N,
O,
P,
Q,
R,
S,
T,
U,
V,
W,
X,
Y,
Z,
LeftBracket,
Backslash,
RightBracket,
GraveAccent,
World1,
World2,
Escape,
Enter,
Tab,
Backspace,
Insert,
Delete,
Right,
Left,
Down,
Up,
PageUp,
PageDown,
Home,
End,
CapsLock,
ScrollLock,
NumLock,
PrintScreen,
Pause,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14,
F15,
F16,
F17,
F18,
F19,
F20,
F21,
F22,
F23,
F24,
F25,
Kp0,
Kp1,
Kp2,
Kp3,
Kp4,
Kp5,
Kp6,
Kp7,
Kp8,
Kp9,
KpDecimal,
KpDivide,
KpMultiply,
KpSubtract,
KpAdd,
KpEnter,
KpEqual,
LeftShift,
LeftControl,
LeftAlt,
LeftSuper,
RightShift,
RightControl,
RightAlt,
RightSuper,
Menu,
NavigateBackward,
NavigateForward,
}
bitflags! {
#[derive(Deserialize, Serialize)]
pub struct KeyModifiers: u8 {
const NONE = 0x00;
const SHIFT = 0x01;
const CONTROL = 0x02;
const ALT = 0x04;
const SUPER = 0x08;
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum TraversalDirection {
Forward(usize),
Back(usize),
}
/// Each pipeline ID needs to be unique. However, it also needs to be possible to
/// generate the pipeline ID from an iframe element (this simplifies a lot of other
/// code that makes use of pipeline IDs).
///
/// To achieve this, each pipeline index belongs to a particular namespace. There is
/// a namespace for the constellation thread, and also one for every script thread.
/// This allows pipeline IDs to be generated by any of those threads without conflicting
/// with pipeline IDs created by other script threads or the constellation. The
/// constellation is the only code that is responsible for creating new *namespaces*.
/// This ensures that namespaces are always unique, even when using multi-process mode.
///
/// It may help conceptually to think of the namespace ID as an identifier for the
/// thread that created this pipeline ID - however this is really an implementation
/// detail so shouldn't be relied upon in code logic. It's best to think of the
/// pipeline ID as a simple unique identifier that doesn't convey any more information.
#[derive(Clone, Copy)]
pub struct PipelineNamespace {
id: PipelineNamespaceId,
index: u32,
}
impl PipelineNamespace {
pub fn install(namespace_id: PipelineNamespaceId) {
PIPELINE_NAMESPACE.with(|tls| {
assert!(tls.get().is_none());
tls.set(Some(PipelineNamespace {
id: namespace_id,
index: 0,
}));
});
}
fn next_index(&mut self) -> NonZeroU32 {
self.index += 1;
NonZeroU32::new(self.index).expect("pipeline id index wrapped!")
}
fn next_pipeline_id(&mut self) -> PipelineId {
PipelineId {
namespace_id: self.id,
index: PipelineIndex(self.next_index()),
}
}
fn next_browsing_context_id(&mut self) -> BrowsingContextId {
BrowsingContextId {
namespace_id: self.id,
index: BrowsingContextIndex(self.next_index()),
}
}
fn next_history_state_id(&mut self) -> HistoryStateId {
HistoryStateId {
namespace_id: self.id,
index: HistoryStateIndex(self.next_index()),
}
}
}
thread_local!(pub static PIPELINE_NAMESPACE: Cell<Option<PipelineNamespace>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineNamespaceId(pub u32);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineIndex(pub NonZeroU32);
malloc_size_of_is_0!(PipelineIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineId {
pub namespace_id: PipelineNamespaceId,
pub index: PipelineIndex
}
impl PipelineId {
pub fn new() -> PipelineId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_pipeline_id = namespace.next_pipeline_id();
tls.set(Some(namespace));
new_pipeline_id
})
}
pub fn to_webrender(&self) -> webrender_api::PipelineId {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
webrender_api::PipelineId(namespace_id, index.get())
}
#[allow(unsafe_code)]
pub fn from_webrender(pipeline: webrender_api::PipelineId) -> PipelineId {
let webrender_api::PipelineId(namespace_id, index) = pipeline;
unsafe {
PipelineId {
namespace_id: PipelineNamespaceId(namespace_id),
index: PipelineIndex(NonZeroU32::new_unchecked(index)),
}
}
}
pub fn root_scroll_node(&self) -> webrender_api::ClipId {
webrender_api::ClipId::root_scroll_node(self.to_webrender())
}
pub fn root_scroll_id(&self) -> webrender_api::ExternalScrollId {
webrender_api::ExternalScrollId(0, self.to_webrender())
}
pub fn root_clip_and_scroll_info(&self) -> webrender_api::ClipAndScrollInfo {
webrender_api::ClipAndScrollInfo::simple(self.root_scroll_node())
}
}
impl fmt::Display for PipelineId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextIndex(pub NonZeroU32);
malloc_size_of_is_0!(BrowsingContextIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextId {
pub namespace_id: PipelineNamespaceId,
pub index: BrowsingContextIndex,
}
impl BrowsingContextId {
pub fn new() -> BrowsingContextId
|
}
impl fmt::Display for BrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let BrowsingContextIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
thread_local!(pub static TOP_LEVEL_BROWSING_CONTEXT_ID: Cell<Option<TopLevelBrowsingContextId>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct TopLevelBrowsingContextId(BrowsingContextId);
impl TopLevelBrowsingContextId {
pub fn new() -> TopLevelBrowsingContextId {
TopLevelBrowsingContextId(BrowsingContextId::new())
}
/// Each script and layout thread should have the top-level browsing context id installed,
/// since it is used by crash reporting.
pub fn install(id: TopLevelBrowsingContextId) {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.set(Some(id)))
}
pub fn installed() -> Option<TopLevelBrowsingContextId> {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.get())
}
}
impl fmt::Display for TopLevelBrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(fmt)
}
}
impl From<TopLevelBrowsingContextId> for BrowsingContextId {
fn from(id: TopLevelBrowsingContextId) -> BrowsingContextId {
id.0
}
}
impl PartialEq<TopLevelBrowsingContextId> for BrowsingContextId {
fn eq(&self, rhs: &TopLevelBrowsingContextId) -> bool {
self.eq(&rhs.0)
}
}
impl PartialEq<BrowsingContextId> for TopLevelBrowsingContextId {
fn eq(&self, rhs: &BrowsingContextId) -> bool {
self.0.eq(rhs)
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct HistoryStateIndex(pub NonZeroU32);
malloc_size_of_is_0!(HistoryStateIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct HistoryStateId {
pub namespace_id: PipelineNamespaceId,
pub index: HistoryStateIndex,
}
impl HistoryStateId {
pub fn new() -> HistoryStateId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let next_history_state_id = namespace.next_history_state_id();
tls.set(Some(namespace));
next_history_state_id
})
}
}
impl fmt::Display for HistoryStateId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let HistoryStateIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
// We provide ids just for unit testing.
pub const TEST_NAMESPACE: PipelineNamespaceId = PipelineNamespaceId(1234);
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_INDEX: PipelineIndex = unsafe { PipelineIndex(NonZeroU32::new_unchecked(5678)) };
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_ID: PipelineId = PipelineId { namespace_id: TEST_NAMESPACE, index: TEST_PIPELINE_INDEX };
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_INDEX: BrowsingContextIndex =
unsafe { BrowsingContextIndex(NonZeroU32::new_unchecked(8765)) };
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_ID: BrowsingContextId =
BrowsingContextId { namespace_id: TEST_NAMESPACE, index: TEST_BROWSING_CONTEXT_INDEX };
// Used to specify the kind of input method editor appropriate to edit a field.
// This is a subset of htmlinputelement::InputType because some variants of InputType
// don't make sense in this context.
#[derive(Deserialize, Serialize)]
pub enum InputMethodType {
Color,
Date,
DatetimeLocal,
Email,
Month,
Number,
Password,
Search,
Tel,
Text,
Time,
Url,
Week,
}
|
{
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_browsing_context_id = namespace.next_browsing_context_id();
tls.set(Some(namespace));
new_browsing_context_id
})
}
|
identifier_body
|
constellation_msg.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The high-level interface from script to constellation. Using this abstract interface helps
//! reduce coupling between these two components.
use nonzero::NonZeroU32;
use std::cell::Cell;
use std::fmt;
use webrender_api;
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum KeyState {
Pressed,
Released,
Repeated,
}
//N.B. Based on the glutin key enum
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum Key {
Space,
Apostrophe,
Comma,
Minus,
Period,
Slash,
Num0,
Num1,
Num2,
Num3,
Num4,
Num5,
Num6,
Num7,
Num8,
Num9,
Semicolon,
Equal,
A,
B,
C,
D,
E,
F,
G,
H,
I,
J,
K,
L,
M,
N,
O,
P,
Q,
R,
S,
T,
U,
V,
W,
X,
Y,
Z,
LeftBracket,
Backslash,
RightBracket,
GraveAccent,
World1,
World2,
Escape,
Enter,
Tab,
Backspace,
Insert,
Delete,
Right,
Left,
Down,
Up,
PageUp,
PageDown,
Home,
End,
CapsLock,
ScrollLock,
NumLock,
PrintScreen,
Pause,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14,
F15,
F16,
F17,
F18,
F19,
F20,
F21,
F22,
F23,
F24,
F25,
Kp0,
Kp1,
Kp2,
Kp3,
Kp4,
Kp5,
Kp6,
Kp7,
Kp8,
Kp9,
KpDecimal,
KpDivide,
KpMultiply,
KpSubtract,
KpAdd,
KpEnter,
KpEqual,
LeftShift,
LeftControl,
LeftAlt,
LeftSuper,
RightShift,
RightControl,
RightAlt,
RightSuper,
Menu,
NavigateBackward,
NavigateForward,
}
bitflags! {
#[derive(Deserialize, Serialize)]
pub struct KeyModifiers: u8 {
const NONE = 0x00;
const SHIFT = 0x01;
const CONTROL = 0x02;
const ALT = 0x04;
const SUPER = 0x08;
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum TraversalDirection {
Forward(usize),
Back(usize),
}
/// Each pipeline ID needs to be unique. However, it also needs to be possible to
/// generate the pipeline ID from an iframe element (this simplifies a lot of other
/// code that makes use of pipeline IDs).
///
/// To achieve this, each pipeline index belongs to a particular namespace. There is
/// a namespace for the constellation thread, and also one for every script thread.
/// This allows pipeline IDs to be generated by any of those threads without conflicting
/// with pipeline IDs created by other script threads or the constellation. The
/// constellation is the only code that is responsible for creating new *namespaces*.
/// This ensures that namespaces are always unique, even when using multi-process mode.
///
/// It may help conceptually to think of the namespace ID as an identifier for the
/// thread that created this pipeline ID - however this is really an implementation
/// detail so shouldn't be relied upon in code logic. It's best to think of the
/// pipeline ID as a simple unique identifier that doesn't convey any more information.
#[derive(Clone, Copy)]
pub struct PipelineNamespace {
id: PipelineNamespaceId,
index: u32,
}
impl PipelineNamespace {
pub fn install(namespace_id: PipelineNamespaceId) {
PIPELINE_NAMESPACE.with(|tls| {
assert!(tls.get().is_none());
tls.set(Some(PipelineNamespace {
id: namespace_id,
index: 0,
}));
});
}
fn next_index(&mut self) -> NonZeroU32 {
self.index += 1;
NonZeroU32::new(self.index).expect("pipeline id index wrapped!")
}
fn next_pipeline_id(&mut self) -> PipelineId {
PipelineId {
namespace_id: self.id,
index: PipelineIndex(self.next_index()),
}
}
fn next_browsing_context_id(&mut self) -> BrowsingContextId {
BrowsingContextId {
namespace_id: self.id,
index: BrowsingContextIndex(self.next_index()),
}
}
fn next_history_state_id(&mut self) -> HistoryStateId {
HistoryStateId {
namespace_id: self.id,
index: HistoryStateIndex(self.next_index()),
}
}
}
thread_local!(pub static PIPELINE_NAMESPACE: Cell<Option<PipelineNamespace>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineNamespaceId(pub u32);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineIndex(pub NonZeroU32);
malloc_size_of_is_0!(PipelineIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineId {
pub namespace_id: PipelineNamespaceId,
pub index: PipelineIndex
}
impl PipelineId {
pub fn new() -> PipelineId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_pipeline_id = namespace.next_pipeline_id();
tls.set(Some(namespace));
new_pipeline_id
})
}
pub fn to_webrender(&self) -> webrender_api::PipelineId {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
webrender_api::PipelineId(namespace_id, index.get())
}
#[allow(unsafe_code)]
pub fn from_webrender(pipeline: webrender_api::PipelineId) -> PipelineId {
let webrender_api::PipelineId(namespace_id, index) = pipeline;
unsafe {
PipelineId {
namespace_id: PipelineNamespaceId(namespace_id),
index: PipelineIndex(NonZeroU32::new_unchecked(index)),
}
}
}
pub fn root_scroll_node(&self) -> webrender_api::ClipId {
webrender_api::ClipId::root_scroll_node(self.to_webrender())
}
pub fn root_scroll_id(&self) -> webrender_api::ExternalScrollId {
webrender_api::ExternalScrollId(0, self.to_webrender())
}
pub fn root_clip_and_scroll_info(&self) -> webrender_api::ClipAndScrollInfo {
webrender_api::ClipAndScrollInfo::simple(self.root_scroll_node())
}
}
impl fmt::Display for PipelineId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextIndex(pub NonZeroU32);
malloc_size_of_is_0!(BrowsingContextIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextId {
pub namespace_id: PipelineNamespaceId,
pub index: BrowsingContextIndex,
}
impl BrowsingContextId {
pub fn new() -> BrowsingContextId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_browsing_context_id = namespace.next_browsing_context_id();
tls.set(Some(namespace));
new_browsing_context_id
})
}
}
impl fmt::Display for BrowsingContextId {
fn
|
(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let BrowsingContextIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
thread_local!(pub static TOP_LEVEL_BROWSING_CONTEXT_ID: Cell<Option<TopLevelBrowsingContextId>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct TopLevelBrowsingContextId(BrowsingContextId);
impl TopLevelBrowsingContextId {
pub fn new() -> TopLevelBrowsingContextId {
TopLevelBrowsingContextId(BrowsingContextId::new())
}
/// Each script and layout thread should have the top-level browsing context id installed,
/// since it is used by crash reporting.
pub fn install(id: TopLevelBrowsingContextId) {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.set(Some(id)))
}
pub fn installed() -> Option<TopLevelBrowsingContextId> {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.get())
}
}
impl fmt::Display for TopLevelBrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(fmt)
}
}
impl From<TopLevelBrowsingContextId> for BrowsingContextId {
fn from(id: TopLevelBrowsingContextId) -> BrowsingContextId {
id.0
}
}
impl PartialEq<TopLevelBrowsingContextId> for BrowsingContextId {
fn eq(&self, rhs: &TopLevelBrowsingContextId) -> bool {
self.eq(&rhs.0)
}
}
impl PartialEq<BrowsingContextId> for TopLevelBrowsingContextId {
fn eq(&self, rhs: &BrowsingContextId) -> bool {
self.0.eq(rhs)
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct HistoryStateIndex(pub NonZeroU32);
malloc_size_of_is_0!(HistoryStateIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct HistoryStateId {
pub namespace_id: PipelineNamespaceId,
pub index: HistoryStateIndex,
}
impl HistoryStateId {
pub fn new() -> HistoryStateId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let next_history_state_id = namespace.next_history_state_id();
tls.set(Some(namespace));
next_history_state_id
})
}
}
impl fmt::Display for HistoryStateId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let HistoryStateIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
// We provide ids just for unit testing.
pub const TEST_NAMESPACE: PipelineNamespaceId = PipelineNamespaceId(1234);
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_INDEX: PipelineIndex = unsafe { PipelineIndex(NonZeroU32::new_unchecked(5678)) };
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_ID: PipelineId = PipelineId { namespace_id: TEST_NAMESPACE, index: TEST_PIPELINE_INDEX };
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_INDEX: BrowsingContextIndex =
unsafe { BrowsingContextIndex(NonZeroU32::new_unchecked(8765)) };
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_ID: BrowsingContextId =
BrowsingContextId { namespace_id: TEST_NAMESPACE, index: TEST_BROWSING_CONTEXT_INDEX };
// Used to specify the kind of input method editor appropriate to edit a field.
// This is a subset of htmlinputelement::InputType because some variants of InputType
// don't make sense in this context.
#[derive(Deserialize, Serialize)]
pub enum InputMethodType {
Color,
Date,
DatetimeLocal,
Email,
Month,
Number,
Password,
Search,
Tel,
Text,
Time,
Url,
Week,
}
|
fmt
|
identifier_name
|
cave-4.rs
|
use std::io::BufRead;
// Meet-in-the-middle algorithm using binary search.
fn read_int() -> usize {
let stdin = std::io::stdin();
let line = stdin.lock().lines().next().unwrap().unwrap();
line.parse::<usize>().unwrap()
}
fn read_weight() -> usize {
let stdin = std::io::stdin();
let line = stdin.lock().lines().next().unwrap();
let numbers: Vec<usize> =
line.unwrap().split('.').map(|n| n.parse().unwrap()).collect();
10_000_000 * numbers[0] + numbers[1]
}
fn print_weight(w: usize) {
println!("{}.{:07}", w / 10_000_000,
w % 10_000_000);
}
// Compute the weight of every possible combination of nuggets.
fn compute_weights(nuggets: &[usize]) -> Vec<(usize, usize)> {
let n = nuggets.len();
let mut weights = Vec::with_capacity(1 << n);
weights.push((0, 0));
for i in 0..n {
let start = 1 << i;
for j in 0..start {
let weight = nuggets[i] + weights[j].1;
weights.push((start + j, weight));
}
}
weights.sort_by(|x, y| x.1.cmp(&y.1));
weights
}
// Find the nugget combination whose weight is closest to `limit`.
fn find_closest(limit: usize, weights: &[(usize, usize)]) -> (usize, usize) {
let mut lo = 0;
let mut hi = weights.len();
while hi - lo > 1 {
let mid = (lo + hi) / 2;
if weights[mid].1 <= limit { lo = mid; } else
|
}
weights[lo]
}
fn main() {
// read input
let backpack = read_weight();
let n = read_int();
let nuggets: Vec<usize> = (0..n).map(|_| read_weight()).collect();
// split nuggets into two sets A and B
let (a, b) = nuggets.split_at(n as usize / 2);
// compute weights for all subsets of A and B
let weights_a = compute_weights(a);
let weights_b = compute_weights(b);
// find the best combination of nuggets from A and B
let mut best = (0, 0);
for wa in weights_a {
if wa.1 > backpack {
continue;
}
let wb = find_closest(backpack - wa.1, &weights_b);
let combined_weight = wa.1 + wb.1;
if combined_weight > best.1 {
best = (wb.0 << a.len() | wa.0, combined_weight);
}
}
// print results
print_weight(best.1);
for i in 0..n {
if 1 << i & best.0!= 0 {
print_weight(nuggets[i as usize]);
}
}
}
|
{ hi = mid; }
|
conditional_block
|
cave-4.rs
|
use std::io::BufRead;
// Meet-in-the-middle algorithm using binary search.
fn read_int() -> usize
|
fn read_weight() -> usize {
let stdin = std::io::stdin();
let line = stdin.lock().lines().next().unwrap();
let numbers: Vec<usize> =
line.unwrap().split('.').map(|n| n.parse().unwrap()).collect();
10_000_000 * numbers[0] + numbers[1]
}
fn print_weight(w: usize) {
println!("{}.{:07}", w / 10_000_000,
w % 10_000_000);
}
// Compute the weight of every possible combination of nuggets.
fn compute_weights(nuggets: &[usize]) -> Vec<(usize, usize)> {
let n = nuggets.len();
let mut weights = Vec::with_capacity(1 << n);
weights.push((0, 0));
for i in 0..n {
let start = 1 << i;
for j in 0..start {
let weight = nuggets[i] + weights[j].1;
weights.push((start + j, weight));
}
}
weights.sort_by(|x, y| x.1.cmp(&y.1));
weights
}
// Find the nugget combination whose weight is closest to `limit`.
fn find_closest(limit: usize, weights: &[(usize, usize)]) -> (usize, usize) {
let mut lo = 0;
let mut hi = weights.len();
while hi - lo > 1 {
let mid = (lo + hi) / 2;
if weights[mid].1 <= limit { lo = mid; } else { hi = mid; }
}
weights[lo]
}
fn main() {
// read input
let backpack = read_weight();
let n = read_int();
let nuggets: Vec<usize> = (0..n).map(|_| read_weight()).collect();
// split nuggets into two sets A and B
let (a, b) = nuggets.split_at(n as usize / 2);
// compute weights for all subsets of A and B
let weights_a = compute_weights(a);
let weights_b = compute_weights(b);
// find the best combination of nuggets from A and B
let mut best = (0, 0);
for wa in weights_a {
if wa.1 > backpack {
continue;
}
let wb = find_closest(backpack - wa.1, &weights_b);
let combined_weight = wa.1 + wb.1;
if combined_weight > best.1 {
best = (wb.0 << a.len() | wa.0, combined_weight);
}
}
// print results
print_weight(best.1);
for i in 0..n {
if 1 << i & best.0!= 0 {
print_weight(nuggets[i as usize]);
}
}
}
|
{
let stdin = std::io::stdin();
let line = stdin.lock().lines().next().unwrap().unwrap();
line.parse::<usize>().unwrap()
}
|
identifier_body
|
cave-4.rs
|
use std::io::BufRead;
// Meet-in-the-middle algorithm using binary search.
fn read_int() -> usize {
let stdin = std::io::stdin();
let line = stdin.lock().lines().next().unwrap().unwrap();
line.parse::<usize>().unwrap()
}
fn read_weight() -> usize {
let stdin = std::io::stdin();
let line = stdin.lock().lines().next().unwrap();
let numbers: Vec<usize> =
line.unwrap().split('.').map(|n| n.parse().unwrap()).collect();
10_000_000 * numbers[0] + numbers[1]
}
fn print_weight(w: usize) {
println!("{}.{:07}", w / 10_000_000,
w % 10_000_000);
}
// Compute the weight of every possible combination of nuggets.
fn compute_weights(nuggets: &[usize]) -> Vec<(usize, usize)> {
let n = nuggets.len();
let mut weights = Vec::with_capacity(1 << n);
weights.push((0, 0));
for i in 0..n {
let start = 1 << i;
for j in 0..start {
let weight = nuggets[i] + weights[j].1;
weights.push((start + j, weight));
}
}
weights.sort_by(|x, y| x.1.cmp(&y.1));
weights
}
// Find the nugget combination whose weight is closest to `limit`.
fn find_closest(limit: usize, weights: &[(usize, usize)]) -> (usize, usize) {
let mut lo = 0;
let mut hi = weights.len();
while hi - lo > 1 {
let mid = (lo + hi) / 2;
if weights[mid].1 <= limit { lo = mid; } else { hi = mid; }
}
weights[lo]
}
fn main() {
// read input
let backpack = read_weight();
let n = read_int();
let nuggets: Vec<usize> = (0..n).map(|_| read_weight()).collect();
// split nuggets into two sets A and B
let (a, b) = nuggets.split_at(n as usize / 2);
// compute weights for all subsets of A and B
let weights_a = compute_weights(a);
let weights_b = compute_weights(b);
// find the best combination of nuggets from A and B
let mut best = (0, 0);
for wa in weights_a {
if wa.1 > backpack {
continue;
}
|
best = (wb.0 << a.len() | wa.0, combined_weight);
}
}
// print results
print_weight(best.1);
for i in 0..n {
if 1 << i & best.0!= 0 {
print_weight(nuggets[i as usize]);
}
}
}
|
let wb = find_closest(backpack - wa.1, &weights_b);
let combined_weight = wa.1 + wb.1;
if combined_weight > best.1 {
|
random_line_split
|
cave-4.rs
|
use std::io::BufRead;
// Meet-in-the-middle algorithm using binary search.
fn read_int() -> usize {
let stdin = std::io::stdin();
let line = stdin.lock().lines().next().unwrap().unwrap();
line.parse::<usize>().unwrap()
}
fn read_weight() -> usize {
let stdin = std::io::stdin();
let line = stdin.lock().lines().next().unwrap();
let numbers: Vec<usize> =
line.unwrap().split('.').map(|n| n.parse().unwrap()).collect();
10_000_000 * numbers[0] + numbers[1]
}
fn
|
(w: usize) {
println!("{}.{:07}", w / 10_000_000,
w % 10_000_000);
}
// Compute the weight of every possible combination of nuggets.
fn compute_weights(nuggets: &[usize]) -> Vec<(usize, usize)> {
let n = nuggets.len();
let mut weights = Vec::with_capacity(1 << n);
weights.push((0, 0));
for i in 0..n {
let start = 1 << i;
for j in 0..start {
let weight = nuggets[i] + weights[j].1;
weights.push((start + j, weight));
}
}
weights.sort_by(|x, y| x.1.cmp(&y.1));
weights
}
// Find the nugget combination whose weight is closest to `limit`.
fn find_closest(limit: usize, weights: &[(usize, usize)]) -> (usize, usize) {
let mut lo = 0;
let mut hi = weights.len();
while hi - lo > 1 {
let mid = (lo + hi) / 2;
if weights[mid].1 <= limit { lo = mid; } else { hi = mid; }
}
weights[lo]
}
fn main() {
// read input
let backpack = read_weight();
let n = read_int();
let nuggets: Vec<usize> = (0..n).map(|_| read_weight()).collect();
// split nuggets into two sets A and B
let (a, b) = nuggets.split_at(n as usize / 2);
// compute weights for all subsets of A and B
let weights_a = compute_weights(a);
let weights_b = compute_weights(b);
// find the best combination of nuggets from A and B
let mut best = (0, 0);
for wa in weights_a {
if wa.1 > backpack {
continue;
}
let wb = find_closest(backpack - wa.1, &weights_b);
let combined_weight = wa.1 + wb.1;
if combined_weight > best.1 {
best = (wb.0 << a.len() | wa.0, combined_weight);
}
}
// print results
print_weight(best.1);
for i in 0..n {
if 1 << i & best.0!= 0 {
print_weight(nuggets[i as usize]);
}
}
}
|
print_weight
|
identifier_name
|
rust_base64.rs
|
extern crate base64;
use errors::prelude::*;
use failure::ResultExt;
pub fn encode(doc: &[u8]) -> String {
base64::encode(doc)
}
pub fn decode(doc: &str) -> Result<Vec<u8>, IndyError> {
base64::decode(doc)
.context("Invalid base64 sequence")
.context(IndyErrorKind::InvalidStructure)
.map_err(|err| err.into())
}
pub fn
|
(doc: &[u8]) -> String {
base64::encode_config(doc, base64::URL_SAFE)
}
pub fn decode_urlsafe(doc: &str) -> Result<Vec<u8>, IndyError> {
base64::decode_config(doc, base64::URL_SAFE)
.context("Invalid base64URL_SAFE sequence")
.context(IndyErrorKind::InvalidStructure)
.map_err(|err| err.into())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn encode_works() {
let result = encode(&[1, 2, 3]);
assert_eq!("AQID", &result);
}
#[test]
fn decode_works() {
let result = decode("AQID");
assert!(result.is_ok(), "Got error");
assert_eq!(&[1, 2, 3], &result.unwrap()[..]);
}
#[test]
fn encode_urlsafe_works() {
let result = encode_urlsafe(&[1, 2, 3]);
assert_eq!("AQID", &result);
}
#[test]
fn decode_urlsafe_works() {
let result = decode_urlsafe("AQID");
assert!(result.is_ok(), "Got error");
assert_eq!(&[1, 2, 3], &result.unwrap()[..]);
}
}
|
encode_urlsafe
|
identifier_name
|
rust_base64.rs
|
extern crate base64;
use errors::prelude::*;
use failure::ResultExt;
pub fn encode(doc: &[u8]) -> String {
base64::encode(doc)
}
pub fn decode(doc: &str) -> Result<Vec<u8>, IndyError> {
base64::decode(doc)
.context("Invalid base64 sequence")
.context(IndyErrorKind::InvalidStructure)
.map_err(|err| err.into())
}
pub fn encode_urlsafe(doc: &[u8]) -> String {
base64::encode_config(doc, base64::URL_SAFE)
}
pub fn decode_urlsafe(doc: &str) -> Result<Vec<u8>, IndyError> {
base64::decode_config(doc, base64::URL_SAFE)
.context("Invalid base64URL_SAFE sequence")
.context(IndyErrorKind::InvalidStructure)
.map_err(|err| err.into())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn encode_works() {
let result = encode(&[1, 2, 3]);
assert_eq!("AQID", &result);
}
#[test]
fn decode_works() {
let result = decode("AQID");
assert!(result.is_ok(), "Got error");
assert_eq!(&[1, 2, 3], &result.unwrap()[..]);
}
#[test]
|
}
#[test]
fn decode_urlsafe_works() {
let result = decode_urlsafe("AQID");
assert!(result.is_ok(), "Got error");
assert_eq!(&[1, 2, 3], &result.unwrap()[..]);
}
}
|
fn encode_urlsafe_works() {
let result = encode_urlsafe(&[1, 2, 3]);
assert_eq!("AQID", &result);
|
random_line_split
|
rust_base64.rs
|
extern crate base64;
use errors::prelude::*;
use failure::ResultExt;
pub fn encode(doc: &[u8]) -> String {
base64::encode(doc)
}
pub fn decode(doc: &str) -> Result<Vec<u8>, IndyError> {
base64::decode(doc)
.context("Invalid base64 sequence")
.context(IndyErrorKind::InvalidStructure)
.map_err(|err| err.into())
}
pub fn encode_urlsafe(doc: &[u8]) -> String {
base64::encode_config(doc, base64::URL_SAFE)
}
pub fn decode_urlsafe(doc: &str) -> Result<Vec<u8>, IndyError> {
base64::decode_config(doc, base64::URL_SAFE)
.context("Invalid base64URL_SAFE sequence")
.context(IndyErrorKind::InvalidStructure)
.map_err(|err| err.into())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn encode_works() {
let result = encode(&[1, 2, 3]);
assert_eq!("AQID", &result);
}
#[test]
fn decode_works() {
let result = decode("AQID");
assert!(result.is_ok(), "Got error");
assert_eq!(&[1, 2, 3], &result.unwrap()[..]);
}
#[test]
fn encode_urlsafe_works() {
let result = encode_urlsafe(&[1, 2, 3]);
assert_eq!("AQID", &result);
}
#[test]
fn decode_urlsafe_works()
|
}
|
{
let result = decode_urlsafe("AQID");
assert!(result.is_ok(), "Got error");
assert_eq!(&[1, 2, 3], &result.unwrap()[..]);
}
|
identifier_body
|
main.rs
|
use std::str;
use std::u32;
use std::fmt::Write;
extern crate crypto;
use crypto::md5::Md5;
use crypto::digest::Digest;
// It takes two hex digits to make a byte, so check
// the first 2.5 bytes for zero values
fn first_five_hex_zero(input: &[u8; 16]) -> bool {
let sum = (input[0] as u32) + (input[1] as u32) + (input[2] as u32 >> 4);
sum == 0
}
fn get_sixth_hex_char(input: &[u8; 16]) -> char {
let mut buff = String::new();
let _ = write!(buff,"{:x}", input[2]);
buff.as_bytes()[0] as char
}
fn get_seventh_hex_char(input: &[u8; 16]) -> char {
let mut buff = String::new();
let _ = write!(buff,"{:x}", input[3] >> 4);
buff.as_bytes()[0] as char
}
fn main() {
// Puzzle input
let door_id = "reyedfim";
let mut hasher = Md5::new();
let mut password = "".to_owned();
let target_password_length = 8;
// Static buffer to hold the result
let mut hash_output = [0u8; 16];
// Loop through door_id + [0..max] to find hashes that
// start with five zeros
for i in 0..u32::max_value() {
let mut test_value = String::new();
let _ = write!(test_value,"{}{}", door_id, i);
hasher.reset();
hasher.input(test_value.as_bytes());
hasher.result(&mut hash_output);
if first_five_hex_zero(&hash_output) {
let next_char = get_sixth_hex_char(&hash_output);
password.push(next_char);
}
if password.len() >= target_password_length {
break;
}
}
println!("Part 1: password = '{}'", password);
assert!(password == "f97c354d");
// Part 2
let mut password_part2_bytes = [0u8; 8]; //" ".as_bytes();
let mut set_characters = 0;
// Loop through door_id + [0..max] to find hashes that
// start with five zeros
for i in 0..u32::max_value() {
let mut test_value = String::new();
let _ = write!(test_value,"{}{}", door_id, i);
hasher.reset();
hasher.input(test_value.as_bytes());
hasher.result(&mut hash_output);
if first_five_hex_zero(&hash_output) {
let next_pos = get_sixth_hex_char(&hash_output);
|
if index < 8 {
if password_part2_bytes[index as usize] == 0 {
password_part2_bytes[index as usize] = next_char as u8;
set_characters += 1;
}
}
}
}
if set_characters == target_password_length {
break;
}
}
let password_part2 = str::from_utf8(&password_part2_bytes).unwrap();
println!("Part 2: password = '{}'", password_part2);
assert!(password_part2 == "863dde27");
}
#[test]
fn test1() {
let test_value = "abc3231929";
let mut hasher = Md5::new();
hasher.input(test_value.as_bytes());
let hash_result = hasher.result_str();
println!("{}", hash_result);
let mut buff = [0;16];
hasher.result(&mut buff);
let position = get_sixth_hex_char(&buff);
let character = get_seventh_hex_char(&buff);
println!("pos: {}", position);
println!("chr: {}", character);
}
|
let next_char = get_seventh_hex_char(&hash_output);
if let Some(index) = next_pos.to_digit(10) {
|
random_line_split
|
main.rs
|
use std::str;
use std::u32;
use std::fmt::Write;
extern crate crypto;
use crypto::md5::Md5;
use crypto::digest::Digest;
// It takes two hex digits to make a byte, so check
// the first 2.5 bytes for zero values
fn first_five_hex_zero(input: &[u8; 16]) -> bool {
let sum = (input[0] as u32) + (input[1] as u32) + (input[2] as u32 >> 4);
sum == 0
}
fn get_sixth_hex_char(input: &[u8; 16]) -> char
|
fn get_seventh_hex_char(input: &[u8; 16]) -> char {
let mut buff = String::new();
let _ = write!(buff,"{:x}", input[3] >> 4);
buff.as_bytes()[0] as char
}
fn main() {
// Puzzle input
let door_id = "reyedfim";
let mut hasher = Md5::new();
let mut password = "".to_owned();
let target_password_length = 8;
// Static buffer to hold the result
let mut hash_output = [0u8; 16];
// Loop through door_id + [0..max] to find hashes that
// start with five zeros
for i in 0..u32::max_value() {
let mut test_value = String::new();
let _ = write!(test_value,"{}{}", door_id, i);
hasher.reset();
hasher.input(test_value.as_bytes());
hasher.result(&mut hash_output);
if first_five_hex_zero(&hash_output) {
let next_char = get_sixth_hex_char(&hash_output);
password.push(next_char);
}
if password.len() >= target_password_length {
break;
}
}
println!("Part 1: password = '{}'", password);
assert!(password == "f97c354d");
// Part 2
let mut password_part2_bytes = [0u8; 8]; //" ".as_bytes();
let mut set_characters = 0;
// Loop through door_id + [0..max] to find hashes that
// start with five zeros
for i in 0..u32::max_value() {
let mut test_value = String::new();
let _ = write!(test_value,"{}{}", door_id, i);
hasher.reset();
hasher.input(test_value.as_bytes());
hasher.result(&mut hash_output);
if first_five_hex_zero(&hash_output) {
let next_pos = get_sixth_hex_char(&hash_output);
let next_char = get_seventh_hex_char(&hash_output);
if let Some(index) = next_pos.to_digit(10) {
if index < 8 {
if password_part2_bytes[index as usize] == 0 {
password_part2_bytes[index as usize] = next_char as u8;
set_characters += 1;
}
}
}
}
if set_characters == target_password_length {
break;
}
}
let password_part2 = str::from_utf8(&password_part2_bytes).unwrap();
println!("Part 2: password = '{}'", password_part2);
assert!(password_part2 == "863dde27");
}
#[test]
fn test1() {
let test_value = "abc3231929";
let mut hasher = Md5::new();
hasher.input(test_value.as_bytes());
let hash_result = hasher.result_str();
println!("{}", hash_result);
let mut buff = [0;16];
hasher.result(&mut buff);
let position = get_sixth_hex_char(&buff);
let character = get_seventh_hex_char(&buff);
println!("pos: {}", position);
println!("chr: {}", character);
}
|
{
let mut buff = String::new();
let _ = write!(buff,"{:x}", input[2]);
buff.as_bytes()[0] as char
}
|
identifier_body
|
main.rs
|
use std::str;
use std::u32;
use std::fmt::Write;
extern crate crypto;
use crypto::md5::Md5;
use crypto::digest::Digest;
// It takes two hex digits to make a byte, so check
// the first 2.5 bytes for zero values
fn first_five_hex_zero(input: &[u8; 16]) -> bool {
let sum = (input[0] as u32) + (input[1] as u32) + (input[2] as u32 >> 4);
sum == 0
}
fn get_sixth_hex_char(input: &[u8; 16]) -> char {
let mut buff = String::new();
let _ = write!(buff,"{:x}", input[2]);
buff.as_bytes()[0] as char
}
fn get_seventh_hex_char(input: &[u8; 16]) -> char {
let mut buff = String::new();
let _ = write!(buff,"{:x}", input[3] >> 4);
buff.as_bytes()[0] as char
}
fn
|
() {
// Puzzle input
let door_id = "reyedfim";
let mut hasher = Md5::new();
let mut password = "".to_owned();
let target_password_length = 8;
// Static buffer to hold the result
let mut hash_output = [0u8; 16];
// Loop through door_id + [0..max] to find hashes that
// start with five zeros
for i in 0..u32::max_value() {
let mut test_value = String::new();
let _ = write!(test_value,"{}{}", door_id, i);
hasher.reset();
hasher.input(test_value.as_bytes());
hasher.result(&mut hash_output);
if first_five_hex_zero(&hash_output) {
let next_char = get_sixth_hex_char(&hash_output);
password.push(next_char);
}
if password.len() >= target_password_length {
break;
}
}
println!("Part 1: password = '{}'", password);
assert!(password == "f97c354d");
// Part 2
let mut password_part2_bytes = [0u8; 8]; //" ".as_bytes();
let mut set_characters = 0;
// Loop through door_id + [0..max] to find hashes that
// start with five zeros
for i in 0..u32::max_value() {
let mut test_value = String::new();
let _ = write!(test_value,"{}{}", door_id, i);
hasher.reset();
hasher.input(test_value.as_bytes());
hasher.result(&mut hash_output);
if first_five_hex_zero(&hash_output) {
let next_pos = get_sixth_hex_char(&hash_output);
let next_char = get_seventh_hex_char(&hash_output);
if let Some(index) = next_pos.to_digit(10) {
if index < 8 {
if password_part2_bytes[index as usize] == 0 {
password_part2_bytes[index as usize] = next_char as u8;
set_characters += 1;
}
}
}
}
if set_characters == target_password_length {
break;
}
}
let password_part2 = str::from_utf8(&password_part2_bytes).unwrap();
println!("Part 2: password = '{}'", password_part2);
assert!(password_part2 == "863dde27");
}
#[test]
fn test1() {
let test_value = "abc3231929";
let mut hasher = Md5::new();
hasher.input(test_value.as_bytes());
let hash_result = hasher.result_str();
println!("{}", hash_result);
let mut buff = [0;16];
hasher.result(&mut buff);
let position = get_sixth_hex_char(&buff);
let character = get_seventh_hex_char(&buff);
println!("pos: {}", position);
println!("chr: {}", character);
}
|
main
|
identifier_name
|
main.rs
|
use std::str;
use std::u32;
use std::fmt::Write;
extern crate crypto;
use crypto::md5::Md5;
use crypto::digest::Digest;
// It takes two hex digits to make a byte, so check
// the first 2.5 bytes for zero values
fn first_five_hex_zero(input: &[u8; 16]) -> bool {
let sum = (input[0] as u32) + (input[1] as u32) + (input[2] as u32 >> 4);
sum == 0
}
fn get_sixth_hex_char(input: &[u8; 16]) -> char {
let mut buff = String::new();
let _ = write!(buff,"{:x}", input[2]);
buff.as_bytes()[0] as char
}
fn get_seventh_hex_char(input: &[u8; 16]) -> char {
let mut buff = String::new();
let _ = write!(buff,"{:x}", input[3] >> 4);
buff.as_bytes()[0] as char
}
fn main() {
// Puzzle input
let door_id = "reyedfim";
let mut hasher = Md5::new();
let mut password = "".to_owned();
let target_password_length = 8;
// Static buffer to hold the result
let mut hash_output = [0u8; 16];
// Loop through door_id + [0..max] to find hashes that
// start with five zeros
for i in 0..u32::max_value() {
let mut test_value = String::new();
let _ = write!(test_value,"{}{}", door_id, i);
hasher.reset();
hasher.input(test_value.as_bytes());
hasher.result(&mut hash_output);
if first_five_hex_zero(&hash_output) {
let next_char = get_sixth_hex_char(&hash_output);
password.push(next_char);
}
if password.len() >= target_password_length
|
}
println!("Part 1: password = '{}'", password);
assert!(password == "f97c354d");
// Part 2
let mut password_part2_bytes = [0u8; 8]; //" ".as_bytes();
let mut set_characters = 0;
// Loop through door_id + [0..max] to find hashes that
// start with five zeros
for i in 0..u32::max_value() {
let mut test_value = String::new();
let _ = write!(test_value,"{}{}", door_id, i);
hasher.reset();
hasher.input(test_value.as_bytes());
hasher.result(&mut hash_output);
if first_five_hex_zero(&hash_output) {
let next_pos = get_sixth_hex_char(&hash_output);
let next_char = get_seventh_hex_char(&hash_output);
if let Some(index) = next_pos.to_digit(10) {
if index < 8 {
if password_part2_bytes[index as usize] == 0 {
password_part2_bytes[index as usize] = next_char as u8;
set_characters += 1;
}
}
}
}
if set_characters == target_password_length {
break;
}
}
let password_part2 = str::from_utf8(&password_part2_bytes).unwrap();
println!("Part 2: password = '{}'", password_part2);
assert!(password_part2 == "863dde27");
}
#[test]
fn test1() {
let test_value = "abc3231929";
let mut hasher = Md5::new();
hasher.input(test_value.as_bytes());
let hash_result = hasher.result_str();
println!("{}", hash_result);
let mut buff = [0;16];
hasher.result(&mut buff);
let position = get_sixth_hex_char(&buff);
let character = get_seventh_hex_char(&buff);
println!("pos: {}", position);
println!("chr: {}", character);
}
|
{
break;
}
|
conditional_block
|
compaction_filter.rs
|
//! `CompactionFilter` allows an application to modify/delete a key-value at
//! the time of compaction.
use std::os::raw::{c_char, c_int};
use rocks_sys as ll;
#[repr(C)]
#[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Copy, Clone)]
pub enum ValueType {
Value = 0,
MergeOperand = 1,
}
#[derive(Debug)]
pub enum Decision {
Keep,
Remove,
ChangeValue(Vec<u8>),
RemoveAndSkipUntil(Vec<u8>),
}
impl Decision {
// to C Decision type
fn to_c(&self) -> c_int {
match *self {
Decision::Keep => 0,
Decision::Remove => 1,
Decision::ChangeValue(_) => 2,
Decision::RemoveAndSkipUntil(_) => 3,
}
}
}
/// `CompactionFilter` allows an application to modify/delete a key-value at
/// the time of compaction.
pub trait CompactionFilter {
// The compaction process invokes this
// method for kv that is being compacted. A return value
// of false indicates that the kv should be preserved in the
// output of this compaction run and a return value of true
// indicates that this key-value should be removed from the
// output of the compaction. The application can inspect
// the existing value of the key and make decision based on it.
//
// Key-Values that are results of merge operation during compaction are not
// passed into this function. Currently, when you have a mix of Put()s and
// Merge()s on a same key, we only guarantee to process the merge operands
// through the compaction filters. Put()s might be processed, or might not.
//
// When the value is to be preserved, the application has the option
// to modify the existing_value and pass it back through new_value.
// value_changed needs to be set to true in this case.
//
// If you use snapshot feature of RocksDB (i.e. call GetSnapshot() API on a
// DB* object), CompactionFilter might not be very useful for you. Due to
// guarantees we need to maintain, compaction process will not call Filter()
// on any keys that were written before the latest snapshot. In other words,
// compaction will only call Filter() on keys written after your most recent
// call to GetSnapshot(). In most cases, Filter() will not be called very
// often. This is something we're fixing. See the discussion at:
// https://www.facebook.com/groups/mysqlonrocksdb/permalink/999723240091865/
//
// If multithreaded compaction is being used *and* a single CompactionFilter
// instance was supplied via Options::compaction_filter, this method may be
// called from different threads concurrently. The application must ensure
// that the call is thread-safe.
//
// If the CompactionFilter was created by a factory, then it will only ever
// be used by a single thread that is doing the compaction run, and this
// call does not need to be thread-safe. However, multiple filters may be
// in existence and operating concurrently.
//
// The last paragraph is not true if you set max_subcompactions to more than
// 1. In that case, subcompaction from multiple threads may call a single
// CompactionFilter concurrently.
//
// For rust:
// - None: false, indicates that the kv should be preserved in the output of this compaction run.
// - Some(None): true, indicates that this key-value should be removed from the output of the
// compaction.
// - Some(Some(vec![])): modify the existing_value and pass it back through new_value.
// fn filter(&self, level: u32, key: &[u8], existing_value: &[u8]) -> Option<Option<Vec<u8>>> {
// None
// }
//
// The compaction process invokes this method on every merge operand. If this
// method returns true, the merge operand will be ignored and not written out
// in the compaction output
//
// Note: If you are using a TransactionDB, it is not recommended to implement
// FilterMergeOperand(). If a Merge operation is filtered out, TransactionDB
// may not realize there is a write conflict and may allow a Transaction to
// Commit that should have failed. Instead, it is better to implement any
// Merge filtering inside the MergeOperator.
// fn filter_merge_operand(&self, level: u32, key: &[u8], operand: &[u8]) -> bool {
// false
// }
//
/// An extended API. Called for both values and merge operands.
/// Allows changing value and skipping ranges of keys.
/// The default implementation uses Filter() and FilterMergeOperand().
/// If you're overriding this method, no need to override the other two.
/// `value_type` indicates whether this key-value corresponds to a normal
/// value (e.g. written with Put()) or a merge operand (written with Merge()).
///
/// Possible return values:
/// * kKeep - keep the key-value pair.
/// * kRemove - remove the key-value pair or merge operand.
/// * kChangeValue - keep the key and change the value/operand to *new_value.
/// * kRemoveAndSkipUntil - remove this key-value pair, and also remove all key-value pairs
/// with key in [key, *skip_until). This range of keys will be skipped without reading,
/// potentially saving some IO operations compared to removing the keys one by one.
///
/// *skip_until <= key is treated the same as Decision::kKeep
/// (since the range [key, *skip_until) is empty).
///
/// The keys are skipped even if there are snapshots containing them,
/// as if IgnoreSnapshots() was true; i.e. values removed
/// by kRemoveAndSkipUntil can disappear from a snapshot - beware
/// if you're using TransactionDB or DB::GetSnapshot().
///
/// Another warning: if value for a key was overwritten or merged into
/// (multiple Put()s or Merge()s), and compaction filter skips this key
/// with kRemoveAndSkipUntil, it's possible that it will remove only
/// the new value, exposing the old value that was supposed to be
/// overwritten.
///
/// If you use kRemoveAndSkipUntil, consider also reducing
/// compaction_readahead_size option.
///
/// Note: If you are using a TransactionDB, it is not recommended to filter
/// out or modify merge operands (ValueType::kMergeOperand).
/// If a merge operation is filtered out, TransactionDB may not realize there
/// is a write conflict and may allow a Transaction to Commit that should have
/// failed. Instead, it is better to implement any Merge filtering inside the
/// MergeOperator.
///
/// Rust:
/// Decision for detailed return type.
fn filter(&mut self, level: i32, key: &[u8], value_type: ValueType, existing_value: &[u8]) -> Decision {
Decision::Keep
}
/// This function is deprecated. Snapshots will always be ignored for
/// compaction filters, because we realized that not ignoring snapshots doesn't
/// provide the gurantee we initially thought it would provide. Repeatable
/// reads will not be guaranteed anyway. If you override the function and
/// returns false, we will fail the compaction.
fn ignore_snapshots(&self) -> bool {
true
}
/// Returns a name that identifies this compaction filter.
/// The name will be printed to LOG file on start up for diagnosis.
fn name(&self) -> &str {
"RustCompactionFilterV2\0"
}
}
/// Each compaction will create a new `CompactionFilter` allowing the
/// application to know about different compactions
pub trait CompactionFilterFactory {
fn create_compaction_filter(&self, context: &Context) -> Box<dyn CompactionFilter>;
/// Returns a name that identifies this compaction filter factory.
fn name(&self) -> &str {
"RustCompactionFilterFactory\0"
}
}
/// Context information of a compaction run
#[repr(C)]
pub struct Context {
/// Does this compaction run include all data files
pub is_full_compaction: bool,
/// Is this compaction requested by the client (true),
/// or is it occurring as an automatic compaction process
pub is_manual_compaction: bool,
/// Which column family this compaction is for.
pub column_family_id: u32,
}
// call rust fn in C
#[doc(hidden)]
pub mod c {
use super::*;
// Invoked from C++ for every key-value pair seen during compaction.
// Return codes mirror the C++ Decision enum: 0 = keep, 1 = remove,
// 2 = value changed (new bytes written into `new_value`),
// 3 = remove-and-skip-until (upper bound written into `skip_until`).
// SAFETY (review): assumes `f` is the trait-object pointer registered by the
// Rust side and `new_value`/`skip_until` are valid std::string pointers —
// TODO confirm against the C++ wrapper.
#[no_mangle]
#[allow(mutable_transmutes)]
pub unsafe extern "C" fn rust_compaction_filter_call(
f: *mut (),
level: c_int,
key: &&[u8], // *Slice
value_type: ValueType,
existing_value: &&[u8], // *Slice
new_value: *mut (), // *std::string
skip_until: *mut (),
) -> c_int {
assert!(!f.is_null());
// FIXME: borrow as mutable
let filter = f as *mut &mut (dyn CompactionFilter + Sync);
// must be the same as C part
// (this mapping duplicates Decision::to_c — keep the two in sync)
match (*filter).filter(level, key, value_type, existing_value) {
Decision::Keep => 0,
Decision::Remove => 1,
Decision::ChangeValue(nval) => {
ll::cxx_string_assign(new_value as *mut _, nval.as_ptr() as *const _, nval.len());
2
},
Decision::RemoveAndSkipUntil(skip) => {
ll::cxx_string_assign(skip_until as *mut _, skip.as_ptr() as *const _, skip.len());
3
},
}
}
/// FFI destructor: reclaims the boxed filter trait-object pointer and drops
/// it, releasing the Rust-side filter.
///
/// # Safety
/// `f` must be a pointer previously produced by `Box::into_raw` for a
/// `Box<&(dyn CompactionFilter + Sync)>`, and must not be used again after
/// this call (double free otherwise).
#[no_mangle]
pub unsafe extern "C" fn rust_compaction_filter_drop(f: *mut ()) {
    assert!(!f.is_null());
    let filter = f as *mut &(dyn CompactionFilter + Sync);
    // Drop explicitly: the previous bare `Box::from_raw(filter);` statement
    // relied on an implicit temporary drop and trips the `unused_must_use`
    // lint on newer toolchains.
    drop(Box::from_raw(filter));
}
// Returns the filter's identifying name for RocksDB's LOG file.
// The &str's raw pointer is handed to C, so implementations must embed a
// trailing '\0' (the default impls do) — otherwise C reads past the buffer.
#[no_mangle]
pub unsafe extern "C" fn rust_compaction_filter_name(f: *mut ()) -> *const c_char {
assert!(!f.is_null());
let filter = f as *mut &(dyn CompactionFilter + Sync);
(*filter).name().as_ptr() as _
}
// Forwards `ignore_snapshots()` to C as a 0/1 byte (c_char).
#[no_mangle]
pub unsafe extern "C" fn rust_compaction_filter_ignore_snapshots(f: *mut ()) -> c_char {
assert!(!f.is_null());
let filter = f as *mut &(dyn CompactionFilter + Sync);
(*filter).ignore_snapshots() as _
}
|
mod tests {
use crate::rocksdb::*;
use super::*;
use lazy_static::lazy_static;
pub struct MyCompactionFilter;
impl CompactionFilter for MyCompactionFilter {
fn filter(&mut self, level: i32, key: &[u8], value_type: ValueType, existing_value: &[u8]) -> Decision {
assert_eq!(value_type, ValueType::Value); // haven't set up merge test
if existing_value == b"TO-BE-DELETED" {
Decision::Remove
} else if existing_value == b"an-typo-in-value" {
Decision::ChangeValue(b"a-typo-not-in-value".to_vec())
} else if key == b"key-0" {
Decision::RemoveAndSkipUntil(b"key-5".to_vec())
} else {
Decision::Keep
}
}
}
lazy_static! {
static ref MY_COMPACTION_FILTER: MyCompactionFilter = MyCompactionFilter;
}
#[test]
fn compaction_filter() {
let tmp_dir = ::tempdir::TempDir::new_in(".", "rocks").unwrap();
let db = DB::open(
Options::default()
.map_db_options(|db| db.create_if_missing(true))
.map_cf_options(|cf| cf.compaction_filter(&*MY_COMPACTION_FILTER)),
&tmp_dir,
)
.unwrap();
println!("compact and try remove range");
assert!(db.put(&WriteOptions::default(), b"key-0", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-1", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-2", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-3", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-4", b"23333").is_ok());
// following will be reserved
assert!(db.put(&WriteOptions::default(), b"key-5", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-6", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-7", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-8", b"23333").is_ok());
println!("compact and delete");
assert!(db
.put(&WriteOptions::default(), b"will-delete-me", b"TO-BE-DELETED")
.is_ok());
println!("compact and change value");
assert!(db
.put(&WriteOptions::default(), b"will-fix-me", b"an-typo-in-value")
.is_ok());
// now compact full range
let ret = db.compact_range(&Default::default(),..);
assert!(ret.is_ok(), "error: {:?}", ret);
assert!(db.get(&ReadOptions::default(), b"will-delete-me").is_err());
assert!(db
.get(&ReadOptions::default(), b"will-delete-me")
.unwrap_err()
.is_not_found());
assert!(db.get(&ReadOptions::default(), b"key-0").is_err());
assert!(db.get(&ReadOptions::default(), b"key-0").unwrap_err().is_not_found());
assert!(db.get(&ReadOptions::default(), b"key-4").is_err());
assert!(db.get(&ReadOptions::default(), b"key-4").unwrap_err().is_not_found());
assert_eq!(db.get(&ReadOptions::default(), b"key-5").unwrap(), b"23333");
assert_eq!(
db.get(&ReadOptions::default(), b"will-fix-me").unwrap(),
b"a-typo-not-in-value"
);
drop(db);
drop(tmp_dir);
}
}
|
}
#[cfg(test)]
|
random_line_split
|
compaction_filter.rs
|
//! `CompactionFilter` allows an application to modify/delete a key-value at
//! the time of compaction.
use std::os::raw::{c_char, c_int};
use rocks_sys as ll;
// Kind of entry handed to `CompactionFilter::filter`.
// #[repr(C)]: the explicit discriminants are part of the FFI contract and
// must match the enum on the C++ side — do not renumber.
#[repr(C)]
#[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Copy, Clone)]
pub enum ValueType {
// A normal value written with Put().
Value = 0,
// A merge operand written with Merge().
MergeOperand = 1,
}
// Outcome of a `CompactionFilter::filter` call for one key-value pair.
#[derive(Debug)]
pub enum Decision {
// Keep the key-value pair unchanged.
Keep,
// Remove the key-value pair (or merge operand).
Remove,
// Keep the key but replace the value/operand with the given bytes.
ChangeValue(Vec<u8>),
// Remove this pair and also skip all keys up to (excluding) the given key.
RemoveAndSkipUntil(Vec<u8>),
}
impl Decision {
// to C Decision type
// Maps each variant to the numeric code the C++ shim understands; must stay
// in sync with the inline match in c::rust_compaction_filter_call.
// NOTE(review): appears unused in this file — the FFI shim duplicates the
// mapping inline; consider routing the shim through this helper.
fn to_c(&self) -> c_int {
match *self {
Decision::Keep => 0,
Decision::Remove => 1,
Decision::ChangeValue(_) => 2,
Decision::RemoveAndSkipUntil(_) => 3,
}
}
}
/// `CompactionFilter` allows an application to modify/delete a key-value at
/// the time of compaction.
pub trait CompactionFilter {
// The compaction process invokes this
// method for kv that is being compacted. A return value
// of false indicates that the kv should be preserved in the
// output of this compaction run and a return value of true
// indicates that this key-value should be removed from the
// output of the compaction. The application can inspect
// the existing value of the key and make decision based on it.
//
// Key-Values that are results of merge operation during compaction are not
// passed into this function. Currently, when you have a mix of Put()s and
// Merge()s on a same key, we only guarantee to process the merge operands
// through the compaction filters. Put()s might be processed, or might not.
//
// When the value is to be preserved, the application has the option
// to modify the existing_value and pass it back through new_value.
// value_changed needs to be set to true in this case.
//
// If you use snapshot feature of RocksDB (i.e. call GetSnapshot() API on a
// DB* object), CompactionFilter might not be very useful for you. Due to
// guarantees we need to maintain, compaction process will not call Filter()
// on any keys that were written before the latest snapshot. In other words,
// compaction will only call Filter() on keys written after your most recent
// call to GetSnapshot(). In most cases, Filter() will not be called very
// often. This is something we're fixing. See the discussion at:
// https://www.facebook.com/groups/mysqlonrocksdb/permalink/999723240091865/
//
// If multithreaded compaction is being used *and* a single CompactionFilter
// instance was supplied via Options::compaction_filter, this method may be
// called from different threads concurrently. The application must ensure
// that the call is thread-safe.
//
// If the CompactionFilter was created by a factory, then it will only ever
// be used by a single thread that is doing the compaction run, and this
// call does not need to be thread-safe. However, multiple filters may be
// in existence and operating concurrently.
//
// The last paragraph is not true if you set max_subcompactions to more than
// 1. In that case, subcompaction from multiple threads may call a single
// CompactionFilter concurrently.
//
// For rust:
// - None: false, indicates that the kv should be preserved in the output of this compaction run.
// - Some(None): true, indicates that this key-value should be removed from the output of the
// compaction.
// - Some(Some(vec![])): modify the existing_value and pass it back through new_value.
// fn filter(&self, level: u32, key: &[u8], existing_value: &[u8]) -> Option<Option<Vec<u8>>> {
// None
// }
//
// The compaction process invokes this method on every merge operand. If this
// method returns true, the merge operand will be ignored and not written out
// in the compaction output
//
// Note: If you are using a TransactionDB, it is not recommended to implement
// FilterMergeOperand(). If a Merge operation is filtered out, TransactionDB
// may not realize there is a write conflict and may allow a Transaction to
// Commit that should have failed. Instead, it is better to implement any
// Merge filtering inside the MergeOperator.
// fn filter_merge_operand(&self, level: u32, key: &[u8], operand: &[u8]) -> bool {
// false
// }
//
/// An extended API. Called for both values and merge operands.
/// Allows changing value and skipping ranges of keys.
/// The default implementation uses Filter() and FilterMergeOperand().
/// If you're overriding this method, no need to override the other two.
/// `value_type` indicates whether this key-value corresponds to a normal
/// value (e.g. written with Put()) or a merge operand (written with Merge()).
///
/// Possible return values:
/// * kKeep - keep the key-value pair.
/// * kRemove - remove the key-value pair or merge operand.
/// * kChangeValue - keep the key and change the value/operand to *new_value.
/// * kRemoveAndSkipUntil - remove this key-value pair, and also remove all key-value pairs
/// with key in [key, *skip_until). This range of keys will be skipped without reading,
/// potentially saving some IO operations compared to removing the keys one by one.
///
/// *skip_until <= key is treated the same as Decision::kKeep
/// (since the range [key, *skip_until) is empty).
///
/// The keys are skipped even if there are snapshots containing them,
/// as if IgnoreSnapshots() was true; i.e. values removed
/// by kRemoveAndSkipUntil can disappear from a snapshot - beware
/// if you're using TransactionDB or DB::GetSnapshot().
///
/// Another warning: if value for a key was overwritten or merged into
/// (multiple Put()s or Merge()s), and compaction filter skips this key
/// with kRemoveAndSkipUntil, it's possible that it will remove only
/// the new value, exposing the old value that was supposed to be
/// overwritten.
///
/// If you use kRemoveAndSkipUntil, consider also reducing
/// compaction_readahead_size option.
///
/// Note: If you are using a TransactionDB, it is not recommended to filter
/// out or modify merge operands (ValueType::kMergeOperand).
/// If a merge operation is filtered out, TransactionDB may not realize there
/// is a write conflict and may allow a Transaction to Commit that should have
/// failed. Instead, it is better to implement any Merge filtering inside the
/// MergeOperator.
///
/// Rust:
/// Decision for detailed return type.
fn filter(&mut self, level: i32, key: &[u8], value_type: ValueType, existing_value: &[u8]) -> Decision {
Decision::Keep
}
/// This function is deprecated. Snapshots will always be ignored for
/// compaction filters, because we realized that not ignoring snapshots doesn't
/// provide the gurantee we initially thought it would provide. Repeatable
/// reads will not be guaranteed anyway. If you override the function and
/// returns false, we will fail the compaction.
fn
|
(&self) -> bool {
true
}
/// Returns a name that identifies this compaction filter.
/// The name will be printed to LOG file on start up for diagnosis.
fn name(&self) -> &str {
"RustCompactionFilterV2\0"
}
}
/// Each compaction will create a new `CompactionFilter` allowing the
/// application to know about different compactions
pub trait CompactionFilterFactory {
/// Builds a fresh filter for one compaction run; `context` describes the
/// run (full/manual flags and the column family id).
fn create_compaction_filter(&self, context: &Context) -> Box<dyn CompactionFilter>;
/// Returns a name that identifies this compaction filter factory.
// NOTE: the trailing '\0' keeps the returned &str usable as a C string when
// its raw pointer crosses the FFI boundary — implementations should do the same.
fn name(&self) -> &str {
"RustCompactionFilterFactory\0"
}
}
/// Context information of a compaction run
// #[repr(C)]: this struct is read directly by the C++ side, so field order,
// field types, and layout must match the C definition — do not reorder.
#[repr(C)]
pub struct Context {
/// Does this compaction run include all data files
pub is_full_compaction: bool,
/// Is this compaction requested by the client (true),
/// or is it occurring as an automatic compaction process (false)
pub is_manual_compaction: bool,
/// Which column family this compaction is for.
pub column_family_id: u32,
}
// C-callable shims: each function receives the opaque pointer the C++ side
// stores and forwards the call to the boxed `CompactionFilter` trait object.
#[doc(hidden)]
pub mod c {
    use super::*;

    /// Invoked from C++ for every key-value pair seen during compaction.
    ///
    /// Return codes mirror the C++ `Decision` enum: 0 = keep, 1 = remove,
    /// 2 = value changed (new bytes written into `new_value`),
    /// 3 = remove-and-skip-until (upper bound written into `skip_until`).
    ///
    /// # Safety
    /// `f` must be the live trait-object pointer registered by the Rust side;
    /// `new_value` and `skip_until` must be valid `std::string` pointers on
    /// the C++ side.
    #[no_mangle]
    #[allow(mutable_transmutes)]
    pub unsafe extern "C" fn rust_compaction_filter_call(
        f: *mut (),
        level: c_int,
        key: &&[u8],            // *Slice
        value_type: ValueType,
        existing_value: &&[u8], // *Slice
        new_value: *mut (),     // *std::string
        skip_until: *mut (),
    ) -> c_int {
        assert!(!f.is_null());
        // FIXME: borrow as mutable
        let filter = f as *mut &mut (dyn CompactionFilter + Sync);
        // Codes must stay in sync with the C++ part (and Decision::to_c).
        match (*filter).filter(level, key, value_type, existing_value) {
            Decision::Keep => 0,
            Decision::Remove => 1,
            Decision::ChangeValue(nval) => {
                ll::cxx_string_assign(new_value as *mut _, nval.as_ptr() as *const _, nval.len());
                2
            },
            Decision::RemoveAndSkipUntil(skip) => {
                ll::cxx_string_assign(skip_until as *mut _, skip.as_ptr() as *const _, skip.len());
                3
            },
        }
    }

    /// FFI destructor: reclaims the boxed filter pointer and drops it.
    ///
    /// # Safety
    /// `f` must come from `Box::into_raw` and must not be used afterwards.
    #[no_mangle]
    pub unsafe extern "C" fn rust_compaction_filter_drop(f: *mut ()) {
        assert!(!f.is_null());
        let filter = f as *mut &(dyn CompactionFilter + Sync);
        // Drop explicitly: a bare `Box::from_raw(filter);` statement relies on
        // an implicit temporary drop and trips `unused_must_use` on newer
        // toolchains.
        drop(Box::from_raw(filter));
    }

    /// Returns the filter's name for the LOG file. Implementations embed a
    /// trailing '\0' in the &str so its raw pointer is a valid C string.
    #[no_mangle]
    pub unsafe extern "C" fn rust_compaction_filter_name(f: *mut ()) -> *const c_char {
        assert!(!f.is_null());
        let filter = f as *mut &(dyn CompactionFilter + Sync);
        (*filter).name().as_ptr() as _
    }

    /// Forwards `ignore_snapshots()` to C as a 0/1 byte.
    #[no_mangle]
    pub unsafe extern "C" fn rust_compaction_filter_ignore_snapshots(f: *mut ()) -> c_char {
        assert!(!f.is_null());
        let filter = f as *mut &(dyn CompactionFilter + Sync);
        (*filter).ignore_snapshots() as _
    }
}
#[cfg(test)]
mod tests {
use crate::rocksdb::*;
use super::*;
use lazy_static::lazy_static;
// Test filter exercising all four Decision variants:
// - values equal to b"TO-BE-DELETED" are removed,
// - b"an-typo-in-value" is rewritten to b"a-typo-not-in-value",
// - key-0 triggers RemoveAndSkipUntil up to (excluding) key-5,
// - everything else is kept.
pub struct MyCompactionFilter;
impl CompactionFilter for MyCompactionFilter {
fn filter(&mut self, level: i32, key: &[u8], value_type: ValueType, existing_value: &[u8]) -> Decision {
assert_eq!(value_type, ValueType::Value); // haven't set up merge test
if existing_value == b"TO-BE-DELETED" {
Decision::Remove
} else if existing_value == b"an-typo-in-value" {
Decision::ChangeValue(b"a-typo-not-in-value".to_vec())
} else if key == b"key-0" {
Decision::RemoveAndSkipUntil(b"key-5".to_vec())
} else {
Decision::Keep
}
}
}
// The filter reference handed to the DB must outlive it, so hold the filter
// in a static.
lazy_static! {
static ref MY_COMPACTION_FILTER: MyCompactionFilter = MyCompactionFilter;
}
#[test]
fn compaction_filter() {
let tmp_dir = ::tempdir::TempDir::new_in(".", "rocks").unwrap();
let db = DB::open(
Options::default()
.map_db_options(|db| db.create_if_missing(true))
.map_cf_options(|cf| cf.compaction_filter(&*MY_COMPACTION_FILTER)),
&tmp_dir,
)
.unwrap();
println!("compact and try remove range");
// key-0 .. key-4 should disappear via RemoveAndSkipUntil(b"key-5").
assert!(db.put(&WriteOptions::default(), b"key-0", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-1", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-2", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-3", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-4", b"23333").is_ok());
// following will be reserved
assert!(db.put(&WriteOptions::default(), b"key-5", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-6", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-7", b"23333").is_ok());
assert!(db.put(&WriteOptions::default(), b"key-8", b"23333").is_ok());
println!("compact and delete");
assert!(db
.put(&WriteOptions::default(), b"will-delete-me", b"TO-BE-DELETED")
.is_ok());
println!("compact and change value");
assert!(db
.put(&WriteOptions::default(), b"will-fix-me", b"an-typo-in-value")
.is_ok());
// now compact full range
let ret = db.compact_range(&Default::default(),..);
assert!(ret.is_ok(), "error: {:?}", ret);
// Removed keys must come back as NotFound, not as empty values.
assert!(db.get(&ReadOptions::default(), b"will-delete-me").is_err());
assert!(db
.get(&ReadOptions::default(), b"will-delete-me")
.unwrap_err()
.is_not_found());
assert!(db.get(&ReadOptions::default(), b"key-0").is_err());
assert!(db.get(&ReadOptions::default(), b"key-0").unwrap_err().is_not_found());
assert!(db.get(&ReadOptions::default(), b"key-4").is_err());
assert!(db.get(&ReadOptions::default(), b"key-4").unwrap_err().is_not_found());
assert_eq!(db.get(&ReadOptions::default(), b"key-5").unwrap(), b"23333");
assert_eq!(
db.get(&ReadOptions::default(), b"will-fix-me").unwrap(),
b"a-typo-not-in-value"
);
drop(db);
drop(tmp_dir);
}
}
|
ignore_snapshots
|
identifier_name
|
dataflow.rs
|
use rustc_data_structures::fx::FxHashMap;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::{self, BasicBlock, Body, Location, Place};
use rustc_middle::ty::RegionVid;
use rustc_middle::ty::TyCtxt;
use rustc_mir_dataflow::impls::{EverInitializedPlaces, MaybeUninitializedPlaces};
use rustc_mir_dataflow::ResultsVisitable;
use rustc_mir_dataflow::{self, fmt::DebugWithContext, GenKill};
use rustc_mir_dataflow::{Analysis, Direction, Results};
use std::fmt;
use std::iter;
use crate::{
places_conflict, BorrowSet, PlaceConflictBias, PlaceExt, RegionInferenceContext, ToRegionVid,
};
/// A tuple with named fields that can hold either the results or the transient state of the
/// dataflow analyses used by the borrow checker.
#[derive(Debug)]
pub struct BorrowckAnalyses<B, U, E> {
// Which borrows are in scope at each program point.
pub borrows: B,
// Which places are (maybe) uninitialized.
pub uninits: U,
// Which places have ever been initialized.
pub ever_inits: E,
}
/// The results of the dataflow analyses used by the borrow checker.
pub type BorrowckResults<'mir, 'tcx> = BorrowckAnalyses<
Results<'tcx, Borrows<'mir, 'tcx>>,
Results<'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
Results<'tcx, EverInitializedPlaces<'mir, 'tcx>>,
>;
/// The transient state of the dataflow analyses used by the borrow checker.
pub type BorrowckFlowState<'mir, 'tcx> =
<BorrowckResults<'mir, 'tcx> as ResultsVisitable<'tcx>>::FlowState;
// Generates a `ResultsVisitable` impl for a tuple-of-analyses struct: the
// combined flow state is the same struct instantiated with each analysis'
// domain type, and every reconstruct_*/reset hook simply forwards to each
// field's analysis in turn. All analyses must share one dataflow `Direction`
// (enforced by the common `D: Direction` parameter).
macro_rules! impl_visitable {
( $(
$T:ident { $( $field:ident : $A:ident ),* $(,)? }
)* ) => { $(
impl<'tcx, $($A),*, D: Direction> ResultsVisitable<'tcx> for $T<$( Results<'tcx, $A> ),*>
where
$( $A: Analysis<'tcx, Direction = D>, )*
{
type Direction = D;
type FlowState = $T<$( $A::Domain ),*>;
fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState {
$T {
$( $field: self.$field.analysis.bottom_value(body) ),*
}
}
fn reset_to_block_entry(
&self,
state: &mut Self::FlowState,
block: BasicBlock,
) {
$( state.$field.clone_from(&self.$field.entry_set_for_block(block)); )*
}
fn reconstruct_before_statement_effect(
&self,
state: &mut Self::FlowState,
stmt: &mir::Statement<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_before_statement_effect(&mut state.$field, stmt, loc); )*
}
fn reconstruct_statement_effect(
&self,
state: &mut Self::FlowState,
stmt: &mir::Statement<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_statement_effect(&mut state.$field, stmt, loc); )*
}
fn reconstruct_before_terminator_effect(
&self,
state: &mut Self::FlowState,
term: &mir::Terminator<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_before_terminator_effect(&mut state.$field, term, loc); )*
}
fn reconstruct_terminator_effect(
&self,
state: &mut Self::FlowState,
term: &mir::Terminator<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_terminator_effect(&mut state.$field, term, loc); )*
}
}
)* }
}
impl_visitable! {
BorrowckAnalyses { borrows: B, uninits: U, ever_inits: E }
}
rustc_index::newtype_index! {
pub struct BorrowIndex {
DEBUG_FORMAT = "bw{}"
}
}
/// `Borrows` stores the data used in the analyses that track the flow
/// of borrows.
///
/// It uniquely identifies every borrow (`Rvalue::Ref`) by a
/// `BorrowIndex`, and maps each such index to a `BorrowData`
/// describing the borrow. These indexes are used for representing the
/// borrows in compact bitvectors.
pub struct Borrows<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
borrow_set: &'a BorrowSet<'tcx>,
// Precomputed by OutOfScopePrecomputer: for each location, the borrows whose
// region ends there; the dataflow kills these bits on entry to the location.
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
}
// One DFS work-list entry: process statement indices `lo..=hi` of block `bb`.
struct StackEntry {
bb: mir::BasicBlock,
lo: usize,
hi: usize,
}
// Walks the CFG forward from each borrow's reserve location to find the first
// locations no longer contained in the borrow's region — i.e. where the
// borrow goes out of scope. `visited`/`visit_stack` are DFS scratch state,
// reused across borrows.
struct OutOfScopePrecomputer<'a, 'tcx> {
visited: BitSet<mir::BasicBlock>,
visit_stack: Vec<StackEntry>,
body: &'a Body<'tcx>,
regioncx: &'a RegionInferenceContext<'tcx>,
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
}
impl<'a, 'tcx> OutOfScopePrecomputer<'a, 'tcx> {
fn new(body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>) -> Self {
// Empty DFS state; `visited` is sized to the CFG so block-membership
// tests are O(1) bit operations.
OutOfScopePrecomputer {
visited: BitSet::new_empty(body.basic_blocks().len()),
visit_stack: vec![],
body,
regioncx,
borrows_out_of_scope_at_location: FxHashMap::default(),
}
}
}
impl<'tcx> OutOfScopePrecomputer<'_, 'tcx> {
/// Walks the CFG forward from `location` (the borrow's reserve point) and
/// records in `borrows_out_of_scope_at_location` the first location on each
/// path where `borrow_region` no longer contains the point — i.e. where the
/// borrow dies. `visited` is cleared at the end so the precomputer can be
/// reused for the next borrow.
fn precompute_borrows_out_of_scope(
&mut self,
borrow_index: BorrowIndex,
borrow_region: RegionVid,
location: Location,
) {
// We visit one BB at a time. The complication is that we may start in the
// middle of the first BB visited (the one containing `location`), in which
// case we may have to later on process the first part of that BB if there
// is a path back to its start.
// For visited BBs, we record the index of the first statement processed.
// (In fully processed BBs this index is 0.) Note also that we add BBs to
// `visited` once they are added to `stack`, before they are actually
// processed, because this avoids the need to look them up again on
// completion.
self.visited.insert(location.block);
let mut first_lo = location.statement_index;
let first_hi = self.body[location.block].statements.len();
self.visit_stack.push(StackEntry { bb: location.block, lo: first_lo, hi: first_hi });
while let Some(StackEntry { bb, lo, hi }) = self.visit_stack.pop() {
// If we process the first part of the first basic block (i.e. we encounter that block
// for the second time), we no longer have to visit its successors again.
let mut finished_early = bb == location.block && hi!= first_hi;
for i in lo..=hi {
let location = Location { block: bb, statement_index: i };
// If region does not contain a point at the location, then add to list and skip
// successor locations.
if!self.regioncx.region_contains(borrow_region, location) {
debug!("borrow {:?} gets killed at {:?}", borrow_index, location);
self.borrows_out_of_scope_at_location
.entry(location)
.or_default()
.push(borrow_index);
finished_early = true;
break;
}
}
if!finished_early {
// Add successor BBs to the work list, if necessary.
let bb_data = &self.body[bb];
debug_assert!(hi == bb_data.statements.len());
for &succ_bb in bb_data.terminator().successors() {
// `insert` returns false when the bit was already set.
if!self.visited.insert(succ_bb) {
if succ_bb == location.block && first_lo > 0 {
// `succ_bb` has been seen before. If it wasn't
// fully processed, add its first part to `stack`
// for processing.
self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
hi: first_lo - 1,
});
// And update this entry with 0, to represent the
// whole BB being processed.
first_lo = 0;
}
} else {
// succ_bb hasn't been seen before. Add it to
// `stack` for processing.
self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
hi: self.body[succ_bb].statements.len(),
});
}
}
}
}
// Reset scratch state for the next borrow.
self.visited.clear();
}
}
impl<'a, 'tcx> Borrows<'a, 'tcx> {
/// Builds the analysis: precomputes, for every borrow in `borrow_set`, the
/// locations where it goes out of scope (per the NLL region inference
/// results in `nonlexical_regioncx`).
crate fn new(
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
nonlexical_regioncx: &'a RegionInferenceContext<'tcx>,
borrow_set: &'a BorrowSet<'tcx>,
) -> Self {
let mut prec = OutOfScopePrecomputer::new(body, nonlexical_regioncx);
for (borrow_index, borrow_data) in borrow_set.iter_enumerated() {
let borrow_region = borrow_data.region.to_region_vid();
let location = borrow_data.reserve_location;
prec.precompute_borrows_out_of_scope(borrow_index, borrow_region, location);
}
Borrows {
tcx,
body,
borrow_set,
borrows_out_of_scope_at_location: prec.borrows_out_of_scope_at_location,
}
}
/// Location where the borrow `idx` was reserved.
pub fn location(&self, idx: BorrowIndex) -> &Location {
&self.borrow_set[idx].reserve_location
}
/// Add all borrows to the kill set, if those borrows are out of scope at `location`.
/// That means they went out of a nonlexical scope
fn kill_loans_out_of_scope_at_location(
&self,
trans: &mut impl GenKill<BorrowIndex>,
location: Location,
) {
// NOTE: The state associated with a given `location`
// reflects the dataflow on entry to the statement.
// Iterate over each of the borrows that we've precomputed
// to have went out of scope at this location and kill them.
//
// We are careful always to call this function *before* we
// set up the gen-bits for the statement or
// terminator. That way, if the effect of the statement or
// terminator *does* introduce a new loan of the same
// region, then setting that gen-bit will override any
// potential kill introduced here.
if let Some(indices) = self.borrows_out_of_scope_at_location.get(&location) {
trans.kill_all(indices.iter().copied());
}
}
/// Kill any borrows that conflict with `place`.
fn kill_borrows_on_place(&self, trans: &mut impl GenKill<BorrowIndex>, place: Place<'tcx>) {
debug!("kill_borrows_on_place: place={:?}", place);
let other_borrows_of_local = self
.borrow_set
.local_map
.get(&place.local)
.into_iter()
.flat_map(|bs| bs.iter())
.copied();
// If the borrowed place is a local with no projections, all other borrows of this
// local must conflict. This is purely an optimization so we don't have to call
// `places_conflict` for every borrow.
if place.projection.is_empty() {
if!self.body.local_decls[place.local].is_ref_to_static() {
trans.kill_all(other_borrows_of_local);
}
return;
}
// By passing `PlaceConflictBias::NoOverlap`, we conservatively assume that any given
// pair of array indices are unequal, so that when `places_conflict` returns true, we
// will be assured that two places being compared definitely denotes the same sets of
// locations.
let definitely_conflicting_borrows = other_borrows_of_local.filter(|&i| {
places_conflict(
self.tcx,
self.body,
self.borrow_set[i].borrowed_place,
place,
PlaceConflictBias::NoOverlap,
)
});
trans.kill_all(definitely_conflicting_borrows);
}
}
impl<'tcx> rustc_mir_dataflow::AnalysisDomain<'tcx> for Borrows<'_, 'tcx> {
// One bit per tracked borrow.
type Domain = BitSet<BorrowIndex>;
const NAME: &'static str = "borrows";
fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
// bottom = nothing is reserved or activated yet;
// NOTE(review): capacity is `len() * 2` — presumably one bit each for a
// borrow's reserve and activation phases; confirm against BorrowIndex usage.
BitSet::new_empty(self.borrow_set.len() * 2)
}
fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
// no borrows of code region_scopes have been taken prior to
// function execution, so this method has no effect.
}
}
impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
type Idx = BorrowIndex;
// Kills are applied *before* the statement's own gen effect so that a
// statement re-borrowing the same region can re-gen the bit afterwards
// (see kill_loans_out_of_scope_at_location).
fn before_statement_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
_statement: &mir::Statement<'tcx>,
location: Location,
) {
self.kill_loans_out_of_scope_at_location(trans, location);
}
// Gen a borrow bit when a `&`-rvalue is assigned; kill bits whose borrowed
// place is overwritten, goes storage-dead, or is clobbered by an asm output.
fn statement_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
stmt: &mir::Statement<'tcx>,
location: Location,
) {
match stmt.kind {
mir::StatementKind::Assign(box (lhs, ref rhs)) => {
if let mir::Rvalue::Ref(_, _, place) = *rhs {
// Some borrows (e.g. of ignored places) are never tracked.
if place.ignore_borrow(
self.tcx,
self.body,
&self.borrow_set.locals_state_at_exit,
) {
return;
}
// NOTE(review): lookup is keyed by this statement's location —
// assumes at most one tracked borrow is reserved per location.
let index = self.borrow_set.get_index_of(&location).unwrap_or_else(|| {
panic!("could not find BorrowIndex for location {:?}", location);
});
trans.gen(index);
}
// Make sure there are no remaining borrows for variables
// that are assigned over.
self.kill_borrows_on_place(trans, lhs);
}
mir::StatementKind::StorageDead(local) => {
// Make sure there are no remaining borrows for locals that
// are gone out of scope.
self.kill_borrows_on_place(trans, Place::from(local));
}
mir::StatementKind::LlvmInlineAsm(ref asm) => {
// Direct, non-read-write asm outputs overwrite their place.
for (output, kind) in iter::zip(&*asm.outputs, &asm.asm.outputs) {
if!kind.is_indirect &&!kind.is_rw {
self.kill_borrows_on_place(trans, *output);
}
}
}
// Remaining statement kinds neither create nor kill borrows.
mir::StatementKind::FakeRead(..)
| mir::StatementKind::SetDiscriminant {.. }
| mir::StatementKind::StorageLive(..)
| mir::StatementKind::Retag {.. }
| mir::StatementKind::AscribeUserType(..)
| mir::StatementKind::Coverage(..)
| mir::StatementKind::CopyNonOverlapping(..)
| mir::StatementKind::Nop => {}
}
}
// Same ordering rule as before_statement_effect: out-of-scope kills run
// before the terminator's own gen effects.
fn before_terminator_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
_terminator: &mir::Terminator<'tcx>,
location: Location,
) {
self.kill_loans_out_of_scope_at_location(trans, location);
}
fn terminator_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
teminator: &mir::Terminator<'tcx>,
_location: Location,
)
|
// Returning from a call neither creates nor kills borrows, so this is a
// deliberate no-op.
fn call_return_effect(
&self,
_trans: &mut impl GenKill<Self::Idx>,
_block: mir::BasicBlock,
_func: &mir::Operand<'tcx>,
_args: &[mir::Operand<'tcx>],
_dest_place: mir::Place<'tcx>,
) {
}
}
// Pretty-print a borrow in dataflow debug output as its reserve location.
impl DebugWithContext<Borrows<'_, '_>> for BorrowIndex {
fn fmt_with(&self, ctxt: &Borrows<'_, '_>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", ctxt.location(*self))
}
}
|
{
if let mir::TerminatorKind::InlineAsm { operands, .. } = &teminator.kind {
for op in operands {
if let mir::InlineAsmOperand::Out { place: Some(place), .. }
| mir::InlineAsmOperand::InOut { out_place: Some(place), .. } = *op
{
self.kill_borrows_on_place(trans, place);
}
}
}
}
|
identifier_body
|
dataflow.rs
|
use rustc_data_structures::fx::FxHashMap;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::{self, BasicBlock, Body, Location, Place};
use rustc_middle::ty::RegionVid;
use rustc_middle::ty::TyCtxt;
use rustc_mir_dataflow::impls::{EverInitializedPlaces, MaybeUninitializedPlaces};
use rustc_mir_dataflow::ResultsVisitable;
use rustc_mir_dataflow::{self, fmt::DebugWithContext, GenKill};
use rustc_mir_dataflow::{Analysis, Direction, Results};
use std::fmt;
use std::iter;
use crate::{
places_conflict, BorrowSet, PlaceConflictBias, PlaceExt, RegionInferenceContext, ToRegionVid,
};
/// A tuple with named fields that can hold either the results or the transient state of the
/// dataflow analyses used by the borrow checker.
#[derive(Debug)]
pub struct BorrowckAnalyses<B, U, E> {
// Which borrows are in scope at each program point.
pub borrows: B,
// Which places are (maybe) uninitialized.
pub uninits: U,
// Which places have ever been initialized.
pub ever_inits: E,
}
/// The results of the dataflow analyses used by the borrow checker.
pub type BorrowckResults<'mir, 'tcx> = BorrowckAnalyses<
Results<'tcx, Borrows<'mir, 'tcx>>,
Results<'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
Results<'tcx, EverInitializedPlaces<'mir, 'tcx>>,
>;
/// The transient state of the dataflow analyses used by the borrow checker.
pub type BorrowckFlowState<'mir, 'tcx> =
<BorrowckResults<'mir, 'tcx> as ResultsVisitable<'tcx>>::FlowState;
macro_rules! impl_visitable {
( $(
$T:ident { $( $field:ident : $A:ident ),* $(,)? }
)* ) => { $(
impl<'tcx, $($A),*, D: Direction> ResultsVisitable<'tcx> for $T<$( Results<'tcx, $A> ),*>
where
$( $A: Analysis<'tcx, Direction = D>, )*
{
type Direction = D;
type FlowState = $T<$( $A::Domain ),*>;
fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState {
$T {
$( $field: self.$field.analysis.bottom_value(body) ),*
}
}
fn reset_to_block_entry(
&self,
state: &mut Self::FlowState,
block: BasicBlock,
) {
$( state.$field.clone_from(&self.$field.entry_set_for_block(block)); )*
}
fn reconstruct_before_statement_effect(
&self,
state: &mut Self::FlowState,
stmt: &mir::Statement<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_before_statement_effect(&mut state.$field, stmt, loc); )*
}
fn reconstruct_statement_effect(
&self,
state: &mut Self::FlowState,
stmt: &mir::Statement<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_statement_effect(&mut state.$field, stmt, loc); )*
}
fn reconstruct_before_terminator_effect(
&self,
state: &mut Self::FlowState,
term: &mir::Terminator<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_before_terminator_effect(&mut state.$field, term, loc); )*
}
fn reconstruct_terminator_effect(
&self,
state: &mut Self::FlowState,
term: &mir::Terminator<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_terminator_effect(&mut state.$field, term, loc); )*
}
}
)* }
}
impl_visitable! {
BorrowckAnalyses { borrows: B, uninits: U, ever_inits: E }
}
rustc_index::newtype_index! {
pub struct BorrowIndex {
DEBUG_FORMAT = "bw{}"
}
}
/// `Borrows` stores the data used in the analyses that track the flow
/// of borrows.
///
/// It uniquely identifies every borrow (`Rvalue::Ref`) by a
/// `BorrowIndex`, and maps each such index to a `BorrowData`
/// describing the borrow. These indexes are used for representing the
/// borrows in compact bitvectors.
pub struct Borrows<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
borrow_set: &'a BorrowSet<'tcx>,
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
}
// One DFS work-list entry: process statement indices `lo..=hi` of block `bb`.
struct StackEntry {
bb: mir::BasicBlock,
lo: usize,
hi: usize,
}
// Walks the CFG forward from each borrow's reserve location to find the first
// locations no longer contained in the borrow's region — i.e. where the
// borrow goes out of scope. `visited`/`visit_stack` are DFS scratch state,
// reused across borrows.
struct OutOfScopePrecomputer<'a, 'tcx> {
visited: BitSet<mir::BasicBlock>,
visit_stack: Vec<StackEntry>,
body: &'a Body<'tcx>,
regioncx: &'a RegionInferenceContext<'tcx>,
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
}
impl<'a, 'tcx> OutOfScopePrecomputer<'a, 'tcx> {
fn new(body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>) -> Self {
// Empty DFS state; `visited` is sized to the CFG so block-membership
// tests are O(1) bit operations.
OutOfScopePrecomputer {
visited: BitSet::new_empty(body.basic_blocks().len()),
visit_stack: vec![],
body,
regioncx,
borrows_out_of_scope_at_location: FxHashMap::default(),
}
}
}
impl<'tcx> OutOfScopePrecomputer<'_, 'tcx> {
fn precompute_borrows_out_of_scope(
&mut self,
borrow_index: BorrowIndex,
borrow_region: RegionVid,
location: Location,
) {
// We visit one BB at a time. The complication is that we may start in the
// middle of the first BB visited (the one containing `location`), in which
// case we may have to later on process the first part of that BB if there
// is a path back to its start.
// For visited BBs, we record the index of the first statement processed.
// (In fully processed BBs this index is 0.) Note also that we add BBs to
// `visited` once they are added to `stack`, before they are actually
// processed, because this avoids the need to look them up again on
// completion.
self.visited.insert(location.block);
let mut first_lo = location.statement_index;
let first_hi = self.body[location.block].statements.len();
self.visit_stack.push(StackEntry { bb: location.block, lo: first_lo, hi: first_hi });
while let Some(StackEntry { bb, lo, hi }) = self.visit_stack.pop() {
// If we process the first part of the first basic block (i.e. we encounter that block
// for the second time), we no longer have to visit its successors again.
let mut finished_early = bb == location.block && hi!= first_hi;
for i in lo..=hi {
let location = Location { block: bb, statement_index: i };
// If region does not contain a point at the location, then add to list and skip
// successor locations.
if!self.regioncx.region_contains(borrow_region, location) {
debug!("borrow {:?} gets killed at {:?}", borrow_index, location);
self.borrows_out_of_scope_at_location
.entry(location)
.or_default()
.push(borrow_index);
finished_early = true;
break;
}
}
if!finished_early {
// Add successor BBs to the work list, if necessary.
let bb_data = &self.body[bb];
debug_assert!(hi == bb_data.statements.len());
for &succ_bb in bb_data.terminator().successors() {
if!self.visited.insert(succ_bb) {
if succ_bb == location.block && first_lo > 0 {
// `succ_bb` has been seen before. If it wasn't
// fully processed, add its first part to `stack`
// for processing.
self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
hi: first_lo - 1,
});
// And update this entry with 0, to represent the
// whole BB being processed.
first_lo = 0;
}
} else {
// succ_bb hasn't been seen before. Add it to
// `stack` for processing.
self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
hi: self.body[succ_bb].statements.len(),
});
}
}
}
}
self.visited.clear();
}
}
impl<'a, 'tcx> Borrows<'a, 'tcx> {
crate fn new(
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
nonlexical_regioncx: &'a RegionInferenceContext<'tcx>,
borrow_set: &'a BorrowSet<'tcx>,
) -> Self {
let mut prec = OutOfScopePrecomputer::new(body, nonlexical_regioncx);
for (borrow_index, borrow_data) in borrow_set.iter_enumerated() {
let borrow_region = borrow_data.region.to_region_vid();
let location = borrow_data.reserve_location;
prec.precompute_borrows_out_of_scope(borrow_index, borrow_region, location);
}
Borrows {
tcx,
body,
borrow_set,
borrows_out_of_scope_at_location: prec.borrows_out_of_scope_at_location,
}
}
pub fn location(&self, idx: BorrowIndex) -> &Location {
&self.borrow_set[idx].reserve_location
}
/// Add all borrows to the kill set, if those borrows are out of scope at `location`.
/// That means they went out of a nonlexical scope
fn kill_loans_out_of_scope_at_location(
&self,
trans: &mut impl GenKill<BorrowIndex>,
location: Location,
) {
// NOTE: The state associated with a given `location`
// reflects the dataflow on entry to the statement.
// Iterate over each of the borrows that we've precomputed
// to have went out of scope at this location and kill them.
//
// We are careful always to call this function *before* we
// set up the gen-bits for the statement or
// terminator. That way, if the effect of the statement or
// terminator *does* introduce a new loan of the same
// region, then setting that gen-bit will override any
// potential kill introduced here.
if let Some(indices) = self.borrows_out_of_scope_at_location.get(&location) {
trans.kill_all(indices.iter().copied());
}
}
/// Kill any borrows that conflict with `place`.
fn kill_borrows_on_place(&self, trans: &mut impl GenKill<BorrowIndex>, place: Place<'tcx>) {
debug!("kill_borrows_on_place: place={:?}", place);
let other_borrows_of_local = self
.borrow_set
.local_map
.get(&place.local)
.into_iter()
.flat_map(|bs| bs.iter())
.copied();
// If the borrowed place is a local with no projections, all other borrows of this
// local must conflict. This is purely an optimization so we don't have to call
// `places_conflict` for every borrow.
if place.projection.is_empty() {
if!self.body.local_decls[place.local].is_ref_to_static() {
trans.kill_all(other_borrows_of_local);
}
return;
}
// By passing `PlaceConflictBias::NoOverlap`, we conservatively assume that any given
// pair of array indices are unequal, so that when `places_conflict` returns true, we
// will be assured that two places being compared definitely denotes the same sets of
// locations.
let definitely_conflicting_borrows = other_borrows_of_local.filter(|&i| {
places_conflict(
self.tcx,
self.body,
self.borrow_set[i].borrowed_place,
place,
PlaceConflictBias::NoOverlap,
)
});
trans.kill_all(definitely_conflicting_borrows);
}
}
impl<'tcx> rustc_mir_dataflow::AnalysisDomain<'tcx> for Borrows<'_, 'tcx> {
type Domain = BitSet<BorrowIndex>;
const NAME: &'static str = "borrows";
fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
// bottom = nothing is reserved or activated yet;
BitSet::new_empty(self.borrow_set.len() * 2)
}
fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
// no borrows of code region_scopes have been taken prior to
// function execution, so this method has no effect.
}
}
impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
type Idx = BorrowIndex;
fn before_statement_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
_statement: &mir::Statement<'tcx>,
location: Location,
) {
self.kill_loans_out_of_scope_at_location(trans, location);
}
fn statement_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
stmt: &mir::Statement<'tcx>,
location: Location,
) {
match stmt.kind {
mir::StatementKind::Assign(box (lhs, ref rhs)) => {
if let mir::Rvalue::Ref(_, _, place) = *rhs {
if place.ignore_borrow(
self.tcx,
self.body,
&self.borrow_set.locals_state_at_exit,
) {
return;
|
trans.gen(index);
}
// Make sure there are no remaining borrows for variables
// that are assigned over.
self.kill_borrows_on_place(trans, lhs);
}
mir::StatementKind::StorageDead(local) => {
// Make sure there are no remaining borrows for locals that
// are gone out of scope.
self.kill_borrows_on_place(trans, Place::from(local));
}
mir::StatementKind::LlvmInlineAsm(ref asm) => {
for (output, kind) in iter::zip(&*asm.outputs, &asm.asm.outputs) {
if!kind.is_indirect &&!kind.is_rw {
self.kill_borrows_on_place(trans, *output);
}
}
}
mir::StatementKind::FakeRead(..)
| mir::StatementKind::SetDiscriminant {.. }
| mir::StatementKind::StorageLive(..)
| mir::StatementKind::Retag {.. }
| mir::StatementKind::AscribeUserType(..)
| mir::StatementKind::Coverage(..)
| mir::StatementKind::CopyNonOverlapping(..)
| mir::StatementKind::Nop => {}
}
}
fn before_terminator_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
_terminator: &mir::Terminator<'tcx>,
location: Location,
) {
self.kill_loans_out_of_scope_at_location(trans, location);
}
fn terminator_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
teminator: &mir::Terminator<'tcx>,
_location: Location,
) {
if let mir::TerminatorKind::InlineAsm { operands,.. } = &teminator.kind {
for op in operands {
if let mir::InlineAsmOperand::Out { place: Some(place),.. }
| mir::InlineAsmOperand::InOut { out_place: Some(place),.. } = *op
{
self.kill_borrows_on_place(trans, place);
}
}
}
}
fn call_return_effect(
&self,
_trans: &mut impl GenKill<Self::Idx>,
_block: mir::BasicBlock,
_func: &mir::Operand<'tcx>,
_args: &[mir::Operand<'tcx>],
_dest_place: mir::Place<'tcx>,
) {
}
}
impl DebugWithContext<Borrows<'_, '_>> for BorrowIndex {
fn fmt_with(&self, ctxt: &Borrows<'_, '_>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", ctxt.location(*self))
}
}
|
}
let index = self.borrow_set.get_index_of(&location).unwrap_or_else(|| {
panic!("could not find BorrowIndex for location {:?}", location);
});
|
random_line_split
|
dataflow.rs
|
use rustc_data_structures::fx::FxHashMap;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::{self, BasicBlock, Body, Location, Place};
use rustc_middle::ty::RegionVid;
use rustc_middle::ty::TyCtxt;
use rustc_mir_dataflow::impls::{EverInitializedPlaces, MaybeUninitializedPlaces};
use rustc_mir_dataflow::ResultsVisitable;
use rustc_mir_dataflow::{self, fmt::DebugWithContext, GenKill};
use rustc_mir_dataflow::{Analysis, Direction, Results};
use std::fmt;
use std::iter;
use crate::{
places_conflict, BorrowSet, PlaceConflictBias, PlaceExt, RegionInferenceContext, ToRegionVid,
};
/// A tuple with named fields that can hold either the results or the transient state of the
/// dataflow analyses used by the borrow checker.
#[derive(Debug)]
pub struct BorrowckAnalyses<B, U, E> {
pub borrows: B,
pub uninits: U,
pub ever_inits: E,
}
/// The results of the dataflow analyses used by the borrow checker.
pub type BorrowckResults<'mir, 'tcx> = BorrowckAnalyses<
Results<'tcx, Borrows<'mir, 'tcx>>,
Results<'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
Results<'tcx, EverInitializedPlaces<'mir, 'tcx>>,
>;
/// The transient state of the dataflow analyses used by the borrow checker.
pub type BorrowckFlowState<'mir, 'tcx> =
<BorrowckResults<'mir, 'tcx> as ResultsVisitable<'tcx>>::FlowState;
macro_rules! impl_visitable {
( $(
$T:ident { $( $field:ident : $A:ident ),* $(,)? }
)* ) => { $(
impl<'tcx, $($A),*, D: Direction> ResultsVisitable<'tcx> for $T<$( Results<'tcx, $A> ),*>
where
$( $A: Analysis<'tcx, Direction = D>, )*
{
type Direction = D;
type FlowState = $T<$( $A::Domain ),*>;
fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState {
$T {
$( $field: self.$field.analysis.bottom_value(body) ),*
}
}
fn reset_to_block_entry(
&self,
state: &mut Self::FlowState,
block: BasicBlock,
) {
$( state.$field.clone_from(&self.$field.entry_set_for_block(block)); )*
}
fn reconstruct_before_statement_effect(
&self,
state: &mut Self::FlowState,
stmt: &mir::Statement<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_before_statement_effect(&mut state.$field, stmt, loc); )*
}
fn reconstruct_statement_effect(
&self,
state: &mut Self::FlowState,
stmt: &mir::Statement<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_statement_effect(&mut state.$field, stmt, loc); )*
}
fn reconstruct_before_terminator_effect(
&self,
state: &mut Self::FlowState,
term: &mir::Terminator<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_before_terminator_effect(&mut state.$field, term, loc); )*
}
fn reconstruct_terminator_effect(
&self,
state: &mut Self::FlowState,
term: &mir::Terminator<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_terminator_effect(&mut state.$field, term, loc); )*
}
}
)* }
}
impl_visitable! {
BorrowckAnalyses { borrows: B, uninits: U, ever_inits: E }
}
rustc_index::newtype_index! {
pub struct BorrowIndex {
DEBUG_FORMAT = "bw{}"
}
}
/// `Borrows` stores the data used in the analyses that track the flow
/// of borrows.
///
/// It uniquely identifies every borrow (`Rvalue::Ref`) by a
/// `BorrowIndex`, and maps each such index to a `BorrowData`
/// describing the borrow. These indexes are used for representing the
/// borrows in compact bitvectors.
pub struct Borrows<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
borrow_set: &'a BorrowSet<'tcx>,
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
}
struct StackEntry {
bb: mir::BasicBlock,
lo: usize,
hi: usize,
}
struct OutOfScopePrecomputer<'a, 'tcx> {
visited: BitSet<mir::BasicBlock>,
visit_stack: Vec<StackEntry>,
body: &'a Body<'tcx>,
regioncx: &'a RegionInferenceContext<'tcx>,
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
}
impl<'a, 'tcx> OutOfScopePrecomputer<'a, 'tcx> {
fn new(body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>) -> Self {
OutOfScopePrecomputer {
visited: BitSet::new_empty(body.basic_blocks().len()),
visit_stack: vec![],
body,
regioncx,
borrows_out_of_scope_at_location: FxHashMap::default(),
}
}
}
impl<'tcx> OutOfScopePrecomputer<'_, 'tcx> {
fn precompute_borrows_out_of_scope(
&mut self,
borrow_index: BorrowIndex,
borrow_region: RegionVid,
location: Location,
) {
// We visit one BB at a time. The complication is that we may start in the
// middle of the first BB visited (the one containing `location`), in which
// case we may have to later on process the first part of that BB if there
// is a path back to its start.
// For visited BBs, we record the index of the first statement processed.
// (In fully processed BBs this index is 0.) Note also that we add BBs to
// `visited` once they are added to `stack`, before they are actually
// processed, because this avoids the need to look them up again on
// completion.
self.visited.insert(location.block);
let mut first_lo = location.statement_index;
let first_hi = self.body[location.block].statements.len();
self.visit_stack.push(StackEntry { bb: location.block, lo: first_lo, hi: first_hi });
while let Some(StackEntry { bb, lo, hi }) = self.visit_stack.pop() {
// If we process the first part of the first basic block (i.e. we encounter that block
// for the second time), we no longer have to visit its successors again.
let mut finished_early = bb == location.block && hi!= first_hi;
for i in lo..=hi {
let location = Location { block: bb, statement_index: i };
// If region does not contain a point at the location, then add to list and skip
// successor locations.
if!self.regioncx.region_contains(borrow_region, location) {
debug!("borrow {:?} gets killed at {:?}", borrow_index, location);
self.borrows_out_of_scope_at_location
.entry(location)
.or_default()
.push(borrow_index);
finished_early = true;
break;
}
}
if!finished_early {
// Add successor BBs to the work list, if necessary.
let bb_data = &self.body[bb];
debug_assert!(hi == bb_data.statements.len());
for &succ_bb in bb_data.terminator().successors() {
if!self.visited.insert(succ_bb) {
if succ_bb == location.block && first_lo > 0 {
// `succ_bb` has been seen before. If it wasn't
// fully processed, add its first part to `stack`
// for processing.
self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
hi: first_lo - 1,
});
// And update this entry with 0, to represent the
// whole BB being processed.
first_lo = 0;
}
} else {
// succ_bb hasn't been seen before. Add it to
// `stack` for processing.
self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
hi: self.body[succ_bb].statements.len(),
});
}
}
}
}
self.visited.clear();
}
}
impl<'a, 'tcx> Borrows<'a, 'tcx> {
crate fn new(
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
nonlexical_regioncx: &'a RegionInferenceContext<'tcx>,
borrow_set: &'a BorrowSet<'tcx>,
) -> Self {
let mut prec = OutOfScopePrecomputer::new(body, nonlexical_regioncx);
for (borrow_index, borrow_data) in borrow_set.iter_enumerated() {
let borrow_region = borrow_data.region.to_region_vid();
let location = borrow_data.reserve_location;
prec.precompute_borrows_out_of_scope(borrow_index, borrow_region, location);
}
Borrows {
tcx,
body,
borrow_set,
borrows_out_of_scope_at_location: prec.borrows_out_of_scope_at_location,
}
}
pub fn location(&self, idx: BorrowIndex) -> &Location {
&self.borrow_set[idx].reserve_location
}
/// Add all borrows to the kill set, if those borrows are out of scope at `location`.
/// That means they went out of a nonlexical scope
fn kill_loans_out_of_scope_at_location(
&self,
trans: &mut impl GenKill<BorrowIndex>,
location: Location,
) {
// NOTE: The state associated with a given `location`
// reflects the dataflow on entry to the statement.
// Iterate over each of the borrows that we've precomputed
// to have went out of scope at this location and kill them.
//
// We are careful always to call this function *before* we
// set up the gen-bits for the statement or
// terminator. That way, if the effect of the statement or
// terminator *does* introduce a new loan of the same
// region, then setting that gen-bit will override any
// potential kill introduced here.
if let Some(indices) = self.borrows_out_of_scope_at_location.get(&location) {
trans.kill_all(indices.iter().copied());
}
}
/// Kill any borrows that conflict with `place`.
fn kill_borrows_on_place(&self, trans: &mut impl GenKill<BorrowIndex>, place: Place<'tcx>) {
debug!("kill_borrows_on_place: place={:?}", place);
let other_borrows_of_local = self
.borrow_set
.local_map
.get(&place.local)
.into_iter()
.flat_map(|bs| bs.iter())
.copied();
// If the borrowed place is a local with no projections, all other borrows of this
// local must conflict. This is purely an optimization so we don't have to call
// `places_conflict` for every borrow.
if place.projection.is_empty() {
if!self.body.local_decls[place.local].is_ref_to_static() {
trans.kill_all(other_borrows_of_local);
}
return;
}
// By passing `PlaceConflictBias::NoOverlap`, we conservatively assume that any given
// pair of array indices are unequal, so that when `places_conflict` returns true, we
// will be assured that two places being compared definitely denotes the same sets of
// locations.
let definitely_conflicting_borrows = other_borrows_of_local.filter(|&i| {
places_conflict(
self.tcx,
self.body,
self.borrow_set[i].borrowed_place,
place,
PlaceConflictBias::NoOverlap,
)
});
trans.kill_all(definitely_conflicting_borrows);
}
}
impl<'tcx> rustc_mir_dataflow::AnalysisDomain<'tcx> for Borrows<'_, 'tcx> {
type Domain = BitSet<BorrowIndex>;
const NAME: &'static str = "borrows";
fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
// bottom = nothing is reserved or activated yet;
BitSet::new_empty(self.borrow_set.len() * 2)
}
fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
// no borrows of code region_scopes have been taken prior to
// function execution, so this method has no effect.
}
}
impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
type Idx = BorrowIndex;
fn before_statement_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
_statement: &mir::Statement<'tcx>,
location: Location,
) {
self.kill_loans_out_of_scope_at_location(trans, location);
}
fn statement_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
stmt: &mir::Statement<'tcx>,
location: Location,
) {
match stmt.kind {
mir::StatementKind::Assign(box (lhs, ref rhs)) => {
if let mir::Rvalue::Ref(_, _, place) = *rhs {
if place.ignore_borrow(
self.tcx,
self.body,
&self.borrow_set.locals_state_at_exit,
) {
return;
}
let index = self.borrow_set.get_index_of(&location).unwrap_or_else(|| {
panic!("could not find BorrowIndex for location {:?}", location);
});
trans.gen(index);
}
// Make sure there are no remaining borrows for variables
// that are assigned over.
self.kill_borrows_on_place(trans, lhs);
}
mir::StatementKind::StorageDead(local) =>
|
mir::StatementKind::LlvmInlineAsm(ref asm) => {
for (output, kind) in iter::zip(&*asm.outputs, &asm.asm.outputs) {
if!kind.is_indirect &&!kind.is_rw {
self.kill_borrows_on_place(trans, *output);
}
}
}
mir::StatementKind::FakeRead(..)
| mir::StatementKind::SetDiscriminant {.. }
| mir::StatementKind::StorageLive(..)
| mir::StatementKind::Retag {.. }
| mir::StatementKind::AscribeUserType(..)
| mir::StatementKind::Coverage(..)
| mir::StatementKind::CopyNonOverlapping(..)
| mir::StatementKind::Nop => {}
}
}
fn before_terminator_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
_terminator: &mir::Terminator<'tcx>,
location: Location,
) {
self.kill_loans_out_of_scope_at_location(trans, location);
}
fn terminator_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
teminator: &mir::Terminator<'tcx>,
_location: Location,
) {
if let mir::TerminatorKind::InlineAsm { operands,.. } = &teminator.kind {
for op in operands {
if let mir::InlineAsmOperand::Out { place: Some(place),.. }
| mir::InlineAsmOperand::InOut { out_place: Some(place),.. } = *op
{
self.kill_borrows_on_place(trans, place);
}
}
}
}
fn call_return_effect(
&self,
_trans: &mut impl GenKill<Self::Idx>,
_block: mir::BasicBlock,
_func: &mir::Operand<'tcx>,
_args: &[mir::Operand<'tcx>],
_dest_place: mir::Place<'tcx>,
) {
}
}
impl DebugWithContext<Borrows<'_, '_>> for BorrowIndex {
fn fmt_with(&self, ctxt: &Borrows<'_, '_>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", ctxt.location(*self))
}
}
|
{
// Make sure there are no remaining borrows for locals that
// are gone out of scope.
self.kill_borrows_on_place(trans, Place::from(local));
}
|
conditional_block
|
dataflow.rs
|
use rustc_data_structures::fx::FxHashMap;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::{self, BasicBlock, Body, Location, Place};
use rustc_middle::ty::RegionVid;
use rustc_middle::ty::TyCtxt;
use rustc_mir_dataflow::impls::{EverInitializedPlaces, MaybeUninitializedPlaces};
use rustc_mir_dataflow::ResultsVisitable;
use rustc_mir_dataflow::{self, fmt::DebugWithContext, GenKill};
use rustc_mir_dataflow::{Analysis, Direction, Results};
use std::fmt;
use std::iter;
use crate::{
places_conflict, BorrowSet, PlaceConflictBias, PlaceExt, RegionInferenceContext, ToRegionVid,
};
/// A tuple with named fields that can hold either the results or the transient state of the
/// dataflow analyses used by the borrow checker.
#[derive(Debug)]
pub struct
|
<B, U, E> {
pub borrows: B,
pub uninits: U,
pub ever_inits: E,
}
/// The results of the dataflow analyses used by the borrow checker.
pub type BorrowckResults<'mir, 'tcx> = BorrowckAnalyses<
Results<'tcx, Borrows<'mir, 'tcx>>,
Results<'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
Results<'tcx, EverInitializedPlaces<'mir, 'tcx>>,
>;
/// The transient state of the dataflow analyses used by the borrow checker.
pub type BorrowckFlowState<'mir, 'tcx> =
<BorrowckResults<'mir, 'tcx> as ResultsVisitable<'tcx>>::FlowState;
macro_rules! impl_visitable {
( $(
$T:ident { $( $field:ident : $A:ident ),* $(,)? }
)* ) => { $(
impl<'tcx, $($A),*, D: Direction> ResultsVisitable<'tcx> for $T<$( Results<'tcx, $A> ),*>
where
$( $A: Analysis<'tcx, Direction = D>, )*
{
type Direction = D;
type FlowState = $T<$( $A::Domain ),*>;
fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState {
$T {
$( $field: self.$field.analysis.bottom_value(body) ),*
}
}
fn reset_to_block_entry(
&self,
state: &mut Self::FlowState,
block: BasicBlock,
) {
$( state.$field.clone_from(&self.$field.entry_set_for_block(block)); )*
}
fn reconstruct_before_statement_effect(
&self,
state: &mut Self::FlowState,
stmt: &mir::Statement<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_before_statement_effect(&mut state.$field, stmt, loc); )*
}
fn reconstruct_statement_effect(
&self,
state: &mut Self::FlowState,
stmt: &mir::Statement<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_statement_effect(&mut state.$field, stmt, loc); )*
}
fn reconstruct_before_terminator_effect(
&self,
state: &mut Self::FlowState,
term: &mir::Terminator<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_before_terminator_effect(&mut state.$field, term, loc); )*
}
fn reconstruct_terminator_effect(
&self,
state: &mut Self::FlowState,
term: &mir::Terminator<'tcx>,
loc: Location,
) {
$( self.$field.analysis
.apply_terminator_effect(&mut state.$field, term, loc); )*
}
}
)* }
}
impl_visitable! {
BorrowckAnalyses { borrows: B, uninits: U, ever_inits: E }
}
rustc_index::newtype_index! {
pub struct BorrowIndex {
DEBUG_FORMAT = "bw{}"
}
}
/// `Borrows` stores the data used in the analyses that track the flow
/// of borrows.
///
/// It uniquely identifies every borrow (`Rvalue::Ref`) by a
/// `BorrowIndex`, and maps each such index to a `BorrowData`
/// describing the borrow. These indexes are used for representing the
/// borrows in compact bitvectors.
pub struct Borrows<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
borrow_set: &'a BorrowSet<'tcx>,
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
}
struct StackEntry {
bb: mir::BasicBlock,
lo: usize,
hi: usize,
}
struct OutOfScopePrecomputer<'a, 'tcx> {
visited: BitSet<mir::BasicBlock>,
visit_stack: Vec<StackEntry>,
body: &'a Body<'tcx>,
regioncx: &'a RegionInferenceContext<'tcx>,
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
}
impl<'a, 'tcx> OutOfScopePrecomputer<'a, 'tcx> {
fn new(body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>) -> Self {
OutOfScopePrecomputer {
visited: BitSet::new_empty(body.basic_blocks().len()),
visit_stack: vec![],
body,
regioncx,
borrows_out_of_scope_at_location: FxHashMap::default(),
}
}
}
impl<'tcx> OutOfScopePrecomputer<'_, 'tcx> {
fn precompute_borrows_out_of_scope(
&mut self,
borrow_index: BorrowIndex,
borrow_region: RegionVid,
location: Location,
) {
// We visit one BB at a time. The complication is that we may start in the
// middle of the first BB visited (the one containing `location`), in which
// case we may have to later on process the first part of that BB if there
// is a path back to its start.
// For visited BBs, we record the index of the first statement processed.
// (In fully processed BBs this index is 0.) Note also that we add BBs to
// `visited` once they are added to `stack`, before they are actually
// processed, because this avoids the need to look them up again on
// completion.
self.visited.insert(location.block);
let mut first_lo = location.statement_index;
let first_hi = self.body[location.block].statements.len();
self.visit_stack.push(StackEntry { bb: location.block, lo: first_lo, hi: first_hi });
while let Some(StackEntry { bb, lo, hi }) = self.visit_stack.pop() {
// If we process the first part of the first basic block (i.e. we encounter that block
// for the second time), we no longer have to visit its successors again.
let mut finished_early = bb == location.block && hi!= first_hi;
for i in lo..=hi {
let location = Location { block: bb, statement_index: i };
// If region does not contain a point at the location, then add to list and skip
// successor locations.
if!self.regioncx.region_contains(borrow_region, location) {
debug!("borrow {:?} gets killed at {:?}", borrow_index, location);
self.borrows_out_of_scope_at_location
.entry(location)
.or_default()
.push(borrow_index);
finished_early = true;
break;
}
}
if!finished_early {
// Add successor BBs to the work list, if necessary.
let bb_data = &self.body[bb];
debug_assert!(hi == bb_data.statements.len());
for &succ_bb in bb_data.terminator().successors() {
if!self.visited.insert(succ_bb) {
if succ_bb == location.block && first_lo > 0 {
// `succ_bb` has been seen before. If it wasn't
// fully processed, add its first part to `stack`
// for processing.
self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
hi: first_lo - 1,
});
// And update this entry with 0, to represent the
// whole BB being processed.
first_lo = 0;
}
} else {
// succ_bb hasn't been seen before. Add it to
// `stack` for processing.
self.visit_stack.push(StackEntry {
bb: succ_bb,
lo: 0,
hi: self.body[succ_bb].statements.len(),
});
}
}
}
}
self.visited.clear();
}
}
impl<'a, 'tcx> Borrows<'a, 'tcx> {
crate fn new(
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
nonlexical_regioncx: &'a RegionInferenceContext<'tcx>,
borrow_set: &'a BorrowSet<'tcx>,
) -> Self {
let mut prec = OutOfScopePrecomputer::new(body, nonlexical_regioncx);
for (borrow_index, borrow_data) in borrow_set.iter_enumerated() {
let borrow_region = borrow_data.region.to_region_vid();
let location = borrow_data.reserve_location;
prec.precompute_borrows_out_of_scope(borrow_index, borrow_region, location);
}
Borrows {
tcx,
body,
borrow_set,
borrows_out_of_scope_at_location: prec.borrows_out_of_scope_at_location,
}
}
pub fn location(&self, idx: BorrowIndex) -> &Location {
&self.borrow_set[idx].reserve_location
}
/// Add all borrows to the kill set, if those borrows are out of scope at `location`.
/// That means they went out of a nonlexical scope
fn kill_loans_out_of_scope_at_location(
&self,
trans: &mut impl GenKill<BorrowIndex>,
location: Location,
) {
// NOTE: The state associated with a given `location`
// reflects the dataflow on entry to the statement.
// Iterate over each of the borrows that we've precomputed
// to have went out of scope at this location and kill them.
//
// We are careful always to call this function *before* we
// set up the gen-bits for the statement or
// terminator. That way, if the effect of the statement or
// terminator *does* introduce a new loan of the same
// region, then setting that gen-bit will override any
// potential kill introduced here.
if let Some(indices) = self.borrows_out_of_scope_at_location.get(&location) {
trans.kill_all(indices.iter().copied());
}
}
/// Kill any borrows that conflict with `place`.
fn kill_borrows_on_place(&self, trans: &mut impl GenKill<BorrowIndex>, place: Place<'tcx>) {
debug!("kill_borrows_on_place: place={:?}", place);
let other_borrows_of_local = self
.borrow_set
.local_map
.get(&place.local)
.into_iter()
.flat_map(|bs| bs.iter())
.copied();
// If the borrowed place is a local with no projections, all other borrows of this
// local must conflict. This is purely an optimization so we don't have to call
// `places_conflict` for every borrow.
if place.projection.is_empty() {
if!self.body.local_decls[place.local].is_ref_to_static() {
trans.kill_all(other_borrows_of_local);
}
return;
}
// By passing `PlaceConflictBias::NoOverlap`, we conservatively assume that any given
// pair of array indices are unequal, so that when `places_conflict` returns true, we
// will be assured that two places being compared definitely denotes the same sets of
// locations.
let definitely_conflicting_borrows = other_borrows_of_local.filter(|&i| {
places_conflict(
self.tcx,
self.body,
self.borrow_set[i].borrowed_place,
place,
PlaceConflictBias::NoOverlap,
)
});
trans.kill_all(definitely_conflicting_borrows);
}
}
impl<'tcx> rustc_mir_dataflow::AnalysisDomain<'tcx> for Borrows<'_, 'tcx> {
type Domain = BitSet<BorrowIndex>;
const NAME: &'static str = "borrows";
fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
// bottom = nothing is reserved or activated yet;
BitSet::new_empty(self.borrow_set.len() * 2)
}
fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
// no borrows of code region_scopes have been taken prior to
// function execution, so this method has no effect.
}
}
impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
type Idx = BorrowIndex;
fn before_statement_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
_statement: &mir::Statement<'tcx>,
location: Location,
) {
self.kill_loans_out_of_scope_at_location(trans, location);
}
fn statement_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
stmt: &mir::Statement<'tcx>,
location: Location,
) {
match stmt.kind {
mir::StatementKind::Assign(box (lhs, ref rhs)) => {
if let mir::Rvalue::Ref(_, _, place) = *rhs {
if place.ignore_borrow(
self.tcx,
self.body,
&self.borrow_set.locals_state_at_exit,
) {
return;
}
let index = self.borrow_set.get_index_of(&location).unwrap_or_else(|| {
panic!("could not find BorrowIndex for location {:?}", location);
});
trans.gen(index);
}
// Make sure there are no remaining borrows for variables
// that are assigned over.
self.kill_borrows_on_place(trans, lhs);
}
mir::StatementKind::StorageDead(local) => {
// Make sure there are no remaining borrows for locals that
// are gone out of scope.
self.kill_borrows_on_place(trans, Place::from(local));
}
mir::StatementKind::LlvmInlineAsm(ref asm) => {
for (output, kind) in iter::zip(&*asm.outputs, &asm.asm.outputs) {
if!kind.is_indirect &&!kind.is_rw {
self.kill_borrows_on_place(trans, *output);
}
}
}
mir::StatementKind::FakeRead(..)
| mir::StatementKind::SetDiscriminant {.. }
| mir::StatementKind::StorageLive(..)
| mir::StatementKind::Retag {.. }
| mir::StatementKind::AscribeUserType(..)
| mir::StatementKind::Coverage(..)
| mir::StatementKind::CopyNonOverlapping(..)
| mir::StatementKind::Nop => {}
}
}
fn before_terminator_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
_terminator: &mir::Terminator<'tcx>,
location: Location,
) {
self.kill_loans_out_of_scope_at_location(trans, location);
}
fn terminator_effect(
&self,
trans: &mut impl GenKill<Self::Idx>,
teminator: &mir::Terminator<'tcx>,
_location: Location,
) {
if let mir::TerminatorKind::InlineAsm { operands,.. } = &teminator.kind {
for op in operands {
if let mir::InlineAsmOperand::Out { place: Some(place),.. }
| mir::InlineAsmOperand::InOut { out_place: Some(place),.. } = *op
{
self.kill_borrows_on_place(trans, place);
}
}
}
}
fn call_return_effect(
&self,
_trans: &mut impl GenKill<Self::Idx>,
_block: mir::BasicBlock,
_func: &mir::Operand<'tcx>,
_args: &[mir::Operand<'tcx>],
_dest_place: mir::Place<'tcx>,
) {
}
}
impl DebugWithContext<Borrows<'_, '_>> for BorrowIndex {
fn fmt_with(&self, ctxt: &Borrows<'_, '_>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", ctxt.location(*self))
}
}
|
BorrowckAnalyses
|
identifier_name
|
overflowing-rsh-4.rs
|
// build-fail
// compile-flags: -C debug-assertions
// This function is checking that our (type-based) automatic
// truncation does not sidestep the overflow checking.
#![deny(arithmetic_overflow, const_err)]
fn main() {
// this signals overflow when checking is on
let x = 2_i8 >> 17;
//~^ ERROR: this arithmetic operation will overflow
//... but when checking is off, the fallback will truncate the
// input to its lower three bits (= 1). Note that this is *not*
// the behavior of the x86 processor for 8- and 16-bit types,
// but it is necessary to avoid undefined behavior from LLVM.
//
// We check that here, by ensuring the result is not zero; if
|
// produce the error expected above).
assert_eq!(x, 1_i8);
}
|
// overflow checking is turned off, then this assertion will pass
// (and the compiletest driver will report that the test did not
|
random_line_split
|
overflowing-rsh-4.rs
|
// build-fail
// compile-flags: -C debug-assertions
// This function is checking that our (type-based) automatic
// truncation does not sidestep the overflow checking.
#![deny(arithmetic_overflow, const_err)]
fn main()
|
{
// this signals overflow when checking is on
let x = 2_i8 >> 17;
//~^ ERROR: this arithmetic operation will overflow
// ... but when checking is off, the fallback will truncate the
// input to its lower three bits (= 1). Note that this is *not*
// the behavior of the x86 processor for 8- and 16-bit types,
// but it is necessary to avoid undefined behavior from LLVM.
//
// We check that here, by ensuring the result is not zero; if
// overflow checking is turned off, then this assertion will pass
// (and the compiletest driver will report that the test did not
// produce the error expected above).
assert_eq!(x, 1_i8);
}
|
identifier_body
|
|
overflowing-rsh-4.rs
|
// build-fail
// compile-flags: -C debug-assertions
// This function is checking that our (type-based) automatic
// truncation does not sidestep the overflow checking.
#![deny(arithmetic_overflow, const_err)]
fn
|
() {
// this signals overflow when checking is on
let x = 2_i8 >> 17;
//~^ ERROR: this arithmetic operation will overflow
//... but when checking is off, the fallback will truncate the
// input to its lower three bits (= 1). Note that this is *not*
// the behavior of the x86 processor for 8- and 16-bit types,
// but it is necessary to avoid undefined behavior from LLVM.
//
// We check that here, by ensuring the result is not zero; if
// overflow checking is turned off, then this assertion will pass
// (and the compiletest driver will report that the test did not
// produce the error expected above).
assert_eq!(x, 1_i8);
}
|
main
|
identifier_name
|
macros.rs
|
codespan_reporting::diagnostic::Diagnostic,
downcast_rs::{impl_downcast, Downcast},
futures::{prelude::*, task::Spawn},
};
use gluon_codegen::Trace;
use crate::base::{
ast::{self, Expr, MutVisitor, SpannedExpr},
error::{AsDiagnostic, Errors as BaseErrors, Salvage, SalvageResult},
fnv::FnvMap,
pos,
pos::{BytePos, Spanned},
source::FileId,
symbol::{Symbol, Symbols},
};
use crate::{
gc::Trace,
thread::{RootedThread, Thread},
};
pub type SpannedError = Spanned<Error, BytePos>;
pub type Errors = BaseErrors<SpannedError>;
pub type MacroResult<'ast> = Result<SpannedExpr<'ast, Symbol>, Error>;
pub type SalvageMacroResult<'ast> = SalvageResult<SpannedExpr<'ast, Symbol>, Error>;
pub enum LazyMacroResult<'ast> {
Done(SpannedExpr<'ast, Symbol>),
Lazy(
Box<
dyn for<'a> FnOnce() -> Pin<
Box<dyn Future<Output = SalvageMacroResult<'ast>> + Send + 'ast>,
> + Send
+ 'ast,
>,
),
}
impl<'ast> LazyMacroResult<'ast> {
async fn compute(self) -> SalvageMacroResult<'ast> {
match self {
Self::Done(r) => Ok(r),
Self::Lazy(f) => f().await,
}
}
}
impl<'ast> From<SpannedExpr<'ast, Symbol>> for LazyMacroResult<'ast> {
fn from(r: SpannedExpr<'ast, Symbol>) -> Self
|
}
impl<'ast, F> From<F> for LazyMacroResult<'ast>
where
for<'a> F: FnOnce() -> Pin<Box<dyn Future<Output = SalvageMacroResult<'ast>> + Send + 'ast>>
+ Send
+ 'ast,
{
fn from(r: F) -> Self {
Self::Lazy(Box::new(r))
}
}
pub type MacroFuture<'r, 'ast> =
Pin<Box<dyn Future<Output = Result<LazyMacroResult<'ast>, Error>> + Send + 'r>>;
pub trait Captures<'a> {}
impl<T> Captures<'_> for T {}
pub trait DowncastArc: Downcast {
fn into_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync>;
}
impl<T> DowncastArc for T
where
T: Downcast + Send + Sync,
{
fn into_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync> {
self
}
}
pub trait MacroError: DowncastArc + StdError + AsDiagnostic + Send + Sync +'static {
fn clone_error(&self) -> Error;
fn eq_error(&self, other: &dyn MacroError) -> bool;
fn hash_error(&self, hash: &mut dyn std::hash::Hasher);
}
impl_downcast!(MacroError);
impl dyn MacroError {
#[inline]
pub fn downcast_arc<T: MacroError>(self: Arc<Self>) -> Result<Arc<T>, Arc<Self>>
where
Self: Send + Sync,
{
if self.is::<T>() {
Ok(DowncastArc::into_arc_any(self).downcast::<T>().unwrap())
} else {
Err(self)
}
}
}
impl<T> MacroError for T
where
T: Clone + PartialEq + std::hash::Hash + AsDiagnostic + StdError + Send + Sync +'static,
{
fn clone_error(&self) -> Error {
Error(Box::new(self.clone()))
}
fn eq_error(&self, other: &dyn MacroError) -> bool {
other
.downcast_ref::<Self>()
.map_or(false, |other| self == other)
}
fn hash_error(&self, mut hash: &mut dyn std::hash::Hasher) {
self.hash(&mut hash)
}
}
#[derive(Debug)]
pub struct Error(Box<dyn MacroError>);
impl StdError for Error {
#[allow(deprecated)]
fn description(&self) -> &str {
self.0.description()
}
fn source(&self) -> Option<&(dyn StdError +'static)> {
self.0.source()
}
}
impl AsDiagnostic for Error {
fn as_diagnostic(&self, map: &base::source::CodeMap) -> Diagnostic<FileId> {
self.0.as_diagnostic(map)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl Clone for Error {
fn clone(&self) -> Self {
self.0.clone_error()
}
}
impl Eq for Error {}
impl PartialEq for Error {
fn eq(&self, other: &Self) -> bool {
self.0.eq_error(&*other.0)
}
}
impl std::hash::Hash for Error {
fn hash<H>(&self, state: &mut H)
where
H: std::hash::Hasher,
{
self.0.hash_error(state)
}
}
impl Error {
pub fn new<E>(err: E) -> Self
where
E: MacroError,
{
Self(Box::new(err))
}
pub fn message(s: impl Into<String>) -> Error {
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
struct StringError(String);
impl StdError for StringError {
fn description(&self) -> &str {
&self.0
}
}
impl fmt::Display for StringError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl AsDiagnostic for StringError {
fn as_diagnostic(&self, _map: &base::source::CodeMap) -> Diagnostic<FileId> {
Diagnostic::error().with_message(self.to_string())
}
}
Self::new(StringError(s.into()))
}
pub fn downcast<T>(self) -> Result<Box<T>, Self>
where
T: MacroError,
{
self.0.downcast().map_err(Self)
}
}
/// A trait which abstracts over macros.
///
/// A macro is similiar to a function call but is run at compile time instead of at runtime.
pub trait Macro: Trace + DowncastArc + Send + Sync {
fn get_capability<T>(&self, thread: &Thread, arc_self: &Arc<dyn Macro>) -> Option<T>
where
Self: Sized,
T: Any,
{
self.get_capability_impl(thread, arc_self, TypeId::of::<T>())
.map(|b| {
*b.downcast::<T>()
.ok()
.expect("get_capability_impl return an unexpected type")
})
}
fn get_capability_impl(
&self,
thread: &Thread,
arc_self: &Arc<dyn Macro>,
id: TypeId,
) -> Option<Box<dyn Any>> {
let _ = (thread, arc_self, id);
None
}
/// Creating a symbol in `symbols` will put it in the same scope as the code surrounding the
/// expansion. If you want to create a unique symbol then call `Symbol::from` or create a new
/// `Symbols` table
fn expand<'r, 'a: 'r, 'b: 'r, 'c: 'r, 'ast: 'r>(
&self,
env: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'b mut ast::OwnedArena<'ast, Symbol>,
args: &'b mut [SpannedExpr<'ast, Symbol>],
) -> MacroFuture<'r, 'ast>;
}
impl_downcast!(Macro);
impl dyn Macro {
#[inline]
pub fn downcast_arc<T: Macro>(self: Arc<Self>) -> Result<Arc<T>, Arc<Self>>
where
Self: Send + Sync,
{
if self.is::<T>() {
Ok(DowncastArc::into_arc_any(self).downcast::<T>().unwrap())
} else {
Err(self)
}
}
}
#[async_trait::async_trait]
impl<M> Macro for Box<M>
where
M: Macro +?Sized,
{
fn get_capability_impl(
&self,
thread: &Thread,
arc_self: &Arc<dyn Macro>,
id: TypeId,
) -> Option<Box<dyn Any>> {
(**self).get_capability_impl(thread, arc_self, id)
}
fn expand<'r, 'a: 'r, 'b: 'r, 'c: 'r, 'ast: 'r>(
&self,
env: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'b mut ast::OwnedArena<'ast, Symbol>,
args: &'b mut [SpannedExpr<'ast, Symbol>],
) -> MacroFuture<'r, 'ast> {
(**self).expand(env, symbols, arena, args)
}
}
#[async_trait::async_trait]
impl<M> Macro for Arc<M>
where
M: Macro +?Sized,
{
fn get_capability_impl(
&self,
thread: &Thread,
arc_self: &Arc<dyn Macro>,
id: TypeId,
) -> Option<Box<dyn Any>> {
(**self).get_capability_impl(thread, arc_self, id)
}
fn expand<'r, 'a: 'r, 'b: 'r, 'c: 'r, 'ast: 'r>(
&self,
env: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'b mut ast::OwnedArena<'ast, Symbol>,
args: &'b mut [SpannedExpr<'ast, Symbol>],
) -> MacroFuture<'r, 'ast> {
(**self).expand(env, symbols, arena, args)
}
}
pub trait MacroUserdata: Send {
fn fork(&self, thread: RootedThread) -> Box<dyn Any>;
}
/// Type containing macros bound to symbols which can be applied on an AST expression to transform
/// it.
#[derive(Trace, Default)]
#[gluon(gluon_vm)]
pub struct MacroEnv {
macros: RwLock<FnvMap<String, Arc<dyn Macro>>>,
}
impl MacroEnv {
/// Creates a new `MacroEnv`
pub fn new() -> MacroEnv {
MacroEnv {
macros: RwLock::new(FnvMap::default()),
}
}
/// Inserts a `Macro` which acts on any occurance of `symbol` when applied to an expression.
pub fn insert<M>(&self, name: String, mac: M)
where
M: Macro +'static,
{
self.macros.write().unwrap().insert(name, Arc::new(mac));
}
/// Retrieves the macro bound to `symbol`
pub fn get(&self, name: &str) -> Option<Arc<dyn Macro>> {
self.macros.read().unwrap().get(name).cloned()
}
pub fn get_capabilities<T>(&self, thread: &Thread) -> Vec<T>
where
T: Any,
{
let macros = self.macros.read().unwrap();
macros
.values()
.filter_map(|mac| mac.get_capability::<T>(thread, mac))
.collect()
}
pub fn get_capability<T>(&self, thread: &Thread) -> Option<T>
where
T: Any,
{
let macros = self.macros.read().unwrap();
macros
.values()
.find_map(|mac| mac.get_capability::<T>(thread, mac))
}
pub fn clear(&self) {
self.macros.write().unwrap().clear();
}
/// Runs the macros in this `MacroEnv` on `expr` using `env` as the context of the expansion
pub async fn run<'ast>(
&self,
vm: &Thread,
userdata: &mut (dyn MacroUserdata + '_),
spawn: Option<&(dyn Spawn + Send + Sync + '_)>,
symbols: &mut Symbols,
arena: ast::OwnedArena<'ast, Symbol>,
expr: &'ast mut SpannedExpr<'ast, Symbol>,
) -> Result<(), Errors> {
let mut expander = MacroExpander::new(vm, userdata, spawn);
expander.run(symbols, arena, expr).await;
expander.finish()
}
}
pub struct MacroExpander<'a> {
pub state: FnvMap<String, Box<dyn Any + Send>>,
pub vm: &'a Thread,
pub errors: Errors,
pub userdata: &'a mut (dyn MacroUserdata + 'a),
pub spawn: Option<&'a (dyn Spawn + Send + Sync + 'a)>,
macros: &'a MacroEnv,
}
impl<'a> MacroExpander<'a> {
pub fn new(
vm: &'a Thread,
userdata: &'a mut (dyn MacroUserdata + 'a),
spawn: Option<&'a (dyn Spawn + Send + Sync + 'a)>,
) -> Self {
MacroExpander {
vm,
state: FnvMap::default(),
macros: vm.get_macros(),
userdata,
spawn,
errors: Errors::new(),
}
}
pub fn fork(&self, userdata: &'a mut (dyn MacroUserdata + 'a)) -> MacroExpander<'a> {
MacroExpander {
vm: self.vm,
state: FnvMap::default(),
macros: self.macros,
userdata,
spawn: self.spawn,
errors: Errors::new(),
}
}
pub fn finish(self) -> Result<(), Errors> {
if self.errors.has_errors() {
Err(self.errors)
} else {
Ok(())
}
}
pub async fn run<'ast>(
&mut self,
symbols: &mut Symbols,
mut arena: ast::OwnedArena<'ast, Symbol>,
expr: &'ast mut SpannedExpr<'ast, Symbol>,
) {
self.run_once(symbols, &mut arena, expr).await; // FIXME
}
pub async fn run_once<'ast>(
&mut self,
symbols: &mut Symbols,
arena: &mut ast::OwnedArena<'ast, Symbol>,
expr: &mut SpannedExpr<'ast, Symbol>,
) {
let mut visitor = MacroVisitor {
expander: self,
symbols,
arena,
exprs: Vec::new(),
};
visitor.visit_expr(expr);
let MacroVisitor { exprs, symbols,.. } = visitor;
self.expand(symbols, arena, exprs).await
}
async fn expand<'ast>(
&mut self,
symbols: &mut Symbols,
arena: &mut ast::OwnedArena<'ast, Symbol>,
mut exprs: Vec<(&'_ mut SpannedExpr<'ast, Symbol>, Arc<dyn Macro>)>,
) {
let mut futures = Vec::with_capacity(exprs.len());
for (expr, mac) in exprs.drain(..) {
let result = match &mut expr.value {
Expr::App { args,.. } => mac.expand(self, symbols, arena, args).await,
_ => unreachable!("{:?}", expr),
};
match result {
Ok(result) => futures.push(result.compute().map(move |result| (expr, result))),
Err(err) => {
self.errors.push(pos::spanned(expr.span, err));
replace_expr(arena, expr, Expr::Error(None));
}
}
}
// Index each expansion future so we can keep any returned errors in a consistent order
let mut stream = futures
.into_iter()
.enumerate()
.map(|(index, future)| future.map(move |x| (index, x)))
.collect::<futures::stream::FuturesUnordered<_>>();
let mut unordered_errors = Vec::new();
while let Some((index, (expr, result))) = stream.next().await {
let expr = { expr };
let new_expr = match result {
Ok(replacement) => replacement.value,
Err(Salvage { error, value }) => {
unordered_errors.push((index, pos::spanned(expr.span, error)));
value.map_or_else(|| Expr::Error(None), |e| e.value)
}
};
replace_expr(arena, expr, new_expr);
}
unordered_errors.sort_by_key(|&(index, _)| index);
self.errors
.extend(unordered_errors.into_iter().map(|(_, err)| err));
}
}
fn replace_expr<'ast>(
arena: &ast::OwnedArena<'ast, Symbol>,
expr: &mut SpannedExpr<'ast, Symbol>,
new: Expr<'ast, Symbol>,
) {
let expr_span = expr.span;
let original = mem::replace(expr, pos::spanned(expr_span, Expr::Error(None)));
*expr = pos::spanned(
expr.span,
Expr::MacroExpansion {
original: arena.alloc(original),
replacement: arena.alloc(pos::spanned(expr_span, new)),
},
);
}
struct MacroVisitor<'a: 'b, 'b, 'c, 'd, 'e, 'ast> {
expander: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'d mut ast::OwnedArena<'ast, Symbol>,
exprs: Vec<(&'e mut SpannedExpr<'ast, Symbol>, Arc<dyn Macro>)>,
}
impl<'a, 'b, 'c, 'e, 'ast> MutVisitor<'e, 'ast> for MacroVisitor<'a, 'b, 'c, '_, 'e, 'ast> {
type Ident = Symbol;
fn visit_expr(&mut self, expr: &'e mut SpannedExpr<'ast, Symbol>) {
let replacement = match &mut expr.value {
Expr::App {
implicit_args,
func,
args: _,
} => match &func.value {
Expr::Ident(ref id) if id.name.as_str().ends_with('!') => {
if!implicit_args.is_empty() {
self.expander.errors.push(pos::spanned(
expr.span,
Error::message("Implicit arguments are not allowed on macros"),
));
}
let name = id.name.as_str();
match self.expander.macros.get(&name[..name.len() - 1]) {
// FIXME Avoid cloning args
Some(m) => Some(m.clone()),
None => None,
}
}
_ => None,
},
Expr::TypeBindings(binds, body) => {
let Self {
arena,
symbols,
expander,
..
} = self;
let mut generated_bindings = Vec::new();
for bind in &**binds {
generated_bindings.extend(
bind.metadata
.attributes()
.filter(|attr| attr.name == "derive")
.map(|derive| {
match crate::derive::generate(arena.borrow(), symbols, derive, bind)
{
Ok(x) => x,
Err(err) => {
expander.errors.push(pos::spanned(bind.name.span, err));
Vec::new()
}
}
})
.flatten(),
);
}
if!generated_bindings.is_empty() {
let next_expr = mem::take(*body);
body.value =
Expr::rec_let_bindings(self.
|
{
Self::Done(r)
}
|
identifier_body
|
macros.rs
|
codespan_reporting::diagnostic::Diagnostic,
downcast_rs::{impl_downcast, Downcast},
futures::{prelude::*, task::Spawn},
};
use gluon_codegen::Trace;
use crate::base::{
ast::{self, Expr, MutVisitor, SpannedExpr},
error::{AsDiagnostic, Errors as BaseErrors, Salvage, SalvageResult},
fnv::FnvMap,
pos,
pos::{BytePos, Spanned},
source::FileId,
symbol::{Symbol, Symbols},
};
use crate::{
gc::Trace,
thread::{RootedThread, Thread},
};
pub type SpannedError = Spanned<Error, BytePos>;
pub type Errors = BaseErrors<SpannedError>;
pub type MacroResult<'ast> = Result<SpannedExpr<'ast, Symbol>, Error>;
pub type SalvageMacroResult<'ast> = SalvageResult<SpannedExpr<'ast, Symbol>, Error>;
pub enum LazyMacroResult<'ast> {
Done(SpannedExpr<'ast, Symbol>),
Lazy(
Box<
dyn for<'a> FnOnce() -> Pin<
Box<dyn Future<Output = SalvageMacroResult<'ast>> + Send + 'ast>,
> + Send
+ 'ast,
>,
),
}
impl<'ast> LazyMacroResult<'ast> {
async fn compute(self) -> SalvageMacroResult<'ast> {
match self {
Self::Done(r) => Ok(r),
Self::Lazy(f) => f().await,
}
}
}
impl<'ast> From<SpannedExpr<'ast, Symbol>> for LazyMacroResult<'ast> {
fn from(r: SpannedExpr<'ast, Symbol>) -> Self {
Self::Done(r)
}
}
impl<'ast, F> From<F> for LazyMacroResult<'ast>
where
for<'a> F: FnOnce() -> Pin<Box<dyn Future<Output = SalvageMacroResult<'ast>> + Send + 'ast>>
+ Send
+ 'ast,
{
fn from(r: F) -> Self {
Self::Lazy(Box::new(r))
}
}
pub type MacroFuture<'r, 'ast> =
Pin<Box<dyn Future<Output = Result<LazyMacroResult<'ast>, Error>> + Send + 'r>>;
pub trait Captures<'a> {}
impl<T> Captures<'_> for T {}
pub trait DowncastArc: Downcast {
fn into_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync>;
}
impl<T> DowncastArc for T
where
T: Downcast + Send + Sync,
{
fn into_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync> {
self
}
}
pub trait MacroError: DowncastArc + StdError + AsDiagnostic + Send + Sync +'static {
fn clone_error(&self) -> Error;
fn eq_error(&self, other: &dyn MacroError) -> bool;
fn hash_error(&self, hash: &mut dyn std::hash::Hasher);
}
impl_downcast!(MacroError);
impl dyn MacroError {
#[inline]
pub fn downcast_arc<T: MacroError>(self: Arc<Self>) -> Result<Arc<T>, Arc<Self>>
where
Self: Send + Sync,
{
if self.is::<T>() {
Ok(DowncastArc::into_arc_any(self).downcast::<T>().unwrap())
} else {
Err(self)
}
}
}
impl<T> MacroError for T
where
T: Clone + PartialEq + std::hash::Hash + AsDiagnostic + StdError + Send + Sync +'static,
{
fn clone_error(&self) -> Error {
Error(Box::new(self.clone()))
}
fn eq_error(&self, other: &dyn MacroError) -> bool {
other
.downcast_ref::<Self>()
.map_or(false, |other| self == other)
}
fn hash_error(&self, mut hash: &mut dyn std::hash::Hasher) {
self.hash(&mut hash)
}
}
#[derive(Debug)]
pub struct Error(Box<dyn MacroError>);
impl StdError for Error {
#[allow(deprecated)]
fn description(&self) -> &str {
self.0.description()
}
fn source(&self) -> Option<&(dyn StdError +'static)> {
self.0.source()
}
}
impl AsDiagnostic for Error {
fn as_diagnostic(&self, map: &base::source::CodeMap) -> Diagnostic<FileId> {
self.0.as_diagnostic(map)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl Clone for Error {
fn clone(&self) -> Self {
self.0.clone_error()
}
}
impl Eq for Error {}
impl PartialEq for Error {
fn
|
(&self, other: &Self) -> bool {
self.0.eq_error(&*other.0)
}
}
impl std::hash::Hash for Error {
fn hash<H>(&self, state: &mut H)
where
H: std::hash::Hasher,
{
self.0.hash_error(state)
}
}
impl Error {
pub fn new<E>(err: E) -> Self
where
E: MacroError,
{
Self(Box::new(err))
}
pub fn message(s: impl Into<String>) -> Error {
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
struct StringError(String);
impl StdError for StringError {
fn description(&self) -> &str {
&self.0
}
}
impl fmt::Display for StringError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl AsDiagnostic for StringError {
fn as_diagnostic(&self, _map: &base::source::CodeMap) -> Diagnostic<FileId> {
Diagnostic::error().with_message(self.to_string())
}
}
Self::new(StringError(s.into()))
}
pub fn downcast<T>(self) -> Result<Box<T>, Self>
where
T: MacroError,
{
self.0.downcast().map_err(Self)
}
}
/// A trait which abstracts over macros.
///
/// A macro is similiar to a function call but is run at compile time instead of at runtime.
pub trait Macro: Trace + DowncastArc + Send + Sync {
fn get_capability<T>(&self, thread: &Thread, arc_self: &Arc<dyn Macro>) -> Option<T>
where
Self: Sized,
T: Any,
{
self.get_capability_impl(thread, arc_self, TypeId::of::<T>())
.map(|b| {
*b.downcast::<T>()
.ok()
.expect("get_capability_impl return an unexpected type")
})
}
fn get_capability_impl(
&self,
thread: &Thread,
arc_self: &Arc<dyn Macro>,
id: TypeId,
) -> Option<Box<dyn Any>> {
let _ = (thread, arc_self, id);
None
}
/// Creating a symbol in `symbols` will put it in the same scope as the code surrounding the
/// expansion. If you want to create a unique symbol then call `Symbol::from` or create a new
/// `Symbols` table
fn expand<'r, 'a: 'r, 'b: 'r, 'c: 'r, 'ast: 'r>(
&self,
env: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'b mut ast::OwnedArena<'ast, Symbol>,
args: &'b mut [SpannedExpr<'ast, Symbol>],
) -> MacroFuture<'r, 'ast>;
}
impl_downcast!(Macro);
impl dyn Macro {
#[inline]
pub fn downcast_arc<T: Macro>(self: Arc<Self>) -> Result<Arc<T>, Arc<Self>>
where
Self: Send + Sync,
{
if self.is::<T>() {
Ok(DowncastArc::into_arc_any(self).downcast::<T>().unwrap())
} else {
Err(self)
}
}
}
#[async_trait::async_trait]
impl<M> Macro for Box<M>
where
M: Macro +?Sized,
{
fn get_capability_impl(
&self,
thread: &Thread,
arc_self: &Arc<dyn Macro>,
id: TypeId,
) -> Option<Box<dyn Any>> {
(**self).get_capability_impl(thread, arc_self, id)
}
fn expand<'r, 'a: 'r, 'b: 'r, 'c: 'r, 'ast: 'r>(
&self,
env: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'b mut ast::OwnedArena<'ast, Symbol>,
args: &'b mut [SpannedExpr<'ast, Symbol>],
) -> MacroFuture<'r, 'ast> {
(**self).expand(env, symbols, arena, args)
}
}
#[async_trait::async_trait]
impl<M> Macro for Arc<M>
where
M: Macro +?Sized,
{
fn get_capability_impl(
&self,
thread: &Thread,
arc_self: &Arc<dyn Macro>,
id: TypeId,
) -> Option<Box<dyn Any>> {
(**self).get_capability_impl(thread, arc_self, id)
}
fn expand<'r, 'a: 'r, 'b: 'r, 'c: 'r, 'ast: 'r>(
&self,
env: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'b mut ast::OwnedArena<'ast, Symbol>,
args: &'b mut [SpannedExpr<'ast, Symbol>],
) -> MacroFuture<'r, 'ast> {
(**self).expand(env, symbols, arena, args)
}
}
pub trait MacroUserdata: Send {
fn fork(&self, thread: RootedThread) -> Box<dyn Any>;
}
/// Type containing macros bound to symbols which can be applied on an AST expression to transform
/// it.
#[derive(Trace, Default)]
#[gluon(gluon_vm)]
pub struct MacroEnv {
macros: RwLock<FnvMap<String, Arc<dyn Macro>>>,
}
impl MacroEnv {
/// Creates a new `MacroEnv`
pub fn new() -> MacroEnv {
MacroEnv {
macros: RwLock::new(FnvMap::default()),
}
}
/// Inserts a `Macro` which acts on any occurance of `symbol` when applied to an expression.
pub fn insert<M>(&self, name: String, mac: M)
where
M: Macro +'static,
{
self.macros.write().unwrap().insert(name, Arc::new(mac));
}
/// Retrieves the macro bound to `symbol`
pub fn get(&self, name: &str) -> Option<Arc<dyn Macro>> {
self.macros.read().unwrap().get(name).cloned()
}
pub fn get_capabilities<T>(&self, thread: &Thread) -> Vec<T>
where
T: Any,
{
let macros = self.macros.read().unwrap();
macros
.values()
.filter_map(|mac| mac.get_capability::<T>(thread, mac))
.collect()
}
pub fn get_capability<T>(&self, thread: &Thread) -> Option<T>
where
T: Any,
{
let macros = self.macros.read().unwrap();
macros
.values()
.find_map(|mac| mac.get_capability::<T>(thread, mac))
}
pub fn clear(&self) {
self.macros.write().unwrap().clear();
}
/// Runs the macros in this `MacroEnv` on `expr` using `env` as the context of the expansion
pub async fn run<'ast>(
&self,
vm: &Thread,
userdata: &mut (dyn MacroUserdata + '_),
spawn: Option<&(dyn Spawn + Send + Sync + '_)>,
symbols: &mut Symbols,
arena: ast::OwnedArena<'ast, Symbol>,
expr: &'ast mut SpannedExpr<'ast, Symbol>,
) -> Result<(), Errors> {
let mut expander = MacroExpander::new(vm, userdata, spawn);
expander.run(symbols, arena, expr).await;
expander.finish()
}
}
pub struct MacroExpander<'a> {
pub state: FnvMap<String, Box<dyn Any + Send>>,
pub vm: &'a Thread,
pub errors: Errors,
pub userdata: &'a mut (dyn MacroUserdata + 'a),
pub spawn: Option<&'a (dyn Spawn + Send + Sync + 'a)>,
macros: &'a MacroEnv,
}
impl<'a> MacroExpander<'a> {
pub fn new(
vm: &'a Thread,
userdata: &'a mut (dyn MacroUserdata + 'a),
spawn: Option<&'a (dyn Spawn + Send + Sync + 'a)>,
) -> Self {
MacroExpander {
vm,
state: FnvMap::default(),
macros: vm.get_macros(),
userdata,
spawn,
errors: Errors::new(),
}
}
pub fn fork(&self, userdata: &'a mut (dyn MacroUserdata + 'a)) -> MacroExpander<'a> {
MacroExpander {
vm: self.vm,
state: FnvMap::default(),
macros: self.macros,
userdata,
spawn: self.spawn,
errors: Errors::new(),
}
}
pub fn finish(self) -> Result<(), Errors> {
if self.errors.has_errors() {
Err(self.errors)
} else {
Ok(())
}
}
pub async fn run<'ast>(
&mut self,
symbols: &mut Symbols,
mut arena: ast::OwnedArena<'ast, Symbol>,
expr: &'ast mut SpannedExpr<'ast, Symbol>,
) {
self.run_once(symbols, &mut arena, expr).await; // FIXME
}
pub async fn run_once<'ast>(
&mut self,
symbols: &mut Symbols,
arena: &mut ast::OwnedArena<'ast, Symbol>,
expr: &mut SpannedExpr<'ast, Symbol>,
) {
let mut visitor = MacroVisitor {
expander: self,
symbols,
arena,
exprs: Vec::new(),
};
visitor.visit_expr(expr);
let MacroVisitor { exprs, symbols,.. } = visitor;
self.expand(symbols, arena, exprs).await
}
async fn expand<'ast>(
&mut self,
symbols: &mut Symbols,
arena: &mut ast::OwnedArena<'ast, Symbol>,
mut exprs: Vec<(&'_ mut SpannedExpr<'ast, Symbol>, Arc<dyn Macro>)>,
) {
let mut futures = Vec::with_capacity(exprs.len());
for (expr, mac) in exprs.drain(..) {
let result = match &mut expr.value {
Expr::App { args,.. } => mac.expand(self, symbols, arena, args).await,
_ => unreachable!("{:?}", expr),
};
match result {
Ok(result) => futures.push(result.compute().map(move |result| (expr, result))),
Err(err) => {
self.errors.push(pos::spanned(expr.span, err));
replace_expr(arena, expr, Expr::Error(None));
}
}
}
// Index each expansion future so we can keep any returned errors in a consistent order
let mut stream = futures
.into_iter()
.enumerate()
.map(|(index, future)| future.map(move |x| (index, x)))
.collect::<futures::stream::FuturesUnordered<_>>();
let mut unordered_errors = Vec::new();
while let Some((index, (expr, result))) = stream.next().await {
let expr = { expr };
let new_expr = match result {
Ok(replacement) => replacement.value,
Err(Salvage { error, value }) => {
unordered_errors.push((index, pos::spanned(expr.span, error)));
value.map_or_else(|| Expr::Error(None), |e| e.value)
}
};
replace_expr(arena, expr, new_expr);
}
unordered_errors.sort_by_key(|&(index, _)| index);
self.errors
.extend(unordered_errors.into_iter().map(|(_, err)| err));
}
}
fn replace_expr<'ast>(
arena: &ast::OwnedArena<'ast, Symbol>,
expr: &mut SpannedExpr<'ast, Symbol>,
new: Expr<'ast, Symbol>,
) {
let expr_span = expr.span;
let original = mem::replace(expr, pos::spanned(expr_span, Expr::Error(None)));
*expr = pos::spanned(
expr.span,
Expr::MacroExpansion {
original: arena.alloc(original),
replacement: arena.alloc(pos::spanned(expr_span, new)),
},
);
}
struct MacroVisitor<'a: 'b, 'b, 'c, 'd, 'e, 'ast> {
expander: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'d mut ast::OwnedArena<'ast, Symbol>,
exprs: Vec<(&'e mut SpannedExpr<'ast, Symbol>, Arc<dyn Macro>)>,
}
impl<'a, 'b, 'c, 'e, 'ast> MutVisitor<'e, 'ast> for MacroVisitor<'a, 'b, 'c, '_, 'e, 'ast> {
type Ident = Symbol;
fn visit_expr(&mut self, expr: &'e mut SpannedExpr<'ast, Symbol>) {
let replacement = match &mut expr.value {
Expr::App {
implicit_args,
func,
args: _,
} => match &func.value {
Expr::Ident(ref id) if id.name.as_str().ends_with('!') => {
if!implicit_args.is_empty() {
self.expander.errors.push(pos::spanned(
expr.span,
Error::message("Implicit arguments are not allowed on macros"),
));
}
let name = id.name.as_str();
match self.expander.macros.get(&name[..name.len() - 1]) {
// FIXME Avoid cloning args
Some(m) => Some(m.clone()),
None => None,
}
}
_ => None,
},
Expr::TypeBindings(binds, body) => {
let Self {
arena,
symbols,
expander,
..
} = self;
let mut generated_bindings = Vec::new();
for bind in &**binds {
generated_bindings.extend(
bind.metadata
.attributes()
.filter(|attr| attr.name == "derive")
.map(|derive| {
match crate::derive::generate(arena.borrow(), symbols, derive, bind)
{
Ok(x) => x,
Err(err) => {
expander.errors.push(pos::spanned(bind.name.span, err));
Vec::new()
}
}
})
.flatten(),
);
}
if!generated_bindings.is_empty() {
let next_expr = mem::take(*body);
body.value =
Expr::rec_let_bindings(self.
|
eq
|
identifier_name
|
macros.rs
|
codespan_reporting::diagnostic::Diagnostic,
downcast_rs::{impl_downcast, Downcast},
futures::{prelude::*, task::Spawn},
};
use gluon_codegen::Trace;
use crate::base::{
ast::{self, Expr, MutVisitor, SpannedExpr},
error::{AsDiagnostic, Errors as BaseErrors, Salvage, SalvageResult},
fnv::FnvMap,
pos,
pos::{BytePos, Spanned},
source::FileId,
symbol::{Symbol, Symbols},
};
use crate::{
gc::Trace,
thread::{RootedThread, Thread},
};
pub type SpannedError = Spanned<Error, BytePos>;
pub type Errors = BaseErrors<SpannedError>;
pub type MacroResult<'ast> = Result<SpannedExpr<'ast, Symbol>, Error>;
pub type SalvageMacroResult<'ast> = SalvageResult<SpannedExpr<'ast, Symbol>, Error>;
pub enum LazyMacroResult<'ast> {
Done(SpannedExpr<'ast, Symbol>),
Lazy(
Box<
dyn for<'a> FnOnce() -> Pin<
Box<dyn Future<Output = SalvageMacroResult<'ast>> + Send + 'ast>,
> + Send
+ 'ast,
>,
),
}
impl<'ast> LazyMacroResult<'ast> {
async fn compute(self) -> SalvageMacroResult<'ast> {
match self {
Self::Done(r) => Ok(r),
Self::Lazy(f) => f().await,
}
}
}
impl<'ast> From<SpannedExpr<'ast, Symbol>> for LazyMacroResult<'ast> {
fn from(r: SpannedExpr<'ast, Symbol>) -> Self {
Self::Done(r)
}
}
impl<'ast, F> From<F> for LazyMacroResult<'ast>
where
for<'a> F: FnOnce() -> Pin<Box<dyn Future<Output = SalvageMacroResult<'ast>> + Send + 'ast>>
+ Send
+ 'ast,
{
fn from(r: F) -> Self {
Self::Lazy(Box::new(r))
}
}
pub type MacroFuture<'r, 'ast> =
Pin<Box<dyn Future<Output = Result<LazyMacroResult<'ast>, Error>> + Send + 'r>>;
pub trait Captures<'a> {}
impl<T> Captures<'_> for T {}
pub trait DowncastArc: Downcast {
fn into_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync>;
}
impl<T> DowncastArc for T
where
T: Downcast + Send + Sync,
{
fn into_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync> {
self
}
}
pub trait MacroError: DowncastArc + StdError + AsDiagnostic + Send + Sync +'static {
fn clone_error(&self) -> Error;
fn eq_error(&self, other: &dyn MacroError) -> bool;
fn hash_error(&self, hash: &mut dyn std::hash::Hasher);
}
impl_downcast!(MacroError);
impl dyn MacroError {
#[inline]
pub fn downcast_arc<T: MacroError>(self: Arc<Self>) -> Result<Arc<T>, Arc<Self>>
where
Self: Send + Sync,
{
if self.is::<T>() {
Ok(DowncastArc::into_arc_any(self).downcast::<T>().unwrap())
} else {
Err(self)
}
}
}
impl<T> MacroError for T
where
T: Clone + PartialEq + std::hash::Hash + AsDiagnostic + StdError + Send + Sync +'static,
{
fn clone_error(&self) -> Error {
Error(Box::new(self.clone()))
}
fn eq_error(&self, other: &dyn MacroError) -> bool {
other
.downcast_ref::<Self>()
.map_or(false, |other| self == other)
}
fn hash_error(&self, mut hash: &mut dyn std::hash::Hasher) {
self.hash(&mut hash)
}
}
#[derive(Debug)]
pub struct Error(Box<dyn MacroError>);
impl StdError for Error {
#[allow(deprecated)]
fn description(&self) -> &str {
self.0.description()
}
fn source(&self) -> Option<&(dyn StdError +'static)> {
self.0.source()
}
}
impl AsDiagnostic for Error {
fn as_diagnostic(&self, map: &base::source::CodeMap) -> Diagnostic<FileId> {
self.0.as_diagnostic(map)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl Clone for Error {
fn clone(&self) -> Self {
self.0.clone_error()
}
}
impl Eq for Error {}
impl PartialEq for Error {
fn eq(&self, other: &Self) -> bool {
self.0.eq_error(&*other.0)
}
}
impl std::hash::Hash for Error {
fn hash<H>(&self, state: &mut H)
where
H: std::hash::Hasher,
{
self.0.hash_error(state)
}
}
impl Error {
pub fn new<E>(err: E) -> Self
where
E: MacroError,
{
Self(Box::new(err))
}
pub fn message(s: impl Into<String>) -> Error {
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
struct StringError(String);
impl StdError for StringError {
fn description(&self) -> &str {
&self.0
}
}
impl fmt::Display for StringError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl AsDiagnostic for StringError {
fn as_diagnostic(&self, _map: &base::source::CodeMap) -> Diagnostic<FileId> {
Diagnostic::error().with_message(self.to_string())
}
}
Self::new(StringError(s.into()))
}
pub fn downcast<T>(self) -> Result<Box<T>, Self>
where
T: MacroError,
{
self.0.downcast().map_err(Self)
}
}
/// A trait which abstracts over macros.
///
/// A macro is similiar to a function call but is run at compile time instead of at runtime.
pub trait Macro: Trace + DowncastArc + Send + Sync {
fn get_capability<T>(&self, thread: &Thread, arc_self: &Arc<dyn Macro>) -> Option<T>
where
Self: Sized,
T: Any,
{
self.get_capability_impl(thread, arc_self, TypeId::of::<T>())
.map(|b| {
*b.downcast::<T>()
.ok()
.expect("get_capability_impl return an unexpected type")
})
}
fn get_capability_impl(
&self,
thread: &Thread,
arc_self: &Arc<dyn Macro>,
id: TypeId,
) -> Option<Box<dyn Any>> {
let _ = (thread, arc_self, id);
None
}
/// Creating a symbol in `symbols` will put it in the same scope as the code surrounding the
/// expansion. If you want to create a unique symbol then call `Symbol::from` or create a new
/// `Symbols` table
fn expand<'r, 'a: 'r, 'b: 'r, 'c: 'r, 'ast: 'r>(
&self,
env: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'b mut ast::OwnedArena<'ast, Symbol>,
args: &'b mut [SpannedExpr<'ast, Symbol>],
) -> MacroFuture<'r, 'ast>;
}
impl_downcast!(Macro);
impl dyn Macro {
#[inline]
pub fn downcast_arc<T: Macro>(self: Arc<Self>) -> Result<Arc<T>, Arc<Self>>
where
Self: Send + Sync,
{
if self.is::<T>() {
Ok(DowncastArc::into_arc_any(self).downcast::<T>().unwrap())
} else {
Err(self)
}
}
}
#[async_trait::async_trait]
impl<M> Macro for Box<M>
where
M: Macro +?Sized,
{
fn get_capability_impl(
&self,
thread: &Thread,
arc_self: &Arc<dyn Macro>,
id: TypeId,
) -> Option<Box<dyn Any>> {
(**self).get_capability_impl(thread, arc_self, id)
}
fn expand<'r, 'a: 'r, 'b: 'r, 'c: 'r, 'ast: 'r>(
&self,
env: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'b mut ast::OwnedArena<'ast, Symbol>,
args: &'b mut [SpannedExpr<'ast, Symbol>],
) -> MacroFuture<'r, 'ast> {
(**self).expand(env, symbols, arena, args)
}
}
#[async_trait::async_trait]
impl<M> Macro for Arc<M>
where
M: Macro +?Sized,
{
fn get_capability_impl(
&self,
thread: &Thread,
arc_self: &Arc<dyn Macro>,
id: TypeId,
) -> Option<Box<dyn Any>> {
(**self).get_capability_impl(thread, arc_self, id)
}
fn expand<'r, 'a: 'r, 'b: 'r, 'c: 'r, 'ast: 'r>(
&self,
env: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'b mut ast::OwnedArena<'ast, Symbol>,
args: &'b mut [SpannedExpr<'ast, Symbol>],
) -> MacroFuture<'r, 'ast> {
(**self).expand(env, symbols, arena, args)
}
}
pub trait MacroUserdata: Send {
fn fork(&self, thread: RootedThread) -> Box<dyn Any>;
}
/// Type containing macros bound to symbols which can be applied on an AST expression to transform
/// it.
#[derive(Trace, Default)]
#[gluon(gluon_vm)]
pub struct MacroEnv {
macros: RwLock<FnvMap<String, Arc<dyn Macro>>>,
}
impl MacroEnv {
/// Creates a new `MacroEnv`
pub fn new() -> MacroEnv {
MacroEnv {
macros: RwLock::new(FnvMap::default()),
}
}
/// Inserts a `Macro` which acts on any occurance of `symbol` when applied to an expression.
pub fn insert<M>(&self, name: String, mac: M)
where
M: Macro +'static,
{
self.macros.write().unwrap().insert(name, Arc::new(mac));
}
/// Retrieves the macro bound to `symbol`
pub fn get(&self, name: &str) -> Option<Arc<dyn Macro>> {
self.macros.read().unwrap().get(name).cloned()
}
pub fn get_capabilities<T>(&self, thread: &Thread) -> Vec<T>
where
T: Any,
{
let macros = self.macros.read().unwrap();
macros
.values()
.filter_map(|mac| mac.get_capability::<T>(thread, mac))
.collect()
}
pub fn get_capability<T>(&self, thread: &Thread) -> Option<T>
where
T: Any,
{
let macros = self.macros.read().unwrap();
macros
.values()
.find_map(|mac| mac.get_capability::<T>(thread, mac))
}
pub fn clear(&self) {
self.macros.write().unwrap().clear();
}
/// Runs the macros in this `MacroEnv` on `expr` using `env` as the context of the expansion
pub async fn run<'ast>(
&self,
vm: &Thread,
userdata: &mut (dyn MacroUserdata + '_),
spawn: Option<&(dyn Spawn + Send + Sync + '_)>,
symbols: &mut Symbols,
arena: ast::OwnedArena<'ast, Symbol>,
expr: &'ast mut SpannedExpr<'ast, Symbol>,
) -> Result<(), Errors> {
let mut expander = MacroExpander::new(vm, userdata, spawn);
expander.run(symbols, arena, expr).await;
expander.finish()
}
}
pub struct MacroExpander<'a> {
pub state: FnvMap<String, Box<dyn Any + Send>>,
pub vm: &'a Thread,
pub errors: Errors,
pub userdata: &'a mut (dyn MacroUserdata + 'a),
pub spawn: Option<&'a (dyn Spawn + Send + Sync + 'a)>,
macros: &'a MacroEnv,
}
impl<'a> MacroExpander<'a> {
pub fn new(
vm: &'a Thread,
userdata: &'a mut (dyn MacroUserdata + 'a),
spawn: Option<&'a (dyn Spawn + Send + Sync + 'a)>,
) -> Self {
MacroExpander {
vm,
state: FnvMap::default(),
macros: vm.get_macros(),
userdata,
spawn,
errors: Errors::new(),
}
}
pub fn fork(&self, userdata: &'a mut (dyn MacroUserdata + 'a)) -> MacroExpander<'a> {
MacroExpander {
vm: self.vm,
state: FnvMap::default(),
macros: self.macros,
userdata,
spawn: self.spawn,
errors: Errors::new(),
}
}
pub fn finish(self) -> Result<(), Errors> {
if self.errors.has_errors() {
Err(self.errors)
} else {
Ok(())
}
}
pub async fn run<'ast>(
&mut self,
symbols: &mut Symbols,
mut arena: ast::OwnedArena<'ast, Symbol>,
expr: &'ast mut SpannedExpr<'ast, Symbol>,
) {
self.run_once(symbols, &mut arena, expr).await; // FIXME
}
pub async fn run_once<'ast>(
&mut self,
symbols: &mut Symbols,
arena: &mut ast::OwnedArena<'ast, Symbol>,
expr: &mut SpannedExpr<'ast, Symbol>,
) {
let mut visitor = MacroVisitor {
expander: self,
symbols,
arena,
exprs: Vec::new(),
};
visitor.visit_expr(expr);
let MacroVisitor { exprs, symbols,.. } = visitor;
self.expand(symbols, arena, exprs).await
}
async fn expand<'ast>(
&mut self,
symbols: &mut Symbols,
arena: &mut ast::OwnedArena<'ast, Symbol>,
mut exprs: Vec<(&'_ mut SpannedExpr<'ast, Symbol>, Arc<dyn Macro>)>,
) {
let mut futures = Vec::with_capacity(exprs.len());
for (expr, mac) in exprs.drain(..) {
let result = match &mut expr.value {
Expr::App { args,.. } => mac.expand(self, symbols, arena, args).await,
_ => unreachable!("{:?}", expr),
};
match result {
Ok(result) => futures.push(result.compute().map(move |result| (expr, result))),
Err(err) => {
self.errors.push(pos::spanned(expr.span, err));
replace_expr(arena, expr, Expr::Error(None));
}
}
}
// Index each expansion future so we can keep any returned errors in a consistent order
let mut stream = futures
.into_iter()
.enumerate()
.map(|(index, future)| future.map(move |x| (index, x)))
.collect::<futures::stream::FuturesUnordered<_>>();
let mut unordered_errors = Vec::new();
while let Some((index, (expr, result))) = stream.next().await {
let expr = { expr };
let new_expr = match result {
Ok(replacement) => replacement.value,
Err(Salvage { error, value }) => {
unordered_errors.push((index, pos::spanned(expr.span, error)));
value.map_or_else(|| Expr::Error(None), |e| e.value)
}
};
replace_expr(arena, expr, new_expr);
}
unordered_errors.sort_by_key(|&(index, _)| index);
self.errors
|
}
fn replace_expr<'ast>(
arena: &ast::OwnedArena<'ast, Symbol>,
expr: &mut SpannedExpr<'ast, Symbol>,
new: Expr<'ast, Symbol>,
) {
let expr_span = expr.span;
let original = mem::replace(expr, pos::spanned(expr_span, Expr::Error(None)));
*expr = pos::spanned(
expr.span,
Expr::MacroExpansion {
original: arena.alloc(original),
replacement: arena.alloc(pos::spanned(expr_span, new)),
},
);
}
struct MacroVisitor<'a: 'b, 'b, 'c, 'd, 'e, 'ast> {
expander: &'b mut MacroExpander<'a>,
symbols: &'c mut Symbols,
arena: &'d mut ast::OwnedArena<'ast, Symbol>,
exprs: Vec<(&'e mut SpannedExpr<'ast, Symbol>, Arc<dyn Macro>)>,
}
impl<'a, 'b, 'c, 'e, 'ast> MutVisitor<'e, 'ast> for MacroVisitor<'a, 'b, 'c, '_, 'e, 'ast> {
type Ident = Symbol;
fn visit_expr(&mut self, expr: &'e mut SpannedExpr<'ast, Symbol>) {
let replacement = match &mut expr.value {
Expr::App {
implicit_args,
func,
args: _,
} => match &func.value {
Expr::Ident(ref id) if id.name.as_str().ends_with('!') => {
if!implicit_args.is_empty() {
self.expander.errors.push(pos::spanned(
expr.span,
Error::message("Implicit arguments are not allowed on macros"),
));
}
let name = id.name.as_str();
match self.expander.macros.get(&name[..name.len() - 1]) {
// FIXME Avoid cloning args
Some(m) => Some(m.clone()),
None => None,
}
}
_ => None,
},
Expr::TypeBindings(binds, body) => {
let Self {
arena,
symbols,
expander,
..
} = self;
let mut generated_bindings = Vec::new();
for bind in &**binds {
generated_bindings.extend(
bind.metadata
.attributes()
.filter(|attr| attr.name == "derive")
.map(|derive| {
match crate::derive::generate(arena.borrow(), symbols, derive, bind)
{
Ok(x) => x,
Err(err) => {
expander.errors.push(pos::spanned(bind.name.span, err));
Vec::new()
}
}
})
.flatten(),
);
}
if!generated_bindings.is_empty() {
let next_expr = mem::take(*body);
body.value =
Expr::rec_let_bindings(self.arena
|
.extend(unordered_errors.into_iter().map(|(_, err)| err));
}
|
random_line_split
|
wl_data_source.rs
|
// Copyright 2014 Jonathan Eyolfson
use libc::{c_char, c_int, c_void, uint32_t};
use raw;
use raw::types::listeners;
use raw::types::objects;
pub const WL_DATA_SOURCE_OFFER: uint32_t = 0;
pub const WL_DATA_SOURCE_DESTROY: uint32_t = 1;
#[inline(always)]
pub unsafe fn wl_data_source_add_listener(
wl_data_source: *mut objects::wl_data_source,
listener: *const listeners::wl_data_source_listener,
data: *mut c_void
) -> c_int {
raw::wl_proxy_add_listener(
wl_data_source as *mut objects::wl_proxy,
listener as *mut extern fn(),
data
)
}
|
pub unsafe fn wl_data_source_set_user_data(
wl_data_source: *mut objects::wl_data_source,
user_data: *mut c_void
) {
raw::wl_proxy_set_user_data(
wl_data_source as *mut objects::wl_proxy,
user_data
)
}
#[inline(always)]
pub unsafe fn wl_data_source_get_user_data(
wl_data_source: *mut objects::wl_data_source
) -> *mut c_void {
raw::wl_proxy_get_user_data(wl_data_source as *mut objects::wl_proxy)
}
#[inline(always)]
pub unsafe fn wl_data_source_offer(
wl_data_source: *mut objects::wl_data_source,
mime_type: *const c_char
) {
raw::wl_proxy_marshal(
wl_data_source as *mut objects::wl_proxy,
WL_DATA_SOURCE_OFFER,
mime_type
)
}
#[inline(always)]
pub unsafe fn wl_data_source_destroy(
wl_data_source: *mut objects::wl_data_source
) {
raw::wl_proxy_marshal(
wl_data_source as *mut objects::wl_proxy,
WL_DATA_SOURCE_DESTROY
);
raw::wl_proxy_destroy(wl_data_source as *mut objects::wl_proxy)
}
|
#[inline(always)]
|
random_line_split
|
wl_data_source.rs
|
// Copyright 2014 Jonathan Eyolfson
use libc::{c_char, c_int, c_void, uint32_t};
use raw;
use raw::types::listeners;
use raw::types::objects;
pub const WL_DATA_SOURCE_OFFER: uint32_t = 0;
pub const WL_DATA_SOURCE_DESTROY: uint32_t = 1;
#[inline(always)]
pub unsafe fn wl_data_source_add_listener(
wl_data_source: *mut objects::wl_data_source,
listener: *const listeners::wl_data_source_listener,
data: *mut c_void
) -> c_int {
raw::wl_proxy_add_listener(
wl_data_source as *mut objects::wl_proxy,
listener as *mut extern fn(),
data
)
}
#[inline(always)]
pub unsafe fn wl_data_source_set_user_data(
wl_data_source: *mut objects::wl_data_source,
user_data: *mut c_void
) {
raw::wl_proxy_set_user_data(
wl_data_source as *mut objects::wl_proxy,
user_data
)
}
#[inline(always)]
pub unsafe fn
|
(
wl_data_source: *mut objects::wl_data_source
) -> *mut c_void {
raw::wl_proxy_get_user_data(wl_data_source as *mut objects::wl_proxy)
}
#[inline(always)]
pub unsafe fn wl_data_source_offer(
wl_data_source: *mut objects::wl_data_source,
mime_type: *const c_char
) {
raw::wl_proxy_marshal(
wl_data_source as *mut objects::wl_proxy,
WL_DATA_SOURCE_OFFER,
mime_type
)
}
#[inline(always)]
pub unsafe fn wl_data_source_destroy(
wl_data_source: *mut objects::wl_data_source
) {
raw::wl_proxy_marshal(
wl_data_source as *mut objects::wl_proxy,
WL_DATA_SOURCE_DESTROY
);
raw::wl_proxy_destroy(wl_data_source as *mut objects::wl_proxy)
}
|
wl_data_source_get_user_data
|
identifier_name
|
wl_data_source.rs
|
// Copyright 2014 Jonathan Eyolfson
use libc::{c_char, c_int, c_void, uint32_t};
use raw;
use raw::types::listeners;
use raw::types::objects;
pub const WL_DATA_SOURCE_OFFER: uint32_t = 0;
pub const WL_DATA_SOURCE_DESTROY: uint32_t = 1;
#[inline(always)]
pub unsafe fn wl_data_source_add_listener(
wl_data_source: *mut objects::wl_data_source,
listener: *const listeners::wl_data_source_listener,
data: *mut c_void
) -> c_int {
raw::wl_proxy_add_listener(
wl_data_source as *mut objects::wl_proxy,
listener as *mut extern fn(),
data
)
}
#[inline(always)]
pub unsafe fn wl_data_source_set_user_data(
wl_data_source: *mut objects::wl_data_source,
user_data: *mut c_void
) {
raw::wl_proxy_set_user_data(
wl_data_source as *mut objects::wl_proxy,
user_data
)
}
#[inline(always)]
pub unsafe fn wl_data_source_get_user_data(
wl_data_source: *mut objects::wl_data_source
) -> *mut c_void
|
#[inline(always)]
pub unsafe fn wl_data_source_offer(
wl_data_source: *mut objects::wl_data_source,
mime_type: *const c_char
) {
raw::wl_proxy_marshal(
wl_data_source as *mut objects::wl_proxy,
WL_DATA_SOURCE_OFFER,
mime_type
)
}
#[inline(always)]
pub unsafe fn wl_data_source_destroy(
wl_data_source: *mut objects::wl_data_source
) {
raw::wl_proxy_marshal(
wl_data_source as *mut objects::wl_proxy,
WL_DATA_SOURCE_DESTROY
);
raw::wl_proxy_destroy(wl_data_source as *mut objects::wl_proxy)
}
|
{
raw::wl_proxy_get_user_data(wl_data_source as *mut objects::wl_proxy)
}
|
identifier_body
|
associated-types-cc.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
// aux-build:associated-types-cc-lib.rs
// Test that we are able to reference cross-crate traits that employ
// associated types.
|
extern crate associated_types_cc_lib as bar;
use bar::Bar;
fn foo<B:Bar>(b: B) -> <B as Bar>::T {
Bar::get(None::<B>)
}
fn main() {
println!("{}", foo(3));
}
|
random_line_split
|
|
associated-types-cc.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
// aux-build:associated-types-cc-lib.rs
// Test that we are able to reference cross-crate traits that employ
// associated types.
extern crate associated_types_cc_lib as bar;
use bar::Bar;
fn foo<B:Bar>(b: B) -> <B as Bar>::T {
Bar::get(None::<B>)
}
fn main()
|
{
println!("{}", foo(3));
}
|
identifier_body
|
|
associated-types-cc.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
// aux-build:associated-types-cc-lib.rs
// Test that we are able to reference cross-crate traits that employ
// associated types.
extern crate associated_types_cc_lib as bar;
use bar::Bar;
fn foo<B:Bar>(b: B) -> <B as Bar>::T {
Bar::get(None::<B>)
}
fn
|
() {
println!("{}", foo(3));
}
|
main
|
identifier_name
|
object-method-numbering.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test for using an object with an associated type binding as the
// instantiation for a generic type with a bound.
trait SomeTrait {
type SomeType;
fn get(&self) -> Self::SomeType;
}
fn
|
<T:SomeTrait<SomeType=i32>+?Sized>(x: &T) -> i32 {
x.get()
}
impl SomeTrait for i32 {
type SomeType = i32;
fn get(&self) -> i32 {
*self
}
}
fn main() {
let x = 22;
let x1: &SomeTrait<SomeType=i32> = &x;
let y = get_int(x1);
assert_eq!(x, y);
}
|
get_int
|
identifier_name
|
object-method-numbering.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test for using an object with an associated type binding as the
// instantiation for a generic type with a bound.
trait SomeTrait {
type SomeType;
fn get(&self) -> Self::SomeType;
}
fn get_int<T:SomeTrait<SomeType=i32>+?Sized>(x: &T) -> i32 {
x.get()
}
impl SomeTrait for i32 {
type SomeType = i32;
fn get(&self) -> i32 {
*self
}
}
fn main()
|
{
let x = 22;
let x1: &SomeTrait<SomeType=i32> = &x;
let y = get_int(x1);
assert_eq!(x, y);
}
|
identifier_body
|
|
object-method-numbering.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test for using an object with an associated type binding as the
// instantiation for a generic type with a bound.
trait SomeTrait {
type SomeType;
fn get(&self) -> Self::SomeType;
}
fn get_int<T:SomeTrait<SomeType=i32>+?Sized>(x: &T) -> i32 {
x.get()
}
impl SomeTrait for i32 {
type SomeType = i32;
fn get(&self) -> i32 {
*self
}
}
fn main() {
let x = 22;
let x1: &SomeTrait<SomeType=i32> = &x;
let y = get_int(x1);
assert_eq!(x, y);
}
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
random_line_split
|
over_nano.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#[cfg(test)]
mod tests {
use super::super::service::*;
use super::super::with_attrs::PrettyNamedClient;
use nanoipc;
use std::sync::Arc;
use std::io::Write;
use std::sync::atomic::{Ordering, AtomicBool};
fn dummy_write(addr: &str, buf: &[u8]) -> (::nanomsg::Socket, ::nanomsg::Endpoint) {
let mut socket = ::nanomsg::Socket::new(::nanomsg::Protocol::Pair).unwrap();
let endpoint = socket.connect(addr).unwrap();
socket.write(buf).unwrap();
(socket, endpoint)
}
fn init_worker(addr: &str) -> nanoipc::Worker<Service>
|
#[test]
fn can_create_client() {
let client = nanoipc::init_duplex_client::<ServiceClient<_>>("ipc:///tmp/parity-nano-test10.ipc");
assert!(client.is_ok());
}
#[test]
fn can_create_renamed_client() {
let client = nanoipc::init_duplex_client::<PrettyNamedClient<_>>("ipc:///tmp/parity-nano-test10.ipc");
assert!(client.is_ok());
}
#[test]
fn can_call_handshake() {
let url = "ipc:///tmp/parity-test-nano-20.ipc";
let worker_should_exit = Arc::new(AtomicBool::new(false));
let worker_is_ready = Arc::new(AtomicBool::new(false));
let c_worker_should_exit = worker_should_exit.clone();
let c_worker_is_ready = worker_is_ready.clone();
::std::thread::spawn(move || {
let mut worker = init_worker(url);
while!c_worker_should_exit.load(Ordering::Relaxed) {
worker.poll();
c_worker_is_ready.store(true, Ordering::Relaxed);
}
});
while!worker_is_ready.load(Ordering::Relaxed) { }
let client = nanoipc::init_duplex_client::<ServiceClient<_>>(url).unwrap();
let hs = client.handshake();
worker_should_exit.store(true, Ordering::Relaxed);
assert!(hs.is_ok());
}
}
|
{
let mut worker = nanoipc::Worker::<Service>::new(&Arc::new(Service::new()));
worker.add_duplex(addr).unwrap();
worker
}
|
identifier_body
|
over_nano.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#[cfg(test)]
mod tests {
use super::super::service::*;
use super::super::with_attrs::PrettyNamedClient;
use nanoipc;
use std::sync::Arc;
use std::io::Write;
use std::sync::atomic::{Ordering, AtomicBool};
fn dummy_write(addr: &str, buf: &[u8]) -> (::nanomsg::Socket, ::nanomsg::Endpoint) {
let mut socket = ::nanomsg::Socket::new(::nanomsg::Protocol::Pair).unwrap();
let endpoint = socket.connect(addr).unwrap();
socket.write(buf).unwrap();
(socket, endpoint)
}
fn
|
(addr: &str) -> nanoipc::Worker<Service> {
let mut worker = nanoipc::Worker::<Service>::new(&Arc::new(Service::new()));
worker.add_duplex(addr).unwrap();
worker
}
#[test]
fn can_create_client() {
let client = nanoipc::init_duplex_client::<ServiceClient<_>>("ipc:///tmp/parity-nano-test10.ipc");
assert!(client.is_ok());
}
#[test]
fn can_create_renamed_client() {
let client = nanoipc::init_duplex_client::<PrettyNamedClient<_>>("ipc:///tmp/parity-nano-test10.ipc");
assert!(client.is_ok());
}
#[test]
fn can_call_handshake() {
let url = "ipc:///tmp/parity-test-nano-20.ipc";
let worker_should_exit = Arc::new(AtomicBool::new(false));
let worker_is_ready = Arc::new(AtomicBool::new(false));
let c_worker_should_exit = worker_should_exit.clone();
let c_worker_is_ready = worker_is_ready.clone();
::std::thread::spawn(move || {
let mut worker = init_worker(url);
while!c_worker_should_exit.load(Ordering::Relaxed) {
worker.poll();
c_worker_is_ready.store(true, Ordering::Relaxed);
}
});
while!worker_is_ready.load(Ordering::Relaxed) { }
let client = nanoipc::init_duplex_client::<ServiceClient<_>>(url).unwrap();
let hs = client.handshake();
worker_should_exit.store(true, Ordering::Relaxed);
assert!(hs.is_ok());
}
}
|
init_worker
|
identifier_name
|
over_nano.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#[cfg(test)]
mod tests {
use super::super::service::*;
use super::super::with_attrs::PrettyNamedClient;
use nanoipc;
use std::sync::Arc;
use std::io::Write;
use std::sync::atomic::{Ordering, AtomicBool};
fn dummy_write(addr: &str, buf: &[u8]) -> (::nanomsg::Socket, ::nanomsg::Endpoint) {
let mut socket = ::nanomsg::Socket::new(::nanomsg::Protocol::Pair).unwrap();
let endpoint = socket.connect(addr).unwrap();
socket.write(buf).unwrap();
(socket, endpoint)
}
|
fn init_worker(addr: &str) -> nanoipc::Worker<Service> {
let mut worker = nanoipc::Worker::<Service>::new(&Arc::new(Service::new()));
worker.add_duplex(addr).unwrap();
worker
}
#[test]
fn can_create_client() {
let client = nanoipc::init_duplex_client::<ServiceClient<_>>("ipc:///tmp/parity-nano-test10.ipc");
assert!(client.is_ok());
}
#[test]
fn can_create_renamed_client() {
let client = nanoipc::init_duplex_client::<PrettyNamedClient<_>>("ipc:///tmp/parity-nano-test10.ipc");
assert!(client.is_ok());
}
#[test]
fn can_call_handshake() {
let url = "ipc:///tmp/parity-test-nano-20.ipc";
let worker_should_exit = Arc::new(AtomicBool::new(false));
let worker_is_ready = Arc::new(AtomicBool::new(false));
let c_worker_should_exit = worker_should_exit.clone();
let c_worker_is_ready = worker_is_ready.clone();
::std::thread::spawn(move || {
let mut worker = init_worker(url);
while!c_worker_should_exit.load(Ordering::Relaxed) {
worker.poll();
c_worker_is_ready.store(true, Ordering::Relaxed);
}
});
while!worker_is_ready.load(Ordering::Relaxed) { }
let client = nanoipc::init_duplex_client::<ServiceClient<_>>(url).unwrap();
let hs = client.handshake();
worker_should_exit.store(true, Ordering::Relaxed);
assert!(hs.is_ok());
}
}
|
random_line_split
|
|
ext.rs
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use syn::{Data, DataEnum, DataStruct, DataUnion, Field, Fields, Type};
pub trait DataExt {
fn nested_types(&self) -> Vec<&Type>;
}
impl DataExt for Data {
fn nested_types(&self) -> Vec<&Type> {
match self {
Data::Struct(strc) => strc.nested_types(),
Data::Enum(enm) => enm.nested_types(),
Data::Union(un) => un.nested_types(),
}
}
}
impl DataExt for DataStruct {
fn nested_types(&self) -> Vec<&Type> {
fields_to_types(&self.fields)
}
}
impl DataExt for DataEnum {
fn nested_types(&self) -> Vec<&Type> {
self.variants.iter().map(|var| fields_to_types(&var.fields)).fold(Vec::new(), |mut a, b| {
a.extend(b);
a
})
}
}
pub trait EnumExt {
fn is_c_like(&self) -> bool;
}
impl EnumExt for DataEnum {
fn is_c_like(&self) -> bool
|
}
impl DataExt for DataUnion {
fn nested_types(&self) -> Vec<&Type> {
field_iter_to_types(&self.fields.named)
}
}
fn fields_to_types(fields: &Fields) -> Vec<&Type> {
match fields {
Fields::Named(named) => field_iter_to_types(&named.named),
Fields::Unnamed(unnamed) => field_iter_to_types(&unnamed.unnamed),
Fields::Unit => Vec::new(),
}
}
fn field_iter_to_types<'a, I: IntoIterator<Item = &'a Field>>(fields: I) -> Vec<&'a Type> {
fields.into_iter().map(|f| &f.ty).collect()
}
|
{
self.nested_types().is_empty()
}
|
identifier_body
|
ext.rs
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use syn::{Data, DataEnum, DataStruct, DataUnion, Field, Fields, Type};
pub trait DataExt {
fn nested_types(&self) -> Vec<&Type>;
}
impl DataExt for Data {
fn nested_types(&self) -> Vec<&Type> {
match self {
Data::Struct(strc) => strc.nested_types(),
Data::Enum(enm) => enm.nested_types(),
Data::Union(un) => un.nested_types(),
}
}
}
impl DataExt for DataStruct {
fn nested_types(&self) -> Vec<&Type> {
fields_to_types(&self.fields)
}
}
impl DataExt for DataEnum {
fn nested_types(&self) -> Vec<&Type> {
self.variants.iter().map(|var| fields_to_types(&var.fields)).fold(Vec::new(), |mut a, b| {
a.extend(b);
a
})
}
}
pub trait EnumExt {
fn is_c_like(&self) -> bool;
}
impl EnumExt for DataEnum {
fn
|
(&self) -> bool {
self.nested_types().is_empty()
}
}
impl DataExt for DataUnion {
fn nested_types(&self) -> Vec<&Type> {
field_iter_to_types(&self.fields.named)
}
}
fn fields_to_types(fields: &Fields) -> Vec<&Type> {
match fields {
Fields::Named(named) => field_iter_to_types(&named.named),
Fields::Unnamed(unnamed) => field_iter_to_types(&unnamed.unnamed),
Fields::Unit => Vec::new(),
}
}
fn field_iter_to_types<'a, I: IntoIterator<Item = &'a Field>>(fields: I) -> Vec<&'a Type> {
fields.into_iter().map(|f| &f.ty).collect()
}
|
is_c_like
|
identifier_name
|
ext.rs
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use syn::{Data, DataEnum, DataStruct, DataUnion, Field, Fields, Type};
pub trait DataExt {
fn nested_types(&self) -> Vec<&Type>;
}
impl DataExt for Data {
fn nested_types(&self) -> Vec<&Type> {
match self {
Data::Struct(strc) => strc.nested_types(),
Data::Enum(enm) => enm.nested_types(),
Data::Union(un) => un.nested_types(),
}
}
}
impl DataExt for DataStruct {
fn nested_types(&self) -> Vec<&Type> {
fields_to_types(&self.fields)
|
impl DataExt for DataEnum {
fn nested_types(&self) -> Vec<&Type> {
self.variants.iter().map(|var| fields_to_types(&var.fields)).fold(Vec::new(), |mut a, b| {
a.extend(b);
a
})
}
}
pub trait EnumExt {
fn is_c_like(&self) -> bool;
}
impl EnumExt for DataEnum {
fn is_c_like(&self) -> bool {
self.nested_types().is_empty()
}
}
impl DataExt for DataUnion {
fn nested_types(&self) -> Vec<&Type> {
field_iter_to_types(&self.fields.named)
}
}
fn fields_to_types(fields: &Fields) -> Vec<&Type> {
match fields {
Fields::Named(named) => field_iter_to_types(&named.named),
Fields::Unnamed(unnamed) => field_iter_to_types(&unnamed.unnamed),
Fields::Unit => Vec::new(),
}
}
fn field_iter_to_types<'a, I: IntoIterator<Item = &'a Field>>(fields: I) -> Vec<&'a Type> {
fields.into_iter().map(|f| &f.ty).collect()
}
|
}
}
|
random_line_split
|
serial.rs
|
//! contains class Serial - singleton for writing symbols to serial port.
use ::fmt;
use ::ioports::*;
use ::utility::*;
const PORT: u16 = 0x3f8;
static mut PORT_0: IOPort<(), u8> = IOPort::new(PORT + 0);
static mut PORT_1: IOPort<(), u8> = IOPort::new(PORT + 1);
// static mut PORT_2: IOPort<(), u8> = IOPort::new(PORT + 2); // unused
static mut PORT_3: IOPort<(), u8> = IOPort::new(PORT + 3);
//static mut PORT_4: IOPort<(), u8> = IOPort::new(PORT + 4); // unused
static mut PORT_5: IOPort<u8, ()> = IOPort::new(PORT + 5);
|
pub fn get() -> &'static mut Self {
static mut INIT: bool = false;
unsafe {
if!INIT {
// Disable all interrupts
PORT_1.write(0b0000_0000);
// Enable DLAB (set baud rate divisor)
PORT_3.write(0b1000_0000);
// Set divisor to 3 (lo byte) 38400 baud
PORT_0.write(0b0000_0011);
// (hi byte)
PORT_1.write(0b0000_0000);
// Frame format: 8 bits, no parity, one stop bit
PORT_3.write(0b0000_0011);
INIT = true;
}
}
unsafe { &mut OBJECT }
}
}
impl fmt::Write for Serial {
fn write_char(&mut self, c: char) -> fmt::Result {
loop {
let free = unsafe { get_bit(PORT_5.read(), 5) };
if free {
unsafe { PORT_0.write(c as u8) };
break;
}
}
Ok(())
}
fn write_str(&mut self, s: &str) -> fmt::Result {
for char in s.chars() {
try!(self.write_char(char));
}
Ok(())
}
}
|
pub struct Serial;
static mut OBJECT: Serial = Serial { };
impl Serial {
|
random_line_split
|
serial.rs
|
//! contains class Serial - singleton for writing symbols to serial port.
use ::fmt;
use ::ioports::*;
use ::utility::*;
const PORT: u16 = 0x3f8;
static mut PORT_0: IOPort<(), u8> = IOPort::new(PORT + 0);
static mut PORT_1: IOPort<(), u8> = IOPort::new(PORT + 1);
// static mut PORT_2: IOPort<(), u8> = IOPort::new(PORT + 2); // unused
static mut PORT_3: IOPort<(), u8> = IOPort::new(PORT + 3);
//static mut PORT_4: IOPort<(), u8> = IOPort::new(PORT + 4); // unused
static mut PORT_5: IOPort<u8, ()> = IOPort::new(PORT + 5);
pub struct Serial;
static mut OBJECT: Serial = Serial { };
impl Serial {
pub fn get() -> &'static mut Self {
static mut INIT: bool = false;
unsafe {
if!INIT
|
}
unsafe { &mut OBJECT }
}
}
impl fmt::Write for Serial {
fn write_char(&mut self, c: char) -> fmt::Result {
loop {
let free = unsafe { get_bit(PORT_5.read(), 5) };
if free {
unsafe { PORT_0.write(c as u8) };
break;
}
}
Ok(())
}
fn write_str(&mut self, s: &str) -> fmt::Result {
for char in s.chars() {
try!(self.write_char(char));
}
Ok(())
}
}
|
{
// Disable all interrupts
PORT_1.write(0b0000_0000);
// Enable DLAB (set baud rate divisor)
PORT_3.write(0b1000_0000);
// Set divisor to 3 (lo byte) 38400 baud
PORT_0.write(0b0000_0011);
// (hi byte)
PORT_1.write(0b0000_0000);
// Frame format: 8 bits, no parity, one stop bit
PORT_3.write(0b0000_0011);
INIT = true;
}
|
conditional_block
|
serial.rs
|
//! contains class Serial - singleton for writing symbols to serial port.
use ::fmt;
use ::ioports::*;
use ::utility::*;
const PORT: u16 = 0x3f8;
static mut PORT_0: IOPort<(), u8> = IOPort::new(PORT + 0);
static mut PORT_1: IOPort<(), u8> = IOPort::new(PORT + 1);
// static mut PORT_2: IOPort<(), u8> = IOPort::new(PORT + 2); // unused
static mut PORT_3: IOPort<(), u8> = IOPort::new(PORT + 3);
//static mut PORT_4: IOPort<(), u8> = IOPort::new(PORT + 4); // unused
static mut PORT_5: IOPort<u8, ()> = IOPort::new(PORT + 5);
pub struct Serial;
static mut OBJECT: Serial = Serial { };
impl Serial {
pub fn get() -> &'static mut Self {
static mut INIT: bool = false;
unsafe {
if!INIT {
// Disable all interrupts
PORT_1.write(0b0000_0000);
// Enable DLAB (set baud rate divisor)
PORT_3.write(0b1000_0000);
// Set divisor to 3 (lo byte) 38400 baud
PORT_0.write(0b0000_0011);
// (hi byte)
PORT_1.write(0b0000_0000);
// Frame format: 8 bits, no parity, one stop bit
PORT_3.write(0b0000_0011);
INIT = true;
}
}
unsafe { &mut OBJECT }
}
}
impl fmt::Write for Serial {
fn write_char(&mut self, c: char) -> fmt::Result
|
fn write_str(&mut self, s: &str) -> fmt::Result {
for char in s.chars() {
try!(self.write_char(char));
}
Ok(())
}
}
|
{
loop {
let free = unsafe { get_bit(PORT_5.read(), 5) };
if free {
unsafe { PORT_0.write(c as u8) };
break;
}
}
Ok(())
}
|
identifier_body
|
serial.rs
|
//! contains class Serial - singleton for writing symbols to serial port.
use ::fmt;
use ::ioports::*;
use ::utility::*;
const PORT: u16 = 0x3f8;
static mut PORT_0: IOPort<(), u8> = IOPort::new(PORT + 0);
static mut PORT_1: IOPort<(), u8> = IOPort::new(PORT + 1);
// static mut PORT_2: IOPort<(), u8> = IOPort::new(PORT + 2); // unused
static mut PORT_3: IOPort<(), u8> = IOPort::new(PORT + 3);
//static mut PORT_4: IOPort<(), u8> = IOPort::new(PORT + 4); // unused
static mut PORT_5: IOPort<u8, ()> = IOPort::new(PORT + 5);
pub struct
|
;
static mut OBJECT: Serial = Serial { };
impl Serial {
pub fn get() -> &'static mut Self {
static mut INIT: bool = false;
unsafe {
if!INIT {
// Disable all interrupts
PORT_1.write(0b0000_0000);
// Enable DLAB (set baud rate divisor)
PORT_3.write(0b1000_0000);
// Set divisor to 3 (lo byte) 38400 baud
PORT_0.write(0b0000_0011);
// (hi byte)
PORT_1.write(0b0000_0000);
// Frame format: 8 bits, no parity, one stop bit
PORT_3.write(0b0000_0011);
INIT = true;
}
}
unsafe { &mut OBJECT }
}
}
impl fmt::Write for Serial {
fn write_char(&mut self, c: char) -> fmt::Result {
loop {
let free = unsafe { get_bit(PORT_5.read(), 5) };
if free {
unsafe { PORT_0.write(c as u8) };
break;
}
}
Ok(())
}
fn write_str(&mut self, s: &str) -> fmt::Result {
for char in s.chars() {
try!(self.write_char(char));
}
Ok(())
}
}
|
Serial
|
identifier_name
|
alt-borrowed_str.rs
|
// xfail-test
// xfail-fast
// -*- rust -*-
fn f1(ref_string: &str) {
match ref_string {
"a" => io::println("found a"),
"b" => io::println("found b"),
_ => io::println("not found")
}
}
fn f2(ref_string: &str) {
match ref_string {
"a" => io::println("found a"),
"b" => io::println("found b"),
s => io::println(fmt!("not found (%s)", s))
}
}
fn g1(ref_1: &str, ref_2: &str) {
match (ref_1, ref_2) {
("a", "b") => io::println("found a,b"),
("b", "c") => io::println("found b,c"),
_ => io::println("not found")
}
}
fn g2(ref_1: &str, ref_2: &str) {
match (ref_1, ref_2) {
("a", "b") => io::println("found a,b"),
("b", "c") => io::println("found b,c"),
(s1, s2) => io::println(fmt!("not found (%s, %s)", s1, s2))
}
}
pub fn
|
() {
f1(@"a");
f1(~"b");
f1(&"c");
f1("d");
f2(@"a");
f2(~"b");
f2(&"c");
f2("d");
g1(@"a", @"b");
g1(~"b", ~"c");
g1(&"c", &"d");
g1("d", "e");
g2(@"a", @"b");
g2(~"b", ~"c");
g2(&"c", &"d");
g2("d", "e");
}
|
main
|
identifier_name
|
alt-borrowed_str.rs
|
// xfail-test
// xfail-fast
// -*- rust -*-
fn f1(ref_string: &str) {
match ref_string {
"a" => io::println("found a"),
"b" => io::println("found b"),
_ => io::println("not found")
}
}
fn f2(ref_string: &str)
|
fn g1(ref_1: &str, ref_2: &str) {
match (ref_1, ref_2) {
("a", "b") => io::println("found a,b"),
("b", "c") => io::println("found b,c"),
_ => io::println("not found")
}
}
fn g2(ref_1: &str, ref_2: &str) {
match (ref_1, ref_2) {
("a", "b") => io::println("found a,b"),
("b", "c") => io::println("found b,c"),
(s1, s2) => io::println(fmt!("not found (%s, %s)", s1, s2))
}
}
pub fn main() {
f1(@"a");
f1(~"b");
f1(&"c");
f1("d");
f2(@"a");
f2(~"b");
f2(&"c");
f2("d");
g1(@"a", @"b");
g1(~"b", ~"c");
g1(&"c", &"d");
g1("d", "e");
g2(@"a", @"b");
g2(~"b", ~"c");
g2(&"c", &"d");
g2("d", "e");
}
|
{
match ref_string {
"a" => io::println("found a"),
"b" => io::println("found b"),
s => io::println(fmt!("not found (%s)", s))
}
}
|
identifier_body
|
alt-borrowed_str.rs
|
// xfail-test
// xfail-fast
// -*- rust -*-
fn f1(ref_string: &str) {
match ref_string {
"a" => io::println("found a"),
"b" => io::println("found b"),
_ => io::println("not found")
}
}
fn f2(ref_string: &str) {
match ref_string {
"a" => io::println("found a"),
"b" => io::println("found b"),
s => io::println(fmt!("not found (%s)", s))
}
}
fn g1(ref_1: &str, ref_2: &str) {
match (ref_1, ref_2) {
("a", "b") => io::println("found a,b"),
("b", "c") => io::println("found b,c"),
_ => io::println("not found")
}
}
fn g2(ref_1: &str, ref_2: &str) {
match (ref_1, ref_2) {
("a", "b") => io::println("found a,b"),
("b", "c") => io::println("found b,c"),
(s1, s2) => io::println(fmt!("not found (%s, %s)", s1, s2))
}
}
pub fn main() {
f1(@"a");
f1(~"b");
|
f1("d");
f2(@"a");
f2(~"b");
f2(&"c");
f2("d");
g1(@"a", @"b");
g1(~"b", ~"c");
g1(&"c", &"d");
g1("d", "e");
g2(@"a", @"b");
g2(~"b", ~"c");
g2(&"c", &"d");
g2("d", "e");
}
|
f1(&"c");
|
random_line_split
|
rust_string.rs
|
extern crate tor_util;
extern crate libc;
use std::ffi::CString;
use tor_util::RustString;
#[test]
fn rust_string_conversions_preserve_c_string() {
let s = CString::new("asdf foo").unwrap();
let r = RustString::from(s.clone());
let r2 = RustString::from(s.clone());
let c = r2.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 8);
let c_str = r.into();
assert_eq!(s, c_str);
}
#[test]
fn
|
() {
let s = CString::new("").unwrap();
let r = RustString::from(s.clone());
let c = r.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 0);
let c_str = r.into();
assert_eq!(s, c_str);
}
#[test]
fn c_string_with_unicode() {
// The euro sign is three bytes
let s = CString::new("asd€asd").unwrap();
let r = RustString::from(s.clone());
let c = r.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 9);
let c_str = r.into();
assert_eq!(s, c_str);
}
|
empty_string
|
identifier_name
|
rust_string.rs
|
extern crate tor_util;
extern crate libc;
use std::ffi::CString;
use tor_util::RustString;
#[test]
fn rust_string_conversions_preserve_c_string()
|
#[test]
fn empty_string() {
let s = CString::new("").unwrap();
let r = RustString::from(s.clone());
let c = r.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 0);
let c_str = r.into();
assert_eq!(s, c_str);
}
#[test]
fn c_string_with_unicode() {
// The euro sign is three bytes
let s = CString::new("asd€asd").unwrap();
let r = RustString::from(s.clone());
let c = r.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 9);
let c_str = r.into();
assert_eq!(s, c_str);
}
|
{
let s = CString::new("asdf foo").unwrap();
let r = RustString::from(s.clone());
let r2 = RustString::from(s.clone());
let c = r2.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 8);
let c_str = r.into();
assert_eq!(s, c_str);
}
|
identifier_body
|
rust_string.rs
|
extern crate tor_util;
extern crate libc;
|
let s = CString::new("asdf foo").unwrap();
let r = RustString::from(s.clone());
let r2 = RustString::from(s.clone());
let c = r2.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 8);
let c_str = r.into();
assert_eq!(s, c_str);
}
#[test]
fn empty_string() {
let s = CString::new("").unwrap();
let r = RustString::from(s.clone());
let c = r.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 0);
let c_str = r.into();
assert_eq!(s, c_str);
}
#[test]
fn c_string_with_unicode() {
// The euro sign is three bytes
let s = CString::new("asd€asd").unwrap();
let r = RustString::from(s.clone());
let c = r.as_ptr();
assert_eq!(unsafe { libc::strlen(c) }, 9);
let c_str = r.into();
assert_eq!(s, c_str);
}
|
use std::ffi::CString;
use tor_util::RustString;
#[test]
fn rust_string_conversions_preserve_c_string() {
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.