file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
peers.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0 | // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! TUI peer display
use std::cmp::Ordering;
use crate::servers::{PeerStats, ServerStats};
use chrono::prelude::*;
use humansize::{file_size_opts::CONVENTIONAL, FileSize};
use cursive::direction::Orientation;
use cursive::event::Key;
use cursive::traits::{Boxable, Identifiable};
use cursive::view::View;
use cursive::views::{Dialog, LinearLayout, OnEventView, ResizedView, TextView};
use cursive::Cursive;
use crate::tui::constants::{MAIN_MENU, TABLE_PEER_STATUS, VIEW_PEER_SYNC};
use crate::tui::types::TUIStatusListener;
use cursive_table_view::{TableView, TableViewItem};
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum PeerColumn {
Address,
State,
UsedBandwidth,
TotalDifficulty,
Direction,
Version,
UserAgent,
Capabilities,
}
impl PeerColumn {
fn _as_str(&self) -> &str {
match *self {
PeerColumn::Address => "Address",
PeerColumn::State => "State",
PeerColumn::UsedBandwidth => "Used bandwidth",
PeerColumn::Version => "Version",
PeerColumn::TotalDifficulty => "Total Difficulty",
PeerColumn::Direction => "Direction",
PeerColumn::UserAgent => "User Agent",
PeerColumn::Capabilities => "Capabilities",
}
}
}
impl TableViewItem<PeerColumn> for PeerStats {
fn to_column(&self, column: PeerColumn) -> String {
// Converts optional size to human readable size
fn size_to_string(size: u64) -> String {
size.file_size(CONVENTIONAL)
.unwrap_or_else(|_| "-".to_string())
}
match column {
PeerColumn::Address => self.addr.clone(),
PeerColumn::State => self.state.clone(),
PeerColumn::UsedBandwidth => format!(
"↑: {}, ↓: {}",
size_to_string(self.sent_bytes_per_sec),
size_to_string(self.received_bytes_per_sec),
),
PeerColumn::TotalDifficulty => format!(
"{} D @ {} H ({}s)",
self.total_difficulty,
self.height,
(Utc::now() - self.last_seen).num_seconds(),
),
PeerColumn::Direction => self.direction.clone(),
PeerColumn::Version => format!("{}", self.version),
PeerColumn::UserAgent => self.user_agent.clone(),
PeerColumn::Capabilities => format!("{}", self.capabilities.bits()),
}
}
fn cmp(&self, other: &Self, column: PeerColumn) -> Ordering
where
Self: Sized,
{
// Compares used bandwidth of two peers
fn cmp_used_bandwidth(curr: &PeerStats, other: &PeerStats) -> Ordering {
let curr_recv_bytes = curr.received_bytes_per_sec;
let curr_sent_bytes = curr.sent_bytes_per_sec;
let other_recv_bytes = other.received_bytes_per_sec;
let other_sent_bytes = other.sent_bytes_per_sec;
let curr_sum = curr_recv_bytes + curr_sent_bytes;
let other_sum = other_recv_bytes + other_sent_bytes;
curr_sum.cmp(&other_sum)
}
let sort_by_addr = || self.addr.cmp(&other.addr);
match column {
PeerColumn::Address => sort_by_addr(),
PeerColumn::State => self.state.cmp(&other.state).then(sort_by_addr()),
PeerColumn::UsedBandwidth => cmp_used_bandwidth(&self, &other).then(sort_by_addr()),
PeerColumn::TotalDifficulty => self
.total_difficulty
.cmp(&other.total_difficulty)
.then(sort_by_addr()),
PeerColumn::Direction => self.direction.cmp(&other.direction).then(sort_by_addr()),
PeerColumn::Version => self.version.cmp(&other.version).then(sort_by_addr()),
PeerColumn::UserAgent => self.user_agent.cmp(&other.user_agent).then(sort_by_addr()),
PeerColumn::Capabilities => self
.capabilities
.cmp(&other.capabilities)
.then(sort_by_addr()),
}
}
}
pub struct TUIPeerView;
impl TUIPeerView {
pub fn create() -> impl View {
let table_view = TableView::<PeerStats, PeerColumn>::new()
.column(PeerColumn::Address, "Address", |c| c.width_percent(16))
.column(PeerColumn::State, "State", |c| c.width_percent(8))
.column(PeerColumn::UsedBandwidth, "Used bandwidth", |c| {
c.width_percent(16)
})
.column(PeerColumn::Direction, "Direction", |c| c.width_percent(8))
.column(PeerColumn::TotalDifficulty, "Total Difficulty", |c| {
c.width_percent(24)
})
.column(PeerColumn::Version, "Proto", |c| c.width_percent(4))
.column(PeerColumn::Capabilities, "Capab", |c| c.width_percent(4))
.column(PeerColumn::UserAgent, "User Agent", |c| c.width_percent(18));
let peer_status_view = ResizedView::with_full_screen(
LinearLayout::new(Orientation::Vertical)
.child(
LinearLayout::new(Orientation::Horizontal)
.child(TextView::new(" ").with_name("peers_total")),
)
.child(
LinearLayout::new(Orientation::Horizontal)
.child(TextView::new("Longest Chain: "))
.child(TextView::new(" ").with_name("longest_work_peer")),
)
.child(TextView::new(" "))
.child(
Dialog::around(table_view.with_name(TABLE_PEER_STATUS).min_size((50, 20)))
.title("Connected Peers"),
),
)
.with_name(VIEW_PEER_SYNC);
let peer_status_view =
OnEventView::new(peer_status_view).on_pre_event(Key::Esc, move |c| {
let _ = c.focus_name(MAIN_MENU);
});
peer_status_view
}
}
impl TUIStatusListener for TUIPeerView {
fn update(c: &mut Cursive, stats: &ServerStats) {
let lp = stats
.peer_stats
.iter()
.max_by(|x, y| x.total_difficulty.cmp(&y.total_difficulty));
let lp_str = match lp {
Some(l) => format!(
"{} D @ {} H vs Us: {} D @ {} H",
l.total_difficulty,
l.height,
stats.chain_stats.total_difficulty,
stats.chain_stats.height
),
None => "".to_string(),
};
let _ = c.call_on_name(
TABLE_PEER_STATUS,
|t: &mut TableView<PeerStats, PeerColumn>| {
t.set_items_stable(stats.peer_stats.clone());
},
);
let _ = c.call_on_name("peers_total", |t: &mut TextView| {
t.set_content(format!(
"Total Peers: {} (Outbound: {})",
stats.peer_stats.len(),
stats
.peer_stats
.iter()
.filter(|x| x.direction == "Outbound")
.count(),
));
});
let _ = c.call_on_name("longest_work_peer", |t: &mut TextView| {
t.set_content(lp_str);
});
}
} | // | random_line_split |
peers.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! TUI peer display
use std::cmp::Ordering;
use crate::servers::{PeerStats, ServerStats};
use chrono::prelude::*;
use humansize::{file_size_opts::CONVENTIONAL, FileSize};
use cursive::direction::Orientation;
use cursive::event::Key;
use cursive::traits::{Boxable, Identifiable};
use cursive::view::View;
use cursive::views::{Dialog, LinearLayout, OnEventView, ResizedView, TextView};
use cursive::Cursive;
use crate::tui::constants::{MAIN_MENU, TABLE_PEER_STATUS, VIEW_PEER_SYNC};
use crate::tui::types::TUIStatusListener;
use cursive_table_view::{TableView, TableViewItem};
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum PeerColumn {
Address,
State,
UsedBandwidth,
TotalDifficulty,
Direction,
Version,
UserAgent,
Capabilities,
}
impl PeerColumn {
fn _as_str(&self) -> &str {
match *self {
PeerColumn::Address => "Address",
PeerColumn::State => "State",
PeerColumn::UsedBandwidth => "Used bandwidth",
PeerColumn::Version => "Version",
PeerColumn::TotalDifficulty => "Total Difficulty",
PeerColumn::Direction => "Direction",
PeerColumn::UserAgent => "User Agent",
PeerColumn::Capabilities => "Capabilities",
}
}
}
impl TableViewItem<PeerColumn> for PeerStats {
fn | (&self, column: PeerColumn) -> String {
// Converts optional size to human readable size
fn size_to_string(size: u64) -> String {
size.file_size(CONVENTIONAL)
.unwrap_or_else(|_| "-".to_string())
}
match column {
PeerColumn::Address => self.addr.clone(),
PeerColumn::State => self.state.clone(),
PeerColumn::UsedBandwidth => format!(
"↑: {}, ↓: {}",
size_to_string(self.sent_bytes_per_sec),
size_to_string(self.received_bytes_per_sec),
),
PeerColumn::TotalDifficulty => format!(
"{} D @ {} H ({}s)",
self.total_difficulty,
self.height,
(Utc::now() - self.last_seen).num_seconds(),
),
PeerColumn::Direction => self.direction.clone(),
PeerColumn::Version => format!("{}", self.version),
PeerColumn::UserAgent => self.user_agent.clone(),
PeerColumn::Capabilities => format!("{}", self.capabilities.bits()),
}
}
fn cmp(&self, other: &Self, column: PeerColumn) -> Ordering
where
Self: Sized,
{
// Compares used bandwidth of two peers
fn cmp_used_bandwidth(curr: &PeerStats, other: &PeerStats) -> Ordering {
let curr_recv_bytes = curr.received_bytes_per_sec;
let curr_sent_bytes = curr.sent_bytes_per_sec;
let other_recv_bytes = other.received_bytes_per_sec;
let other_sent_bytes = other.sent_bytes_per_sec;
let curr_sum = curr_recv_bytes + curr_sent_bytes;
let other_sum = other_recv_bytes + other_sent_bytes;
curr_sum.cmp(&other_sum)
}
let sort_by_addr = || self.addr.cmp(&other.addr);
match column {
PeerColumn::Address => sort_by_addr(),
PeerColumn::State => self.state.cmp(&other.state).then(sort_by_addr()),
PeerColumn::UsedBandwidth => cmp_used_bandwidth(&self, &other).then(sort_by_addr()),
PeerColumn::TotalDifficulty => self
.total_difficulty
.cmp(&other.total_difficulty)
.then(sort_by_addr()),
PeerColumn::Direction => self.direction.cmp(&other.direction).then(sort_by_addr()),
PeerColumn::Version => self.version.cmp(&other.version).then(sort_by_addr()),
PeerColumn::UserAgent => self.user_agent.cmp(&other.user_agent).then(sort_by_addr()),
PeerColumn::Capabilities => self
.capabilities
.cmp(&other.capabilities)
.then(sort_by_addr()),
}
}
}
pub struct TUIPeerView;
impl TUIPeerView {
pub fn create() -> impl View {
let table_view = TableView::<PeerStats, PeerColumn>::new()
.column(PeerColumn::Address, "Address", |c| c.width_percent(16))
.column(PeerColumn::State, "State", |c| c.width_percent(8))
.column(PeerColumn::UsedBandwidth, "Used bandwidth", |c| {
c.width_percent(16)
})
.column(PeerColumn::Direction, "Direction", |c| c.width_percent(8))
.column(PeerColumn::TotalDifficulty, "Total Difficulty", |c| {
c.width_percent(24)
})
.column(PeerColumn::Version, "Proto", |c| c.width_percent(4))
.column(PeerColumn::Capabilities, "Capab", |c| c.width_percent(4))
.column(PeerColumn::UserAgent, "User Agent", |c| c.width_percent(18));
let peer_status_view = ResizedView::with_full_screen(
LinearLayout::new(Orientation::Vertical)
.child(
LinearLayout::new(Orientation::Horizontal)
.child(TextView::new(" ").with_name("peers_total")),
)
.child(
LinearLayout::new(Orientation::Horizontal)
.child(TextView::new("Longest Chain: "))
.child(TextView::new(" ").with_name("longest_work_peer")),
)
.child(TextView::new(" "))
.child(
Dialog::around(table_view.with_name(TABLE_PEER_STATUS).min_size((50, 20)))
.title("Connected Peers"),
),
)
.with_name(VIEW_PEER_SYNC);
let peer_status_view =
OnEventView::new(peer_status_view).on_pre_event(Key::Esc, move |c| {
let _ = c.focus_name(MAIN_MENU);
});
peer_status_view
}
}
impl TUIStatusListener for TUIPeerView {
fn update(c: &mut Cursive, stats: &ServerStats) {
let lp = stats
.peer_stats
.iter()
.max_by(|x, y| x.total_difficulty.cmp(&y.total_difficulty));
let lp_str = match lp {
Some(l) => format!(
"{} D @ {} H vs Us: {} D @ {} H",
l.total_difficulty,
l.height,
stats.chain_stats.total_difficulty,
stats.chain_stats.height
),
None => "".to_string(),
};
let _ = c.call_on_name(
TABLE_PEER_STATUS,
|t: &mut TableView<PeerStats, PeerColumn>| {
t.set_items_stable(stats.peer_stats.clone());
},
);
let _ = c.call_on_name("peers_total", |t: &mut TextView| {
t.set_content(format!(
"Total Peers: {} (Outbound: {})",
stats.peer_stats.len(),
stats
.peer_stats
.iter()
.filter(|x| x.direction == "Outbound")
.count(),
));
});
let _ = c.call_on_name("longest_work_peer", |t: &mut TextView| {
t.set_content(lp_str);
});
}
}
| to_column | identifier_name |
peers.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! TUI peer display
use std::cmp::Ordering;
use crate::servers::{PeerStats, ServerStats};
use chrono::prelude::*;
use humansize::{file_size_opts::CONVENTIONAL, FileSize};
use cursive::direction::Orientation;
use cursive::event::Key;
use cursive::traits::{Boxable, Identifiable};
use cursive::view::View;
use cursive::views::{Dialog, LinearLayout, OnEventView, ResizedView, TextView};
use cursive::Cursive;
use crate::tui::constants::{MAIN_MENU, TABLE_PEER_STATUS, VIEW_PEER_SYNC};
use crate::tui::types::TUIStatusListener;
use cursive_table_view::{TableView, TableViewItem};
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum PeerColumn {
Address,
State,
UsedBandwidth,
TotalDifficulty,
Direction,
Version,
UserAgent,
Capabilities,
}
impl PeerColumn {
fn _as_str(&self) -> &str {
match *self {
PeerColumn::Address => "Address",
PeerColumn::State => "State",
PeerColumn::UsedBandwidth => "Used bandwidth",
PeerColumn::Version => "Version",
PeerColumn::TotalDifficulty => "Total Difficulty",
PeerColumn::Direction => "Direction",
PeerColumn::UserAgent => "User Agent",
PeerColumn::Capabilities => "Capabilities",
}
}
}
impl TableViewItem<PeerColumn> for PeerStats {
fn to_column(&self, column: PeerColumn) -> String {
// Converts optional size to human readable size
fn size_to_string(size: u64) -> String {
size.file_size(CONVENTIONAL)
.unwrap_or_else(|_| "-".to_string())
}
match column {
PeerColumn::Address => self.addr.clone(),
PeerColumn::State => self.state.clone(),
PeerColumn::UsedBandwidth => format!(
"↑: {}, ↓: {}",
size_to_string(self.sent_bytes_per_sec),
size_to_string(self.received_bytes_per_sec),
),
PeerColumn::TotalDifficulty => format!(
"{} D @ {} H ({}s)",
self.total_difficulty,
self.height,
(Utc::now() - self.last_seen).num_seconds(),
),
PeerColumn::Direction => self.direction.clone(),
PeerColumn::Version => format!("{}", self.version),
PeerColumn::UserAgent => self.user_agent.clone(),
PeerColumn::Capabilities => format!("{}", self.capabilities.bits()),
}
}
fn cmp(&self, other: &Self, column: PeerColumn) -> Ordering
where
Self: Sized,
{
// Compares used bandwidth of two peers
fn cmp_used_bandwidth(curr: &PeerStats, other: &PeerStats) -> Ordering {
let curr_recv_bytes = curr.received_bytes_per_sec;
let curr_sent_bytes = curr.sent_bytes_per_sec;
let other_recv_bytes = other.received_bytes_per_sec;
let other_sent_bytes = other.sent_bytes_per_sec;
let curr_sum = curr_recv_bytes + curr_sent_bytes;
let other_sum = other_recv_bytes + other_sent_bytes;
curr_sum.cmp(&other_sum)
}
let sort_by_addr = || self.addr.cmp(&other.addr);
match column {
PeerColumn::Address => sort_by_addr(),
PeerColumn::State => self.state.cmp(&other.state).then(sort_by_addr()),
PeerColumn::UsedBandwidth => cmp_used_bandwidth(&self, &other).then(sort_by_addr()),
PeerColumn::TotalDifficulty => self
.total_difficulty
.cmp(&other.total_difficulty)
.then(sort_by_addr()),
PeerColumn::Direction => self.direction.cmp(&other.direction).then(sort_by_addr()),
PeerColumn::Version => self.version.cmp(&other.version).then(sort_by_addr()),
PeerColumn::UserAgent => self.user_agent.cmp(&other.user_agent).then(sort_by_addr()),
PeerColumn::Capabilities => self
.capabilities
.cmp(&other.capabilities)
.then(sort_by_addr()),
}
}
}
pub struct TUIPeerView;
impl TUIPeerView {
pub fn create() -> impl View {
| LinearLayout::new(Orientation::Horizontal)
.child(TextView::new("Longest Chain: "))
.child(TextView::new(" ").with_name("longest_work_peer")),
)
.child(TextView::new(" "))
.child(
Dialog::around(table_view.with_name(TABLE_PEER_STATUS).min_size((50, 20)))
.title("Connected Peers"),
),
)
.with_name(VIEW_PEER_SYNC);
let peer_status_view =
OnEventView::new(peer_status_view).on_pre_event(Key::Esc, move |c| {
let _ = c.focus_name(MAIN_MENU);
});
peer_status_view
}
}
impl TUIStatusListener for TUIPeerView {
fn update(c: &mut Cursive, stats: &ServerStats) {
let lp = stats
.peer_stats
.iter()
.max_by(|x, y| x.total_difficulty.cmp(&y.total_difficulty));
let lp_str = match lp {
Some(l) => format!(
"{} D @ {} H vs Us: {} D @ {} H",
l.total_difficulty,
l.height,
stats.chain_stats.total_difficulty,
stats.chain_stats.height
),
None => "".to_string(),
};
let _ = c.call_on_name(
TABLE_PEER_STATUS,
|t: &mut TableView<PeerStats, PeerColumn>| {
t.set_items_stable(stats.peer_stats.clone());
},
);
let _ = c.call_on_name("peers_total", |t: &mut TextView| {
t.set_content(format!(
"Total Peers: {} (Outbound: {})",
stats.peer_stats.len(),
stats
.peer_stats
.iter()
.filter(|x| x.direction == "Outbound")
.count(),
));
});
let _ = c.call_on_name("longest_work_peer", |t: &mut TextView| {
t.set_content(lp_str);
});
}
}
| let table_view = TableView::<PeerStats, PeerColumn>::new()
.column(PeerColumn::Address, "Address", |c| c.width_percent(16))
.column(PeerColumn::State, "State", |c| c.width_percent(8))
.column(PeerColumn::UsedBandwidth, "Used bandwidth", |c| {
c.width_percent(16)
})
.column(PeerColumn::Direction, "Direction", |c| c.width_percent(8))
.column(PeerColumn::TotalDifficulty, "Total Difficulty", |c| {
c.width_percent(24)
})
.column(PeerColumn::Version, "Proto", |c| c.width_percent(4))
.column(PeerColumn::Capabilities, "Capab", |c| c.width_percent(4))
.column(PeerColumn::UserAgent, "User Agent", |c| c.width_percent(18));
let peer_status_view = ResizedView::with_full_screen(
LinearLayout::new(Orientation::Vertical)
.child(
LinearLayout::new(Orientation::Horizontal)
.child(TextView::new(" ").with_name("peers_total")),
)
.child( | identifier_body |
monomorphized-callees-with-ty-params-3314.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(managed_boxes)];
trait Serializer {
}
trait Serializable {
fn serialize<S:Serializer>(&self, s: S);
}
impl Serializable for int {
fn serialize<S:Serializer>(&self, _s: S) { }
}
struct F<A> { a: A }
impl<A:Serializable> Serializable for F<A> {
fn | <S:Serializer>(&self, s: S) {
self.a.serialize(s);
}
}
impl Serializer for int {
}
pub fn main() {
let foo = F { a: 1 };
foo.serialize(1i);
let bar = F { a: F {a: 1 } };
bar.serialize(2i);
}
| serialize | identifier_name |
monomorphized-callees-with-ty-params-3314.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
#[feature(managed_boxes)];
trait Serializer {
}
trait Serializable {
fn serialize<S:Serializer>(&self, s: S);
}
impl Serializable for int {
fn serialize<S:Serializer>(&self, _s: S) { }
}
struct F<A> { a: A }
impl<A:Serializable> Serializable for F<A> {
fn serialize<S:Serializer>(&self, s: S) {
self.a.serialize(s);
}
}
impl Serializer for int {
}
pub fn main() {
let foo = F { a: 1 };
foo.serialize(1i);
let bar = F { a: F {a: 1 } };
bar.serialize(2i);
} | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | random_line_split |
monomorphized-callees-with-ty-params-3314.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(managed_boxes)];
trait Serializer {
}
trait Serializable {
fn serialize<S:Serializer>(&self, s: S);
}
impl Serializable for int {
fn serialize<S:Serializer>(&self, _s: S) { }
}
struct F<A> { a: A }
impl<A:Serializable> Serializable for F<A> {
fn serialize<S:Serializer>(&self, s: S) |
}
impl Serializer for int {
}
pub fn main() {
let foo = F { a: 1 };
foo.serialize(1i);
let bar = F { a: F {a: 1 } };
bar.serialize(2i);
}
| {
self.a.serialize(s);
} | identifier_body |
results.rs | use std::collections::{HashMap, HashSet};
use super::automata::State;
use rustc_serialize::{Encoder, Encodable};
use ::{Node, NodeIndex, Edge, GraphDB, Graph, PropVal};
use std::hash::{Hash, Hasher};
pub struct MatchResult<'a> {
parent_lookup: HashMap<NodeIndex, Vec<(NodeIndex, &'a Edge)>>,
finished_nodes: HashSet<NodeIndex>,
ref_graph: &'a GraphDB
}
#[derive(Clone, Debug, PartialEq)]
pub struct EdgeNode {
edge: Edge,
node: Node
}
#[derive(Clone, Debug, PartialEq)]
pub struct ResultRow {
head: Node,
tail: Vec<EdgeNode>
}
#[derive(Debug, PartialEq)]
pub struct ResultSet {
rows: Vec<ResultRow>
}
#[derive(Debug)]
pub struct DAG{
roots: HashSet<NodeIndex>,
nodes: HashMap<NodeIndex, DAGNode>
}
#[derive(Debug)]
pub struct DAGNode {
node: Node,
connected_to: HashMap<NodeIndex, Edge>,
}
impl<'a> MatchResult<'a> {
pub fn new(parent_lookup: HashMap<NodeIndex, Vec<(NodeIndex, &'a Edge)>>, finished_nodes: HashSet<State>, graph: &'a GraphDB) -> MatchResult<'a> {
MatchResult {
parent_lookup: parent_lookup,
finished_nodes: finished_nodes,
ref_graph: graph
}
}
pub fn to_result_set(&self) -> ResultSet {
let mut rows = Vec::new();
for node in self.finished_nodes.iter() {
rows.push(ResultRow{
head: self.ref_graph.get_node(*node).unwrap().clone(),
tail: Vec::new()
});
let index = rows.len() - 1;
self.add_node_to_row(*node, &mut rows, index);
}
ResultSet {
rows: self.reverse_rows(rows)
}
}
fn add_node_to_row(&self, node: NodeIndex, rows: &mut Vec<ResultRow>, row_index: usize) {
let mut first = true;
for &(parent, edge) in self.parent_lookup.get(&node).unwrap() {
let index = if first {
first = false;
row_index
} else {
let new_row = rows[row_index].clone();
rows.push(new_row);
rows.len() - 1
};
rows[index].tail.push(EdgeNode {
edge: edge.clone(),
node: self.ref_graph.get_node(parent).unwrap().clone() | });
self.add_node_to_row(parent, rows, index);
}
}
fn reverse_rows(&self, rows: Vec<ResultRow>) -> Vec<ResultRow> {
let mut new_rows = Vec::new();
for row in rows {
if row.tail.len() == 0 {
new_rows.push(row)
} else {
let mut hanging_edge = None;
let mut first = true;
let mut new_row = ResultRow {
// XXX This is an extra copy that we don't technically need, but rust complains
// if we move the value
head: row.tail.get(row.tail.len() - 1).unwrap().node.clone(),
tail: Vec::new()
};
for edge_node in row.tail.into_iter().rev() {
if first {
first = false;
} else {
new_row.tail.push(
EdgeNode {
node: edge_node.node,
edge: hanging_edge.unwrap()
}
);
}
hanging_edge = Some(edge_node.edge);
}
new_row.tail.push(EdgeNode {
node: row.head,
edge: hanging_edge.unwrap()
});
new_rows.push(new_row);
}
}
new_rows
}
pub fn to_dag(&self) -> DAG {
let mut nodes = HashMap::new();
let mut roots = HashSet::new();
for node in self.finished_nodes.iter() {
self.add_node_rec(*node, None, &mut nodes, &mut roots);
}
DAG {
nodes: nodes,
roots: roots
}
}
fn add_node_rec(&self, node: NodeIndex, child: Option<(NodeIndex, &'a Edge)>, nodes: &mut HashMap<NodeIndex, DAGNode>, roots: &mut HashSet<NodeIndex>) {
{
let mut dag_node = nodes.entry(node).or_insert(DAGNode{node: self.ref_graph.get_node(node).unwrap().clone(), connected_to: HashMap::new()});
if let Some(child) = child {
if!dag_node.connected_to.contains_key(&child.0) {
dag_node.connected_to.insert(child.0, child.1.clone());
} else {
return
}
}
}
let lookup = self.parent_lookup.get(&node).unwrap();
if lookup.is_empty() {
roots.insert(node);
} else {
for &(parent, edge) in lookup {
self.add_node_rec(parent, Some((node, edge)), nodes, roots)
}
}
}
}
struct Link {
source: NodeIndex,
target: NodeIndex
}
impl Encodable for DAG {
fn encode<E:Encoder>(&self, encoder: &mut E) -> Result<(), E::Error> {
let mut node_array = Vec::new();
let mut link_array = Vec::new();
let mut id_lookup = HashMap::new();
for (index, (id, node)) in self.nodes.iter().enumerate() {
node_array.push(&node.node);
id_lookup.insert(id, index);
}
for (id, node) in self.nodes.iter() {
let id = id_lookup.get(id).unwrap();
for (target, _) in node.connected_to.iter() {
link_array.push(Link{source: id.clone(), target: id_lookup[target].clone()})
}
}
encoder.emit_struct("root", 2, |encoder| {
try!(encoder.emit_struct_field("nodes", 0, |encoder| {
node_array.encode(encoder)
}));
encoder.emit_struct_field("links", 1, |encoder| {
link_array.encode(encoder)
})
})
}
}
impl Encodable for Link {
fn encode<E:Encoder>(&self, encoder: &mut E) -> Result<(), E::Error> {
encoder.emit_struct("root", 2, |encoder| {
try!(encoder.emit_struct_field("source", 0, |encoder| {
encoder.emit_usize(self.source)
}));
encoder.emit_struct_field("target", 1, |encoder| {
encoder.emit_usize(self.target)
})
})
}
}
impl PartialEq<DAGNode> for DAGNode {
fn eq(&self, other: &DAGNode) -> bool {
self.node == other.node
}
}
impl Hash for DAGNode {
fn hash<H>(&self, state: &mut H) where H: Hasher {
self.node.hash(state)
}
}
impl Graph for DAG {
fn get_node(&self, node_id: NodeIndex) -> Option<&Node> {
match self.nodes.get(&node_id) {
Some(ref dag_node) => Some(&dag_node.node),
None => None
}
}
/// Get Nodes with a key-value pair
fn nodes_with_prop(&self, key: &str, value: &PropVal) -> Vec<NodeIndex> {
self.nodes.values().filter(|node|
node.node.props.iter().find(|&(k, v)| **k == *key && *v == *value ).is_some())
.map(|node|
node.node.id
).collect()
}
fn are_connected(&mut self, origin: NodeIndex, destination: NodeIndex) -> bool {
match self.nodes.get(&origin) {
Some(ref node) => node.connected_to.contains_key(&destination),
None => false
}
}
fn edges_from(&self, source: NodeIndex) -> &HashMap<NodeIndex, Edge> {
&self.nodes.get(&source).unwrap().connected_to
}
fn edges_with_label(&self, label: &str) -> HashSet<(NodeIndex, NodeIndex)> {
self.nodes.iter().flat_map(|(src, node)|
node.connected_to.iter().filter(|&(_idx, edge)|
edge.labels.contains(label)
).map(move |(idx, _edge)| (*src, *idx))
).collect()
}
fn edges_with_label_from(&self, source: NodeIndex, label: &str) -> Vec<NodeIndex> {
if let Some(node) = self.nodes.get(&source) {
node.connected_to.iter().filter_map(
|(&idx, edge)|{
if edge.labels.contains(label) {
Some(idx)
} else {
None
}}).collect()
} else {
vec![]
}
}
}
#[cfg(test)]
mod test {
use ::{GraphDB, Edge, NodeIndex, Graph};
use super::{ResultRow, EdgeNode};
#[test]
fn result_set_rust_influenced_by() {
fn check_row(rows: &Vec<ResultRow>, g: &GraphDB, end_node: NodeIndex) {
let my_row = ResultRow {
head: g.get_node(112).unwrap().clone(),
tail: vec![EdgeNode{
node: g.get_node(end_node).unwrap().clone(),
edge: Edge {
labels: vec!["influencedBy".to_string().into_boxed_str()].into_iter().collect()
},
}]
};
for row in rows {
if &my_row == row {
return
}
}
panic!("Could not find row {:?} in rows {:?}", my_row, rows);
}
let g = GraphDB::read_from_file("data/langs.graph").unwrap();
let results = g.match_paths("(112) -influencedBy> ()").unwrap().to_result_set();
assert_eq!(5, results.rows.len());
check_row(&results.rows, &g, 212);
check_row(&results.rows, &g, 116);
check_row(&results.rows, &g, 143);
check_row(&results.rows, &g, 245);
check_row(&results.rows, &g, 179);
}
} | random_line_split |
|
results.rs | use std::collections::{HashMap, HashSet};
use super::automata::State;
use rustc_serialize::{Encoder, Encodable};
use ::{Node, NodeIndex, Edge, GraphDB, Graph, PropVal};
use std::hash::{Hash, Hasher};
pub struct MatchResult<'a> {
parent_lookup: HashMap<NodeIndex, Vec<(NodeIndex, &'a Edge)>>,
finished_nodes: HashSet<NodeIndex>,
ref_graph: &'a GraphDB
}
#[derive(Clone, Debug, PartialEq)]
pub struct EdgeNode {
edge: Edge,
node: Node
}
#[derive(Clone, Debug, PartialEq)]
pub struct ResultRow {
head: Node,
tail: Vec<EdgeNode>
}
#[derive(Debug, PartialEq)]
pub struct ResultSet {
rows: Vec<ResultRow>
}
#[derive(Debug)]
pub struct DAG{
roots: HashSet<NodeIndex>,
nodes: HashMap<NodeIndex, DAGNode>
}
#[derive(Debug)]
pub struct DAGNode {
node: Node,
connected_to: HashMap<NodeIndex, Edge>,
}
impl<'a> MatchResult<'a> {
pub fn new(parent_lookup: HashMap<NodeIndex, Vec<(NodeIndex, &'a Edge)>>, finished_nodes: HashSet<State>, graph: &'a GraphDB) -> MatchResult<'a> {
MatchResult {
parent_lookup: parent_lookup,
finished_nodes: finished_nodes,
ref_graph: graph
}
}
pub fn to_result_set(&self) -> ResultSet {
let mut rows = Vec::new();
for node in self.finished_nodes.iter() {
rows.push(ResultRow{
head: self.ref_graph.get_node(*node).unwrap().clone(),
tail: Vec::new()
});
let index = rows.len() - 1;
self.add_node_to_row(*node, &mut rows, index);
}
ResultSet {
rows: self.reverse_rows(rows)
}
}
fn add_node_to_row(&self, node: NodeIndex, rows: &mut Vec<ResultRow>, row_index: usize) {
let mut first = true;
for &(parent, edge) in self.parent_lookup.get(&node).unwrap() {
let index = if first {
first = false;
row_index
} else {
let new_row = rows[row_index].clone();
rows.push(new_row);
rows.len() - 1
};
rows[index].tail.push(EdgeNode {
edge: edge.clone(),
node: self.ref_graph.get_node(parent).unwrap().clone()
});
self.add_node_to_row(parent, rows, index);
}
}
fn reverse_rows(&self, rows: Vec<ResultRow>) -> Vec<ResultRow> {
let mut new_rows = Vec::new();
for row in rows {
if row.tail.len() == 0 {
new_rows.push(row)
} else {
let mut hanging_edge = None;
let mut first = true;
let mut new_row = ResultRow {
// XXX This is an extra copy that we don't technically need, but rust complains
// if we move the value
head: row.tail.get(row.tail.len() - 1).unwrap().node.clone(),
tail: Vec::new()
};
for edge_node in row.tail.into_iter().rev() {
if first {
first = false;
} else {
new_row.tail.push(
EdgeNode {
node: edge_node.node,
edge: hanging_edge.unwrap()
}
);
}
hanging_edge = Some(edge_node.edge);
}
new_row.tail.push(EdgeNode {
node: row.head,
edge: hanging_edge.unwrap()
});
new_rows.push(new_row);
}
}
new_rows
}
pub fn to_dag(&self) -> DAG {
let mut nodes = HashMap::new();
let mut roots = HashSet::new();
for node in self.finished_nodes.iter() {
self.add_node_rec(*node, None, &mut nodes, &mut roots);
}
DAG {
nodes: nodes,
roots: roots
}
}
fn add_node_rec(&self, node: NodeIndex, child: Option<(NodeIndex, &'a Edge)>, nodes: &mut HashMap<NodeIndex, DAGNode>, roots: &mut HashSet<NodeIndex>) {
{
let mut dag_node = nodes.entry(node).or_insert(DAGNode{node: self.ref_graph.get_node(node).unwrap().clone(), connected_to: HashMap::new()});
if let Some(child) = child {
if!dag_node.connected_to.contains_key(&child.0) {
dag_node.connected_to.insert(child.0, child.1.clone());
} else {
return
}
}
}
let lookup = self.parent_lookup.get(&node).unwrap();
if lookup.is_empty() {
roots.insert(node);
} else {
for &(parent, edge) in lookup {
self.add_node_rec(parent, Some((node, edge)), nodes, roots)
}
}
}
}
struct Link {
source: NodeIndex,
target: NodeIndex
}
impl Encodable for DAG {
fn encode<E:Encoder>(&self, encoder: &mut E) -> Result<(), E::Error> {
let mut node_array = Vec::new();
let mut link_array = Vec::new();
let mut id_lookup = HashMap::new();
for (index, (id, node)) in self.nodes.iter().enumerate() {
node_array.push(&node.node);
id_lookup.insert(id, index);
}
for (id, node) in self.nodes.iter() {
let id = id_lookup.get(id).unwrap();
for (target, _) in node.connected_to.iter() {
link_array.push(Link{source: id.clone(), target: id_lookup[target].clone()})
}
}
encoder.emit_struct("root", 2, |encoder| {
try!(encoder.emit_struct_field("nodes", 0, |encoder| {
node_array.encode(encoder)
}));
encoder.emit_struct_field("links", 1, |encoder| {
link_array.encode(encoder)
})
})
}
}
impl Encodable for Link {
fn encode<E:Encoder>(&self, encoder: &mut E) -> Result<(), E::Error> {
encoder.emit_struct("root", 2, |encoder| {
try!(encoder.emit_struct_field("source", 0, |encoder| {
encoder.emit_usize(self.source)
}));
encoder.emit_struct_field("target", 1, |encoder| {
encoder.emit_usize(self.target)
})
})
}
}
impl PartialEq<DAGNode> for DAGNode {
fn eq(&self, other: &DAGNode) -> bool {
self.node == other.node
}
}
impl Hash for DAGNode {
fn hash<H>(&self, state: &mut H) where H: Hasher {
self.node.hash(state)
}
}
impl Graph for DAG {
fn get_node(&self, node_id: NodeIndex) -> Option<&Node> {
match self.nodes.get(&node_id) {
Some(ref dag_node) => Some(&dag_node.node),
None => None
}
}
/// Get Nodes with a key-value pair
fn nodes_with_prop(&self, key: &str, value: &PropVal) -> Vec<NodeIndex> {
self.nodes.values().filter(|node|
node.node.props.iter().find(|&(k, v)| **k == *key && *v == *value ).is_some())
.map(|node|
node.node.id
).collect()
}
fn are_connected(&mut self, origin: NodeIndex, destination: NodeIndex) -> bool {
match self.nodes.get(&origin) {
Some(ref node) => node.connected_to.contains_key(&destination),
None => false
}
}
fn edges_from(&self, source: NodeIndex) -> &HashMap<NodeIndex, Edge> {
&self.nodes.get(&source).unwrap().connected_to
}
fn edges_with_label(&self, label: &str) -> HashSet<(NodeIndex, NodeIndex)> {
self.nodes.iter().flat_map(|(src, node)|
node.connected_to.iter().filter(|&(_idx, edge)|
edge.labels.contains(label)
).map(move |(idx, _edge)| (*src, *idx))
).collect()
}
fn edges_with_label_from(&self, source: NodeIndex, label: &str) -> Vec<NodeIndex> {
if let Some(node) = self.nodes.get(&source) {
node.connected_to.iter().filter_map(
|(&idx, edge)|{
if edge.labels.contains(label) {
Some(idx)
} else | }).collect()
} else {
vec![]
}
}
}
#[cfg(test)]
mod test {
use ::{GraphDB, Edge, NodeIndex, Graph};
use super::{ResultRow, EdgeNode};
#[test]
fn result_set_rust_influenced_by() {
fn check_row(rows: &Vec<ResultRow>, g: &GraphDB, end_node: NodeIndex) {
let my_row = ResultRow {
head: g.get_node(112).unwrap().clone(),
tail: vec![EdgeNode{
node: g.get_node(end_node).unwrap().clone(),
edge: Edge {
labels: vec!["influencedBy".to_string().into_boxed_str()].into_iter().collect()
},
}]
};
for row in rows {
if &my_row == row {
return
}
}
panic!("Could not find row {:?} in rows {:?}", my_row, rows);
}
let g = GraphDB::read_from_file("data/langs.graph").unwrap();
let results = g.match_paths("(112) -influencedBy> ()").unwrap().to_result_set();
assert_eq!(5, results.rows.len());
check_row(&results.rows, &g, 212);
check_row(&results.rows, &g, 116);
check_row(&results.rows, &g, 143);
check_row(&results.rows, &g, 245);
check_row(&results.rows, &g, 179);
}
}
| {
None
} | conditional_block |
results.rs | use std::collections::{HashMap, HashSet};
use super::automata::State;
use rustc_serialize::{Encoder, Encodable};
use ::{Node, NodeIndex, Edge, GraphDB, Graph, PropVal};
use std::hash::{Hash, Hasher};
pub struct MatchResult<'a> {
parent_lookup: HashMap<NodeIndex, Vec<(NodeIndex, &'a Edge)>>,
finished_nodes: HashSet<NodeIndex>,
ref_graph: &'a GraphDB
}
#[derive(Clone, Debug, PartialEq)]
pub struct EdgeNode {
edge: Edge,
node: Node
}
#[derive(Clone, Debug, PartialEq)]
pub struct ResultRow {
head: Node,
tail: Vec<EdgeNode>
}
#[derive(Debug, PartialEq)]
pub struct ResultSet {
rows: Vec<ResultRow>
}
#[derive(Debug)]
pub struct DAG{
roots: HashSet<NodeIndex>,
nodes: HashMap<NodeIndex, DAGNode>
}
#[derive(Debug)]
pub struct | {
node: Node,
connected_to: HashMap<NodeIndex, Edge>,
}
impl<'a> MatchResult<'a> {
pub fn new(parent_lookup: HashMap<NodeIndex, Vec<(NodeIndex, &'a Edge)>>, finished_nodes: HashSet<State>, graph: &'a GraphDB) -> MatchResult<'a> {
MatchResult {
parent_lookup: parent_lookup,
finished_nodes: finished_nodes,
ref_graph: graph
}
}
pub fn to_result_set(&self) -> ResultSet {
let mut rows = Vec::new();
for node in self.finished_nodes.iter() {
rows.push(ResultRow{
head: self.ref_graph.get_node(*node).unwrap().clone(),
tail: Vec::new()
});
let index = rows.len() - 1;
self.add_node_to_row(*node, &mut rows, index);
}
ResultSet {
rows: self.reverse_rows(rows)
}
}
fn add_node_to_row(&self, node: NodeIndex, rows: &mut Vec<ResultRow>, row_index: usize) {
let mut first = true;
for &(parent, edge) in self.parent_lookup.get(&node).unwrap() {
let index = if first {
first = false;
row_index
} else {
let new_row = rows[row_index].clone();
rows.push(new_row);
rows.len() - 1
};
rows[index].tail.push(EdgeNode {
edge: edge.clone(),
node: self.ref_graph.get_node(parent).unwrap().clone()
});
self.add_node_to_row(parent, rows, index);
}
}
fn reverse_rows(&self, rows: Vec<ResultRow>) -> Vec<ResultRow> {
let mut new_rows = Vec::new();
for row in rows {
if row.tail.len() == 0 {
new_rows.push(row)
} else {
let mut hanging_edge = None;
let mut first = true;
let mut new_row = ResultRow {
// XXX This is an extra copy that we don't technically need, but rust complains
// if we move the value
head: row.tail.get(row.tail.len() - 1).unwrap().node.clone(),
tail: Vec::new()
};
for edge_node in row.tail.into_iter().rev() {
if first {
first = false;
} else {
new_row.tail.push(
EdgeNode {
node: edge_node.node,
edge: hanging_edge.unwrap()
}
);
}
hanging_edge = Some(edge_node.edge);
}
new_row.tail.push(EdgeNode {
node: row.head,
edge: hanging_edge.unwrap()
});
new_rows.push(new_row);
}
}
new_rows
}
pub fn to_dag(&self) -> DAG {
let mut nodes = HashMap::new();
let mut roots = HashSet::new();
for node in self.finished_nodes.iter() {
self.add_node_rec(*node, None, &mut nodes, &mut roots);
}
DAG {
nodes: nodes,
roots: roots
}
}
fn add_node_rec(&self, node: NodeIndex, child: Option<(NodeIndex, &'a Edge)>, nodes: &mut HashMap<NodeIndex, DAGNode>, roots: &mut HashSet<NodeIndex>) {
{
let mut dag_node = nodes.entry(node).or_insert(DAGNode{node: self.ref_graph.get_node(node).unwrap().clone(), connected_to: HashMap::new()});
if let Some(child) = child {
if!dag_node.connected_to.contains_key(&child.0) {
dag_node.connected_to.insert(child.0, child.1.clone());
} else {
return
}
}
}
let lookup = self.parent_lookup.get(&node).unwrap();
if lookup.is_empty() {
roots.insert(node);
} else {
for &(parent, edge) in lookup {
self.add_node_rec(parent, Some((node, edge)), nodes, roots)
}
}
}
}
struct Link {
source: NodeIndex,
target: NodeIndex
}
impl Encodable for DAG {
fn encode<E:Encoder>(&self, encoder: &mut E) -> Result<(), E::Error> {
let mut node_array = Vec::new();
let mut link_array = Vec::new();
let mut id_lookup = HashMap::new();
for (index, (id, node)) in self.nodes.iter().enumerate() {
node_array.push(&node.node);
id_lookup.insert(id, index);
}
for (id, node) in self.nodes.iter() {
let id = id_lookup.get(id).unwrap();
for (target, _) in node.connected_to.iter() {
link_array.push(Link{source: id.clone(), target: id_lookup[target].clone()})
}
}
encoder.emit_struct("root", 2, |encoder| {
try!(encoder.emit_struct_field("nodes", 0, |encoder| {
node_array.encode(encoder)
}));
encoder.emit_struct_field("links", 1, |encoder| {
link_array.encode(encoder)
})
})
}
}
impl Encodable for Link {
fn encode<E:Encoder>(&self, encoder: &mut E) -> Result<(), E::Error> {
encoder.emit_struct("root", 2, |encoder| {
try!(encoder.emit_struct_field("source", 0, |encoder| {
encoder.emit_usize(self.source)
}));
encoder.emit_struct_field("target", 1, |encoder| {
encoder.emit_usize(self.target)
})
})
}
}
impl PartialEq<DAGNode> for DAGNode {
fn eq(&self, other: &DAGNode) -> bool {
self.node == other.node
}
}
impl Hash for DAGNode {
fn hash<H>(&self, state: &mut H) where H: Hasher {
self.node.hash(state)
}
}
impl Graph for DAG {
fn get_node(&self, node_id: NodeIndex) -> Option<&Node> {
match self.nodes.get(&node_id) {
Some(ref dag_node) => Some(&dag_node.node),
None => None
}
}
/// Get Nodes with a key-value pair
fn nodes_with_prop(&self, key: &str, value: &PropVal) -> Vec<NodeIndex> {
self.nodes.values().filter(|node|
node.node.props.iter().find(|&(k, v)| **k == *key && *v == *value ).is_some())
.map(|node|
node.node.id
).collect()
}
fn are_connected(&mut self, origin: NodeIndex, destination: NodeIndex) -> bool {
match self.nodes.get(&origin) {
Some(ref node) => node.connected_to.contains_key(&destination),
None => false
}
}
fn edges_from(&self, source: NodeIndex) -> &HashMap<NodeIndex, Edge> {
&self.nodes.get(&source).unwrap().connected_to
}
fn edges_with_label(&self, label: &str) -> HashSet<(NodeIndex, NodeIndex)> {
self.nodes.iter().flat_map(|(src, node)|
node.connected_to.iter().filter(|&(_idx, edge)|
edge.labels.contains(label)
).map(move |(idx, _edge)| (*src, *idx))
).collect()
}
fn edges_with_label_from(&self, source: NodeIndex, label: &str) -> Vec<NodeIndex> {
if let Some(node) = self.nodes.get(&source) {
node.connected_to.iter().filter_map(
|(&idx, edge)|{
if edge.labels.contains(label) {
Some(idx)
} else {
None
}}).collect()
} else {
vec![]
}
}
}
#[cfg(test)]
mod test {
use ::{GraphDB, Edge, NodeIndex, Graph};
use super::{ResultRow, EdgeNode};
#[test]
fn result_set_rust_influenced_by() {
fn check_row(rows: &Vec<ResultRow>, g: &GraphDB, end_node: NodeIndex) {
let my_row = ResultRow {
head: g.get_node(112).unwrap().clone(),
tail: vec![EdgeNode{
node: g.get_node(end_node).unwrap().clone(),
edge: Edge {
labels: vec!["influencedBy".to_string().into_boxed_str()].into_iter().collect()
},
}]
};
for row in rows {
if &my_row == row {
return
}
}
panic!("Could not find row {:?} in rows {:?}", my_row, rows);
}
let g = GraphDB::read_from_file("data/langs.graph").unwrap();
let results = g.match_paths("(112) -influencedBy> ()").unwrap().to_result_set();
assert_eq!(5, results.rows.len());
check_row(&results.rows, &g, 212);
check_row(&results.rows, &g, 116);
check_row(&results.rows, &g, 143);
check_row(&results.rows, &g, 245);
check_row(&results.rows, &g, 179);
}
}
| DAGNode | identifier_name |
lib.rs | // VST2 plugin for Boucle.
//
// Following: https://vaporsoft.net/creating-an-audio-plugin-with-rust-vst/
// Docs: https://docs.rs/vst/0.2.1/vst/
#[cfg(feature = "vst")]
pub mod boucle_vst {
#[macro_use]
extern crate vst;
use vst::api::Events;
use vst::buffer::AudioBuffer;
use vst::event::Event;
use vst::plugin::{Category, Info, Plugin};
#[derive(Default)]
struct BoucleVst;
type VstSample = f32;
impl Plugin for BoucleVst {
fn get_info(&self) -> Info |
fn process_events(&mut self, events: &Events) {
for event in events.events() {
match event {
Event::Midi(ev) => {
println!("Got MIDI event: {}.", ev.data[0]);
},
_ => (),
}
}
}
fn process(&mut self, buffer: &mut AudioBuffer<VstSample>) {
let (_input_buffer, mut output_buffer) = buffer.split();
for output_channel in output_buffer.into_iter() {
for output_sample in output_channel {
*output_sample = 0f32;
}
}
}
}
plugin_main!(BoucleVst);
}
| {
Info {
name: "Boucle".to_string(),
vendor: "Medium Length Life".to_string(),
unique_id: 42,
inputs: 2,
outputs: 2,
version: 1,
category: Category::Effect,
..Default::default()
}
} | identifier_body |
lib.rs | // VST2 plugin for Boucle.
//
// Following: https://vaporsoft.net/creating-an-audio-plugin-with-rust-vst/
// Docs: https://docs.rs/vst/0.2.1/vst/
#[cfg(feature = "vst")]
pub mod boucle_vst {
#[macro_use]
extern crate vst;
use vst::api::Events;
use vst::buffer::AudioBuffer;
use vst::event::Event;
use vst::plugin::{Category, Info, Plugin};
#[derive(Default)]
struct | ;
type VstSample = f32;
impl Plugin for BoucleVst {
fn get_info(&self) -> Info {
Info {
name: "Boucle".to_string(),
vendor: "Medium Length Life".to_string(),
unique_id: 42,
inputs: 2,
outputs: 2,
version: 1,
category: Category::Effect,
..Default::default()
}
}
fn process_events(&mut self, events: &Events) {
for event in events.events() {
match event {
Event::Midi(ev) => {
println!("Got MIDI event: {}.", ev.data[0]);
},
_ => (),
}
}
}
fn process(&mut self, buffer: &mut AudioBuffer<VstSample>) {
let (_input_buffer, mut output_buffer) = buffer.split();
for output_channel in output_buffer.into_iter() {
for output_sample in output_channel {
*output_sample = 0f32;
}
}
}
}
plugin_main!(BoucleVst);
}
| BoucleVst | identifier_name |
lib.rs | // VST2 plugin for Boucle.
//
// Following: https://vaporsoft.net/creating-an-audio-plugin-with-rust-vst/
// Docs: https://docs.rs/vst/0.2.1/vst/
#[cfg(feature = "vst")]
pub mod boucle_vst {
#[macro_use]
extern crate vst;
use vst::api::Events;
use vst::buffer::AudioBuffer;
use vst::event::Event;
use vst::plugin::{Category, Info, Plugin};
#[derive(Default)]
struct BoucleVst;
type VstSample = f32;
impl Plugin for BoucleVst {
fn get_info(&self) -> Info {
Info {
name: "Boucle".to_string(),
vendor: "Medium Length Life".to_string(),
unique_id: 42,
inputs: 2,
outputs: 2,
version: 1,
category: Category::Effect,
..Default::default()
}
}
fn process_events(&mut self, events: &Events) {
for event in events.events() {
match event {
Event::Midi(ev) => {
println!("Got MIDI event: {}.", ev.data[0]);
},
_ => (), | let (_input_buffer, mut output_buffer) = buffer.split();
for output_channel in output_buffer.into_iter() {
for output_sample in output_channel {
*output_sample = 0f32;
}
}
}
}
plugin_main!(BoucleVst);
} | }
}
}
fn process(&mut self, buffer: &mut AudioBuffer<VstSample>) { | random_line_split |
parsers.rs | use regex;
use models;
lazy_static! {
static ref CARD_UPDATE_PATTERN: regex::Regex = regex::Regex::new(
r"^.*id=(?P<id>\d*).*cardId=(?P<card_id>[a-zA-Z0-9_]*).*player=(?P<player>\d*)")
.unwrap();
static ref GAME_COMPLETE_PATTERN: regex::Regex = regex::Regex::new(
r".*TAG_CHANGE Entity=GameEntity tag=STATE value=COMPLETE.*")
.unwrap();
}
#[derive(Debug)]
pub enum LogEvent {
GameComplete,
PowerLogRecreated,
Play(models::Play),
}
pub fn parse_log_line(line: &str) -> Option<LogEvent> {
if GAME_COMPLETE_PATTERN.is_match(line) |
CARD_UPDATE_PATTERN
.captures(line)
.and_then(|group| {
let id = group.name("id").map(|m| m.as_str());
let card_id = group.name("card_id").map(|m| m.as_str());
let player = group.name("player").map(|m| m.as_str());
match (id, card_id, player) {
(Some(id), Some(card_id), Some(player)) if card_id!= "" => {
Some(LogEvent::Play(models::Play {
id: id.to_string(),
card_id: card_id.to_string(),
player: player.to_string(),
}))
}
_ => None,
}
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_game_complete() {
let log_line = r"D 15:36:19.5943367 PowerTaskList.DebugPrintPower() - TAG_CHANGE Entity=GameEntity tag=STATE value=COMPLETE";
assert!(GAME_COMPLETE_PATTERN.is_match(log_line));
}
#[test]
fn test_card_update() {
let log_line = r"D 14:50:27.0664788 GameState.DebugPrintEntityChoices() - Entities[4]=[name=La pièce id=68 zone=HAND zonePos=5 cardId=GAME_005 player=1]";
assert!(CARD_UPDATE_PATTERN.is_match(log_line));
}
}
| {
return Some(LogEvent::GameComplete);
} | conditional_block |
parsers.rs | use regex;
use models;
lazy_static! {
static ref CARD_UPDATE_PATTERN: regex::Regex = regex::Regex::new(
r"^.*id=(?P<id>\d*).*cardId=(?P<card_id>[a-zA-Z0-9_]*).*player=(?P<player>\d*)")
.unwrap();
static ref GAME_COMPLETE_PATTERN: regex::Regex = regex::Regex::new(
r".*TAG_CHANGE Entity=GameEntity tag=STATE value=COMPLETE.*")
.unwrap();
}
#[derive(Debug)]
pub enum LogEvent {
GameComplete,
PowerLogRecreated,
Play(models::Play),
}
pub fn parse_log_line(line: &str) -> Option<LogEvent> {
if GAME_COMPLETE_PATTERN.is_match(line) {
return Some(LogEvent::GameComplete);
}
CARD_UPDATE_PATTERN
.captures(line)
.and_then(|group| {
let id = group.name("id").map(|m| m.as_str());
let card_id = group.name("card_id").map(|m| m.as_str());
let player = group.name("player").map(|m| m.as_str());
match (id, card_id, player) {
(Some(id), Some(card_id), Some(player)) if card_id!= "" => {
Some(LogEvent::Play(models::Play {
id: id.to_string(),
card_id: card_id.to_string(),
player: player.to_string(),
}))
}
_ => None,
}
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_game_complete() {
let log_line = r"D 15:36:19.5943367 PowerTaskList.DebugPrintPower() - TAG_CHANGE Entity=GameEntity tag=STATE value=COMPLETE";
assert!(GAME_COMPLETE_PATTERN.is_match(log_line));
}
#[test]
fn test_card_update() {
let log_line = r"D 14:50:27.0664788 GameState.DebugPrintEntityChoices() - Entities[4]=[name=La pièce id=68 zone=HAND zonePos=5 cardId=GAME_005 player=1]"; | assert!(CARD_UPDATE_PATTERN.is_match(log_line));
}
} | random_line_split |
|
parsers.rs | use regex;
use models;
lazy_static! {
static ref CARD_UPDATE_PATTERN: regex::Regex = regex::Regex::new(
r"^.*id=(?P<id>\d*).*cardId=(?P<card_id>[a-zA-Z0-9_]*).*player=(?P<player>\d*)")
.unwrap();
static ref GAME_COMPLETE_PATTERN: regex::Regex = regex::Regex::new(
r".*TAG_CHANGE Entity=GameEntity tag=STATE value=COMPLETE.*")
.unwrap();
}
#[derive(Debug)]
pub enum | {
GameComplete,
PowerLogRecreated,
Play(models::Play),
}
pub fn parse_log_line(line: &str) -> Option<LogEvent> {
if GAME_COMPLETE_PATTERN.is_match(line) {
return Some(LogEvent::GameComplete);
}
CARD_UPDATE_PATTERN
.captures(line)
.and_then(|group| {
let id = group.name("id").map(|m| m.as_str());
let card_id = group.name("card_id").map(|m| m.as_str());
let player = group.name("player").map(|m| m.as_str());
match (id, card_id, player) {
(Some(id), Some(card_id), Some(player)) if card_id!= "" => {
Some(LogEvent::Play(models::Play {
id: id.to_string(),
card_id: card_id.to_string(),
player: player.to_string(),
}))
}
_ => None,
}
})
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_game_complete() {
let log_line = r"D 15:36:19.5943367 PowerTaskList.DebugPrintPower() - TAG_CHANGE Entity=GameEntity tag=STATE value=COMPLETE";
assert!(GAME_COMPLETE_PATTERN.is_match(log_line));
}
#[test]
fn test_card_update() {
let log_line = r"D 14:50:27.0664788 GameState.DebugPrintEntityChoices() - Entities[4]=[name=La pièce id=68 zone=HAND zonePos=5 cardId=GAME_005 player=1]";
assert!(CARD_UPDATE_PATTERN.is_match(log_line));
}
}
| LogEvent | identifier_name |
monomorphize.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::link::exported_name;
use session;
use llvm::ValueRef;
use llvm;
use middle::infer;
use middle::subst;
use middle::subst::{Subst, Substs};
use middle::traits;
use middle::ty_fold::{TypeFolder, TypeFoldable};
use rustc::ast_map;
use trans::attributes;
use trans::base::{trans_enum_variant, push_ctxt, get_item_val};
use trans::base::trans_fn;
use trans::base;
use trans::common::*;
use trans::declare;
use trans::foreign;
use middle::ty::{self, HasTypeFlags, Ty};
use syntax::abi; | use syntax::ast_util::local_def;
use syntax::attr;
use syntax::codemap::DUMMY_SP;
use std::hash::{Hasher, Hash, SipHasher};
pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn_id: ast::DefId,
psubsts: &'tcx subst::Substs<'tcx>,
ref_id: Option<ast::NodeId>)
-> (ValueRef, Ty<'tcx>, bool) {
debug!("monomorphic_fn(\
fn_id={:?}, \
real_substs={:?}, \
ref_id={:?})",
fn_id,
psubsts,
ref_id);
assert!(!psubsts.types.needs_infer() &&!psubsts.types.has_param_types());
let _icx = push_ctxt("monomorphic_fn");
let hash_id = MonoId {
def: fn_id,
params: &psubsts.types
};
let item_ty = ccx.tcx().lookup_item_type(fn_id).ty;
debug!("monomorphic_fn about to subst into {:?}", item_ty);
let mono_ty = item_ty.subst(ccx.tcx(), psubsts);
match ccx.monomorphized().borrow().get(&hash_id) {
Some(&val) => {
debug!("leaving monomorphic fn {}",
ccx.tcx().item_path_str(fn_id));
return (val, mono_ty, false);
}
None => ()
}
debug!("monomorphic_fn(\
fn_id={:?}, \
psubsts={:?}, \
hash_id={:?})",
fn_id,
psubsts,
hash_id);
let map_node = session::expect(
ccx.sess(),
ccx.tcx().map.find(fn_id.node),
|| {
format!("while monomorphizing {:?}, couldn't find it in \
the item map (may have attempted to monomorphize \
an item defined in a different crate?)",
fn_id)
});
if let ast_map::NodeForeignItem(_) = map_node {
if ccx.tcx().map.get_foreign_abi(fn_id.node)!= abi::RustIntrinsic {
// Foreign externs don't have to be monomorphized.
return (get_item_val(ccx, fn_id.node), mono_ty, true);
}
}
debug!("mono_ty = {:?} (post-substitution)", mono_ty);
let mono_ty = normalize_associated_type(ccx.tcx(), &mono_ty);
debug!("mono_ty = {:?} (post-normalization)", mono_ty);
ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
let depth;
{
let mut monomorphizing = ccx.monomorphizing().borrow_mut();
depth = match monomorphizing.get(&fn_id) {
Some(&d) => d, None => 0
};
// Random cut-off -- code that needs to instantiate the same function
// recursively more than thirty times can probably safely be assumed
// to be causing an infinite expansion.
if depth > ccx.sess().recursion_limit.get() {
ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node),
"reached the recursion limit during monomorphization");
}
monomorphizing.insert(fn_id, depth + 1);
}
let hash;
let s = {
let mut state = SipHasher::new();
hash_id.hash(&mut state);
mono_ty.hash(&mut state);
hash = format!("h{}", state.finish());
ccx.tcx().map.with_path(fn_id.node, |path| {
exported_name(path, &hash[..])
})
};
debug!("monomorphize_fn mangled to {}", s);
// This shouldn't need to option dance.
let mut hash_id = Some(hash_id);
let mut mk_lldecl = |abi: abi::Abi| {
let lldecl = if abi!= abi::Rust {
foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, &s[..])
} else {
// FIXME(nagisa): perhaps needs a more fine grained selection? See setup_lldecl below.
declare::define_internal_rust_fn(ccx, &s[..], mono_ty).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", s));
})
};
ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl);
lldecl
};
let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| {
base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
attributes::from_fn_attrs(ccx, attrs, lldecl);
let is_first =!ccx.available_monomorphizations().borrow().contains(&s);
if is_first {
ccx.available_monomorphizations().borrow_mut().insert(s.clone());
}
let trans_everywhere = attr::requests_inline(attrs);
if trans_everywhere &&!is_first {
llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage);
}
// If `true`, then `lldecl` should be given a function body.
// Otherwise, it should be left as a declaration of an external
// function, with no definition in the current compilation unit.
trans_everywhere || is_first
};
let lldecl = match map_node {
ast_map::NodeItem(i) => {
match *i {
ast::Item {
node: ast::ItemFn(ref decl, _, _, abi, _, ref body),
..
} => {
let d = mk_lldecl(abi);
let needs_body = setup_lldecl(d, &i.attrs);
if needs_body {
if abi!= abi::Rust {
foreign::trans_rust_fn_with_foreign_abi(
ccx, &**decl, &**body, &[], d, psubsts, fn_id.node,
Some(&hash[..]));
} else {
trans_fn(ccx, &**decl, &**body, d, psubsts, fn_id.node, &[]);
}
}
d
}
_ => {
ccx.sess().bug("Can't monomorphize this kind of item")
}
}
}
ast_map::NodeVariant(v) => {
let parent = ccx.tcx().map.get_parent(fn_id.node);
let tvs = ccx.tcx().enum_variants(local_def(parent));
let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap();
let d = mk_lldecl(abi::Rust);
attributes::inline(d, attributes::InlineAttr::Hint);
match v.node.kind {
ast::TupleVariantKind(ref args) => {
trans_enum_variant(ccx,
parent,
&*v,
&args[..],
this_tv.disr_val,
psubsts,
d);
}
ast::StructVariantKind(_) =>
ccx.sess().bug("can't monomorphize struct variants"),
}
d
}
ast_map::NodeImplItem(impl_item) => {
match impl_item.node {
ast::MethodImplItem(ref sig, ref body) => {
let d = mk_lldecl(abi::Rust);
let needs_body = setup_lldecl(d, &impl_item.attrs);
if needs_body {
trans_fn(ccx,
&sig.decl,
body,
d,
psubsts,
impl_item.id,
&[]);
}
d
}
_ => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
}
}
ast_map::NodeTraitItem(trait_item) => {
match trait_item.node {
ast::MethodTraitItem(ref sig, Some(ref body)) => {
let d = mk_lldecl(abi::Rust);
let needs_body = setup_lldecl(d, &trait_item.attrs);
if needs_body {
trans_fn(ccx, &sig.decl, body, d,
psubsts, trait_item.id, &[]);
}
d
}
_ => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
}
}
ast_map::NodeStructCtor(struct_def) => {
let d = mk_lldecl(abi::Rust);
attributes::inline(d, attributes::InlineAttr::Hint);
base::trans_tuple_struct(ccx,
&struct_def.fields,
struct_def.ctor_id.expect("ast-mapped tuple struct \
didn't have a ctor id"),
psubsts,
d);
d
}
// Ugh -- but this ensures any new variants won't be forgotten
ast_map::NodeForeignItem(..) |
ast_map::NodeLifetime(..) |
ast_map::NodeExpr(..) |
ast_map::NodeStmt(..) |
ast_map::NodeArg(..) |
ast_map::NodeBlock(..) |
ast_map::NodePat(..) |
ast_map::NodeLocal(..) => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
};
ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id));
(lldecl, mono_ty, true)
}
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct MonoId<'tcx> {
pub def: ast::DefId,
pub params: &'tcx subst::VecPerParamSpace<Ty<'tcx>>
}
/// Monomorphizes a type from the AST by first applying the in-scope
/// substitutions and then normalizing any associated types.
pub fn apply_param_substs<'tcx,T>(tcx: &ty::ctxt<'tcx>,
param_substs: &Substs<'tcx>,
value: &T)
-> T
where T : TypeFoldable<'tcx> + HasTypeFlags
{
let substituted = value.subst(tcx, param_substs);
normalize_associated_type(tcx, &substituted)
}
/// Removes associated types, if any. Since this during
/// monomorphization, we know that only concrete types are involved,
/// and hence we can be sure that all associated types will be
/// completely normalized away.
pub fn normalize_associated_type<'tcx,T>(tcx: &ty::ctxt<'tcx>, value: &T) -> T
where T : TypeFoldable<'tcx> + HasTypeFlags
{
debug!("normalize_associated_type(t={:?})", value);
let value = erase_regions(tcx, value);
if!value.has_projection_types() {
return value;
}
// FIXME(#20304) -- cache
let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
let mut selcx = traits::SelectionContext::new(&infcx);
let cause = traits::ObligationCause::dummy();
let traits::Normalized { value: result, obligations } =
traits::normalize(&mut selcx, cause, &value);
debug!("normalize_associated_type: result={:?} obligations={:?}",
result,
obligations);
let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
for obligation in obligations {
fulfill_cx.register_predicate_obligation(&infcx, obligation);
}
let result = drain_fulfillment_cx_or_panic(DUMMY_SP, &infcx, &mut fulfill_cx, &result);
result
} | use syntax::ast; | random_line_split |
monomorphize.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::link::exported_name;
use session;
use llvm::ValueRef;
use llvm;
use middle::infer;
use middle::subst;
use middle::subst::{Subst, Substs};
use middle::traits;
use middle::ty_fold::{TypeFolder, TypeFoldable};
use rustc::ast_map;
use trans::attributes;
use trans::base::{trans_enum_variant, push_ctxt, get_item_val};
use trans::base::trans_fn;
use trans::base;
use trans::common::*;
use trans::declare;
use trans::foreign;
use middle::ty::{self, HasTypeFlags, Ty};
use syntax::abi;
use syntax::ast;
use syntax::ast_util::local_def;
use syntax::attr;
use syntax::codemap::DUMMY_SP;
use std::hash::{Hasher, Hash, SipHasher};
pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn_id: ast::DefId,
psubsts: &'tcx subst::Substs<'tcx>,
ref_id: Option<ast::NodeId>)
-> (ValueRef, Ty<'tcx>, bool) {
debug!("monomorphic_fn(\
fn_id={:?}, \
real_substs={:?}, \
ref_id={:?})",
fn_id,
psubsts,
ref_id);
assert!(!psubsts.types.needs_infer() &&!psubsts.types.has_param_types());
let _icx = push_ctxt("monomorphic_fn");
let hash_id = MonoId {
def: fn_id,
params: &psubsts.types
};
let item_ty = ccx.tcx().lookup_item_type(fn_id).ty;
debug!("monomorphic_fn about to subst into {:?}", item_ty);
let mono_ty = item_ty.subst(ccx.tcx(), psubsts);
match ccx.monomorphized().borrow().get(&hash_id) {
Some(&val) => {
debug!("leaving monomorphic fn {}",
ccx.tcx().item_path_str(fn_id));
return (val, mono_ty, false);
}
None => ()
}
debug!("monomorphic_fn(\
fn_id={:?}, \
psubsts={:?}, \
hash_id={:?})",
fn_id,
psubsts,
hash_id);
let map_node = session::expect(
ccx.sess(),
ccx.tcx().map.find(fn_id.node),
|| {
format!("while monomorphizing {:?}, couldn't find it in \
the item map (may have attempted to monomorphize \
an item defined in a different crate?)",
fn_id)
});
if let ast_map::NodeForeignItem(_) = map_node {
if ccx.tcx().map.get_foreign_abi(fn_id.node)!= abi::RustIntrinsic {
// Foreign externs don't have to be monomorphized.
return (get_item_val(ccx, fn_id.node), mono_ty, true);
}
}
debug!("mono_ty = {:?} (post-substitution)", mono_ty);
let mono_ty = normalize_associated_type(ccx.tcx(), &mono_ty);
debug!("mono_ty = {:?} (post-normalization)", mono_ty);
ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
let depth;
{
let mut monomorphizing = ccx.monomorphizing().borrow_mut();
depth = match monomorphizing.get(&fn_id) {
Some(&d) => d, None => 0
};
// Random cut-off -- code that needs to instantiate the same function
// recursively more than thirty times can probably safely be assumed
// to be causing an infinite expansion.
if depth > ccx.sess().recursion_limit.get() {
ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node),
"reached the recursion limit during monomorphization");
}
monomorphizing.insert(fn_id, depth + 1);
}
let hash;
let s = {
let mut state = SipHasher::new();
hash_id.hash(&mut state);
mono_ty.hash(&mut state);
hash = format!("h{}", state.finish());
ccx.tcx().map.with_path(fn_id.node, |path| {
exported_name(path, &hash[..])
})
};
debug!("monomorphize_fn mangled to {}", s);
// This shouldn't need to option dance.
let mut hash_id = Some(hash_id);
let mut mk_lldecl = |abi: abi::Abi| {
let lldecl = if abi!= abi::Rust {
foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, &s[..])
} else {
// FIXME(nagisa): perhaps needs a more fine grained selection? See setup_lldecl below.
declare::define_internal_rust_fn(ccx, &s[..], mono_ty).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", s));
})
};
ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl);
lldecl
};
let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| {
base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
attributes::from_fn_attrs(ccx, attrs, lldecl);
let is_first =!ccx.available_monomorphizations().borrow().contains(&s);
if is_first {
ccx.available_monomorphizations().borrow_mut().insert(s.clone());
}
let trans_everywhere = attr::requests_inline(attrs);
if trans_everywhere &&!is_first {
llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage);
}
// If `true`, then `lldecl` should be given a function body.
// Otherwise, it should be left as a declaration of an external
// function, with no definition in the current compilation unit.
trans_everywhere || is_first
};
let lldecl = match map_node {
ast_map::NodeItem(i) => {
match *i {
ast::Item {
node: ast::ItemFn(ref decl, _, _, abi, _, ref body),
..
} => {
let d = mk_lldecl(abi);
let needs_body = setup_lldecl(d, &i.attrs);
if needs_body {
if abi!= abi::Rust {
foreign::trans_rust_fn_with_foreign_abi(
ccx, &**decl, &**body, &[], d, psubsts, fn_id.node,
Some(&hash[..]));
} else {
trans_fn(ccx, &**decl, &**body, d, psubsts, fn_id.node, &[]);
}
}
d
}
_ => {
ccx.sess().bug("Can't monomorphize this kind of item")
}
}
}
ast_map::NodeVariant(v) => {
let parent = ccx.tcx().map.get_parent(fn_id.node);
let tvs = ccx.tcx().enum_variants(local_def(parent));
let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap();
let d = mk_lldecl(abi::Rust);
attributes::inline(d, attributes::InlineAttr::Hint);
match v.node.kind {
ast::TupleVariantKind(ref args) => {
trans_enum_variant(ccx,
parent,
&*v,
&args[..],
this_tv.disr_val,
psubsts,
d);
}
ast::StructVariantKind(_) =>
ccx.sess().bug("can't monomorphize struct variants"),
}
d
}
ast_map::NodeImplItem(impl_item) => {
match impl_item.node {
ast::MethodImplItem(ref sig, ref body) => {
let d = mk_lldecl(abi::Rust);
let needs_body = setup_lldecl(d, &impl_item.attrs);
if needs_body {
trans_fn(ccx,
&sig.decl,
body,
d,
psubsts,
impl_item.id,
&[]);
}
d
}
_ => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
}
}
ast_map::NodeTraitItem(trait_item) => {
match trait_item.node {
ast::MethodTraitItem(ref sig, Some(ref body)) => {
let d = mk_lldecl(abi::Rust);
let needs_body = setup_lldecl(d, &trait_item.attrs);
if needs_body {
trans_fn(ccx, &sig.decl, body, d,
psubsts, trait_item.id, &[]);
}
d
}
_ => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
}
}
ast_map::NodeStructCtor(struct_def) => {
let d = mk_lldecl(abi::Rust);
attributes::inline(d, attributes::InlineAttr::Hint);
base::trans_tuple_struct(ccx,
&struct_def.fields,
struct_def.ctor_id.expect("ast-mapped tuple struct \
didn't have a ctor id"),
psubsts,
d);
d
}
// Ugh -- but this ensures any new variants won't be forgotten
ast_map::NodeForeignItem(..) |
ast_map::NodeLifetime(..) |
ast_map::NodeExpr(..) |
ast_map::NodeStmt(..) |
ast_map::NodeArg(..) |
ast_map::NodeBlock(..) |
ast_map::NodePat(..) |
ast_map::NodeLocal(..) => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
};
ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id));
(lldecl, mono_ty, true)
}
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct MonoId<'tcx> {
pub def: ast::DefId,
pub params: &'tcx subst::VecPerParamSpace<Ty<'tcx>>
}
/// Monomorphizes a type from the AST by first applying the in-scope
/// substitutions and then normalizing any associated types.
pub fn | <'tcx,T>(tcx: &ty::ctxt<'tcx>,
param_substs: &Substs<'tcx>,
value: &T)
-> T
where T : TypeFoldable<'tcx> + HasTypeFlags
{
let substituted = value.subst(tcx, param_substs);
normalize_associated_type(tcx, &substituted)
}
/// Removes associated types, if any. Since this during
/// monomorphization, we know that only concrete types are involved,
/// and hence we can be sure that all associated types will be
/// completely normalized away.
pub fn normalize_associated_type<'tcx,T>(tcx: &ty::ctxt<'tcx>, value: &T) -> T
where T : TypeFoldable<'tcx> + HasTypeFlags
{
debug!("normalize_associated_type(t={:?})", value);
let value = erase_regions(tcx, value);
if!value.has_projection_types() {
return value;
}
// FIXME(#20304) -- cache
let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
let mut selcx = traits::SelectionContext::new(&infcx);
let cause = traits::ObligationCause::dummy();
let traits::Normalized { value: result, obligations } =
traits::normalize(&mut selcx, cause, &value);
debug!("normalize_associated_type: result={:?} obligations={:?}",
result,
obligations);
let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
for obligation in obligations {
fulfill_cx.register_predicate_obligation(&infcx, obligation);
}
let result = drain_fulfillment_cx_or_panic(DUMMY_SP, &infcx, &mut fulfill_cx, &result);
result
}
| apply_param_substs | identifier_name |
monomorphize.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::link::exported_name;
use session;
use llvm::ValueRef;
use llvm;
use middle::infer;
use middle::subst;
use middle::subst::{Subst, Substs};
use middle::traits;
use middle::ty_fold::{TypeFolder, TypeFoldable};
use rustc::ast_map;
use trans::attributes;
use trans::base::{trans_enum_variant, push_ctxt, get_item_val};
use trans::base::trans_fn;
use trans::base;
use trans::common::*;
use trans::declare;
use trans::foreign;
use middle::ty::{self, HasTypeFlags, Ty};
use syntax::abi;
use syntax::ast;
use syntax::ast_util::local_def;
use syntax::attr;
use syntax::codemap::DUMMY_SP;
use std::hash::{Hasher, Hash, SipHasher};
pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn_id: ast::DefId,
psubsts: &'tcx subst::Substs<'tcx>,
ref_id: Option<ast::NodeId>)
-> (ValueRef, Ty<'tcx>, bool) {
debug!("monomorphic_fn(\
fn_id={:?}, \
real_substs={:?}, \
ref_id={:?})",
fn_id,
psubsts,
ref_id);
assert!(!psubsts.types.needs_infer() &&!psubsts.types.has_param_types());
let _icx = push_ctxt("monomorphic_fn");
let hash_id = MonoId {
def: fn_id,
params: &psubsts.types
};
let item_ty = ccx.tcx().lookup_item_type(fn_id).ty;
debug!("monomorphic_fn about to subst into {:?}", item_ty);
let mono_ty = item_ty.subst(ccx.tcx(), psubsts);
match ccx.monomorphized().borrow().get(&hash_id) {
Some(&val) => {
debug!("leaving monomorphic fn {}",
ccx.tcx().item_path_str(fn_id));
return (val, mono_ty, false);
}
None => ()
}
debug!("monomorphic_fn(\
fn_id={:?}, \
psubsts={:?}, \
hash_id={:?})",
fn_id,
psubsts,
hash_id);
let map_node = session::expect(
ccx.sess(),
ccx.tcx().map.find(fn_id.node),
|| {
format!("while monomorphizing {:?}, couldn't find it in \
the item map (may have attempted to monomorphize \
an item defined in a different crate?)",
fn_id)
});
if let ast_map::NodeForeignItem(_) = map_node {
if ccx.tcx().map.get_foreign_abi(fn_id.node)!= abi::RustIntrinsic {
// Foreign externs don't have to be monomorphized.
return (get_item_val(ccx, fn_id.node), mono_ty, true);
}
}
debug!("mono_ty = {:?} (post-substitution)", mono_ty);
let mono_ty = normalize_associated_type(ccx.tcx(), &mono_ty);
debug!("mono_ty = {:?} (post-normalization)", mono_ty);
ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
let depth;
{
let mut monomorphizing = ccx.monomorphizing().borrow_mut();
depth = match monomorphizing.get(&fn_id) {
Some(&d) => d, None => 0
};
// Random cut-off -- code that needs to instantiate the same function
// recursively more than thirty times can probably safely be assumed
// to be causing an infinite expansion.
if depth > ccx.sess().recursion_limit.get() {
ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node),
"reached the recursion limit during monomorphization");
}
monomorphizing.insert(fn_id, depth + 1);
}
let hash;
let s = {
let mut state = SipHasher::new();
hash_id.hash(&mut state);
mono_ty.hash(&mut state);
hash = format!("h{}", state.finish());
ccx.tcx().map.with_path(fn_id.node, |path| {
exported_name(path, &hash[..])
})
};
debug!("monomorphize_fn mangled to {}", s);
// This shouldn't need to option dance.
let mut hash_id = Some(hash_id);
let mut mk_lldecl = |abi: abi::Abi| {
let lldecl = if abi!= abi::Rust {
foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, &s[..])
} else {
// FIXME(nagisa): perhaps needs a more fine grained selection? See setup_lldecl below.
declare::define_internal_rust_fn(ccx, &s[..], mono_ty).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", s));
})
};
ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl);
lldecl
};
let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| {
base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
attributes::from_fn_attrs(ccx, attrs, lldecl);
let is_first =!ccx.available_monomorphizations().borrow().contains(&s);
if is_first {
ccx.available_monomorphizations().borrow_mut().insert(s.clone());
}
let trans_everywhere = attr::requests_inline(attrs);
if trans_everywhere &&!is_first {
llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage);
}
// If `true`, then `lldecl` should be given a function body.
// Otherwise, it should be left as a declaration of an external
// function, with no definition in the current compilation unit.
trans_everywhere || is_first
};
let lldecl = match map_node {
ast_map::NodeItem(i) => {
match *i {
ast::Item {
node: ast::ItemFn(ref decl, _, _, abi, _, ref body),
..
} => {
let d = mk_lldecl(abi);
let needs_body = setup_lldecl(d, &i.attrs);
if needs_body {
if abi!= abi::Rust {
foreign::trans_rust_fn_with_foreign_abi(
ccx, &**decl, &**body, &[], d, psubsts, fn_id.node,
Some(&hash[..]));
} else {
trans_fn(ccx, &**decl, &**body, d, psubsts, fn_id.node, &[]);
}
}
d
}
_ => {
ccx.sess().bug("Can't monomorphize this kind of item")
}
}
}
ast_map::NodeVariant(v) => {
let parent = ccx.tcx().map.get_parent(fn_id.node);
let tvs = ccx.tcx().enum_variants(local_def(parent));
let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap();
let d = mk_lldecl(abi::Rust);
attributes::inline(d, attributes::InlineAttr::Hint);
match v.node.kind {
ast::TupleVariantKind(ref args) => {
trans_enum_variant(ccx,
parent,
&*v,
&args[..],
this_tv.disr_val,
psubsts,
d);
}
ast::StructVariantKind(_) =>
ccx.sess().bug("can't monomorphize struct variants"),
}
d
}
ast_map::NodeImplItem(impl_item) => {
match impl_item.node {
ast::MethodImplItem(ref sig, ref body) => {
let d = mk_lldecl(abi::Rust);
let needs_body = setup_lldecl(d, &impl_item.attrs);
if needs_body {
trans_fn(ccx,
&sig.decl,
body,
d,
psubsts,
impl_item.id,
&[]);
}
d
}
_ => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
}
}
ast_map::NodeTraitItem(trait_item) => {
match trait_item.node {
ast::MethodTraitItem(ref sig, Some(ref body)) => {
let d = mk_lldecl(abi::Rust);
let needs_body = setup_lldecl(d, &trait_item.attrs);
if needs_body {
trans_fn(ccx, &sig.decl, body, d,
psubsts, trait_item.id, &[]);
}
d
}
_ => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
}
}
ast_map::NodeStructCtor(struct_def) => {
let d = mk_lldecl(abi::Rust);
attributes::inline(d, attributes::InlineAttr::Hint);
base::trans_tuple_struct(ccx,
&struct_def.fields,
struct_def.ctor_id.expect("ast-mapped tuple struct \
didn't have a ctor id"),
psubsts,
d);
d
}
// Ugh -- but this ensures any new variants won't be forgotten
ast_map::NodeForeignItem(..) |
ast_map::NodeLifetime(..) |
ast_map::NodeExpr(..) |
ast_map::NodeStmt(..) |
ast_map::NodeArg(..) |
ast_map::NodeBlock(..) |
ast_map::NodePat(..) |
ast_map::NodeLocal(..) => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
};
ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id));
(lldecl, mono_ty, true)
}
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct MonoId<'tcx> {
pub def: ast::DefId,
pub params: &'tcx subst::VecPerParamSpace<Ty<'tcx>>
}
/// Monomorphizes a type from the AST by first applying the in-scope
/// substitutions and then normalizing any associated types.
pub fn apply_param_substs<'tcx,T>(tcx: &ty::ctxt<'tcx>,
param_substs: &Substs<'tcx>,
value: &T)
-> T
where T : TypeFoldable<'tcx> + HasTypeFlags
{
let substituted = value.subst(tcx, param_substs);
normalize_associated_type(tcx, &substituted)
}
/// Removes associated types, if any. Since this during
/// monomorphization, we know that only concrete types are involved,
/// and hence we can be sure that all associated types will be
/// completely normalized away.
pub fn normalize_associated_type<'tcx,T>(tcx: &ty::ctxt<'tcx>, value: &T) -> T
where T : TypeFoldable<'tcx> + HasTypeFlags
| let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
for obligation in obligations {
fulfill_cx.register_predicate_obligation(&infcx, obligation);
}
let result = drain_fulfillment_cx_or_panic(DUMMY_SP, &infcx, &mut fulfill_cx, &result);
result
}
| {
debug!("normalize_associated_type(t={:?})", value);
let value = erase_regions(tcx, value);
if !value.has_projection_types() {
return value;
}
// FIXME(#20304) -- cache
let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
let mut selcx = traits::SelectionContext::new(&infcx);
let cause = traits::ObligationCause::dummy();
let traits::Normalized { value: result, obligations } =
traits::normalize(&mut selcx, cause, &value);
debug!("normalize_associated_type: result={:?} obligations={:?}",
result,
obligations);
| identifier_body |
messagebox.rs | use std::ffi::CString;
use std::ptr;
use video::Window;
use get_error;
use SdlResult;
use util::CStringExt;
use sys::messagebox as ll;
bitflags! {
flags MessageBoxFlag: u32 {
const MESSAGEBOX_ERROR = ll::SDL_MESSAGEBOX_ERROR,
const MESSAGEBOX_WARNING = ll::SDL_MESSAGEBOX_WARNING,
const MESSAGEBOX_INFORMATION = ll::SDL_MESSAGEBOX_INFORMATION
}
}
pub fn show_simple_message_box(flags: MessageBoxFlag, title: &str, message: &str, window: Option<&Window>) -> SdlResult<()> {
let result = unsafe {
let title = CString::new(title).remove_nul();
let message = CString::new(message).remove_nul();
ll::SDL_ShowSimpleMessageBox(flags.bits(),
title.as_ptr(),
message.as_ptr(),
window.map_or(ptr::null_mut(), |win| win.raw()))
} == 0;
if result {
Ok(())
} else |
}
| {
Err(get_error())
} | conditional_block |
messagebox.rs | use std::ffi::CString;
use std::ptr;
use video::Window;
use get_error;
use SdlResult;
use util::CStringExt;
use sys::messagebox as ll;
bitflags! {
flags MessageBoxFlag: u32 {
const MESSAGEBOX_ERROR = ll::SDL_MESSAGEBOX_ERROR,
const MESSAGEBOX_WARNING = ll::SDL_MESSAGEBOX_WARNING,
const MESSAGEBOX_INFORMATION = ll::SDL_MESSAGEBOX_INFORMATION
}
}
pub fn show_simple_message_box(flags: MessageBoxFlag, title: &str, message: &str, window: Option<&Window>) -> SdlResult<()> | {
let result = unsafe {
let title = CString::new(title).remove_nul();
let message = CString::new(message).remove_nul();
ll::SDL_ShowSimpleMessageBox(flags.bits(),
title.as_ptr(),
message.as_ptr(),
window.map_or(ptr::null_mut(), |win| win.raw()))
} == 0;
if result {
Ok(())
} else {
Err(get_error())
}
} | identifier_body |
|
messagebox.rs | use std::ffi::CString;
use std::ptr;
use video::Window;
use get_error;
use SdlResult;
use util::CStringExt;
use sys::messagebox as ll;
bitflags! {
flags MessageBoxFlag: u32 {
const MESSAGEBOX_ERROR = ll::SDL_MESSAGEBOX_ERROR,
const MESSAGEBOX_WARNING = ll::SDL_MESSAGEBOX_WARNING,
const MESSAGEBOX_INFORMATION = ll::SDL_MESSAGEBOX_INFORMATION
}
}
pub fn | (flags: MessageBoxFlag, title: &str, message: &str, window: Option<&Window>) -> SdlResult<()> {
let result = unsafe {
let title = CString::new(title).remove_nul();
let message = CString::new(message).remove_nul();
ll::SDL_ShowSimpleMessageBox(flags.bits(),
title.as_ptr(),
message.as_ptr(),
window.map_or(ptr::null_mut(), |win| win.raw()))
} == 0;
if result {
Ok(())
} else {
Err(get_error())
}
}
| show_simple_message_box | identifier_name |
messagebox.rs | use std::ffi::CString;
use std::ptr;
use video::Window;
use get_error;
use SdlResult;
use util::CStringExt;
use sys::messagebox as ll;
bitflags! {
flags MessageBoxFlag: u32 {
const MESSAGEBOX_ERROR = ll::SDL_MESSAGEBOX_ERROR,
const MESSAGEBOX_WARNING = ll::SDL_MESSAGEBOX_WARNING,
const MESSAGEBOX_INFORMATION = ll::SDL_MESSAGEBOX_INFORMATION
}
}
pub fn show_simple_message_box(flags: MessageBoxFlag, title: &str, message: &str, window: Option<&Window>) -> SdlResult<()> {
let result = unsafe {
let title = CString::new(title).remove_nul();
let message = CString::new(message).remove_nul();
ll::SDL_ShowSimpleMessageBox(flags.bits(),
title.as_ptr(),
message.as_ptr(),
window.map_or(ptr::null_mut(), |win| win.raw()))
} == 0;
if result {
Ok(()) | } | } else {
Err(get_error())
} | random_line_split |
htmlfontelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::attr::Attr;
use crate::dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use crate::dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, LayoutDom};
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::element::{Element, RawLayoutElementHelpers};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use crate::dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use servo_atoms::Atom;
use style::attr::AttrValue;
use style::str::{read_numbers, HTML_SPACE_CHARACTERS};
#[dom_struct]
pub struct HTMLFontElement {
htmlelement: HTMLElement,
}
impl HTMLFontElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLFontElement> {
Node::reflect_node(
Box::new(HTMLFontElement::new_inherited(local_name, prefix, document)),
document,
HTMLFontElementBinding::Wrap,
)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_legacy_color_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_getter!(Face, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-size
make_getter!(Size, "size");
// https://html.spec.whatwg.org/multipage/#dom-font-size
fn SetSize(&self, value: DOMString) {
let element = self.upcast::<Element>();
element.set_attribute(&local_name!("size"), parse_size(&value));
}
}
impl VirtualMethods for HTMLFontElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn attribute_affects_presentational_hints(&self, attr: &Attr) -> bool {
if attr.local_name() == &local_name!("color") {
return true;
}
// FIXME: Should also return true for `size` and `face` changes!
self.super_type()
.unwrap()
.attribute_affects_presentational_hints(attr)
}
fn parse_plain_attribute(&self, name: &LocalName, value: DOMString) -> AttrValue {
match name {
&local_name!("face") => AttrValue::from_atomic(value.into()),
&local_name!("color") => AttrValue::from_legacy_color(value.into()), | .unwrap()
.parse_plain_attribute(name, value),
}
}
}
pub trait HTMLFontElementLayoutHelpers {
fn get_color(&self) -> Option<RGBA>;
fn get_face(&self) -> Option<Atom>;
fn get_size(&self) -> Option<u32>;
}
impl HTMLFontElementLayoutHelpers for LayoutDom<HTMLFontElement> {
#[allow(unsafe_code)]
fn get_color(&self) -> Option<RGBA> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("color"))
.and_then(AttrValue::as_color)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_face(&self) -> Option<Atom> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("face"))
.map(AttrValue::as_atom)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_size(&self) -> Option<u32> {
let size = unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("size"))
};
match size {
Some(&AttrValue::UInt(_, s)) => Some(s),
_ => None,
}
}
}
/// <https://html.spec.whatwg.org/multipage/#rules-for-parsing-a-legacy-font-size>
fn parse_size(mut input: &str) -> AttrValue {
let original_input = input;
// Steps 1 & 2 are not relevant
// Step 3
input = input.trim_matches(HTML_SPACE_CHARACTERS);
enum ParseMode {
RelativePlus,
RelativeMinus,
Absolute,
}
let mut input_chars = input.chars().peekable();
let parse_mode = match input_chars.peek() {
// Step 4
None => return AttrValue::String(original_input.into()),
// Step 5
Some(&'+') => {
let _ = input_chars.next(); // consume the '+'
ParseMode::RelativePlus
},
Some(&'-') => {
let _ = input_chars.next(); // consume the '-'
ParseMode::RelativeMinus
},
Some(_) => ParseMode::Absolute,
};
// Steps 6, 7, 8
let mut value = match read_numbers(input_chars) {
(Some(v), _) if v >= 0 => v,
_ => return AttrValue::String(original_input.into()),
};
// Step 9
match parse_mode {
ParseMode::RelativePlus => value = 3 + value,
ParseMode::RelativeMinus => value = 3 - value,
ParseMode::Absolute => (),
}
// Steps 10, 11, 12
AttrValue::UInt(original_input.into(), value as u32)
} | &local_name!("size") => parse_size(&value),
_ => self
.super_type() | random_line_split |
htmlfontelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::attr::Attr;
use crate::dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use crate::dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, LayoutDom};
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::element::{Element, RawLayoutElementHelpers};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use crate::dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use servo_atoms::Atom;
use style::attr::AttrValue;
use style::str::{read_numbers, HTML_SPACE_CHARACTERS};
#[dom_struct]
pub struct HTMLFontElement {
htmlelement: HTMLElement,
}
impl HTMLFontElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLFontElement> {
Node::reflect_node(
Box::new(HTMLFontElement::new_inherited(local_name, prefix, document)),
document,
HTMLFontElementBinding::Wrap,
)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_legacy_color_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_getter!(Face, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-size
make_getter!(Size, "size");
// https://html.spec.whatwg.org/multipage/#dom-font-size
fn SetSize(&self, value: DOMString) {
let element = self.upcast::<Element>();
element.set_attribute(&local_name!("size"), parse_size(&value));
}
}
impl VirtualMethods for HTMLFontElement {
fn | (&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn attribute_affects_presentational_hints(&self, attr: &Attr) -> bool {
if attr.local_name() == &local_name!("color") {
return true;
}
// FIXME: Should also return true for `size` and `face` changes!
self.super_type()
.unwrap()
.attribute_affects_presentational_hints(attr)
}
fn parse_plain_attribute(&self, name: &LocalName, value: DOMString) -> AttrValue {
match name {
&local_name!("face") => AttrValue::from_atomic(value.into()),
&local_name!("color") => AttrValue::from_legacy_color(value.into()),
&local_name!("size") => parse_size(&value),
_ => self
.super_type()
.unwrap()
.parse_plain_attribute(name, value),
}
}
}
pub trait HTMLFontElementLayoutHelpers {
fn get_color(&self) -> Option<RGBA>;
fn get_face(&self) -> Option<Atom>;
fn get_size(&self) -> Option<u32>;
}
impl HTMLFontElementLayoutHelpers for LayoutDom<HTMLFontElement> {
#[allow(unsafe_code)]
fn get_color(&self) -> Option<RGBA> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("color"))
.and_then(AttrValue::as_color)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_face(&self) -> Option<Atom> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("face"))
.map(AttrValue::as_atom)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_size(&self) -> Option<u32> {
let size = unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("size"))
};
match size {
Some(&AttrValue::UInt(_, s)) => Some(s),
_ => None,
}
}
}
/// <https://html.spec.whatwg.org/multipage/#rules-for-parsing-a-legacy-font-size>
fn parse_size(mut input: &str) -> AttrValue {
let original_input = input;
// Steps 1 & 2 are not relevant
// Step 3
input = input.trim_matches(HTML_SPACE_CHARACTERS);
enum ParseMode {
RelativePlus,
RelativeMinus,
Absolute,
}
let mut input_chars = input.chars().peekable();
let parse_mode = match input_chars.peek() {
// Step 4
None => return AttrValue::String(original_input.into()),
// Step 5
Some(&'+') => {
let _ = input_chars.next(); // consume the '+'
ParseMode::RelativePlus
},
Some(&'-') => {
let _ = input_chars.next(); // consume the '-'
ParseMode::RelativeMinus
},
Some(_) => ParseMode::Absolute,
};
// Steps 6, 7, 8
let mut value = match read_numbers(input_chars) {
(Some(v), _) if v >= 0 => v,
_ => return AttrValue::String(original_input.into()),
};
// Step 9
match parse_mode {
ParseMode::RelativePlus => value = 3 + value,
ParseMode::RelativeMinus => value = 3 - value,
ParseMode::Absolute => (),
}
// Steps 10, 11, 12
AttrValue::UInt(original_input.into(), value as u32)
}
| super_type | identifier_name |
htmlfontelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::attr::Attr;
use crate::dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use crate::dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, LayoutDom};
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::element::{Element, RawLayoutElementHelpers};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use crate::dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use servo_atoms::Atom;
use style::attr::AttrValue;
use style::str::{read_numbers, HTML_SPACE_CHARACTERS};
#[dom_struct]
pub struct HTMLFontElement {
htmlelement: HTMLElement,
}
impl HTMLFontElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLFontElement> {
Node::reflect_node(
Box::new(HTMLFontElement::new_inherited(local_name, prefix, document)),
document,
HTMLFontElementBinding::Wrap,
)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_legacy_color_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_getter!(Face, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-size
make_getter!(Size, "size");
// https://html.spec.whatwg.org/multipage/#dom-font-size
fn SetSize(&self, value: DOMString) {
let element = self.upcast::<Element>();
element.set_attribute(&local_name!("size"), parse_size(&value));
}
}
impl VirtualMethods for HTMLFontElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn attribute_affects_presentational_hints(&self, attr: &Attr) -> bool {
if attr.local_name() == &local_name!("color") {
return true;
}
// FIXME: Should also return true for `size` and `face` changes!
self.super_type()
.unwrap()
.attribute_affects_presentational_hints(attr)
}
fn parse_plain_attribute(&self, name: &LocalName, value: DOMString) -> AttrValue {
match name {
&local_name!("face") => AttrValue::from_atomic(value.into()),
&local_name!("color") => AttrValue::from_legacy_color(value.into()),
&local_name!("size") => parse_size(&value),
_ => self
.super_type()
.unwrap()
.parse_plain_attribute(name, value),
}
}
}
pub trait HTMLFontElementLayoutHelpers {
fn get_color(&self) -> Option<RGBA>;
fn get_face(&self) -> Option<Atom>;
fn get_size(&self) -> Option<u32>;
}
impl HTMLFontElementLayoutHelpers for LayoutDom<HTMLFontElement> {
#[allow(unsafe_code)]
fn get_color(&self) -> Option<RGBA> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("color"))
.and_then(AttrValue::as_color)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_face(&self) -> Option<Atom> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("face"))
.map(AttrValue::as_atom)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_size(&self) -> Option<u32> {
let size = unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("size"))
};
match size {
Some(&AttrValue::UInt(_, s)) => Some(s),
_ => None,
}
}
}
/// <https://html.spec.whatwg.org/multipage/#rules-for-parsing-a-legacy-font-size>
fn parse_size(mut input: &str) -> AttrValue | ParseMode::RelativePlus
},
Some(&'-') => {
let _ = input_chars.next(); // consume the '-'
ParseMode::RelativeMinus
},
Some(_) => ParseMode::Absolute,
};
// Steps 6, 7, 8
let mut value = match read_numbers(input_chars) {
(Some(v), _) if v >= 0 => v,
_ => return AttrValue::String(original_input.into()),
};
// Step 9
match parse_mode {
ParseMode::RelativePlus => value = 3 + value,
ParseMode::RelativeMinus => value = 3 - value,
ParseMode::Absolute => (),
}
// Steps 10, 11, 12
AttrValue::UInt(original_input.into(), value as u32)
}
| {
let original_input = input;
// Steps 1 & 2 are not relevant
// Step 3
input = input.trim_matches(HTML_SPACE_CHARACTERS);
enum ParseMode {
RelativePlus,
RelativeMinus,
Absolute,
}
let mut input_chars = input.chars().peekable();
let parse_mode = match input_chars.peek() {
// Step 4
None => return AttrValue::String(original_input.into()),
// Step 5
Some(&'+') => {
let _ = input_chars.next(); // consume the '+' | identifier_body |
metadata.rs | use std::collections::HashMap;
use std::str;
use crypto::sha1::Sha1;
use crypto::digest::Digest;
use bencode::{Bencode, TypedMethods, BencodeToString};
#[derive(Clone, Debug)]
pub struct | {
length: i64,
md5sum: Option<Vec<u8>>
}
#[derive(Clone, Debug)]
pub struct FileInfo {
length: i64,
md5sum: Option<Vec<u8>>,
path: Vec<String>
}
#[derive(Clone, Debug)]
pub struct MultiFileInfo {
files: Vec<FileInfo>
}
#[derive(Clone, Debug)]
pub enum FileMode {
SingleFile(SingleFileInfo),
MultiFile(MultiFileInfo)
}
#[derive(Debug, Clone)]
pub struct Metadata {
pub announce: String,
pub info_hash: [u8; 20],
name: String,
pub piece_length: i64,
pub pieces: Vec<u8>,
mode_info: FileMode,
}
impl Metadata {
pub fn get_total_length (&self) -> u32 {
let len = match self.mode_info {
FileMode::SingleFile(ref sf) => sf.length,
FileMode::MultiFile(ref mf) => mf.files.iter().fold(0, |a:i64, b:&FileInfo| a + b.length)
};
len as u32
}
}
fn to_file_list (list: &Vec<Bencode>) -> Option<Vec<FileInfo>> {
//TODO: figure out how exception handling works
Some(list.iter().map(|item| {
match item {
&Bencode::Dict(ref hm) => {
let path_list_bencode = hm.get_list("path")
.unwrap_or_else(||panic!("unable to get key path"))
.iter()
.map(|x| match x {
&Bencode::ByteString(ref path) => {
//path.to_string(),
match str::from_utf8(path) {
Ok(v) => v.to_string(),
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
}
}
_ => panic!("unexpected type")
}).collect::<Vec<String>>();
FileInfo {
length: hm.get_int("length").unwrap_or_else(|| panic!("no length in file")),
md5sum: hm.get_owned_string("md5sum"),
path: path_list_bencode
}
},
_ => panic!("not a bencode list of dicts")
}
}).collect::<Vec<FileInfo>>())
}
pub trait MetadataDict {
fn to_metadata (&self) -> Option<Metadata>;
}
impl MetadataDict for HashMap<String, Bencode> {
/// Extracts information from this HashMap into a Metadata instance, if valid. Currently if it
/// is invalid, it will just throw a runtime exception
fn to_metadata (&self) -> Option<Metadata> {
let announce = self.get_string("announce").unwrap_or_else(||panic!("no key found for announce"));
let info_dict = self.get_dict("info").unwrap_or_else(||panic!("no key found for info")).to_owned();
let mut sha = Sha1::new();
let info_as_text = Bencode::Dict(info_dict.clone()).to_bencode_string();
// println!("info_dict: {}", info_as_text);
sha.input(&Bencode::Dict(info_dict.clone()).to_bencode_string());
let mut info_hash:[u8; 20] = [0; 20];
let _ = sha.result(&mut info_hash);
println!("info_hash: {:?}", info_hash);
let mode_info = match info_dict.get_list("files") {
Some(flist) => {
FileMode::MultiFile(MultiFileInfo {
files: to_file_list(flist).unwrap_or_else(|| panic!("unable to deserialize filelist"))
})
},
None => FileMode::SingleFile(SingleFileInfo {
length: info_dict.get_int("length").unwrap_or_else(||panic!("no key found for length")),
md5sum: info_dict.get_owned_string("md5sum")})
};
//for now only handle single file mode
Some(Metadata {
announce: str::from_utf8(&announce).unwrap().to_string(),
info_hash: info_hash,
piece_length: info_dict.get_int("piece length").unwrap_or_else(||panic!("no key found for piece length")),
pieces: info_dict.get_owned_string("pieces").unwrap(),
name: str::from_utf8(info_dict.get_string("name").unwrap_or_else(||panic!("no key found for name"))).unwrap().to_string(),
mode_info: mode_info
})
}
}
| SingleFileInfo | identifier_name |
metadata.rs | use std::collections::HashMap;
use std::str;
use crypto::sha1::Sha1;
use crypto::digest::Digest;
use bencode::{Bencode, TypedMethods, BencodeToString};
#[derive(Clone, Debug)]
pub struct SingleFileInfo {
length: i64,
md5sum: Option<Vec<u8>>
}
#[derive(Clone, Debug)]
pub struct FileInfo {
length: i64,
md5sum: Option<Vec<u8>>,
path: Vec<String>
}
#[derive(Clone, Debug)]
pub struct MultiFileInfo {
files: Vec<FileInfo>
}
#[derive(Clone, Debug)]
pub enum FileMode {
SingleFile(SingleFileInfo),
MultiFile(MultiFileInfo)
}
#[derive(Debug, Clone)]
pub struct Metadata {
pub announce: String,
pub info_hash: [u8; 20],
name: String,
pub piece_length: i64,
pub pieces: Vec<u8>,
mode_info: FileMode,
}
impl Metadata {
pub fn get_total_length (&self) -> u32 {
let len = match self.mode_info {
FileMode::SingleFile(ref sf) => sf.length,
FileMode::MultiFile(ref mf) => mf.files.iter().fold(0, |a:i64, b:&FileInfo| a + b.length)
};
len as u32
}
}
fn to_file_list (list: &Vec<Bencode>) -> Option<Vec<FileInfo>> {
//TODO: figure out how exception handling works
Some(list.iter().map(|item| {
match item {
&Bencode::Dict(ref hm) => {
let path_list_bencode = hm.get_list("path")
.unwrap_or_else(||panic!("unable to get key path"))
.iter()
.map(|x| match x {
&Bencode::ByteString(ref path) => {
//path.to_string(),
match str::from_utf8(path) {
Ok(v) => v.to_string(),
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
}
}
_ => panic!("unexpected type")
}).collect::<Vec<String>>();
FileInfo {
length: hm.get_int("length").unwrap_or_else(|| panic!("no length in file")),
md5sum: hm.get_owned_string("md5sum"),
path: path_list_bencode
}
},
_ => panic!("not a bencode list of dicts")
}
}).collect::<Vec<FileInfo>>())
}
pub trait MetadataDict {
fn to_metadata (&self) -> Option<Metadata>;
}
impl MetadataDict for HashMap<String, Bencode> {
/// Extracts information from this HashMap into a Metadata instance, if valid. Currently if it
/// is invalid, it will just throw a runtime exception
fn to_metadata (&self) -> Option<Metadata> | md5sum: info_dict.get_owned_string("md5sum")})
};
//for now only handle single file mode
Some(Metadata {
announce: str::from_utf8(&announce).unwrap().to_string(),
info_hash: info_hash,
piece_length: info_dict.get_int("piece length").unwrap_or_else(||panic!("no key found for piece length")),
pieces: info_dict.get_owned_string("pieces").unwrap(),
name: str::from_utf8(info_dict.get_string("name").unwrap_or_else(||panic!("no key found for name"))).unwrap().to_string(),
mode_info: mode_info
})
}
}
| {
let announce = self.get_string("announce").unwrap_or_else(||panic!("no key found for announce"));
let info_dict = self.get_dict("info").unwrap_or_else(||panic!("no key found for info")).to_owned();
let mut sha = Sha1::new();
let info_as_text = Bencode::Dict(info_dict.clone()).to_bencode_string();
// println!("info_dict: {}", info_as_text);
sha.input(&Bencode::Dict(info_dict.clone()).to_bencode_string());
let mut info_hash:[u8; 20] = [0; 20];
let _ = sha.result(&mut info_hash);
println!("info_hash: {:?}", info_hash);
let mode_info = match info_dict.get_list("files") {
Some(flist) => {
FileMode::MultiFile(MultiFileInfo {
files: to_file_list(flist).unwrap_or_else(|| panic!("unable to deserialize filelist"))
})
},
None => FileMode::SingleFile(SingleFileInfo {
length: info_dict.get_int("length").unwrap_or_else(||panic!("no key found for length")), | identifier_body |
metadata.rs | use std::collections::HashMap;
use std::str;
use crypto::sha1::Sha1;
use crypto::digest::Digest;
use bencode::{Bencode, TypedMethods, BencodeToString};
#[derive(Clone, Debug)]
pub struct SingleFileInfo {
length: i64,
md5sum: Option<Vec<u8>>
}
#[derive(Clone, Debug)]
pub struct FileInfo {
length: i64,
md5sum: Option<Vec<u8>>,
path: Vec<String>
}
#[derive(Clone, Debug)]
pub struct MultiFileInfo {
files: Vec<FileInfo>
}
#[derive(Clone, Debug)]
pub enum FileMode {
SingleFile(SingleFileInfo),
MultiFile(MultiFileInfo)
}
#[derive(Debug, Clone)]
pub struct Metadata {
pub announce: String,
pub info_hash: [u8; 20],
name: String,
pub piece_length: i64,
pub pieces: Vec<u8>, | pub fn get_total_length (&self) -> u32 {
let len = match self.mode_info {
FileMode::SingleFile(ref sf) => sf.length,
FileMode::MultiFile(ref mf) => mf.files.iter().fold(0, |a:i64, b:&FileInfo| a + b.length)
};
len as u32
}
}
fn to_file_list (list: &Vec<Bencode>) -> Option<Vec<FileInfo>> {
//TODO: figure out how exception handling works
Some(list.iter().map(|item| {
match item {
&Bencode::Dict(ref hm) => {
let path_list_bencode = hm.get_list("path")
.unwrap_or_else(||panic!("unable to get key path"))
.iter()
.map(|x| match x {
&Bencode::ByteString(ref path) => {
//path.to_string(),
match str::from_utf8(path) {
Ok(v) => v.to_string(),
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
}
}
_ => panic!("unexpected type")
}).collect::<Vec<String>>();
FileInfo {
length: hm.get_int("length").unwrap_or_else(|| panic!("no length in file")),
md5sum: hm.get_owned_string("md5sum"),
path: path_list_bencode
}
},
_ => panic!("not a bencode list of dicts")
}
}).collect::<Vec<FileInfo>>())
}
pub trait MetadataDict {
fn to_metadata (&self) -> Option<Metadata>;
}
impl MetadataDict for HashMap<String, Bencode> {
/// Extracts information from this HashMap into a Metadata instance, if valid. Currently if it
/// is invalid, it will just throw a runtime exception
fn to_metadata (&self) -> Option<Metadata> {
let announce = self.get_string("announce").unwrap_or_else(||panic!("no key found for announce"));
let info_dict = self.get_dict("info").unwrap_or_else(||panic!("no key found for info")).to_owned();
let mut sha = Sha1::new();
let info_as_text = Bencode::Dict(info_dict.clone()).to_bencode_string();
// println!("info_dict: {}", info_as_text);
sha.input(&Bencode::Dict(info_dict.clone()).to_bencode_string());
let mut info_hash:[u8; 20] = [0; 20];
let _ = sha.result(&mut info_hash);
println!("info_hash: {:?}", info_hash);
let mode_info = match info_dict.get_list("files") {
Some(flist) => {
FileMode::MultiFile(MultiFileInfo {
files: to_file_list(flist).unwrap_or_else(|| panic!("unable to deserialize filelist"))
})
},
None => FileMode::SingleFile(SingleFileInfo {
length: info_dict.get_int("length").unwrap_or_else(||panic!("no key found for length")),
md5sum: info_dict.get_owned_string("md5sum")})
};
//for now only handle single file mode
Some(Metadata {
announce: str::from_utf8(&announce).unwrap().to_string(),
info_hash: info_hash,
piece_length: info_dict.get_int("piece length").unwrap_or_else(||panic!("no key found for piece length")),
pieces: info_dict.get_owned_string("pieces").unwrap(),
name: str::from_utf8(info_dict.get_string("name").unwrap_or_else(||panic!("no key found for name"))).unwrap().to_string(),
mode_info: mode_info
})
}
} | mode_info: FileMode,
}
impl Metadata { | random_line_split |
apps.rs | // Copyright 2019 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
//! App management functions
use super::{config, AuthFuture};
use crate::app_auth::{app_state, AppState};
use crate::client::AuthClient;
use crate::ffi::apps as ffi;
use crate::ffi::apps::RegisteredApp as FfiRegisteredApp;
use crate::{app_container, AuthError};
use ffi_utils::{vec_into_raw_parts, ReprC};
use futures::future::Future;
use maidsafe_utilities::serialisation::deserialise;
use routing::User::Key;
use routing::XorName;
use safe_core::client::Client;
use safe_core::ipc::req::{containers_from_repr_c, containers_into_vec, ContainerPermissions};
use safe_core::ipc::resp::{AccessContainerEntry, AppAccess};
use safe_core::ipc::{access_container_enc_key, AppExchangeInfo, IpcError};
use safe_core::utils::symmetric_decrypt;
use safe_core::FutureExt;
use std::collections::HashMap;
/// Represents an application that is registered with the Authenticator.
#[derive(Debug)]
pub struct RegisteredApp {
/// Unique application identifier.
pub app_info: AppExchangeInfo,
/// List of containers that this application has access to.
/// Maps from the container name to the set of permissions.
pub containers: HashMap<String, ContainerPermissions>,
}
impl RegisteredApp {
/// Construct FFI wrapper for the native Rust object, consuming self.
pub fn into_repr_c(self) -> Result<FfiRegisteredApp, IpcError> {
let RegisteredApp {
app_info,
containers,
} = self;
let container_permissions_vec = containers_into_vec(containers.into_iter())?;
let (containers_ptr, containers_len, containers_cap) =
vec_into_raw_parts(container_permissions_vec);
Ok(FfiRegisteredApp {
app_info: app_info.into_repr_c()?,
containers: containers_ptr,
containers_len,
containers_cap,
})
}
}
impl ReprC for RegisteredApp {
type C = *const ffi::RegisteredApp;
type Error = IpcError;
unsafe fn clone_from_repr_c(repr_c: Self::C) -> Result<Self, Self::Error> {
Ok(Self {
app_info: AppExchangeInfo::clone_from_repr_c(&(*repr_c).app_info)?,
containers: containers_from_repr_c((*repr_c).containers, (*repr_c).containers_len)?,
})
}
}
/// Removes an application from the list of revoked apps.
pub fn | (client: &AuthClient, app_id: String) -> Box<AuthFuture<()>> {
let client = client.clone();
let c2 = client.clone();
let c3 = client.clone();
let c4 = client.clone();
let app_id = app_id.clone();
let app_id2 = app_id.clone();
let app_id3 = app_id.clone();
config::list_apps(&client)
.and_then(move |(apps_version, apps)| {
app_state(&c2, &apps, &app_id).map(move |app_state| (app_state, apps, apps_version))
})
.and_then(move |(app_state, apps, apps_version)| match app_state {
AppState::Revoked => Ok((apps, apps_version)),
AppState::Authenticated => Err(AuthError::from("App is not revoked")),
AppState::NotAuthenticated => Err(AuthError::IpcError(IpcError::UnknownApp)),
})
.and_then(move |(apps, apps_version)| {
config::remove_app(&c3, apps, config::next_version(apps_version), &app_id2)
})
.and_then(move |_| app_container::remove(c4, &app_id3).map(move |_res| ()))
.into_box()
}
/// Returns a list of applications that have been revoked.
pub fn list_revoked(client: &AuthClient) -> Box<AuthFuture<Vec<AppExchangeInfo>>> {
let c2 = client.clone();
let c3 = client.clone();
config::list_apps(client)
.map(move |(_, auth_cfg)| (c2.access_container(), auth_cfg))
.and_then(move |(access_container, auth_cfg)| {
c3.list_mdata_entries(access_container.name, access_container.type_tag)
.map_err(From::from)
.map(move |entries| (access_container, entries, auth_cfg))
})
.and_then(move |(access_container, entries, auth_cfg)| {
let mut apps = Vec::new();
let nonce = access_container
.nonce()
.ok_or_else(|| AuthError::from("No nonce on access container's MDataInfo"))?;
for app in auth_cfg.values() {
let key = access_container_enc_key(&app.info.id, &app.keys.enc_key, nonce)?;
// If the app is not in the access container, or if the app entry has
// been deleted (is empty), then it's revoked.
let revoked = entries
.get(&key)
.map(|entry| entry.content.is_empty())
.unwrap_or(true);
if revoked {
apps.push(app.info.clone());
}
}
Ok(apps)
})
.into_box()
}
/// Return the list of applications that are registered with the Authenticator.
pub fn list_registered(client: &AuthClient) -> Box<AuthFuture<Vec<RegisteredApp>>> {
let c2 = client.clone();
let c3 = client.clone();
config::list_apps(client)
.map(move |(_, auth_cfg)| (c2.access_container(), auth_cfg))
.and_then(move |(access_container, auth_cfg)| {
c3.list_mdata_entries(access_container.name, access_container.type_tag)
.map_err(From::from)
.map(move |entries| (access_container, entries, auth_cfg))
})
.and_then(move |(access_container, entries, auth_cfg)| {
let mut apps = Vec::new();
let nonce = access_container
.nonce()
.ok_or_else(|| AuthError::from("No nonce on access container's MDataInfo"))?;
for app in auth_cfg.values() {
let key = access_container_enc_key(&app.info.id, &app.keys.enc_key, nonce)?;
// Empty entry means it has been deleted
let entry = match entries.get(&key) {
Some(entry) if!entry.content.is_empty() => Some(entry),
_ => None,
};
if let Some(entry) = entry {
let plaintext = symmetric_decrypt(&entry.content, &app.keys.enc_key)?;
let app_access = deserialise::<AccessContainerEntry>(&plaintext)?;
let mut containers = HashMap::new();
for (container_name, (_, permission_set)) in app_access {
unwrap!(containers.insert(container_name, permission_set));
}
let registered_app = RegisteredApp {
app_info: app.info.clone(),
containers,
};
apps.push(registered_app);
}
}
Ok(apps)
})
.into_box()
}
/// Returns a list of applications that have access to the specified Mutable Data.
pub fn apps_accessing_mutable_data(
client: &AuthClient,
name: XorName,
type_tag: u64,
) -> Box<AuthFuture<Vec<AppAccess>>> {
let c2 = client.clone();
client
.list_mdata_permissions(name, type_tag)
.map_err(AuthError::from)
.join(config::list_apps(&c2).map(|(_, apps)| {
apps.into_iter()
.map(|(_, app_info)| (app_info.keys.sign_pk, app_info.info))
.collect::<HashMap<_, _>>()
}))
.and_then(move |(permissions, apps)| {
// Map the list of keys retrieved from MD to a list of registered apps (even if
// they're in the Revoked state) and create a new `AppAccess` struct object
let mut app_access_vec: Vec<AppAccess> = Vec::new();
for (user, perm_set) in permissions {
if let Key(public_key) = user {
let app_access = match apps.get(&public_key) {
Some(app_info) => AppAccess {
sign_key: public_key,
permissions: perm_set,
name: Some(app_info.name.clone()),
app_id: Some(app_info.id.clone()),
},
None => {
// If an app is listed in the MD permissions list, but is not
// listed in the registered apps list in Authenticator, then set
// the app_id and app_name fields to None, but provide
// the public sign key and the list of permissions.
AppAccess {
sign_key: public_key,
permissions: perm_set,
name: None,
app_id: None,
}
}
};
app_access_vec.push(app_access);
}
}
Ok(app_access_vec)
})
.into_box()
}
| remove_revoked_app | identifier_name |
apps.rs | // Copyright 2019 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
//! App management functions
use super::{config, AuthFuture};
use crate::app_auth::{app_state, AppState};
use crate::client::AuthClient;
use crate::ffi::apps as ffi;
use crate::ffi::apps::RegisteredApp as FfiRegisteredApp;
use crate::{app_container, AuthError};
use ffi_utils::{vec_into_raw_parts, ReprC};
use futures::future::Future;
use maidsafe_utilities::serialisation::deserialise;
use routing::User::Key;
use routing::XorName;
use safe_core::client::Client;
use safe_core::ipc::req::{containers_from_repr_c, containers_into_vec, ContainerPermissions};
use safe_core::ipc::resp::{AccessContainerEntry, AppAccess};
use safe_core::ipc::{access_container_enc_key, AppExchangeInfo, IpcError};
use safe_core::utils::symmetric_decrypt;
use safe_core::FutureExt;
use std::collections::HashMap;
/// Represents an application that is registered with the Authenticator.
#[derive(Debug)]
pub struct RegisteredApp {
/// Unique application identifier.
pub app_info: AppExchangeInfo,
/// List of containers that this application has access to.
/// Maps from the container name to the set of permissions.
pub containers: HashMap<String, ContainerPermissions>,
}
impl RegisteredApp {
/// Construct FFI wrapper for the native Rust object, consuming self.
pub fn into_repr_c(self) -> Result<FfiRegisteredApp, IpcError> {
let RegisteredApp {
app_info,
containers,
} = self;
let container_permissions_vec = containers_into_vec(containers.into_iter())?;
let (containers_ptr, containers_len, containers_cap) =
vec_into_raw_parts(container_permissions_vec);
Ok(FfiRegisteredApp {
app_info: app_info.into_repr_c()?,
containers: containers_ptr,
containers_len,
containers_cap,
})
}
}
impl ReprC for RegisteredApp {
type C = *const ffi::RegisteredApp;
type Error = IpcError;
unsafe fn clone_from_repr_c(repr_c: Self::C) -> Result<Self, Self::Error> {
Ok(Self {
app_info: AppExchangeInfo::clone_from_repr_c(&(*repr_c).app_info)?,
containers: containers_from_repr_c((*repr_c).containers, (*repr_c).containers_len)?,
})
}
}
/// Removes an application from the list of revoked apps.
pub fn remove_revoked_app(client: &AuthClient, app_id: String) -> Box<AuthFuture<()>> {
let client = client.clone();
let c2 = client.clone();
let c3 = client.clone();
let c4 = client.clone();
let app_id = app_id.clone();
let app_id2 = app_id.clone();
let app_id3 = app_id.clone();
config::list_apps(&client)
.and_then(move |(apps_version, apps)| {
app_state(&c2, &apps, &app_id).map(move |app_state| (app_state, apps, apps_version))
})
.and_then(move |(app_state, apps, apps_version)| match app_state {
AppState::Revoked => Ok((apps, apps_version)),
AppState::Authenticated => Err(AuthError::from("App is not revoked")),
AppState::NotAuthenticated => Err(AuthError::IpcError(IpcError::UnknownApp)),
})
.and_then(move |(apps, apps_version)| {
config::remove_app(&c3, apps, config::next_version(apps_version), &app_id2)
})
.and_then(move |_| app_container::remove(c4, &app_id3).map(move |_res| ()))
.into_box()
}
/// Returns a list of applications that have been revoked.
pub fn list_revoked(client: &AuthClient) -> Box<AuthFuture<Vec<AppExchangeInfo>>> {
let c2 = client.clone();
let c3 = client.clone();
config::list_apps(client)
.map(move |(_, auth_cfg)| (c2.access_container(), auth_cfg))
.and_then(move |(access_container, auth_cfg)| {
c3.list_mdata_entries(access_container.name, access_container.type_tag)
.map_err(From::from)
.map(move |entries| (access_container, entries, auth_cfg))
})
.and_then(move |(access_container, entries, auth_cfg)| {
let mut apps = Vec::new();
let nonce = access_container
.nonce()
.ok_or_else(|| AuthError::from("No nonce on access container's MDataInfo"))?;
for app in auth_cfg.values() {
let key = access_container_enc_key(&app.info.id, &app.keys.enc_key, nonce)?;
| // If the app is not in the access container, or if the app entry has
// been deleted (is empty), then it's revoked.
let revoked = entries
.get(&key)
.map(|entry| entry.content.is_empty())
.unwrap_or(true);
if revoked {
apps.push(app.info.clone());
}
}
Ok(apps)
})
.into_box()
}
/// Return the list of applications that are registered with the Authenticator.
pub fn list_registered(client: &AuthClient) -> Box<AuthFuture<Vec<RegisteredApp>>> {
let c2 = client.clone();
let c3 = client.clone();
config::list_apps(client)
.map(move |(_, auth_cfg)| (c2.access_container(), auth_cfg))
.and_then(move |(access_container, auth_cfg)| {
c3.list_mdata_entries(access_container.name, access_container.type_tag)
.map_err(From::from)
.map(move |entries| (access_container, entries, auth_cfg))
})
.and_then(move |(access_container, entries, auth_cfg)| {
let mut apps = Vec::new();
let nonce = access_container
.nonce()
.ok_or_else(|| AuthError::from("No nonce on access container's MDataInfo"))?;
for app in auth_cfg.values() {
let key = access_container_enc_key(&app.info.id, &app.keys.enc_key, nonce)?;
// Empty entry means it has been deleted
let entry = match entries.get(&key) {
Some(entry) if!entry.content.is_empty() => Some(entry),
_ => None,
};
if let Some(entry) = entry {
let plaintext = symmetric_decrypt(&entry.content, &app.keys.enc_key)?;
let app_access = deserialise::<AccessContainerEntry>(&plaintext)?;
let mut containers = HashMap::new();
for (container_name, (_, permission_set)) in app_access {
unwrap!(containers.insert(container_name, permission_set));
}
let registered_app = RegisteredApp {
app_info: app.info.clone(),
containers,
};
apps.push(registered_app);
}
}
Ok(apps)
})
.into_box()
}
/// Returns a list of applications that have access to the specified Mutable Data.
pub fn apps_accessing_mutable_data(
client: &AuthClient,
name: XorName,
type_tag: u64,
) -> Box<AuthFuture<Vec<AppAccess>>> {
let c2 = client.clone();
client
.list_mdata_permissions(name, type_tag)
.map_err(AuthError::from)
.join(config::list_apps(&c2).map(|(_, apps)| {
apps.into_iter()
.map(|(_, app_info)| (app_info.keys.sign_pk, app_info.info))
.collect::<HashMap<_, _>>()
}))
.and_then(move |(permissions, apps)| {
// Map the list of keys retrieved from MD to a list of registered apps (even if
// they're in the Revoked state) and create a new `AppAccess` struct object
let mut app_access_vec: Vec<AppAccess> = Vec::new();
for (user, perm_set) in permissions {
if let Key(public_key) = user {
let app_access = match apps.get(&public_key) {
Some(app_info) => AppAccess {
sign_key: public_key,
permissions: perm_set,
name: Some(app_info.name.clone()),
app_id: Some(app_info.id.clone()),
},
None => {
// If an app is listed in the MD permissions list, but is not
// listed in the registered apps list in Authenticator, then set
// the app_id and app_name fields to None, but provide
// the public sign key and the list of permissions.
AppAccess {
sign_key: public_key,
permissions: perm_set,
name: None,
app_id: None,
}
}
};
app_access_vec.push(app_access);
}
}
Ok(app_access_vec)
})
.into_box()
} | random_line_split |
|
sph_sha_test.rs | extern crate sphlib;
extern crate libc;
use sphlib::{sph_sha, utils};
#[test]
fn will_be_sha0_hash() {
let dest = sph_sha::sha0_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("f96cea198ad1dd5617ac084a3d92c6107708c0ef", actual.to_string());
}
#[test]
fn will_be_sha1_hash() {
let dest = sph_sha::sha1_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("da39a3ee5e6b4b0d3255bfef95601890afd80709", actual.to_string());
}
#[test]
fn will_be_sha224_hash() {
let dest = sph_sha::sha224_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", actual.to_string());
}
#[test]
fn will_be_sha256_hash() {
let dest = sph_sha::sha256_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", actual.to_string());
}
#[test]
fn will_be_sha384_hash() |
#[test]
fn will_be_sha512_hash() {
let dest = sph_sha::sha512_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e", actual.to_string());
}
| {
let dest = sph_sha::sha384_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b", actual.to_string());
} | identifier_body |
sph_sha_test.rs | extern crate sphlib;
extern crate libc;
use sphlib::{sph_sha, utils};
#[test]
fn will_be_sha0_hash() {
let dest = sph_sha::sha0_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("f96cea198ad1dd5617ac084a3d92c6107708c0ef", actual.to_string()); | let dest = sph_sha::sha1_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("da39a3ee5e6b4b0d3255bfef95601890afd80709", actual.to_string());
}
#[test]
fn will_be_sha224_hash() {
let dest = sph_sha::sha224_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", actual.to_string());
}
#[test]
fn will_be_sha256_hash() {
let dest = sph_sha::sha256_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", actual.to_string());
}
#[test]
fn will_be_sha384_hash() {
let dest = sph_sha::sha384_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b", actual.to_string());
}
#[test]
fn will_be_sha512_hash() {
let dest = sph_sha::sha512_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e", actual.to_string());
} | }
#[test]
fn will_be_sha1_hash() { | random_line_split |
sph_sha_test.rs | extern crate sphlib;
extern crate libc;
use sphlib::{sph_sha, utils};
#[test]
fn will_be_sha0_hash() {
let dest = sph_sha::sha0_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("f96cea198ad1dd5617ac084a3d92c6107708c0ef", actual.to_string());
}
#[test]
fn will_be_sha1_hash() {
let dest = sph_sha::sha1_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("da39a3ee5e6b4b0d3255bfef95601890afd80709", actual.to_string());
}
#[test]
fn | () {
let dest = sph_sha::sha224_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", actual.to_string());
}
#[test]
fn will_be_sha256_hash() {
let dest = sph_sha::sha256_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", actual.to_string());
}
#[test]
fn will_be_sha384_hash() {
let dest = sph_sha::sha384_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b", actual.to_string());
}
#[test]
fn will_be_sha512_hash() {
let dest = sph_sha::sha512_init_load_close("");
let actual = utils::to_hex_hash(&dest);
assert_eq!("cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e", actual.to_string());
}
| will_be_sha224_hash | identifier_name |
udiv128.rs | /// Multiply unsigned 128 bit integers, return upper 128 bits of the result
#[inline]
fn u128_mulhi(x: u128, y: u128) -> u128 {
let x_lo = x as u64;
let x_hi = (x >> 64) as u64;
let y_lo = y as u64;
let y_hi = (y >> 64) as u64;
// handle possibility of overflow
let carry = (x_lo as u128 * y_lo as u128) >> 64;
let m = x_lo as u128 * y_hi as u128 + carry;
let high1 = m >> 64;
let m_lo = m as u64;
let high2 = x_hi as u128 * y_lo as u128 + m_lo as u128 >> 64;
x_hi as u128 * y_hi as u128 + high1 + high2
}
/// Divide `n` by 1e19 and return quotient and remainder
///
/// Integer division algorithm is based on the following paper:
///
/// T. Granlund and P. Montgomery, “Division by Invariant Integers Using Multiplication”
/// in Proc. of the SIGPLAN94 Conference on Programming Language Design and
/// Implementation, 1994, pp. 61–72
///
#[inline]
pub fn udivmod_1e19(n: u128) -> (u128, u64) {
| let d = 10_000_000_000_000_000_000_u64; // 10^19
let quot = if n < 1 << 83 {
((n >> 19) as u64 / (d >> 19)) as u128
} else {
let factor =
(8507059173023461586_u64 as u128) << 64 | 10779635027931437427 as u128;
u128_mulhi(n, factor) >> 62
};
let rem = (n - quot * d as u128) as u64;
debug_assert_eq!(quot, n / d as u128);
debug_assert_eq!(rem as u128, n % d as u128);
(quot, rem)
}
| identifier_body |
|
udiv128.rs | /// Multiply unsigned 128 bit integers, return upper 128 bits of the result
#[inline]
fn u128_mulhi(x: u128, y: u128) -> u128 {
let x_lo = x as u64;
let x_hi = (x >> 64) as u64;
let y_lo = y as u64;
let y_hi = (y >> 64) as u64;
// handle possibility of overflow
let carry = (x_lo as u128 * y_lo as u128) >> 64;
let m = x_lo as u128 * y_hi as u128 + carry;
let high1 = m >> 64;
let m_lo = m as u64; | }
/// Divide `n` by 1e19 and return quotient and remainder
///
/// Integer division algorithm is based on the following paper:
///
/// T. Granlund and P. Montgomery, “Division by Invariant Integers Using Multiplication”
/// in Proc. of the SIGPLAN94 Conference on Programming Language Design and
/// Implementation, 1994, pp. 61–72
///
#[inline]
pub fn udivmod_1e19(n: u128) -> (u128, u64) {
let d = 10_000_000_000_000_000_000_u64; // 10^19
let quot = if n < 1 << 83 {
((n >> 19) as u64 / (d >> 19)) as u128
} else {
let factor =
(8507059173023461586_u64 as u128) << 64 | 10779635027931437427 as u128;
u128_mulhi(n, factor) >> 62
};
let rem = (n - quot * d as u128) as u64;
debug_assert_eq!(quot, n / d as u128);
debug_assert_eq!(rem as u128, n % d as u128);
(quot, rem)
} | let high2 = x_hi as u128 * y_lo as u128 + m_lo as u128 >> 64;
x_hi as u128 * y_hi as u128 + high1 + high2 | random_line_split |
udiv128.rs | /// Multiply unsigned 128 bit integers, return upper 128 bits of the result
#[inline]
fn u128_mulhi(x: u128, y: u128) -> u128 {
let x_lo = x as u64;
let x_hi = (x >> 64) as u64;
let y_lo = y as u64;
let y_hi = (y >> 64) as u64;
// handle possibility of overflow
let carry = (x_lo as u128 * y_lo as u128) >> 64;
let m = x_lo as u128 * y_hi as u128 + carry;
let high1 = m >> 64;
let m_lo = m as u64;
let high2 = x_hi as u128 * y_lo as u128 + m_lo as u128 >> 64;
x_hi as u128 * y_hi as u128 + high1 + high2
}
/// Divide `n` by 1e19 and return quotient and remainder
///
/// Integer division algorithm is based on the following paper:
///
/// T. Granlund and P. Montgomery, “Division by Invariant Integers Using Multiplication”
/// in Proc. of the SIGPLAN94 Conference on Programming Language Design and
/// Implementation, 1994, pp. 61–72
///
#[inline]
pub fn udivmod_1e19(n: u128) -> (u128, u64) {
let d = 10_000_000_000_000_000_000_u64; // 10^19
let quot = if n < 1 << 83 {
| {
let factor =
(8507059173023461586_u64 as u128) << 64 | 10779635027931437427 as u128;
u128_mulhi(n, factor) >> 62
};
let rem = (n - quot * d as u128) as u64;
debug_assert_eq!(quot, n / d as u128);
debug_assert_eq!(rem as u128, n % d as u128);
(quot, rem)
}
| ((n >> 19) as u64 / (d >> 19)) as u128
} else | conditional_block |
udiv128.rs | /// Multiply unsigned 128 bit integers, return upper 128 bits of the result
#[inline]
fn u128_mulhi(x: u128, y: u128) -> u128 {
let x_lo = x as u64;
let x_hi = (x >> 64) as u64;
let y_lo = y as u64;
let y_hi = (y >> 64) as u64;
// handle possibility of overflow
let carry = (x_lo as u128 * y_lo as u128) >> 64;
let m = x_lo as u128 * y_hi as u128 + carry;
let high1 = m >> 64;
let m_lo = m as u64;
let high2 = x_hi as u128 * y_lo as u128 + m_lo as u128 >> 64;
x_hi as u128 * y_hi as u128 + high1 + high2
}
/// Divide `n` by 1e19 and return quotient and remainder
///
/// Integer division algorithm is based on the following paper:
///
/// T. Granlund and P. Montgomery, “Division by Invariant Integers Using Multiplication”
/// in Proc. of the SIGPLAN94 Conference on Programming Language Design and
/// Implementation, 1994, pp. 61–72
///
#[inline]
pub fn udivmo | 28) -> (u128, u64) {
let d = 10_000_000_000_000_000_000_u64; // 10^19
let quot = if n < 1 << 83 {
((n >> 19) as u64 / (d >> 19)) as u128
} else {
let factor =
(8507059173023461586_u64 as u128) << 64 | 10779635027931437427 as u128;
u128_mulhi(n, factor) >> 62
};
let rem = (n - quot * d as u128) as u64;
debug_assert_eq!(quot, n / d as u128);
debug_assert_eq!(rem as u128, n % d as u128);
(quot, rem)
}
| d_1e19(n: u1 | identifier_name |
simple-tuple.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:print/d'simple-tuple::NO_PADDING_8'
// gdb-check:$1 = {-50, 50}
// gdb-command:print'simple-tuple::NO_PADDING_16'
// gdb-check:$2 = {-1, 2, 3}
// gdb-command:print'simple-tuple::NO_PADDING_32'
// gdb-check:$3 = {4, 5, 6}
// gdb-command:print'simple-tuple::NO_PADDING_64'
// gdb-check:$4 = {7, 8, 9}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_1'
// gdb-check:$5 = {10, 11}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_2'
// gdb-check:$6 = {12, 13, 14, 15}
// gdb-command:print'simple-tuple::PADDING_AT_END'
// gdb-check:$7 = {16, 17}
// gdb-command:run
// gdb-command:print/d noPadding8
// gdb-check:$8 = {-100, 100}
// gdb-command:print noPadding16
// gdb-check:$9 = {0, 1, 2}
// gdb-command:print noPadding32
// gdb-check:$10 = {3, 4.5, 5}
// gdb-command:print noPadding64
// gdb-check:$11 = {6, 7.5, 8}
// gdb-command:print internalPadding1
// gdb-check:$12 = {9, 10}
// gdb-command:print internalPadding2
// gdb-check:$13 = {11, 12, 13, 14}
// gdb-command:print paddingAtEnd
// gdb-check:$14 = {15, 16}
// gdb-command:print/d'simple-tuple::NO_PADDING_8'
// gdb-check:$15 = {-127, 127}
// gdb-command:print'simple-tuple::NO_PADDING_16'
// gdb-check:$16 = {-10, 10, 9}
// gdb-command:print'simple-tuple::NO_PADDING_32'
// gdb-check:$17 = {14, 15, 16}
// gdb-command:print'simple-tuple::NO_PADDING_64'
// gdb-check:$18 = {17, 18, 19}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_1'
// gdb-check:$19 = {110, 111}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_2'
// gdb-check:$20 = {112, 113, 114, 115}
// gdb-command:print'simple-tuple::PADDING_AT_END'
// gdb-check:$21 = {116, 117}
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print/d noPadding8
// lldb-check:[...]$0 = (-100, 100)
// lldb-command:print noPadding16
// lldb-check:[...]$1 = (0, 1, 2)
// lldb-command:print noPadding32
// lldb-check:[...]$2 = (3, 4.5, 5)
// lldb-command:print noPadding64
// lldb-check:[...]$3 = (6, 7.5, 8)
// lldb-command:print internalPadding1
// lldb-check:[...]$4 = (9, 10)
// lldb-command:print internalPadding2
// lldb-check:[...]$5 = (11, 12, 13, 14)
// lldb-command:print paddingAtEnd
// lldb-check:[...]$6 = (15, 16)
#![allow(unused_variables)]
#![allow(dead_code)]
static mut NO_PADDING_8: (i8, u8) = (-50, 50);
static mut NO_PADDING_16: (i16, i16, u16) = (-1, 2, 3);
static mut NO_PADDING_32: (i32, f32, u32) = (4, 5.0, 6);
static mut NO_PADDING_64: (i64, f64, u64) = (7, 8.0, 9);
static mut INTERNAL_PADDING_1: (i16, i32) = (10, 11);
static mut INTERNAL_PADDING_2: (i16, i32, u32, u64) = (12, 13, 14, 15);
static mut PADDING_AT_END: (i32, i16) = (16, 17);
fn main() {
let noPadding8: (i8, u8) = (-100, 100);
let noPadding16: (i16, i16, u16) = (0, 1, 2);
let noPadding32: (i32, f32, u32) = (3, 4.5, 5);
let noPadding64: (i64, f64, u64) = (6, 7.5, 8);
let internalPadding1: (i16, i32) = (9, 10);
let internalPadding2: (i16, i32, u32, u64) = (11, 12, 13, 14);
let paddingAtEnd: (i32, i16) = (15, 16);
unsafe {
NO_PADDING_8 = (-127, 127);
NO_PADDING_16 = (-10, 10, 9);
NO_PADDING_32 = (14, 15.0, 16);
NO_PADDING_64 = (17, 18.0, 19);
INTERNAL_PADDING_1 = (110, 111);
INTERNAL_PADDING_2 = (112, 113, 114, 115);
PADDING_AT_END = (116, 117);
}
zzz(); // #break
}
fn | () {()}
| zzz | identifier_name |
simple-tuple.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:print/d'simple-tuple::NO_PADDING_8'
// gdb-check:$1 = {-50, 50}
// gdb-command:print'simple-tuple::NO_PADDING_16'
// gdb-check:$2 = {-1, 2, 3}
// gdb-command:print'simple-tuple::NO_PADDING_32'
// gdb-check:$3 = {4, 5, 6}
// gdb-command:print'simple-tuple::NO_PADDING_64'
// gdb-check:$4 = {7, 8, 9}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_1'
// gdb-check:$5 = {10, 11}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_2'
// gdb-check:$6 = {12, 13, 14, 15}
// gdb-command:print'simple-tuple::PADDING_AT_END'
// gdb-check:$7 = {16, 17}
// gdb-command:run
// gdb-command:print/d noPadding8
// gdb-check:$8 = {-100, 100}
// gdb-command:print noPadding16
// gdb-check:$9 = {0, 1, 2}
// gdb-command:print noPadding32
// gdb-check:$10 = {3, 4.5, 5}
// gdb-command:print noPadding64
// gdb-check:$11 = {6, 7.5, 8}
// gdb-command:print internalPadding1
// gdb-check:$12 = {9, 10}
// gdb-command:print internalPadding2
// gdb-check:$13 = {11, 12, 13, 14}
// gdb-command:print paddingAtEnd
// gdb-check:$14 = {15, 16}
// gdb-command:print/d'simple-tuple::NO_PADDING_8'
// gdb-check:$15 = {-127, 127}
// gdb-command:print'simple-tuple::NO_PADDING_16'
// gdb-check:$16 = {-10, 10, 9}
// gdb-command:print'simple-tuple::NO_PADDING_32'
// gdb-check:$17 = {14, 15, 16}
// gdb-command:print'simple-tuple::NO_PADDING_64'
// gdb-check:$18 = {17, 18, 19}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_1'
// gdb-check:$19 = {110, 111}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_2'
// gdb-check:$20 = {112, 113, 114, 115}
// gdb-command:print'simple-tuple::PADDING_AT_END'
// gdb-check:$21 = {116, 117}
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print/d noPadding8
// lldb-check:[...]$0 = (-100, 100)
// lldb-command:print noPadding16
// lldb-check:[...]$1 = (0, 1, 2)
// lldb-command:print noPadding32
// lldb-check:[...]$2 = (3, 4.5, 5)
// lldb-command:print noPadding64
// lldb-check:[...]$3 = (6, 7.5, 8)
// lldb-command:print internalPadding1
// lldb-check:[...]$4 = (9, 10)
// lldb-command:print internalPadding2
// lldb-check:[...]$5 = (11, 12, 13, 14)
// lldb-command:print paddingAtEnd
// lldb-check:[...]$6 = (15, 16)
#![allow(unused_variables)]
#![allow(dead_code)]
static mut NO_PADDING_8: (i8, u8) = (-50, 50);
static mut NO_PADDING_16: (i16, i16, u16) = (-1, 2, 3);
static mut NO_PADDING_32: (i32, f32, u32) = (4, 5.0, 6);
static mut NO_PADDING_64: (i64, f64, u64) = (7, 8.0, 9);
static mut INTERNAL_PADDING_1: (i16, i32) = (10, 11);
static mut INTERNAL_PADDING_2: (i16, i32, u32, u64) = (12, 13, 14, 15);
static mut PADDING_AT_END: (i32, i16) = (16, 17);
fn main() |
PADDING_AT_END = (116, 117);
}
zzz(); // #break
}
fn zzz() {()}
| {
let noPadding8: (i8, u8) = (-100, 100);
let noPadding16: (i16, i16, u16) = (0, 1, 2);
let noPadding32: (i32, f32, u32) = (3, 4.5, 5);
let noPadding64: (i64, f64, u64) = (6, 7.5, 8);
let internalPadding1: (i16, i32) = (9, 10);
let internalPadding2: (i16, i32, u32, u64) = (11, 12, 13, 14);
let paddingAtEnd: (i32, i16) = (15, 16);
unsafe {
NO_PADDING_8 = (-127, 127);
NO_PADDING_16 = (-10, 10, 9);
NO_PADDING_32 = (14, 15.0, 16);
NO_PADDING_64 = (17, 18.0, 19);
INTERNAL_PADDING_1 = (110, 111);
INTERNAL_PADDING_2 = (112, 113, 114, 115); | identifier_body |
simple-tuple.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:print/d'simple-tuple::NO_PADDING_8'
// gdb-check:$1 = {-50, 50}
// gdb-command:print'simple-tuple::NO_PADDING_16'
// gdb-check:$2 = {-1, 2, 3}
// gdb-command:print'simple-tuple::NO_PADDING_32'
// gdb-check:$3 = {4, 5, 6}
// gdb-command:print'simple-tuple::NO_PADDING_64'
// gdb-check:$4 = {7, 8, 9} | // gdb-command:print'simple-tuple::INTERNAL_PADDING_2'
// gdb-check:$6 = {12, 13, 14, 15}
// gdb-command:print'simple-tuple::PADDING_AT_END'
// gdb-check:$7 = {16, 17}
// gdb-command:run
// gdb-command:print/d noPadding8
// gdb-check:$8 = {-100, 100}
// gdb-command:print noPadding16
// gdb-check:$9 = {0, 1, 2}
// gdb-command:print noPadding32
// gdb-check:$10 = {3, 4.5, 5}
// gdb-command:print noPadding64
// gdb-check:$11 = {6, 7.5, 8}
// gdb-command:print internalPadding1
// gdb-check:$12 = {9, 10}
// gdb-command:print internalPadding2
// gdb-check:$13 = {11, 12, 13, 14}
// gdb-command:print paddingAtEnd
// gdb-check:$14 = {15, 16}
// gdb-command:print/d'simple-tuple::NO_PADDING_8'
// gdb-check:$15 = {-127, 127}
// gdb-command:print'simple-tuple::NO_PADDING_16'
// gdb-check:$16 = {-10, 10, 9}
// gdb-command:print'simple-tuple::NO_PADDING_32'
// gdb-check:$17 = {14, 15, 16}
// gdb-command:print'simple-tuple::NO_PADDING_64'
// gdb-check:$18 = {17, 18, 19}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_1'
// gdb-check:$19 = {110, 111}
// gdb-command:print'simple-tuple::INTERNAL_PADDING_2'
// gdb-check:$20 = {112, 113, 114, 115}
// gdb-command:print'simple-tuple::PADDING_AT_END'
// gdb-check:$21 = {116, 117}
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print/d noPadding8
// lldb-check:[...]$0 = (-100, 100)
// lldb-command:print noPadding16
// lldb-check:[...]$1 = (0, 1, 2)
// lldb-command:print noPadding32
// lldb-check:[...]$2 = (3, 4.5, 5)
// lldb-command:print noPadding64
// lldb-check:[...]$3 = (6, 7.5, 8)
// lldb-command:print internalPadding1
// lldb-check:[...]$4 = (9, 10)
// lldb-command:print internalPadding2
// lldb-check:[...]$5 = (11, 12, 13, 14)
// lldb-command:print paddingAtEnd
// lldb-check:[...]$6 = (15, 16)
#![allow(unused_variables)]
#![allow(dead_code)]
static mut NO_PADDING_8: (i8, u8) = (-50, 50);
static mut NO_PADDING_16: (i16, i16, u16) = (-1, 2, 3);
static mut NO_PADDING_32: (i32, f32, u32) = (4, 5.0, 6);
static mut NO_PADDING_64: (i64, f64, u64) = (7, 8.0, 9);
static mut INTERNAL_PADDING_1: (i16, i32) = (10, 11);
static mut INTERNAL_PADDING_2: (i16, i32, u32, u64) = (12, 13, 14, 15);
static mut PADDING_AT_END: (i32, i16) = (16, 17);
fn main() {
let noPadding8: (i8, u8) = (-100, 100);
let noPadding16: (i16, i16, u16) = (0, 1, 2);
let noPadding32: (i32, f32, u32) = (3, 4.5, 5);
let noPadding64: (i64, f64, u64) = (6, 7.5, 8);
let internalPadding1: (i16, i32) = (9, 10);
let internalPadding2: (i16, i32, u32, u64) = (11, 12, 13, 14);
let paddingAtEnd: (i32, i16) = (15, 16);
unsafe {
NO_PADDING_8 = (-127, 127);
NO_PADDING_16 = (-10, 10, 9);
NO_PADDING_32 = (14, 15.0, 16);
NO_PADDING_64 = (17, 18.0, 19);
INTERNAL_PADDING_1 = (110, 111);
INTERNAL_PADDING_2 = (112, 113, 114, 115);
PADDING_AT_END = (116, 117);
}
zzz(); // #break
}
fn zzz() {()} |
// gdb-command:print 'simple-tuple::INTERNAL_PADDING_1'
// gdb-check:$5 = {10, 11} | random_line_split |
source_util.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap::{Pos, Span};
use codemap;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use parse::token;
use parse;
use print::pprust;
use ptr::P;
use util::small_vector::SmallVector;
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::rc::Rc;
// These macros all relate to the file system; they either return
// the column/row/filename of the expression, or they include
// a given file into the current one.
/// line!(): expands to the current line number
pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "line!");
let topmost = cx.expansion_cause();
let loc = cx.codemap().lookup_char_pos(topmost.lo);
base::MacEager::expr(cx.expr_u32(topmost, loc.line as u32))
}
/* column!(): expands to the current column number */
pub fn expand_column(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "column!");
let topmost = cx.expansion_cause();
let loc = cx.codemap().lookup_char_pos(topmost.lo);
base::MacEager::expr(cx.expr_u32(topmost, loc.col.to_usize() as u32))
}
/// file!(): expands to the current filename */
/// The filemap (`loc.file`) contains a bunch more information we could spit
/// out if we wanted.
pub fn | (cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "file!");
let topmost = cx.expansion_cause();
let loc = cx.codemap().lookup_char_pos(topmost.lo);
let filename = token::intern_and_get_ident(&loc.file.name);
base::MacEager::expr(cx.expr_str(topmost, filename))
}
pub fn expand_stringify(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let s = pprust::tts_to_string(tts);
base::MacEager::expr(cx.expr_str(sp,
token::intern_and_get_ident(&s[..])))
}
pub fn expand_mod(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "module_path!");
let string = cx.mod_path()
.iter()
.map(|x| token::get_ident(*x).to_string())
.collect::<Vec<String>>()
.join("::");
base::MacEager::expr(cx.expr_str(
sp,
token::intern_and_get_ident(&string[..])))
}
/// include! : parse the given file as an expr
/// This is generally a bad idea because it's going to behave
/// unhygienically.
pub fn expand_include<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
let file = match get_single_str_from_tts(cx, sp, tts, "include!") {
Some(f) => f,
None => return DummyResult::expr(sp),
};
// The file will be added to the code map by the parser
let p =
parse::new_sub_parser_from_file(cx.parse_sess(),
cx.cfg(),
&res_rel_file(cx,
sp,
Path::new(&file)),
true,
None,
sp);
struct ExpandResult<'a> {
p: parse::parser::Parser<'a>,
}
impl<'a> base::MacResult for ExpandResult<'a> {
fn make_expr(mut self: Box<ExpandResult<'a>>) -> Option<P<ast::Expr>> {
Some(self.p.parse_expr())
}
fn make_items(mut self: Box<ExpandResult<'a>>)
-> Option<SmallVector<P<ast::Item>>> {
let mut ret = SmallVector::zero();
while self.p.token!= token::Eof {
match self.p.parse_item() {
Some(item) => ret.push(item),
None => panic!(self.p.span_fatal(
self.p.span,
&format!("expected item, found `{}`",
self.p.this_token_to_string())
))
}
}
Some(ret)
}
}
Box::new(ExpandResult { p: p })
}
// include_str! : read the given file, insert it as a literal string expr
pub fn expand_include_str(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let file = match get_single_str_from_tts(cx, sp, tts, "include_str!") {
Some(f) => f,
None => return DummyResult::expr(sp)
};
let file = res_rel_file(cx, sp, Path::new(&file));
let mut bytes = Vec::new();
match File::open(&file).and_then(|mut f| f.read_to_end(&mut bytes)) {
Ok(..) => {}
Err(e) => {
cx.span_err(sp,
&format!("couldn't read {}: {}",
file.display(),
e));
return DummyResult::expr(sp);
}
};
match String::from_utf8(bytes) {
Ok(src) => {
// Add this input file to the code map to make it available as
// dependency information
let filename = format!("{}", file.display());
let interned = token::intern_and_get_ident(&src[..]);
cx.codemap().new_filemap(filename, src);
base::MacEager::expr(cx.expr_str(sp, interned))
}
Err(_) => {
cx.span_err(sp,
&format!("{} wasn't a utf-8 file",
file.display()));
return DummyResult::expr(sp);
}
}
}
pub fn expand_include_bytes(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let file = match get_single_str_from_tts(cx, sp, tts, "include_bytes!") {
Some(f) => f,
None => return DummyResult::expr(sp)
};
let file = res_rel_file(cx, sp, Path::new(&file));
let mut bytes = Vec::new();
match File::open(&file).and_then(|mut f| f.read_to_end(&mut bytes)) {
Err(e) => {
cx.span_err(sp,
&format!("couldn't read {}: {}", file.display(), e));
return DummyResult::expr(sp);
}
Ok(..) => {
// Add this input file to the code map to make it available as
// dependency information, but don't enter it's contents
let filename = format!("{}", file.display());
cx.codemap().new_filemap(filename, "".to_string());
base::MacEager::expr(cx.expr_lit(sp, ast::LitBinary(Rc::new(bytes))))
}
}
}
// resolve a file-system path to an absolute file-system path (if it
// isn't already)
fn res_rel_file(cx: &mut ExtCtxt, sp: codemap::Span, arg: &Path) -> PathBuf {
// NB: relative paths are resolved relative to the compilation unit
if!arg.is_absolute() {
let mut cu = PathBuf::from(&cx.codemap().span_to_filename(sp));
cu.pop();
cu.push(arg);
cu
} else {
arg.to_path_buf()
}
}
| expand_file | identifier_name |
source_util.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap::{Pos, Span};
use codemap;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use parse::token;
use parse;
use print::pprust;
use ptr::P;
use util::small_vector::SmallVector;
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::rc::Rc;
// These macros all relate to the file system; they either return
// the column/row/filename of the expression, or they include
// a given file into the current one.
/// line!(): expands to the current line number
pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "line!");
let topmost = cx.expansion_cause();
let loc = cx.codemap().lookup_char_pos(topmost.lo);
base::MacEager::expr(cx.expr_u32(topmost, loc.line as u32))
}
/* column!(): expands to the current column number */
pub fn expand_column(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "column!");
let topmost = cx.expansion_cause();
let loc = cx.codemap().lookup_char_pos(topmost.lo);
base::MacEager::expr(cx.expr_u32(topmost, loc.col.to_usize() as u32))
}
/// file!(): expands to the current filename */
/// The filemap (`loc.file`) contains a bunch more information we could spit
/// out if we wanted.
pub fn expand_file(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "file!");
let topmost = cx.expansion_cause();
let loc = cx.codemap().lookup_char_pos(topmost.lo);
let filename = token::intern_and_get_ident(&loc.file.name);
base::MacEager::expr(cx.expr_str(topmost, filename))
}
pub fn expand_stringify(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let s = pprust::tts_to_string(tts);
base::MacEager::expr(cx.expr_str(sp,
token::intern_and_get_ident(&s[..])))
}
pub fn expand_mod(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "module_path!");
let string = cx.mod_path()
.iter()
.map(|x| token::get_ident(*x).to_string())
.collect::<Vec<String>>()
.join("::");
base::MacEager::expr(cx.expr_str(
sp,
token::intern_and_get_ident(&string[..])))
}
/// include! : parse the given file as an expr
/// This is generally a bad idea because it's going to behave
/// unhygienically.
pub fn expand_include<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
let file = match get_single_str_from_tts(cx, sp, tts, "include!") {
Some(f) => f,
None => return DummyResult::expr(sp),
};
// The file will be added to the code map by the parser
let p =
parse::new_sub_parser_from_file(cx.parse_sess(),
cx.cfg(),
&res_rel_file(cx,
sp,
Path::new(&file)),
true,
None,
sp);
struct ExpandResult<'a> {
p: parse::parser::Parser<'a>,
}
impl<'a> base::MacResult for ExpandResult<'a> {
fn make_expr(mut self: Box<ExpandResult<'a>>) -> Option<P<ast::Expr>> {
Some(self.p.parse_expr())
}
fn make_items(mut self: Box<ExpandResult<'a>>)
-> Option<SmallVector<P<ast::Item>>> {
let mut ret = SmallVector::zero();
while self.p.token!= token::Eof {
match self.p.parse_item() {
Some(item) => ret.push(item),
None => panic!(self.p.span_fatal(
self.p.span,
&format!("expected item, found `{}`",
self.p.this_token_to_string())
))
}
}
Some(ret)
}
}
Box::new(ExpandResult { p: p })
}
// include_str! : read the given file, insert it as a literal string expr
pub fn expand_include_str(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let file = match get_single_str_from_tts(cx, sp, tts, "include_str!") {
Some(f) => f,
None => return DummyResult::expr(sp)
};
let file = res_rel_file(cx, sp, Path::new(&file));
let mut bytes = Vec::new();
match File::open(&file).and_then(|mut f| f.read_to_end(&mut bytes)) {
Ok(..) => {}
Err(e) => {
cx.span_err(sp,
&format!("couldn't read {}: {}",
file.display(),
e));
return DummyResult::expr(sp);
}
};
match String::from_utf8(bytes) {
Ok(src) => |
Err(_) => {
cx.span_err(sp,
&format!("{} wasn't a utf-8 file",
file.display()));
return DummyResult::expr(sp);
}
}
}
pub fn expand_include_bytes(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let file = match get_single_str_from_tts(cx, sp, tts, "include_bytes!") {
Some(f) => f,
None => return DummyResult::expr(sp)
};
let file = res_rel_file(cx, sp, Path::new(&file));
let mut bytes = Vec::new();
match File::open(&file).and_then(|mut f| f.read_to_end(&mut bytes)) {
Err(e) => {
cx.span_err(sp,
&format!("couldn't read {}: {}", file.display(), e));
return DummyResult::expr(sp);
}
Ok(..) => {
// Add this input file to the code map to make it available as
// dependency information, but don't enter it's contents
let filename = format!("{}", file.display());
cx.codemap().new_filemap(filename, "".to_string());
base::MacEager::expr(cx.expr_lit(sp, ast::LitBinary(Rc::new(bytes))))
}
}
}
// resolve a file-system path to an absolute file-system path (if it
// isn't already)
fn res_rel_file(cx: &mut ExtCtxt, sp: codemap::Span, arg: &Path) -> PathBuf {
// NB: relative paths are resolved relative to the compilation unit
if!arg.is_absolute() {
let mut cu = PathBuf::from(&cx.codemap().span_to_filename(sp));
cu.pop();
cu.push(arg);
cu
} else {
arg.to_path_buf()
}
}
| {
// Add this input file to the code map to make it available as
// dependency information
let filename = format!("{}", file.display());
let interned = token::intern_and_get_ident(&src[..]);
cx.codemap().new_filemap(filename, src);
base::MacEager::expr(cx.expr_str(sp, interned))
} | conditional_block |
source_util.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap::{Pos, Span};
use codemap;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use parse::token;
use parse;
use print::pprust;
use ptr::P;
use util::small_vector::SmallVector;
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::rc::Rc;
// These macros all relate to the file system; they either return
// the column/row/filename of the expression, or they include
// a given file into the current one.
/// line!(): expands to the current line number
pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "line!");
let topmost = cx.expansion_cause();
let loc = cx.codemap().lookup_char_pos(topmost.lo);
base::MacEager::expr(cx.expr_u32(topmost, loc.line as u32))
}
/* column!(): expands to the current column number */
pub fn expand_column(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "column!");
let topmost = cx.expansion_cause();
let loc = cx.codemap().lookup_char_pos(topmost.lo);
base::MacEager::expr(cx.expr_u32(topmost, loc.col.to_usize() as u32))
}
/// file!(): expands to the current filename */
/// The filemap (`loc.file`) contains a bunch more information we could spit
/// out if we wanted.
pub fn expand_file(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "file!");
let topmost = cx.expansion_cause();
let loc = cx.codemap().lookup_char_pos(topmost.lo);
let filename = token::intern_and_get_ident(&loc.file.name);
base::MacEager::expr(cx.expr_str(topmost, filename))
}
pub fn expand_stringify(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let s = pprust::tts_to_string(tts);
base::MacEager::expr(cx.expr_str(sp,
token::intern_and_get_ident(&s[..])))
}
pub fn expand_mod(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "module_path!");
let string = cx.mod_path()
.iter()
.map(|x| token::get_ident(*x).to_string())
.collect::<Vec<String>>()
.join("::");
base::MacEager::expr(cx.expr_str(
sp,
token::intern_and_get_ident(&string[..])))
}
/// include! : parse the given file as an expr
/// This is generally a bad idea because it's going to behave
/// unhygienically.
pub fn expand_include<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
let file = match get_single_str_from_tts(cx, sp, tts, "include!") {
Some(f) => f,
None => return DummyResult::expr(sp),
};
// The file will be added to the code map by the parser
let p =
parse::new_sub_parser_from_file(cx.parse_sess(),
cx.cfg(),
&res_rel_file(cx,
sp,
Path::new(&file)),
true,
None,
sp);
struct ExpandResult<'a> {
p: parse::parser::Parser<'a>,
}
impl<'a> base::MacResult for ExpandResult<'a> {
fn make_expr(mut self: Box<ExpandResult<'a>>) -> Option<P<ast::Expr>> {
Some(self.p.parse_expr())
}
fn make_items(mut self: Box<ExpandResult<'a>>)
-> Option<SmallVector<P<ast::Item>>> {
let mut ret = SmallVector::zero();
while self.p.token!= token::Eof {
match self.p.parse_item() {
Some(item) => ret.push(item),
None => panic!(self.p.span_fatal(
self.p.span,
&format!("expected item, found `{}`",
self.p.this_token_to_string())
))
}
}
Some(ret)
}
}
Box::new(ExpandResult { p: p })
}
// include_str! : read the given file, insert it as a literal string expr
pub fn expand_include_str(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let file = match get_single_str_from_tts(cx, sp, tts, "include_str!") {
Some(f) => f,
None => return DummyResult::expr(sp)
};
let file = res_rel_file(cx, sp, Path::new(&file));
let mut bytes = Vec::new();
match File::open(&file).and_then(|mut f| f.read_to_end(&mut bytes)) {
Ok(..) => {}
Err(e) => {
cx.span_err(sp,
&format!("couldn't read {}: {}",
file.display(),
e));
return DummyResult::expr(sp);
}
};
match String::from_utf8(bytes) {
Ok(src) => {
// Add this input file to the code map to make it available as | cx.codemap().new_filemap(filename, src);
base::MacEager::expr(cx.expr_str(sp, interned))
}
Err(_) => {
cx.span_err(sp,
&format!("{} wasn't a utf-8 file",
file.display()));
return DummyResult::expr(sp);
}
}
}
pub fn expand_include_bytes(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let file = match get_single_str_from_tts(cx, sp, tts, "include_bytes!") {
Some(f) => f,
None => return DummyResult::expr(sp)
};
let file = res_rel_file(cx, sp, Path::new(&file));
let mut bytes = Vec::new();
match File::open(&file).and_then(|mut f| f.read_to_end(&mut bytes)) {
Err(e) => {
cx.span_err(sp,
&format!("couldn't read {}: {}", file.display(), e));
return DummyResult::expr(sp);
}
Ok(..) => {
// Add this input file to the code map to make it available as
// dependency information, but don't enter it's contents
let filename = format!("{}", file.display());
cx.codemap().new_filemap(filename, "".to_string());
base::MacEager::expr(cx.expr_lit(sp, ast::LitBinary(Rc::new(bytes))))
}
}
}
// resolve a file-system path to an absolute file-system path (if it
// isn't already)
fn res_rel_file(cx: &mut ExtCtxt, sp: codemap::Span, arg: &Path) -> PathBuf {
// NB: relative paths are resolved relative to the compilation unit
if!arg.is_absolute() {
let mut cu = PathBuf::from(&cx.codemap().span_to_filename(sp));
cu.pop();
cu.push(arg);
cu
} else {
arg.to_path_buf()
}
} | // dependency information
let filename = format!("{}", file.display());
let interned = token::intern_and_get_ident(&src[..]); | random_line_split |
stream_chain.rs | use tokio_stream::{self as stream, Stream, StreamExt};
use tokio_test::{assert_pending, assert_ready, task};
mod support {
pub(crate) mod mpsc;
}
use support::mpsc;
#[tokio::test]
async fn basic_usage() {
let one = stream::iter(vec![1, 2, 3]);
let two = stream::iter(vec![4, 5, 6]);
let mut stream = one.chain(two);
assert_eq!(stream.size_hint(), (6, Some(6)));
assert_eq!(stream.next().await, Some(1));
assert_eq!(stream.size_hint(), (5, Some(5)));
assert_eq!(stream.next().await, Some(2));
assert_eq!(stream.size_hint(), (4, Some(4)));
assert_eq!(stream.next().await, Some(3));
assert_eq!(stream.size_hint(), (3, Some(3)));
assert_eq!(stream.next().await, Some(4));
assert_eq!(stream.size_hint(), (2, Some(2)));
assert_eq!(stream.next().await, Some(5));
assert_eq!(stream.size_hint(), (1, Some(1)));
assert_eq!(stream.next().await, Some(6));
assert_eq!(stream.size_hint(), (0, Some(0)));
assert_eq!(stream.next().await, None);
assert_eq!(stream.size_hint(), (0, Some(0)));
assert_eq!(stream.next().await, None);
}
#[tokio::test]
async fn pending_first() {
let (tx1, rx1) = mpsc::unbounded_channel_stream();
let (tx2, rx2) = mpsc::unbounded_channel_stream();
let mut stream = task::spawn(rx1.chain(rx2));
assert_eq!(stream.size_hint(), (0, None));
assert_pending!(stream.poll_next());
tx2.send(2).unwrap();
assert!(!stream.is_woken());
assert_pending!(stream.poll_next());
tx1.send(1).unwrap();
assert!(stream.is_woken()); | drop(tx1);
assert_eq!(stream.size_hint(), (0, None));
assert!(stream.is_woken());
assert_eq!(Some(2), assert_ready!(stream.poll_next()));
assert_eq!(stream.size_hint(), (0, None));
drop(tx2);
assert_eq!(stream.size_hint(), (0, None));
assert_eq!(None, assert_ready!(stream.poll_next()));
}
#[test]
fn size_overflow() {
struct Monster;
impl tokio_stream::Stream for Monster {
type Item = ();
fn poll_next(
self: std::pin::Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<()>> {
panic!()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(usize::MAX, Some(usize::MAX))
}
}
let m1 = Monster;
let m2 = Monster;
let m = m1.chain(m2);
assert_eq!(m.size_hint(), (usize::MAX, None));
} | assert_eq!(Some(1), assert_ready!(stream.poll_next()));
assert_pending!(stream.poll_next());
| random_line_split |
stream_chain.rs | use tokio_stream::{self as stream, Stream, StreamExt};
use tokio_test::{assert_pending, assert_ready, task};
mod support {
pub(crate) mod mpsc;
}
use support::mpsc;
#[tokio::test]
async fn basic_usage() {
let one = stream::iter(vec![1, 2, 3]);
let two = stream::iter(vec![4, 5, 6]);
let mut stream = one.chain(two);
assert_eq!(stream.size_hint(), (6, Some(6)));
assert_eq!(stream.next().await, Some(1));
assert_eq!(stream.size_hint(), (5, Some(5)));
assert_eq!(stream.next().await, Some(2));
assert_eq!(stream.size_hint(), (4, Some(4)));
assert_eq!(stream.next().await, Some(3));
assert_eq!(stream.size_hint(), (3, Some(3)));
assert_eq!(stream.next().await, Some(4));
assert_eq!(stream.size_hint(), (2, Some(2)));
assert_eq!(stream.next().await, Some(5));
assert_eq!(stream.size_hint(), (1, Some(1)));
assert_eq!(stream.next().await, Some(6));
assert_eq!(stream.size_hint(), (0, Some(0)));
assert_eq!(stream.next().await, None);
assert_eq!(stream.size_hint(), (0, Some(0)));
assert_eq!(stream.next().await, None);
}
#[tokio::test]
async fn pending_first() {
let (tx1, rx1) = mpsc::unbounded_channel_stream();
let (tx2, rx2) = mpsc::unbounded_channel_stream();
let mut stream = task::spawn(rx1.chain(rx2));
assert_eq!(stream.size_hint(), (0, None));
assert_pending!(stream.poll_next());
tx2.send(2).unwrap();
assert!(!stream.is_woken());
assert_pending!(stream.poll_next());
tx1.send(1).unwrap();
assert!(stream.is_woken());
assert_eq!(Some(1), assert_ready!(stream.poll_next()));
assert_pending!(stream.poll_next());
drop(tx1);
assert_eq!(stream.size_hint(), (0, None));
assert!(stream.is_woken());
assert_eq!(Some(2), assert_ready!(stream.poll_next()));
assert_eq!(stream.size_hint(), (0, None));
drop(tx2);
assert_eq!(stream.size_hint(), (0, None));
assert_eq!(None, assert_ready!(stream.poll_next()));
}
#[test]
fn size_overflow() {
struct | ;
impl tokio_stream::Stream for Monster {
type Item = ();
fn poll_next(
self: std::pin::Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<()>> {
panic!()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(usize::MAX, Some(usize::MAX))
}
}
let m1 = Monster;
let m2 = Monster;
let m = m1.chain(m2);
assert_eq!(m.size_hint(), (usize::MAX, None));
}
| Monster | identifier_name |
tests.rs | //
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate test;
use maplit::hashmap;
use oak_functions_abi::proto::{Request, ServerPolicy, StatusCode};
use oak_functions_loader::{
grpc::{create_and_start_grpc_server, create_wasm_handler, RequestModel},
logger::Logger,
lookup::LookupFactory,
lookup_data::{LookupData, LookupDataAuth, LookupDataSource},
server::WasmHandler,
};
use std::{
net::{Ipv6Addr, SocketAddr},
sync::Arc,
time::Duration,
};
use test::Bencher;
use test_utils::{get_config_info, make_request};
#[tokio::test]
async fn test_server() {
let server_port = test_utils::free_port();
let address = SocketAddr::from((Ipv6Addr::UNSPECIFIED, server_port));
let mut manifest_path = std::env::current_dir().unwrap();
manifest_path.push("Cargo.toml");
let wasm_module_bytes =
test_utils::compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), false)
.expect("Couldn't read Wasm module");
let mock_static_server = Arc::new(test_utils::MockStaticServer::default());
let mock_static_server_clone = mock_static_server.clone();
let static_server_port = test_utils::free_port();
let mock_static_server_background = test_utils::background(|term| async move {
mock_static_server_clone
.serve(static_server_port, term)
.await
});
mock_static_server.set_response_body(test_utils::serialize_entries(hashmap! {
b"key_0".to_vec() => b"value_0".to_vec(),
b"key_1".to_vec() => b"value_1".to_vec(),
b"key_2".to_vec() => b"value_2".to_vec(),
b"empty".to_vec() => vec![],
}));
let policy = ServerPolicy {
constant_response_size_bytes: 100,
constant_processing_time_ms: 200,
};
let tee_certificate = vec![];
let logger = Logger::for_test();
let lookup_data = Arc::new(LookupData::new_empty(
Some(LookupDataSource::Http {
url: format!("http://localhost:{}", static_server_port),
auth: LookupDataAuth::default(),
}),
logger.clone(),
));
lookup_data.refresh().await.unwrap();
let lookup_factory = LookupFactory::new_boxed_extension_factory(lookup_data, logger.clone())
.expect("could not create LookupFactory");
let wasm_handler =
create_wasm_handler(&wasm_module_bytes, vec![lookup_factory], logger.clone())
.expect("could not create wasm_handler");
let server_background = test_utils::background(|term| async move {
create_and_start_grpc_server(
&address,
wasm_handler,
tee_certificate,
policy.clone(),
get_config_info(&wasm_module_bytes, policy, false, None),
term,
logger,
RequestModel::BidiStreaming,
)
.await
});
{
// Lookup match.
let response = make_request(server_port, b"key_1").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(b"value_1", response.body().unwrap(),);
}
{
// Lookup fail.
let response = make_request(server_port, b"key_42").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(Vec::<u8>::new(), response.body().unwrap());
}
{
// Lookup match but empty value.
let response = make_request(server_port, b"empty").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(Vec::<u8>::new(), response.body().unwrap());
}
let res = server_background.terminate_and_join().await;
assert!(res.is_ok());
mock_static_server_background.terminate_and_join().await;
}
#[bench]
fn bench_wasm_handler(bencher: &mut Bencher) |
let rt = tokio::runtime::Runtime::new().unwrap();
let summary = bencher.bench(|bencher| {
bencher.iter(|| {
let request = Request {
body: br#"key_1"#.to_vec(),
};
let resp = rt
.block_on(wasm_handler.clone().handle_invoke(request))
.unwrap();
assert_eq!(resp.status, StatusCode::Success as i32);
assert_eq!(std::str::from_utf8(&resp.body).unwrap(), r#"value_1"#);
});
});
// When running `cargo test` this benchmark test gets executed too, but `summary` will be `None`
// in that case. So, here we first check that `summary` is not empty.
if let Some(summary) = summary {
// `summary.mean` is in nanoseconds, even though it is not explicitly documented in
// https://doc.rust-lang.org/test/stats/struct.Summary.html.
let elapsed = Duration::from_nanos(summary.mean as u64);
// We expect the `mean` time for loading the test Wasm module and running its main function
// to be less than a fixed threshold.
assert!(
elapsed < Duration::from_millis(5),
"elapsed time: {:.0?}",
elapsed
);
}
}
| {
let mut manifest_path = std::env::current_dir().unwrap();
manifest_path.push("Cargo.toml");
let wasm_module_bytes =
test_utils::compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), true)
.expect("Couldn't read Wasm module");
let logger = Logger::for_test();
let entries = hashmap! {
b"key_0".to_vec() => br#"value_0"#.to_vec(),
b"key_1".to_vec() => br#"value_1"#.to_vec(),
b"key_2".to_vec() => br#"value_2"#.to_vec(),
};
let lookup_data = Arc::new(LookupData::for_test(entries));
let lookup_factory = LookupFactory::new_boxed_extension_factory(lookup_data, logger.clone())
.expect("could not create LookupFactory");
let wasm_handler = WasmHandler::create(&wasm_module_bytes, vec![lookup_factory], logger)
.expect("Couldn't create the server"); | identifier_body |
tests.rs | //
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate test;
use maplit::hashmap;
use oak_functions_abi::proto::{Request, ServerPolicy, StatusCode};
use oak_functions_loader::{
grpc::{create_and_start_grpc_server, create_wasm_handler, RequestModel},
logger::Logger,
lookup::LookupFactory,
lookup_data::{LookupData, LookupDataAuth, LookupDataSource},
server::WasmHandler,
};
use std::{
net::{Ipv6Addr, SocketAddr},
sync::Arc,
time::Duration,
};
use test::Bencher;
use test_utils::{get_config_info, make_request};
#[tokio::test]
async fn test_server() {
let server_port = test_utils::free_port();
let address = SocketAddr::from((Ipv6Addr::UNSPECIFIED, server_port));
let mut manifest_path = std::env::current_dir().unwrap();
manifest_path.push("Cargo.toml");
let wasm_module_bytes =
test_utils::compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), false)
.expect("Couldn't read Wasm module");
let mock_static_server = Arc::new(test_utils::MockStaticServer::default());
let mock_static_server_clone = mock_static_server.clone();
let static_server_port = test_utils::free_port();
let mock_static_server_background = test_utils::background(|term| async move {
mock_static_server_clone
.serve(static_server_port, term)
.await
});
mock_static_server.set_response_body(test_utils::serialize_entries(hashmap! { | b"key_0".to_vec() => b"value_0".to_vec(),
b"key_1".to_vec() => b"value_1".to_vec(),
b"key_2".to_vec() => b"value_2".to_vec(),
b"empty".to_vec() => vec![],
}));
let policy = ServerPolicy {
constant_response_size_bytes: 100,
constant_processing_time_ms: 200,
};
let tee_certificate = vec![];
let logger = Logger::for_test();
let lookup_data = Arc::new(LookupData::new_empty(
Some(LookupDataSource::Http {
url: format!("http://localhost:{}", static_server_port),
auth: LookupDataAuth::default(),
}),
logger.clone(),
));
lookup_data.refresh().await.unwrap();
let lookup_factory = LookupFactory::new_boxed_extension_factory(lookup_data, logger.clone())
.expect("could not create LookupFactory");
let wasm_handler =
create_wasm_handler(&wasm_module_bytes, vec![lookup_factory], logger.clone())
.expect("could not create wasm_handler");
let server_background = test_utils::background(|term| async move {
create_and_start_grpc_server(
&address,
wasm_handler,
tee_certificate,
policy.clone(),
get_config_info(&wasm_module_bytes, policy, false, None),
term,
logger,
RequestModel::BidiStreaming,
)
.await
});
{
// Lookup match.
let response = make_request(server_port, b"key_1").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(b"value_1", response.body().unwrap(),);
}
{
// Lookup fail.
let response = make_request(server_port, b"key_42").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(Vec::<u8>::new(), response.body().unwrap());
}
{
// Lookup match but empty value.
let response = make_request(server_port, b"empty").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(Vec::<u8>::new(), response.body().unwrap());
}
let res = server_background.terminate_and_join().await;
assert!(res.is_ok());
mock_static_server_background.terminate_and_join().await;
}
#[bench]
fn bench_wasm_handler(bencher: &mut Bencher) {
let mut manifest_path = std::env::current_dir().unwrap();
manifest_path.push("Cargo.toml");
let wasm_module_bytes =
test_utils::compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), true)
.expect("Couldn't read Wasm module");
let logger = Logger::for_test();
let entries = hashmap! {
b"key_0".to_vec() => br#"value_0"#.to_vec(),
b"key_1".to_vec() => br#"value_1"#.to_vec(),
b"key_2".to_vec() => br#"value_2"#.to_vec(),
};
let lookup_data = Arc::new(LookupData::for_test(entries));
let lookup_factory = LookupFactory::new_boxed_extension_factory(lookup_data, logger.clone())
.expect("could not create LookupFactory");
let wasm_handler = WasmHandler::create(&wasm_module_bytes, vec![lookup_factory], logger)
.expect("Couldn't create the server");
let rt = tokio::runtime::Runtime::new().unwrap();
let summary = bencher.bench(|bencher| {
bencher.iter(|| {
let request = Request {
body: br#"key_1"#.to_vec(),
};
let resp = rt
.block_on(wasm_handler.clone().handle_invoke(request))
.unwrap();
assert_eq!(resp.status, StatusCode::Success as i32);
assert_eq!(std::str::from_utf8(&resp.body).unwrap(), r#"value_1"#);
});
});
// When running `cargo test` this benchmark test gets executed too, but `summary` will be `None`
// in that case. So, here we first check that `summary` is not empty.
if let Some(summary) = summary {
// `summary.mean` is in nanoseconds, even though it is not explicitly documented in
// https://doc.rust-lang.org/test/stats/struct.Summary.html.
let elapsed = Duration::from_nanos(summary.mean as u64);
// We expect the `mean` time for loading the test Wasm module and running its main function
// to be less than a fixed threshold.
assert!(
elapsed < Duration::from_millis(5),
"elapsed time: {:.0?}",
elapsed
);
}
} | random_line_split |
|
tests.rs | //
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate test;
use maplit::hashmap;
use oak_functions_abi::proto::{Request, ServerPolicy, StatusCode};
use oak_functions_loader::{
grpc::{create_and_start_grpc_server, create_wasm_handler, RequestModel},
logger::Logger,
lookup::LookupFactory,
lookup_data::{LookupData, LookupDataAuth, LookupDataSource},
server::WasmHandler,
};
use std::{
net::{Ipv6Addr, SocketAddr},
sync::Arc,
time::Duration,
};
use test::Bencher;
use test_utils::{get_config_info, make_request};
#[tokio::test]
async fn test_server() {
let server_port = test_utils::free_port();
let address = SocketAddr::from((Ipv6Addr::UNSPECIFIED, server_port));
let mut manifest_path = std::env::current_dir().unwrap();
manifest_path.push("Cargo.toml");
let wasm_module_bytes =
test_utils::compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), false)
.expect("Couldn't read Wasm module");
let mock_static_server = Arc::new(test_utils::MockStaticServer::default());
let mock_static_server_clone = mock_static_server.clone();
let static_server_port = test_utils::free_port();
let mock_static_server_background = test_utils::background(|term| async move {
mock_static_server_clone
.serve(static_server_port, term)
.await
});
mock_static_server.set_response_body(test_utils::serialize_entries(hashmap! {
b"key_0".to_vec() => b"value_0".to_vec(),
b"key_1".to_vec() => b"value_1".to_vec(),
b"key_2".to_vec() => b"value_2".to_vec(),
b"empty".to_vec() => vec![],
}));
let policy = ServerPolicy {
constant_response_size_bytes: 100,
constant_processing_time_ms: 200,
};
let tee_certificate = vec![];
let logger = Logger::for_test();
let lookup_data = Arc::new(LookupData::new_empty(
Some(LookupDataSource::Http {
url: format!("http://localhost:{}", static_server_port),
auth: LookupDataAuth::default(),
}),
logger.clone(),
));
lookup_data.refresh().await.unwrap();
let lookup_factory = LookupFactory::new_boxed_extension_factory(lookup_data, logger.clone())
.expect("could not create LookupFactory");
let wasm_handler =
create_wasm_handler(&wasm_module_bytes, vec![lookup_factory], logger.clone())
.expect("could not create wasm_handler");
let server_background = test_utils::background(|term| async move {
create_and_start_grpc_server(
&address,
wasm_handler,
tee_certificate,
policy.clone(),
get_config_info(&wasm_module_bytes, policy, false, None),
term,
logger,
RequestModel::BidiStreaming,
)
.await
});
{
// Lookup match.
let response = make_request(server_port, b"key_1").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(b"value_1", response.body().unwrap(),);
}
{
// Lookup fail.
let response = make_request(server_port, b"key_42").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(Vec::<u8>::new(), response.body().unwrap());
}
{
// Lookup match but empty value.
let response = make_request(server_port, b"empty").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(Vec::<u8>::new(), response.body().unwrap());
}
let res = server_background.terminate_and_join().await;
assert!(res.is_ok());
mock_static_server_background.terminate_and_join().await;
}
#[bench]
fn bench_wasm_handler(bencher: &mut Bencher) {
let mut manifest_path = std::env::current_dir().unwrap();
manifest_path.push("Cargo.toml");
let wasm_module_bytes =
test_utils::compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), true)
.expect("Couldn't read Wasm module");
let logger = Logger::for_test();
let entries = hashmap! {
b"key_0".to_vec() => br#"value_0"#.to_vec(),
b"key_1".to_vec() => br#"value_1"#.to_vec(),
b"key_2".to_vec() => br#"value_2"#.to_vec(),
};
let lookup_data = Arc::new(LookupData::for_test(entries));
let lookup_factory = LookupFactory::new_boxed_extension_factory(lookup_data, logger.clone())
.expect("could not create LookupFactory");
let wasm_handler = WasmHandler::create(&wasm_module_bytes, vec![lookup_factory], logger)
.expect("Couldn't create the server");
let rt = tokio::runtime::Runtime::new().unwrap();
let summary = bencher.bench(|bencher| {
bencher.iter(|| {
let request = Request {
body: br#"key_1"#.to_vec(),
};
let resp = rt
.block_on(wasm_handler.clone().handle_invoke(request))
.unwrap();
assert_eq!(resp.status, StatusCode::Success as i32);
assert_eq!(std::str::from_utf8(&resp.body).unwrap(), r#"value_1"#);
});
});
// When running `cargo test` this benchmark test gets executed too, but `summary` will be `None`
// in that case. So, here we first check that `summary` is not empty.
if let Some(summary) = summary |
}
| {
// `summary.mean` is in nanoseconds, even though it is not explicitly documented in
// https://doc.rust-lang.org/test/stats/struct.Summary.html.
let elapsed = Duration::from_nanos(summary.mean as u64);
// We expect the `mean` time for loading the test Wasm module and running its main function
// to be less than a fixed threshold.
assert!(
elapsed < Duration::from_millis(5),
"elapsed time: {:.0?}",
elapsed
);
} | conditional_block |
tests.rs | //
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate test;
use maplit::hashmap;
use oak_functions_abi::proto::{Request, ServerPolicy, StatusCode};
use oak_functions_loader::{
grpc::{create_and_start_grpc_server, create_wasm_handler, RequestModel},
logger::Logger,
lookup::LookupFactory,
lookup_data::{LookupData, LookupDataAuth, LookupDataSource},
server::WasmHandler,
};
use std::{
net::{Ipv6Addr, SocketAddr},
sync::Arc,
time::Duration,
};
use test::Bencher;
use test_utils::{get_config_info, make_request};
#[tokio::test]
async fn | () {
let server_port = test_utils::free_port();
let address = SocketAddr::from((Ipv6Addr::UNSPECIFIED, server_port));
let mut manifest_path = std::env::current_dir().unwrap();
manifest_path.push("Cargo.toml");
let wasm_module_bytes =
test_utils::compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), false)
.expect("Couldn't read Wasm module");
let mock_static_server = Arc::new(test_utils::MockStaticServer::default());
let mock_static_server_clone = mock_static_server.clone();
let static_server_port = test_utils::free_port();
let mock_static_server_background = test_utils::background(|term| async move {
mock_static_server_clone
.serve(static_server_port, term)
.await
});
mock_static_server.set_response_body(test_utils::serialize_entries(hashmap! {
b"key_0".to_vec() => b"value_0".to_vec(),
b"key_1".to_vec() => b"value_1".to_vec(),
b"key_2".to_vec() => b"value_2".to_vec(),
b"empty".to_vec() => vec![],
}));
let policy = ServerPolicy {
constant_response_size_bytes: 100,
constant_processing_time_ms: 200,
};
let tee_certificate = vec![];
let logger = Logger::for_test();
let lookup_data = Arc::new(LookupData::new_empty(
Some(LookupDataSource::Http {
url: format!("http://localhost:{}", static_server_port),
auth: LookupDataAuth::default(),
}),
logger.clone(),
));
lookup_data.refresh().await.unwrap();
let lookup_factory = LookupFactory::new_boxed_extension_factory(lookup_data, logger.clone())
.expect("could not create LookupFactory");
let wasm_handler =
create_wasm_handler(&wasm_module_bytes, vec![lookup_factory], logger.clone())
.expect("could not create wasm_handler");
let server_background = test_utils::background(|term| async move {
create_and_start_grpc_server(
&address,
wasm_handler,
tee_certificate,
policy.clone(),
get_config_info(&wasm_module_bytes, policy, false, None),
term,
logger,
RequestModel::BidiStreaming,
)
.await
});
{
// Lookup match.
let response = make_request(server_port, b"key_1").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(b"value_1", response.body().unwrap(),);
}
{
// Lookup fail.
let response = make_request(server_port, b"key_42").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(Vec::<u8>::new(), response.body().unwrap());
}
{
// Lookup match but empty value.
let response = make_request(server_port, b"empty").await.response;
assert_eq!(StatusCode::Success as i32, response.status);
assert_eq!(Vec::<u8>::new(), response.body().unwrap());
}
let res = server_background.terminate_and_join().await;
assert!(res.is_ok());
mock_static_server_background.terminate_and_join().await;
}
#[bench]
fn bench_wasm_handler(bencher: &mut Bencher) {
let mut manifest_path = std::env::current_dir().unwrap();
manifest_path.push("Cargo.toml");
let wasm_module_bytes =
test_utils::compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), true)
.expect("Couldn't read Wasm module");
let logger = Logger::for_test();
let entries = hashmap! {
b"key_0".to_vec() => br#"value_0"#.to_vec(),
b"key_1".to_vec() => br#"value_1"#.to_vec(),
b"key_2".to_vec() => br#"value_2"#.to_vec(),
};
let lookup_data = Arc::new(LookupData::for_test(entries));
let lookup_factory = LookupFactory::new_boxed_extension_factory(lookup_data, logger.clone())
.expect("could not create LookupFactory");
let wasm_handler = WasmHandler::create(&wasm_module_bytes, vec![lookup_factory], logger)
.expect("Couldn't create the server");
let rt = tokio::runtime::Runtime::new().unwrap();
let summary = bencher.bench(|bencher| {
bencher.iter(|| {
let request = Request {
body: br#"key_1"#.to_vec(),
};
let resp = rt
.block_on(wasm_handler.clone().handle_invoke(request))
.unwrap();
assert_eq!(resp.status, StatusCode::Success as i32);
assert_eq!(std::str::from_utf8(&resp.body).unwrap(), r#"value_1"#);
});
});
// When running `cargo test` this benchmark test gets executed too, but `summary` will be `None`
// in that case. So, here we first check that `summary` is not empty.
if let Some(summary) = summary {
// `summary.mean` is in nanoseconds, even though it is not explicitly documented in
// https://doc.rust-lang.org/test/stats/struct.Summary.html.
let elapsed = Duration::from_nanos(summary.mean as u64);
// We expect the `mean` time for loading the test Wasm module and running its main function
// to be less than a fixed threshold.
assert!(
elapsed < Duration::from_millis(5),
"elapsed time: {:.0?}",
elapsed
);
}
}
| test_server | identifier_name |
cast-rfc0401.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
use std::vec;
enum Simple {
A,
B,
C
}
enum Valued {
H8=163,
Z=0,
X=256,
H7=67,
}
enum ValuedSigned {
M1=-1,
P1=1
}
fn main()
{
// coercion-cast
let mut it = vec![137].into_iter();
let itr: &mut vec::IntoIter<u32> = &mut it;
assert_eq!((itr as &mut Iterator<Item=u32>).next(), Some(137));
assert_eq!((itr as &mut Iterator<Item=u32>).next(), None);
assert_eq!(Some(4u32) as Option<u32>, Some(4u32));
assert_eq!((1u32,2u32) as (u32,u32), (1,2));
// this isn't prim-int-cast. Check that it works.
assert_eq!(false as bool, false);
assert_eq!(true as bool, true);
// numeric-cast
let l: u64 = 0x8090a0b0c0d0e0f0;
let lsz: usize = l as usize;
assert_eq!(l as u32, 0xc0d0e0f0);
// numeric-cast
assert_eq!(l as u8, 0xf0);
assert_eq!(l as i8,-0x10);
assert_eq!(l as u32, 0xc0d0e0f0);
assert_eq!(l as u32 as usize as u32, l as u32);
assert_eq!(l as i32,-0x3f2f1f10); |
assert_eq!(0 as f64, 0f64);
assert_eq!(1 as f64, 1f64);
assert_eq!(l as f64, 9264081114510712022f64);
assert_eq!(l as i64 as f64, -9182662959198838444f64);
// float overflow : needs fixing
// assert_eq!(l as f32 as i64 as u64, 9264082620822882088u64);
// assert_eq!(l as i64 as f32 as i64, 9182664080220408446i64);
assert_eq!(4294967040f32 as u32, 0xffffff00u32);
assert_eq!(1.844674407370955e19f64 as u64, 0xfffffffffffff800u64);
assert_eq!(9.223372036854775e18f64 as i64, 0x7ffffffffffffc00i64);
assert_eq!(-9.223372036854776e18f64 as i64, 0x8000000000000000u64 as i64);
// addr-ptr-cast/ptr-addr-cast (thin ptr)
let p: *const [u8; 1] = lsz as *const [u8; 1];
assert_eq!(p as usize, lsz);
// ptr-ptr-cast (thin ptr)
let w: *const () = p as *const ();
assert_eq!(w as usize, lsz);
// ptr-ptr-cast (fat->thin)
let u: *const [u8] = unsafe{&*p};
assert_eq!(u as *const u8, p as *const u8);
assert_eq!(u as *const u16, p as *const u16);
// ptr-ptr-cast (Length vtables)
let mut l : [u8; 2] = [0,1];
let w: *mut [u16; 2] = &mut l as *mut [u8; 2] as *mut _;
let w: *mut [u16] = unsafe {&mut *w};
let w_u8 : *const [u8] = w as *const [u8];
assert_eq!(unsafe{&*w_u8}, &l);
let s: *mut str = w as *mut str;
let l_via_str = unsafe{&*(s as *const [u8])};
assert_eq!(&l, l_via_str);
// ptr-ptr-cast (Length vtables, check length is preserved)
let l: [[u8; 3]; 2] = [[3, 2, 6], [4, 5, 1]];
let p: *const [[u8; 3]] = &l;
let p: &[[u8; 2]] = unsafe {&*(p as *const [[u8; 2]])};
assert_eq!(p, [[3, 2], [6, 4]]);
// enum-cast
assert_eq!(Simple::A as u8, 0);
assert_eq!(Simple::B as u8, 1);
assert_eq!(Valued::H8 as i8, -93);
assert_eq!(Valued::H7 as i8, 67);
assert_eq!(Valued::Z as i8, 0);
assert_eq!(Valued::H8 as u8, 163);
assert_eq!(Valued::H7 as u8, 67);
assert_eq!(Valued::Z as u8, 0);
assert_eq!(Valued::H8 as u16, 163);
assert_eq!(Valued::Z as u16, 0);
assert_eq!(Valued::H8 as u16, 163);
assert_eq!(Valued::Z as u16, 0);
assert_eq!(ValuedSigned::M1 as u16, 65535);
assert_eq!(ValuedSigned::M1 as i16, -1);
assert_eq!(ValuedSigned::P1 as u16, 1);
assert_eq!(ValuedSigned::P1 as i16, 1);
// prim-int-cast
assert_eq!(false as u16, 0);
assert_eq!(true as u16, 1);
assert_eq!(false as i64, 0);
assert_eq!(true as i64, 1);
assert_eq!('a' as u32, 0x61);
assert_eq!('a' as u16, 0x61);
assert_eq!('a' as u8, 0x61);
assert_eq!('א' as u8, 0xd0);
assert_eq!('א' as u16, 0x5d0);
assert_eq!('א' as u32, 0x5d0);
assert_eq!('🐵' as u8, 0x35);
assert_eq!('🐵' as u16, 0xf435);
assert_eq!('🐵' as u32, 0x1f435);
assert_eq!('英' as i16, -0x7d0f);
assert_eq!('英' as u16, 0x82f1);
// u8-char-cast
assert_eq!(0x61 as char, 'a');
assert_eq!(0u8 as char, '\0');
assert_eq!(0xd7 as char, '×');
// array-ptr-cast
let x = [1,2,3];
let first : *const u32 = &x[0];
assert_eq!(first, &x as *const _);
assert_eq!(first, &x as *const u32);
// fptr-addr-cast
fn foo() {
println!("foo!");
}
fn bar() {
println!("bar!");
}
assert!(foo as usize!= bar as usize);
// Taking a few bits of a function's address is totally pointless and we detect that
// Disabling the lint to ensure that the assertion can still be run
#[allow(const_err)]
{
assert_eq!(foo as i16, foo as usize as i16);
}
// fptr-ptr-cast
assert_eq!(foo as *const u8 as usize, foo as usize);
assert!(foo as *const u32!= first);
}
fn foo() { } | assert_eq!(l as i32 as isize as i32, l as i32);
assert_eq!(l as i64,-0x7f6f5f4f3f2f1f10); | random_line_split |
cast-rfc0401.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
use std::vec;
enum Simple {
A,
B,
C
}
enum | {
H8=163,
Z=0,
X=256,
H7=67,
}
enum ValuedSigned {
M1=-1,
P1=1
}
fn main()
{
// coercion-cast
let mut it = vec![137].into_iter();
let itr: &mut vec::IntoIter<u32> = &mut it;
assert_eq!((itr as &mut Iterator<Item=u32>).next(), Some(137));
assert_eq!((itr as &mut Iterator<Item=u32>).next(), None);
assert_eq!(Some(4u32) as Option<u32>, Some(4u32));
assert_eq!((1u32,2u32) as (u32,u32), (1,2));
// this isn't prim-int-cast. Check that it works.
assert_eq!(false as bool, false);
assert_eq!(true as bool, true);
// numeric-cast
let l: u64 = 0x8090a0b0c0d0e0f0;
let lsz: usize = l as usize;
assert_eq!(l as u32, 0xc0d0e0f0);
// numeric-cast
assert_eq!(l as u8, 0xf0);
assert_eq!(l as i8,-0x10);
assert_eq!(l as u32, 0xc0d0e0f0);
assert_eq!(l as u32 as usize as u32, l as u32);
assert_eq!(l as i32,-0x3f2f1f10);
assert_eq!(l as i32 as isize as i32, l as i32);
assert_eq!(l as i64,-0x7f6f5f4f3f2f1f10);
assert_eq!(0 as f64, 0f64);
assert_eq!(1 as f64, 1f64);
assert_eq!(l as f64, 9264081114510712022f64);
assert_eq!(l as i64 as f64, -9182662959198838444f64);
// float overflow : needs fixing
// assert_eq!(l as f32 as i64 as u64, 9264082620822882088u64);
// assert_eq!(l as i64 as f32 as i64, 9182664080220408446i64);
assert_eq!(4294967040f32 as u32, 0xffffff00u32);
assert_eq!(1.844674407370955e19f64 as u64, 0xfffffffffffff800u64);
assert_eq!(9.223372036854775e18f64 as i64, 0x7ffffffffffffc00i64);
assert_eq!(-9.223372036854776e18f64 as i64, 0x8000000000000000u64 as i64);
// addr-ptr-cast/ptr-addr-cast (thin ptr)
let p: *const [u8; 1] = lsz as *const [u8; 1];
assert_eq!(p as usize, lsz);
// ptr-ptr-cast (thin ptr)
let w: *const () = p as *const ();
assert_eq!(w as usize, lsz);
// ptr-ptr-cast (fat->thin)
let u: *const [u8] = unsafe{&*p};
assert_eq!(u as *const u8, p as *const u8);
assert_eq!(u as *const u16, p as *const u16);
// ptr-ptr-cast (Length vtables)
let mut l : [u8; 2] = [0,1];
let w: *mut [u16; 2] = &mut l as *mut [u8; 2] as *mut _;
let w: *mut [u16] = unsafe {&mut *w};
let w_u8 : *const [u8] = w as *const [u8];
assert_eq!(unsafe{&*w_u8}, &l);
let s: *mut str = w as *mut str;
let l_via_str = unsafe{&*(s as *const [u8])};
assert_eq!(&l, l_via_str);
// ptr-ptr-cast (Length vtables, check length is preserved)
let l: [[u8; 3]; 2] = [[3, 2, 6], [4, 5, 1]];
let p: *const [[u8; 3]] = &l;
let p: &[[u8; 2]] = unsafe {&*(p as *const [[u8; 2]])};
assert_eq!(p, [[3, 2], [6, 4]]);
// enum-cast
assert_eq!(Simple::A as u8, 0);
assert_eq!(Simple::B as u8, 1);
assert_eq!(Valued::H8 as i8, -93);
assert_eq!(Valued::H7 as i8, 67);
assert_eq!(Valued::Z as i8, 0);
assert_eq!(Valued::H8 as u8, 163);
assert_eq!(Valued::H7 as u8, 67);
assert_eq!(Valued::Z as u8, 0);
assert_eq!(Valued::H8 as u16, 163);
assert_eq!(Valued::Z as u16, 0);
assert_eq!(Valued::H8 as u16, 163);
assert_eq!(Valued::Z as u16, 0);
assert_eq!(ValuedSigned::M1 as u16, 65535);
assert_eq!(ValuedSigned::M1 as i16, -1);
assert_eq!(ValuedSigned::P1 as u16, 1);
assert_eq!(ValuedSigned::P1 as i16, 1);
// prim-int-cast
assert_eq!(false as u16, 0);
assert_eq!(true as u16, 1);
assert_eq!(false as i64, 0);
assert_eq!(true as i64, 1);
assert_eq!('a' as u32, 0x61);
assert_eq!('a' as u16, 0x61);
assert_eq!('a' as u8, 0x61);
assert_eq!('א' as u8, 0xd0);
assert_eq!('א' as u16, 0x5d0);
assert_eq!('א' as u32, 0x5d0);
assert_eq!('🐵' as u8, 0x35);
assert_eq!('🐵' as u16, 0xf435);
assert_eq!('🐵' as u32, 0x1f435);
assert_eq!('英' as i16, -0x7d0f);
assert_eq!('英' as u16, 0x82f1);
// u8-char-cast
assert_eq!(0x61 as char, 'a');
assert_eq!(0u8 as char, '\0');
assert_eq!(0xd7 as char, '×');
// array-ptr-cast
let x = [1,2,3];
let first : *const u32 = &x[0];
assert_eq!(first, &x as *const _);
assert_eq!(first, &x as *const u32);
// fptr-addr-cast
fn foo() {
println!("foo!");
}
fn bar() {
println!("bar!");
}
assert!(foo as usize!= bar as usize);
// Taking a few bits of a function's address is totally pointless and we detect that
// Disabling the lint to ensure that the assertion can still be run
#[allow(const_err)]
{
assert_eq!(foo as i16, foo as usize as i16);
}
// fptr-ptr-cast
assert_eq!(foo as *const u8 as usize, foo as usize);
assert!(foo as *const u32!= first);
}
fn foo() { }
| Valued | identifier_name |
cast-rfc0401.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
use std::vec;
enum Simple {
A,
B,
C
}
enum Valued {
H8=163,
Z=0,
X=256,
H7=67,
}
enum ValuedSigned {
M1=-1,
P1=1
}
fn main()
{
// coercion-cast
let mut it = vec![137].into_iter();
let itr: &mut vec::IntoIter<u32> = &mut it;
assert_eq!((itr as &mut Iterator<Item=u32>).next(), Some(137));
assert_eq!((itr as &mut Iterator<Item=u32>).next(), None);
assert_eq!(Some(4u32) as Option<u32>, Some(4u32));
assert_eq!((1u32,2u32) as (u32,u32), (1,2));
// this isn't prim-int-cast. Check that it works.
assert_eq!(false as bool, false);
assert_eq!(true as bool, true);
// numeric-cast
let l: u64 = 0x8090a0b0c0d0e0f0;
let lsz: usize = l as usize;
assert_eq!(l as u32, 0xc0d0e0f0);
// numeric-cast
assert_eq!(l as u8, 0xf0);
assert_eq!(l as i8,-0x10);
assert_eq!(l as u32, 0xc0d0e0f0);
assert_eq!(l as u32 as usize as u32, l as u32);
assert_eq!(l as i32,-0x3f2f1f10);
assert_eq!(l as i32 as isize as i32, l as i32);
assert_eq!(l as i64,-0x7f6f5f4f3f2f1f10);
assert_eq!(0 as f64, 0f64);
assert_eq!(1 as f64, 1f64);
assert_eq!(l as f64, 9264081114510712022f64);
assert_eq!(l as i64 as f64, -9182662959198838444f64);
// float overflow : needs fixing
// assert_eq!(l as f32 as i64 as u64, 9264082620822882088u64);
// assert_eq!(l as i64 as f32 as i64, 9182664080220408446i64);
assert_eq!(4294967040f32 as u32, 0xffffff00u32);
assert_eq!(1.844674407370955e19f64 as u64, 0xfffffffffffff800u64);
assert_eq!(9.223372036854775e18f64 as i64, 0x7ffffffffffffc00i64);
assert_eq!(-9.223372036854776e18f64 as i64, 0x8000000000000000u64 as i64);
// addr-ptr-cast/ptr-addr-cast (thin ptr)
let p: *const [u8; 1] = lsz as *const [u8; 1];
assert_eq!(p as usize, lsz);
// ptr-ptr-cast (thin ptr)
let w: *const () = p as *const ();
assert_eq!(w as usize, lsz);
// ptr-ptr-cast (fat->thin)
let u: *const [u8] = unsafe{&*p};
assert_eq!(u as *const u8, p as *const u8);
assert_eq!(u as *const u16, p as *const u16);
// ptr-ptr-cast (Length vtables)
let mut l : [u8; 2] = [0,1];
let w: *mut [u16; 2] = &mut l as *mut [u8; 2] as *mut _;
let w: *mut [u16] = unsafe {&mut *w};
let w_u8 : *const [u8] = w as *const [u8];
assert_eq!(unsafe{&*w_u8}, &l);
let s: *mut str = w as *mut str;
let l_via_str = unsafe{&*(s as *const [u8])};
assert_eq!(&l, l_via_str);
// ptr-ptr-cast (Length vtables, check length is preserved)
let l: [[u8; 3]; 2] = [[3, 2, 6], [4, 5, 1]];
let p: *const [[u8; 3]] = &l;
let p: &[[u8; 2]] = unsafe {&*(p as *const [[u8; 2]])};
assert_eq!(p, [[3, 2], [6, 4]]);
// enum-cast
assert_eq!(Simple::A as u8, 0);
assert_eq!(Simple::B as u8, 1);
assert_eq!(Valued::H8 as i8, -93);
assert_eq!(Valued::H7 as i8, 67);
assert_eq!(Valued::Z as i8, 0);
assert_eq!(Valued::H8 as u8, 163);
assert_eq!(Valued::H7 as u8, 67);
assert_eq!(Valued::Z as u8, 0);
assert_eq!(Valued::H8 as u16, 163);
assert_eq!(Valued::Z as u16, 0);
assert_eq!(Valued::H8 as u16, 163);
assert_eq!(Valued::Z as u16, 0);
assert_eq!(ValuedSigned::M1 as u16, 65535);
assert_eq!(ValuedSigned::M1 as i16, -1);
assert_eq!(ValuedSigned::P1 as u16, 1);
assert_eq!(ValuedSigned::P1 as i16, 1);
// prim-int-cast
assert_eq!(false as u16, 0);
assert_eq!(true as u16, 1);
assert_eq!(false as i64, 0);
assert_eq!(true as i64, 1);
assert_eq!('a' as u32, 0x61);
assert_eq!('a' as u16, 0x61);
assert_eq!('a' as u8, 0x61);
assert_eq!('א' as u8, 0xd0);
assert_eq!('א' as u16, 0x5d0);
assert_eq!('א' as u32, 0x5d0);
assert_eq!('🐵' as u8, 0x35);
assert_eq!('🐵' as u16, 0xf435);
assert_eq!('🐵' as u32, 0x1f435);
assert_eq!('英' as i16, -0x7d0f);
assert_eq!('英' as u16, 0x82f1);
// u8-char-cast
assert_eq!(0x61 as char, 'a');
assert_eq!(0u8 as char, '\0');
assert_eq!(0xd7 as char, '×');
// array-ptr-cast
let x = [1,2,3];
let first : *const u32 = &x[0];
assert_eq!(first, &x as *const _);
assert_eq!(first, &x as *const u32);
// fptr-addr-cast
fn foo() {
println!("foo!");
}
fn bar() {
println!("bar!");
}
assert!(foo as usize!= bar as usize);
// Taking a few bits of a function's address is totally pointless and we detect that
// Disabling the lint to ensure that the assertion can still be run
#[allow(const_err)]
{
assert_eq!(foo as i16, foo as usize as i16);
}
// fptr-ptr-cast
assert_eq!(foo as *const u8 as usize, foo as usize);
assert!(foo as *const u32!= first);
}
fn foo() { }
| identifier_body |
||
reachable-unnameable-type-alias.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(staged_api)]
#![stable(feature = "a", since = "b")]
mod inner_private_module {
// UnnameableTypeAlias isn't marked as reachable, so no stability annotation is required here
pub type UnnameableTypeAlias = u8;
}
#[stable(feature = "a", since = "b")]
pub fn f() -> inner_private_module::UnnameableTypeAlias {
0
}
fn | () {}
| main | identifier_name |
reachable-unnameable-type-alias.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(staged_api)]
#![stable(feature = "a", since = "b")]
mod inner_private_module {
// UnnameableTypeAlias isn't marked as reachable, so no stability annotation is required here
pub type UnnameableTypeAlias = u8;
}
#[stable(feature = "a", since = "b")]
pub fn f() -> inner_private_module::UnnameableTypeAlias {
0
}
fn main() {} | random_line_split |
|
reachable-unnameable-type-alias.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(staged_api)]
#![stable(feature = "a", since = "b")]
mod inner_private_module {
// UnnameableTypeAlias isn't marked as reachable, so no stability annotation is required here
pub type UnnameableTypeAlias = u8;
}
#[stable(feature = "a", since = "b")]
pub fn f() -> inner_private_module::UnnameableTypeAlias |
fn main() {}
| {
0
} | identifier_body |
spiral-matrix.rs | use spiral_matrix::*;
#[test]
fn empty_spiral() {
let expected: Vec<Vec<u32>> = Vec::new();
assert_eq!(spiral_matrix(0), expected);
}
#[test]
#[ignore]
fn size_one_spiral() {
let expected: Vec<Vec<u32>> = vec![vec![1]];
assert_eq!(spiral_matrix(1), expected);
}
#[test]
#[ignore]
fn size_two_spiral() {
let expected: Vec<Vec<u32>> = vec![vec![1, 2], vec![4, 3]];
assert_eq!(spiral_matrix(2), expected);
}
#[test]
#[ignore]
fn size_three_spiral() {
#[rustfmt::skip] | ];
assert_eq!(spiral_matrix(3), expected);
}
#[test]
#[ignore]
fn size_four_spiral() {
let expected: Vec<Vec<u32>> = vec![
vec![1, 2, 3, 4],
vec![12, 13, 14, 5],
vec![11, 16, 15, 6],
vec![10, 9, 8, 7],
];
assert_eq!(spiral_matrix(4), expected);
}
#[test]
#[ignore]
fn size_five_spiral() {
let expected: Vec<Vec<u32>> = vec![
vec![1, 2, 3, 4, 5],
vec![16, 17, 18, 19, 6],
vec![15, 24, 25, 20, 7],
vec![14, 23, 22, 21, 8],
vec![13, 12, 11, 10, 9],
];
assert_eq!(spiral_matrix(5), expected);
} | let expected: Vec<Vec<u32>> = vec![
vec![1, 2, 3],
vec![8, 9, 4],
vec![7, 6, 5], | random_line_split |
spiral-matrix.rs | use spiral_matrix::*;
#[test]
fn empty_spiral() |
#[test]
#[ignore]
fn size_one_spiral() {
let expected: Vec<Vec<u32>> = vec![vec![1]];
assert_eq!(spiral_matrix(1), expected);
}
#[test]
#[ignore]
fn size_two_spiral() {
let expected: Vec<Vec<u32>> = vec![vec![1, 2], vec![4, 3]];
assert_eq!(spiral_matrix(2), expected);
}
#[test]
#[ignore]
fn size_three_spiral() {
#[rustfmt::skip]
let expected: Vec<Vec<u32>> = vec![
vec![1, 2, 3],
vec![8, 9, 4],
vec![7, 6, 5],
];
assert_eq!(spiral_matrix(3), expected);
}
#[test]
#[ignore]
fn size_four_spiral() {
let expected: Vec<Vec<u32>> = vec![
vec![1, 2, 3, 4],
vec![12, 13, 14, 5],
vec![11, 16, 15, 6],
vec![10, 9, 8, 7],
];
assert_eq!(spiral_matrix(4), expected);
}
#[test]
#[ignore]
fn size_five_spiral() {
let expected: Vec<Vec<u32>> = vec![
vec![1, 2, 3, 4, 5],
vec![16, 17, 18, 19, 6],
vec![15, 24, 25, 20, 7],
vec![14, 23, 22, 21, 8],
vec![13, 12, 11, 10, 9],
];
assert_eq!(spiral_matrix(5), expected);
}
| {
let expected: Vec<Vec<u32>> = Vec::new();
assert_eq!(spiral_matrix(0), expected);
} | identifier_body |
spiral-matrix.rs | use spiral_matrix::*;
#[test]
fn empty_spiral() {
let expected: Vec<Vec<u32>> = Vec::new();
assert_eq!(spiral_matrix(0), expected);
}
#[test]
#[ignore]
fn size_one_spiral() {
let expected: Vec<Vec<u32>> = vec![vec![1]];
assert_eq!(spiral_matrix(1), expected);
}
#[test]
#[ignore]
fn size_two_spiral() {
let expected: Vec<Vec<u32>> = vec![vec![1, 2], vec![4, 3]];
assert_eq!(spiral_matrix(2), expected);
}
#[test]
#[ignore]
fn size_three_spiral() {
#[rustfmt::skip]
let expected: Vec<Vec<u32>> = vec![
vec![1, 2, 3],
vec![8, 9, 4],
vec![7, 6, 5],
];
assert_eq!(spiral_matrix(3), expected);
}
#[test]
#[ignore]
fn size_four_spiral() {
let expected: Vec<Vec<u32>> = vec![
vec![1, 2, 3, 4],
vec![12, 13, 14, 5],
vec![11, 16, 15, 6],
vec![10, 9, 8, 7],
];
assert_eq!(spiral_matrix(4), expected);
}
#[test]
#[ignore]
fn | () {
let expected: Vec<Vec<u32>> = vec![
vec![1, 2, 3, 4, 5],
vec![16, 17, 18, 19, 6],
vec![15, 24, 25, 20, 7],
vec![14, 23, 22, 21, 8],
vec![13, 12, 11, 10, 9],
];
assert_eq!(spiral_matrix(5), expected);
}
| size_five_spiral | identifier_name |
date.rs | use header::HttpDate;
header! {
#[doc="`Date` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.1.1.2)"]
#[doc=""]
#[doc="The `Date` header field represents the date and time at which the"]
#[doc="message was originated."]
#[doc=""]
#[doc="# ABNF"]
#[doc="```plain"]
#[doc="Date = HTTP-date"]
#[doc="```"]
#[doc=""]
#[doc="# Example values"]
#[doc="* `Tue, 15 Nov 1994 08:12:31 GMT`"]
#[doc=""]
#[doc="# Example"]
#[doc="```"]
#[doc="# extern crate time;"]
#[doc="# extern crate hyper;"]
#[doc="# fn main() {"]
#[doc="// extern crate time;"]
#[doc=""]
#[doc="use hyper::header::{Headers, Date, HttpDate};"]
#[doc="use time;"]
#[doc=""]
#[doc="let mut headers = Headers::new();"]
#[doc="headers.set(Date(HttpDate(time::now())));"]
#[doc="# }"]
#[doc="```"]
(Date, "Date") => [HttpDate]
test_date { | }
bench_header!(imf_fixdate, Date, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] });
bench_header!(rfc_850, Date, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] });
bench_header!(asctime, Date, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); | test_header!(test1, vec![b"Tue, 15 Nov 1994 08:12:31 GMT"]);
} | random_line_split |
lib.rs | //! Bindings for [QtGui](http://doc.qt.io/qt-5/qtgui-module.html) library.
//! | //! This crate was generated using [cpp_to_rust](https://github.com/rust-qt/cpp_to_rust).
//!
//! This is work in progress, so the API will significantly change in the future.
//! Some methods are missing, and some are inconvenient to use.
//! Some methods are unsafe even though they are not marked as unsafe.
//! Users must carefully track ownership of the objects, as usual Rust guarantees
//! do not take effect. This will hopefully improve in the future.
//! Please report any issues to the
//! [issue tracker](https://github.com/rust-qt/cpp_to_rust/issues).
//!
//! This crate was generated for **Qt {cpp_to_rust.cpp_lib_version}**.
//! If Qt compatibility guarantees take effect, it should be compatible
//! with future minor releases and with past and future patch releases,
//! but API added in future releases won't be available. The crate is not compatible
//! with past minor Qt releases. If you need to use a Qt version incompatible with this crate,
//! use [qt_generator](https://github.com/rust-qt/cpp_to_rust/tree/master/qt_generator/qt_generator)
//! to generate crates for your Qt version.
//!
//! Refer to `qt_core` crate documentation for general information about Qt crates.
//! Note that if you use `qt_widgets`, you should use `qt_widgets::application::Application`
//! as the application object, and if you use `qt_gui` but not `qt_widgets`, you should use
//! `qt_gui::gui_application::GuiApplication`.
include_generated!(); | random_line_split |
|
enter_try.rs | // https://rustbyexample.com/error/multiple_error_types/enter_try.html
// http://rust-lang-ja.org/rust-by-example/error/option_with_result/enter_try.html
// Use `String` as our error type
type Result<T> = std::result::Result<T, String>;
fn double_first(vec: Vec<&str>) -> Result<i32> {
let first = try!(vec.first()
.ok_or("Please use a vector with at least one element.".to_owned()));
let value = try!(first.parse::<i32>()
.map_err(|e| e.to_string()));
Ok(2 * value)
}
fn | (result: Result<i32>) {
match result {
Ok(n) => println!("The first doubled is {}", n),
Err(e) => println!("Error: {}", e),
}
}
fn main() {
let empty = vec![];
let strings = vec!["tofu", "93", "18"];
print(double_first(empty));
print(double_first(strings));
}
| print | identifier_name |
enter_try.rs | // https://rustbyexample.com/error/multiple_error_types/enter_try.html
// http://rust-lang-ja.org/rust-by-example/error/option_with_result/enter_try.html
// Use `String` as our error type
type Result<T> = std::result::Result<T, String>;
fn double_first(vec: Vec<&str>) -> Result<i32> {
let first = try!(vec.first()
.ok_or("Please use a vector with at least one element.".to_owned()));
let value = try!(first.parse::<i32>()
.map_err(|e| e.to_string()));
Ok(2 * value)
}
fn print(result: Result<i32>) |
fn main() {
let empty = vec![];
let strings = vec!["tofu", "93", "18"];
print(double_first(empty));
print(double_first(strings));
}
| {
match result {
Ok(n) => println!("The first doubled is {}", n),
Err(e) => println!("Error: {}", e),
}
} | identifier_body |
enter_try.rs | // https://rustbyexample.com/error/multiple_error_types/enter_try.html
// http://rust-lang-ja.org/rust-by-example/error/option_with_result/enter_try.html
// Use `String` as our error type
type Result<T> = std::result::Result<T, String>;
fn double_first(vec: Vec<&str>) -> Result<i32> {
let first = try!(vec.first()
.ok_or("Please use a vector with at least one element.".to_owned()));
let value = try!(first.parse::<i32>() | Ok(2 * value)
}
fn print(result: Result<i32>) {
match result {
Ok(n) => println!("The first doubled is {}", n),
Err(e) => println!("Error: {}", e),
}
}
fn main() {
let empty = vec![];
let strings = vec!["tofu", "93", "18"];
print(double_first(empty));
print(double_first(strings));
} | .map_err(|e| e.to_string()));
| random_line_split |
gc.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use autocxx_parser::IncludeCppConfig;
use crate::{conversion::api::Api, types::QualifiedName};
use super::fun::FnPhase;
/// This is essentially mark-and-sweep garbage collection of the
/// [Api]s that we've discovered. Why do we do this, you might wonder?
/// It seems a bit strange given that we pass an explicit allowlist
/// to bindgen.
/// There are two circumstances under which we want to discard
/// some of the APIs we encounter parsing the bindgen.
/// 1) We simplify some struct to be non-POD. In this case, we'll
/// discard all the fields within it. Those fields can be, and
/// in fact often _are_, stuff which we have trouble converting
/// e.g. std::string or std::string::value_type or
/// my_derived_thing<std::basic_string::value_type> or some
/// other permutation. In such cases, we want to discard those
/// field types with prejudice.
/// 2) block! may be used to ban certain APIs. This often eliminates
/// some methods from a given struct/class. In which case, we
/// don't care about the other parameter types passed into those
/// APIs either.
pub(crate) fn filter_apis_by_following_edges_from_allowlist(
mut apis: Vec<Api<FnPhase>>,
config: &IncludeCppConfig,
) -> Vec<Api<FnPhase>> {
let mut todos: Vec<QualifiedName> = apis
.iter()
.filter(|api| {
let tnforal = api.typename_for_allowlist();
config.is_on_allowlist(&tnforal.to_cpp_name())
})
.map(Api::name)
.cloned()
.collect();
let mut by_typename: HashMap<QualifiedName, Vec<Api<FnPhase>>> = HashMap::new();
for api in apis.drain(..) {
let tn = api.name().clone();
by_typename.entry(tn).or_default().push(api);
}
let mut done = HashSet::new();
let mut output = Vec::new(); | if let Some(mut these_apis) = by_typename.remove(&todo) {
todos.extend(these_apis.iter().flat_map(|api| api.deps()));
output.append(&mut these_apis);
} // otherwise, probably an intrinsic e.g. uint32_t.
done.insert(todo);
}
output
} | while !todos.is_empty() {
let todo = todos.remove(0);
if done.contains(&todo) {
continue;
} | random_line_split |
gc.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use autocxx_parser::IncludeCppConfig;
use crate::{conversion::api::Api, types::QualifiedName};
use super::fun::FnPhase;
/// This is essentially mark-and-sweep garbage collection of the
/// [Api]s that we've discovered. Why do we do this, you might wonder?
/// It seems a bit strange given that we pass an explicit allowlist
/// to bindgen.
/// There are two circumstances under which we want to discard
/// some of the APIs we encounter parsing the bindgen.
/// 1) We simplify some struct to be non-POD. In this case, we'll
/// discard all the fields within it. Those fields can be, and
/// in fact often _are_, stuff which we have trouble converting
/// e.g. std::string or std::string::value_type or
/// my_derived_thing<std::basic_string::value_type> or some
/// other permutation. In such cases, we want to discard those
/// field types with prejudice.
/// 2) block! may be used to ban certain APIs. This often eliminates
/// some methods from a given struct/class. In which case, we
/// don't care about the other parameter types passed into those
/// APIs either.
pub(crate) fn filter_apis_by_following_edges_from_allowlist(
mut apis: Vec<Api<FnPhase>>,
config: &IncludeCppConfig,
) -> Vec<Api<FnPhase>> | continue;
}
if let Some(mut these_apis) = by_typename.remove(&todo) {
todos.extend(these_apis.iter().flat_map(|api| api.deps()));
output.append(&mut these_apis);
} // otherwise, probably an intrinsic e.g. uint32_t.
done.insert(todo);
}
output
}
| {
let mut todos: Vec<QualifiedName> = apis
.iter()
.filter(|api| {
let tnforal = api.typename_for_allowlist();
config.is_on_allowlist(&tnforal.to_cpp_name())
})
.map(Api::name)
.cloned()
.collect();
let mut by_typename: HashMap<QualifiedName, Vec<Api<FnPhase>>> = HashMap::new();
for api in apis.drain(..) {
let tn = api.name().clone();
by_typename.entry(tn).or_default().push(api);
}
let mut done = HashSet::new();
let mut output = Vec::new();
while !todos.is_empty() {
let todo = todos.remove(0);
if done.contains(&todo) { | identifier_body |
gc.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use autocxx_parser::IncludeCppConfig;
use crate::{conversion::api::Api, types::QualifiedName};
use super::fun::FnPhase;
/// This is essentially mark-and-sweep garbage collection of the
/// [Api]s that we've discovered. Why do we do this, you might wonder?
/// It seems a bit strange given that we pass an explicit allowlist
/// to bindgen.
/// There are two circumstances under which we want to discard
/// some of the APIs we encounter parsing the bindgen.
/// 1) We simplify some struct to be non-POD. In this case, we'll
/// discard all the fields within it. Those fields can be, and
/// in fact often _are_, stuff which we have trouble converting
/// e.g. std::string or std::string::value_type or
/// my_derived_thing<std::basic_string::value_type> or some
/// other permutation. In such cases, we want to discard those
/// field types with prejudice.
/// 2) block! may be used to ban certain APIs. This often eliminates
/// some methods from a given struct/class. In which case, we
/// don't care about the other parameter types passed into those
/// APIs either.
pub(crate) fn | (
mut apis: Vec<Api<FnPhase>>,
config: &IncludeCppConfig,
) -> Vec<Api<FnPhase>> {
let mut todos: Vec<QualifiedName> = apis
.iter()
.filter(|api| {
let tnforal = api.typename_for_allowlist();
config.is_on_allowlist(&tnforal.to_cpp_name())
})
.map(Api::name)
.cloned()
.collect();
let mut by_typename: HashMap<QualifiedName, Vec<Api<FnPhase>>> = HashMap::new();
for api in apis.drain(..) {
let tn = api.name().clone();
by_typename.entry(tn).or_default().push(api);
}
let mut done = HashSet::new();
let mut output = Vec::new();
while!todos.is_empty() {
let todo = todos.remove(0);
if done.contains(&todo) {
continue;
}
if let Some(mut these_apis) = by_typename.remove(&todo) {
todos.extend(these_apis.iter().flat_map(|api| api.deps()));
output.append(&mut these_apis);
} // otherwise, probably an intrinsic e.g. uint32_t.
done.insert(todo);
}
output
}
| filter_apis_by_following_edges_from_allowlist | identifier_name |
gc.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{HashMap, HashSet};
use autocxx_parser::IncludeCppConfig;
use crate::{conversion::api::Api, types::QualifiedName};
use super::fun::FnPhase;
/// This is essentially mark-and-sweep garbage collection of the
/// [Api]s that we've discovered. Why do we do this, you might wonder?
/// It seems a bit strange given that we pass an explicit allowlist
/// to bindgen.
/// There are two circumstances under which we want to discard
/// some of the APIs we encounter parsing the bindgen.
/// 1) We simplify some struct to be non-POD. In this case, we'll
/// discard all the fields within it. Those fields can be, and
/// in fact often _are_, stuff which we have trouble converting
/// e.g. std::string or std::string::value_type or
/// my_derived_thing<std::basic_string::value_type> or some
/// other permutation. In such cases, we want to discard those
/// field types with prejudice.
/// 2) block! may be used to ban certain APIs. This often eliminates
/// some methods from a given struct/class. In which case, we
/// don't care about the other parameter types passed into those
/// APIs either.
pub(crate) fn filter_apis_by_following_edges_from_allowlist(
mut apis: Vec<Api<FnPhase>>,
config: &IncludeCppConfig,
) -> Vec<Api<FnPhase>> {
let mut todos: Vec<QualifiedName> = apis
.iter()
.filter(|api| {
let tnforal = api.typename_for_allowlist();
config.is_on_allowlist(&tnforal.to_cpp_name())
})
.map(Api::name)
.cloned()
.collect();
let mut by_typename: HashMap<QualifiedName, Vec<Api<FnPhase>>> = HashMap::new();
for api in apis.drain(..) {
let tn = api.name().clone();
by_typename.entry(tn).or_default().push(api);
}
let mut done = HashSet::new();
let mut output = Vec::new();
while!todos.is_empty() {
let todo = todos.remove(0);
if done.contains(&todo) |
if let Some(mut these_apis) = by_typename.remove(&todo) {
todos.extend(these_apis.iter().flat_map(|api| api.deps()));
output.append(&mut these_apis);
} // otherwise, probably an intrinsic e.g. uint32_t.
done.insert(todo);
}
output
}
| {
continue;
} | conditional_block |
const-param-elided-lifetime.rs | // Elided lifetimes within the type of a const generic parameters is disallowed. This matches the
// behaviour of trait bounds where `fn foo<T: Ord<&u8>>() {}` is illegal. Though we could change
// elided lifetimes within the type of a const generic parameters to be'static, like elided
// lifetimes within const/static items. | // revisions: full min
#![cfg_attr(full, feature(adt_const_params))]
#![cfg_attr(full, allow(incomplete_features))]
struct A<const N: &u8>;
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
trait B {}
impl<const N: &u8> A<N> {
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
fn foo<const M: &u8>(&self) {}
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
}
impl<const N: &u8> B for A<N> {}
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
fn bar<const N: &u8>() {}
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
fn main() {} | random_line_split |
|
const-param-elided-lifetime.rs | // Elided lifetimes within the type of a const generic parameters is disallowed. This matches the
// behaviour of trait bounds where `fn foo<T: Ord<&u8>>() {}` is illegal. Though we could change
// elided lifetimes within the type of a const generic parameters to be'static, like elided
// lifetimes within const/static items.
// revisions: full min
#![cfg_attr(full, feature(adt_const_params))]
#![cfg_attr(full, allow(incomplete_features))]
struct | <const N: &u8>;
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
trait B {}
impl<const N: &u8> A<N> {
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
fn foo<const M: &u8>(&self) {}
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
}
impl<const N: &u8> B for A<N> {}
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
fn bar<const N: &u8>() {}
//~^ ERROR `&` without an explicit lifetime name cannot be used here
//[min]~^^ ERROR `&'static u8` is forbidden
fn main() {}
| A | identifier_name |
mod.rs | // Copyright 2014-2016 Johannes Köster.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! Handling log-probabilities.
pub mod cdf;
use std::mem;
use std::f64;
use std::iter;
use std::ops::{Add, Sub, Mul, Div};
use itertools_num::linspace;
use itertools::Itertools;
use num_traits::Float;
/// A factor to convert log-probabilities to PHRED-scale (phred = p * `LOG_TO_PHRED_FACTOR`).
const LOG_TO_PHRED_FACTOR: f64 = -4.3429448190325175; // -10 * 1 / ln(10)
/// A factor to convert PHRED-scale to log-probabilities (p = phred * `PHRED_TO_LOG_FACTOR`).
const PHRED_TO_LOG_FACTOR: f64 = -0.23025850929940456; // 1 / (-10 * log10(e))
/// Calculate log(1 - p) with p given in log space without loss of precision as described in
/// http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf.
fn ln_1m_exp(p: f64) -> f64 {
assert!(p <= 0.0);
if p < -0.693 {
(-p.exp()).ln_1p()
} else {
(-p.exp_m1()).ln()
}
}
custom_derive! {
/// A newtype for probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::Prob;
///
/// let p = Prob(0.5);
/// let q = Prob(0.2);
///
/// assert_relative_eq!(*(p + q), *Prob(0.7));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
NewtypeMul(*),
NewtypeDiv(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
Default,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct Prob(pub f64);
}
| Ok(Prob(p))
} else {
Err(ProbError::InvalidProb(p))
}
}
}
custom_derive! {
/// A newtype for log-scale probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::{LogProb, Prob};
///
/// // convert from probability
/// let p = LogProb::from(Prob(0.5));
/// // convert manually
/// let q = LogProb(0.2f64.ln());
/// // obtain zero probability in log-space
/// let o = LogProb::ln_one();
///
/// assert_relative_eq!(*Prob::from(p.ln_add_exp(q) + o), *Prob(0.7));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct LogProb(pub f64);
}
custom_derive! {
/// A newtype for PHRED-scale probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::{PHREDProb, Prob};
///
/// let p = PHREDProb::from(Prob(0.5));
///
/// assert_relative_eq!(*Prob::from(p), *Prob(0.5));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct PHREDProb(pub f64);
}
/// Iterator returned by scans over logprobs.
pub type ScanIter<I> = iter::Scan<<I as IntoIterator>::IntoIter,
LogProb,
fn(&mut LogProb, LogProb) -> Option<LogProb>>;
static LOGPROB_LN_ZERO: LogProb = LogProb(f64::NEG_INFINITY);
static LOGPROB_LN_ONE: LogProb = LogProb(0.0);
impl LogProb {
/// Log-space representation of Pr=0
pub fn ln_zero() -> LogProb {
LOGPROB_LN_ZERO
}
/// Log-space representation of Pr=1
pub fn ln_one() -> LogProb {
LOGPROB_LN_ONE
}
/// Numerically stable calculation of 1 - p in log-space.
pub fn ln_one_minus_exp(&self) -> LogProb {
LogProb(ln_1m_exp(**self))
}
/// Numerically stable sum of probabilities in log-space.
pub fn ln_sum_exp(probs: &[LogProb]) -> LogProb {
if probs.is_empty() {
Self::ln_zero()
} else {
let mut pmax = probs[0];
let mut imax = 0;
for (i, &p) in probs.iter().enumerate().skip(1) {
if p > pmax {
pmax = p;
imax = i;
}
}
if pmax == Self::ln_zero() {
Self::ln_zero()
} else if *pmax == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
// TODO use sum() once it has been stabilized:.sum::<usize>()
pmax +
LogProb((probs
.iter()
.enumerate()
.filter_map(|(i, p)| if i == imax {
None
} else {
Some((p - pmax).exp())
})
.fold(0.0, |s, e| s + e))
.ln_1p())
}
}
}
/// Numerically stable addition probabilities in log-space.
pub fn ln_add_exp(self, other: LogProb) -> LogProb {
let (mut p0, mut p1) = (self, other);
if p1 > p0 {
mem::swap(&mut p0, &mut p1);
}
if p0 == Self::ln_zero() {
Self::ln_zero()
} else if *p0 == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
p0 + LogProb((p1 - p0).exp().ln_1p())
}
}
/// Numerically stable subtraction of probabilities in log-space.
pub fn ln_sub_exp(self, other: LogProb) -> LogProb {
let (p0, p1) = (self, other);
assert!(p0 >= p1,
"Subtraction would lead to negative probability, which is undefined in log space.");
if relative_eq!(*p0, *p1) || p0 == Self::ln_zero() {
// the first case leads to zero,
// in the second case p0 and p1 are -inf, which is fine
Self::ln_zero()
} else if *p0 == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
p0 + (p1 - p0).ln_one_minus_exp()
}
}
/// Calculate the cumulative sum of the given probabilities in a numerically stable way (Durbin 1998).
pub fn ln_cumsum_exp<I: IntoIterator<Item = LogProb>>(probs: I) -> ScanIter<I> {
probs
.into_iter()
.scan(Self::ln_zero(), Self::scan_ln_add_exp)
}
/// Integrate numerically stable over given log-space density in the interval [a, b]. Uses the trapezoidal rule with n grid points.
pub fn ln_trapezoidal_integrate_exp<T, D>(density: &D, a: T, b: T, n: usize) -> LogProb where
T: Copy + Add<Output=T> + Sub<Output=T> + Div<Output=T> + Mul<Output=T> + Float,
D: Fn(T) -> LogProb,
f64: From<T>
{
let mut probs = linspace(a, b, n)
.dropping(1)
.dropping_back(1)
.map(|v| LogProb(*density(v) + 2.0f64.ln()))
.collect_vec();
probs.push(density(a));
probs.push(density(b));
let width = f64::from(b - a);
LogProb(*Self::ln_sum_exp(&probs) + width.ln() - (2.0 * (n - 1) as f64).ln())
}
/// Integrate numerically stable over given log-space density in the interval [a, b]. Uses Simpson's rule with n (odd) grid points.
pub fn ln_simpsons_integrate_exp<T, D>(density: &D, a: T, b: T, n: usize) -> LogProb where
T: Copy + Add<Output=T> + Sub<Output=T> + Div<Output=T> + Mul<Output=T> + Float,
D: Fn(T) -> LogProb,
f64: From<T>
{
assert_eq!(n % 2, 1, "n must be odd");
let mut probs = linspace(a, b, n)
.enumerate()
.dropping(1)
.dropping_back(1)
.map(|(i, v)| {
let weight = (2 + (i % 2) * 2) as f64;
LogProb(*density(v) + weight.ln()) // factors alter between 2 and 4
})
.collect_vec();
probs.push(density(a));
probs.push(density(b));
let width = f64::from(b - a);
LogProb(*Self::ln_sum_exp(&probs) + width.ln() - ((n - 1) as f64).ln() - 3.0f64.ln())
}
fn scan_ln_add_exp(s: &mut LogProb, p: LogProb) -> Option<LogProb> {
*s = s.ln_add_exp(p);
Some(*s)
}
}
impl From<LogProb> for Prob {
fn from(p: LogProb) -> Prob {
Prob(p.exp())
}
}
impl From<PHREDProb> for Prob {
fn from(p: PHREDProb) -> Prob {
Prob(10.0f64.powf(-*p / 10.0))
}
}
impl From<Prob> for LogProb {
fn from(p: Prob) -> LogProb {
LogProb(p.ln())
}
}
impl From<PHREDProb> for LogProb {
fn from(p: PHREDProb) -> LogProb {
LogProb(*p * PHRED_TO_LOG_FACTOR)
}
}
impl From<Prob> for PHREDProb {
fn from(p: Prob) -> PHREDProb {
PHREDProb(-10.0 * p.log10())
}
}
impl From<LogProb> for PHREDProb {
fn from(p: LogProb) -> PHREDProb {
PHREDProb(*p * LOG_TO_PHRED_FACTOR)
}
}
impl Default for LogProb {
fn default() -> LogProb {
LogProb::ln_zero()
}
}
impl Default for PHREDProb {
fn default() -> PHREDProb {
PHREDProb::from(Prob(0.0))
}
}
quick_error! {
#[derive(Debug)]
pub enum ProbError {
InvalidProb(value: f64) {
description("invalid probability")
display("probabilty {} not in interval [0,1]", value)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use itertools::Itertools;
#[test]
fn test_sum() {
let probs = [LogProb::ln_zero(), LogProb::ln_one(), LogProb::ln_zero()];
assert_eq!(LogProb::ln_sum_exp(&probs), LogProb::ln_one());
}
#[test]
fn test_empty_sum() {
assert_eq!(LogProb::ln_sum_exp(&[]), LogProb::ln_zero());
}
#[test]
fn test_cumsum() {
let probs = vec![LogProb::ln_zero(), LogProb(0.01f64.ln()), LogProb(0.001f64.ln())];
assert_eq!(LogProb::ln_cumsum_exp(probs).collect_vec(),
[LogProb::ln_zero(), LogProb(0.01f64.ln()), LogProb(0.011f64.ln())]);
}
#[test]
fn test_sub() {
assert_eq!(LogProb::ln_one().ln_sub_exp(LogProb::ln_one()),
LogProb::ln_zero());
assert_relative_eq!(*LogProb::ln_one().ln_sub_exp(LogProb(0.5f64.ln())),
*LogProb(0.5f64.ln()));
}
#[test]
fn test_one_minus() {
assert_eq!(LogProb::ln_zero().ln_one_minus_exp(), LogProb::ln_one());
assert_eq!(LogProb::ln_one().ln_one_minus_exp(), LogProb::ln_zero());
}
#[test]
fn test_trapezoidal_integrate() {
let density = |_| LogProb(0.1f64.ln());
let prob = LogProb::ln_trapezoidal_integrate_exp(&density, 0.0, 10.0, 5);
assert_relative_eq!(*prob, *LogProb::ln_one(), epsilon = 0.0000001);
}
#[test]
fn test_simpsons_integrate() {
let density = |_| LogProb(0.1f64.ln());
let prob = LogProb::ln_simpsons_integrate_exp(&density, 0.0, 10.0, 5);
assert_relative_eq!(*prob, *LogProb::ln_one(), epsilon = 0.0000001);
}
} | impl Prob {
pub fn checked(p: f64) -> Result<Self, ProbError> {
if p >= 0.0 && p <= 1.0 { | random_line_split |
mod.rs | // Copyright 2014-2016 Johannes Köster.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! Handling log-probabilities.
pub mod cdf;
use std::mem;
use std::f64;
use std::iter;
use std::ops::{Add, Sub, Mul, Div};
use itertools_num::linspace;
use itertools::Itertools;
use num_traits::Float;
/// A factor to convert log-probabilities to PHRED-scale (phred = p * `LOG_TO_PHRED_FACTOR`).
const LOG_TO_PHRED_FACTOR: f64 = -4.3429448190325175; // -10 * 1 / ln(10)
/// A factor to convert PHRED-scale to log-probabilities (p = phred * `PHRED_TO_LOG_FACTOR`).
const PHRED_TO_LOG_FACTOR: f64 = -0.23025850929940456; // 1 / (-10 * log10(e))
/// Calculate log(1 - p) with p given in log space without loss of precision as described in
/// http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf.
fn ln_1m_exp(p: f64) -> f64 {
assert!(p <= 0.0);
if p < -0.693 {
(-p.exp()).ln_1p()
} else {
(-p.exp_m1()).ln()
}
}
custom_derive! {
/// A newtype for probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::Prob;
///
/// let p = Prob(0.5);
/// let q = Prob(0.2);
///
/// assert_relative_eq!(*(p + q), *Prob(0.7));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
NewtypeMul(*),
NewtypeDiv(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
Default,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct Prob(pub f64);
}
impl Prob {
pub fn checked(p: f64) -> Result<Self, ProbError> {
if p >= 0.0 && p <= 1.0 {
Ok(Prob(p))
} else {
Err(ProbError::InvalidProb(p))
}
}
}
custom_derive! {
/// A newtype for log-scale probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::{LogProb, Prob};
///
/// // convert from probability
/// let p = LogProb::from(Prob(0.5));
/// // convert manually
/// let q = LogProb(0.2f64.ln());
/// // obtain zero probability in log-space
/// let o = LogProb::ln_one();
///
/// assert_relative_eq!(*Prob::from(p.ln_add_exp(q) + o), *Prob(0.7));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct LogProb(pub f64);
}
custom_derive! {
/// A newtype for PHRED-scale probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::{PHREDProb, Prob};
///
/// let p = PHREDProb::from(Prob(0.5));
///
/// assert_relative_eq!(*Prob::from(p), *Prob(0.5));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct PHREDProb(pub f64);
}
/// Iterator returned by scans over logprobs.
pub type ScanIter<I> = iter::Scan<<I as IntoIterator>::IntoIter,
LogProb,
fn(&mut LogProb, LogProb) -> Option<LogProb>>;
static LOGPROB_LN_ZERO: LogProb = LogProb(f64::NEG_INFINITY);
static LOGPROB_LN_ONE: LogProb = LogProb(0.0);
impl LogProb {
/// Log-space representation of Pr=0
pub fn ln_zero() -> LogProb {
LOGPROB_LN_ZERO
}
/// Log-space representation of Pr=1
pub fn ln_one() -> LogProb {
LOGPROB_LN_ONE
}
/// Numerically stable calculation of 1 - p in log-space.
pub fn ln_one_minus_exp(&self) -> LogProb {
LogProb(ln_1m_exp(**self))
}
/// Numerically stable sum of probabilities in log-space.
pub fn ln_sum_exp(probs: &[LogProb]) -> LogProb {
if probs.is_empty() {
Self::ln_zero()
} else {
let mut pmax = probs[0];
let mut imax = 0;
for (i, &p) in probs.iter().enumerate().skip(1) {
if p > pmax {
pmax = p;
imax = i;
}
}
if pmax == Self::ln_zero() {
Self::ln_zero()
} else if *pmax == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
// TODO use sum() once it has been stabilized:.sum::<usize>()
pmax +
LogProb((probs
.iter()
.enumerate()
.filter_map(|(i, p)| if i == imax {
None
} else {
Some((p - pmax).exp())
})
.fold(0.0, |s, e| s + e))
.ln_1p())
}
}
}
/// Numerically stable addition probabilities in log-space.
pub fn ln_add_exp(self, other: LogProb) -> LogProb {
let (mut p0, mut p1) = (self, other);
if p1 > p0 {
mem::swap(&mut p0, &mut p1);
}
if p0 == Self::ln_zero() {
Self::ln_zero()
} else if *p0 == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
p0 + LogProb((p1 - p0).exp().ln_1p())
}
}
/// Numerically stable subtraction of probabilities in log-space.
pub fn ln_sub_exp(self, other: LogProb) -> LogProb {
let (p0, p1) = (self, other);
assert!(p0 >= p1,
"Subtraction would lead to negative probability, which is undefined in log space.");
if relative_eq!(*p0, *p1) || p0 == Self::ln_zero() {
// the first case leads to zero,
// in the second case p0 and p1 are -inf, which is fine
Self::ln_zero()
} else if *p0 == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
p0 + (p1 - p0).ln_one_minus_exp()
}
}
/// Calculate the cumulative sum of the given probabilities in a numerically stable way (Durbin 1998).
pub fn ln_cumsum_exp<I: IntoIterator<Item = LogProb>>(probs: I) -> ScanIter<I> {
probs
.into_iter()
.scan(Self::ln_zero(), Self::scan_ln_add_exp)
}
/// Integrate numerically stable over given log-space density in the interval [a, b]. Uses the trapezoidal rule with n grid points.
pub fn ln_trapezoidal_integrate_exp<T, D>(density: &D, a: T, b: T, n: usize) -> LogProb where
T: Copy + Add<Output=T> + Sub<Output=T> + Div<Output=T> + Mul<Output=T> + Float,
D: Fn(T) -> LogProb,
f64: From<T>
{
let mut probs = linspace(a, b, n)
.dropping(1)
.dropping_back(1)
.map(|v| LogProb(*density(v) + 2.0f64.ln()))
.collect_vec();
probs.push(density(a));
probs.push(density(b));
let width = f64::from(b - a);
LogProb(*Self::ln_sum_exp(&probs) + width.ln() - (2.0 * (n - 1) as f64).ln())
}
/// Integrate numerically stable over given log-space density in the interval [a, b]. Uses Simpson's rule with n (odd) grid points.
pub fn ln_simpsons_integrate_exp<T, D>(density: &D, a: T, b: T, n: usize) -> LogProb where
T: Copy + Add<Output=T> + Sub<Output=T> + Div<Output=T> + Mul<Output=T> + Float,
D: Fn(T) -> LogProb,
f64: From<T>
{
assert_eq!(n % 2, 1, "n must be odd");
let mut probs = linspace(a, b, n)
.enumerate()
.dropping(1)
.dropping_back(1)
.map(|(i, v)| {
let weight = (2 + (i % 2) * 2) as f64;
LogProb(*density(v) + weight.ln()) // factors alter between 2 and 4
})
.collect_vec();
probs.push(density(a));
probs.push(density(b));
let width = f64::from(b - a);
LogProb(*Self::ln_sum_exp(&probs) + width.ln() - ((n - 1) as f64).ln() - 3.0f64.ln())
}
fn scan_ln_add_exp(s: &mut LogProb, p: LogProb) -> Option<LogProb> {
*s = s.ln_add_exp(p);
Some(*s)
}
}
impl From<LogProb> for Prob {
fn from(p: LogProb) -> Prob {
Prob(p.exp())
}
}
impl From<PHREDProb> for Prob {
fn from(p: PHREDProb) -> Prob { | }
impl From<Prob> for LogProb {
fn from(p: Prob) -> LogProb {
LogProb(p.ln())
}
}
impl From<PHREDProb> for LogProb {
fn from(p: PHREDProb) -> LogProb {
LogProb(*p * PHRED_TO_LOG_FACTOR)
}
}
impl From<Prob> for PHREDProb {
fn from(p: Prob) -> PHREDProb {
PHREDProb(-10.0 * p.log10())
}
}
impl From<LogProb> for PHREDProb {
fn from(p: LogProb) -> PHREDProb {
PHREDProb(*p * LOG_TO_PHRED_FACTOR)
}
}
impl Default for LogProb {
fn default() -> LogProb {
LogProb::ln_zero()
}
}
impl Default for PHREDProb {
fn default() -> PHREDProb {
PHREDProb::from(Prob(0.0))
}
}
quick_error! {
#[derive(Debug)]
pub enum ProbError {
InvalidProb(value: f64) {
description("invalid probability")
display("probabilty {} not in interval [0,1]", value)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use itertools::Itertools;
#[test]
fn test_sum() {
let probs = [LogProb::ln_zero(), LogProb::ln_one(), LogProb::ln_zero()];
assert_eq!(LogProb::ln_sum_exp(&probs), LogProb::ln_one());
}
#[test]
fn test_empty_sum() {
assert_eq!(LogProb::ln_sum_exp(&[]), LogProb::ln_zero());
}
#[test]
fn test_cumsum() {
let probs = vec![LogProb::ln_zero(), LogProb(0.01f64.ln()), LogProb(0.001f64.ln())];
assert_eq!(LogProb::ln_cumsum_exp(probs).collect_vec(),
[LogProb::ln_zero(), LogProb(0.01f64.ln()), LogProb(0.011f64.ln())]);
}
#[test]
fn test_sub() {
assert_eq!(LogProb::ln_one().ln_sub_exp(LogProb::ln_one()),
LogProb::ln_zero());
assert_relative_eq!(*LogProb::ln_one().ln_sub_exp(LogProb(0.5f64.ln())),
*LogProb(0.5f64.ln()));
}
#[test]
fn test_one_minus() {
assert_eq!(LogProb::ln_zero().ln_one_minus_exp(), LogProb::ln_one());
assert_eq!(LogProb::ln_one().ln_one_minus_exp(), LogProb::ln_zero());
}
#[test]
fn test_trapezoidal_integrate() {
let density = |_| LogProb(0.1f64.ln());
let prob = LogProb::ln_trapezoidal_integrate_exp(&density, 0.0, 10.0, 5);
assert_relative_eq!(*prob, *LogProb::ln_one(), epsilon = 0.0000001);
}
#[test]
fn test_simpsons_integrate() {
let density = |_| LogProb(0.1f64.ln());
let prob = LogProb::ln_simpsons_integrate_exp(&density, 0.0, 10.0, 5);
assert_relative_eq!(*prob, *LogProb::ln_one(), epsilon = 0.0000001);
}
}
|
Prob(10.0f64.powf(-*p / 10.0))
}
| identifier_body |
mod.rs | // Copyright 2014-2016 Johannes Köster.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! Handling log-probabilities.
pub mod cdf;
use std::mem;
use std::f64;
use std::iter;
use std::ops::{Add, Sub, Mul, Div};
use itertools_num::linspace;
use itertools::Itertools;
use num_traits::Float;
/// A factor to convert log-probabilities to PHRED-scale (phred = p * `LOG_TO_PHRED_FACTOR`).
const LOG_TO_PHRED_FACTOR: f64 = -4.3429448190325175; // -10 * 1 / ln(10)
/// A factor to convert PHRED-scale to log-probabilities (p = phred * `PHRED_TO_LOG_FACTOR`).
const PHRED_TO_LOG_FACTOR: f64 = -0.23025850929940456; // 1 / (-10 * log10(e))
/// Calculate log(1 - p) with p given in log space without loss of precision as described in
/// http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf.
fn ln_1m_exp(p: f64) -> f64 {
assert!(p <= 0.0);
if p < -0.693 {
(-p.exp()).ln_1p()
} else {
(-p.exp_m1()).ln()
}
}
custom_derive! {
/// A newtype for probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::Prob;
///
/// let p = Prob(0.5);
/// let q = Prob(0.2);
///
/// assert_relative_eq!(*(p + q), *Prob(0.7));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
NewtypeMul(*),
NewtypeDiv(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
Default,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct Prob(pub f64);
}
impl Prob {
pub fn checked(p: f64) -> Result<Self, ProbError> {
if p >= 0.0 && p <= 1.0 {
Ok(Prob(p))
} else {
Err(ProbError::InvalidProb(p))
}
}
}
custom_derive! {
/// A newtype for log-scale probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::{LogProb, Prob};
///
/// // convert from probability
/// let p = LogProb::from(Prob(0.5));
/// // convert manually
/// let q = LogProb(0.2f64.ln());
/// // obtain zero probability in log-space
/// let o = LogProb::ln_one();
///
/// assert_relative_eq!(*Prob::from(p.ln_add_exp(q) + o), *Prob(0.7));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct LogProb(pub f64);
}
custom_derive! {
/// A newtype for PHRED-scale probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::{PHREDProb, Prob};
///
/// let p = PHREDProb::from(Prob(0.5));
///
/// assert_relative_eq!(*Prob::from(p), *Prob(0.5));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct PHREDProb(pub f64);
}
/// Iterator returned by scans over logprobs.
pub type ScanIter<I> = iter::Scan<<I as IntoIterator>::IntoIter,
LogProb,
fn(&mut LogProb, LogProb) -> Option<LogProb>>;
static LOGPROB_LN_ZERO: LogProb = LogProb(f64::NEG_INFINITY);
static LOGPROB_LN_ONE: LogProb = LogProb(0.0);
impl LogProb {
/// Log-space representation of Pr=0
pub fn ln_zero() -> LogProb {
LOGPROB_LN_ZERO
}
/// Log-space representation of Pr=1
pub fn ln_one() -> LogProb {
LOGPROB_LN_ONE
}
/// Numerically stable calculation of 1 - p in log-space.
pub fn ln_one_minus_exp(&self) -> LogProb {
LogProb(ln_1m_exp(**self))
}
/// Numerically stable sum of probabilities in log-space.
pub fn ln_sum_exp(probs: &[LogProb]) -> LogProb {
if probs.is_empty() {
Self::ln_zero()
} else {
let mut pmax = probs[0];
let mut imax = 0;
for (i, &p) in probs.iter().enumerate().skip(1) {
if p > pmax {
pmax = p;
imax = i;
}
}
if pmax == Self::ln_zero() {
Self::ln_zero()
} else if *pmax == f64::INFINITY {
LogProb(f64::INFINITY)
} else { | }
}
/// Numerically stable addition probabilities in log-space.
pub fn ln_add_exp(self, other: LogProb) -> LogProb {
let (mut p0, mut p1) = (self, other);
if p1 > p0 {
mem::swap(&mut p0, &mut p1);
}
if p0 == Self::ln_zero() {
Self::ln_zero()
} else if *p0 == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
p0 + LogProb((p1 - p0).exp().ln_1p())
}
}
/// Numerically stable subtraction of probabilities in log-space.
pub fn ln_sub_exp(self, other: LogProb) -> LogProb {
let (p0, p1) = (self, other);
assert!(p0 >= p1,
"Subtraction would lead to negative probability, which is undefined in log space.");
if relative_eq!(*p0, *p1) || p0 == Self::ln_zero() {
// the first case leads to zero,
// in the second case p0 and p1 are -inf, which is fine
Self::ln_zero()
} else if *p0 == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
p0 + (p1 - p0).ln_one_minus_exp()
}
}
/// Calculate the cumulative sum of the given probabilities in a numerically stable way (Durbin 1998).
pub fn ln_cumsum_exp<I: IntoIterator<Item = LogProb>>(probs: I) -> ScanIter<I> {
probs
.into_iter()
.scan(Self::ln_zero(), Self::scan_ln_add_exp)
}
/// Integrate numerically stable over given log-space density in the interval [a, b]. Uses the trapezoidal rule with n grid points.
pub fn ln_trapezoidal_integrate_exp<T, D>(density: &D, a: T, b: T, n: usize) -> LogProb where
T: Copy + Add<Output=T> + Sub<Output=T> + Div<Output=T> + Mul<Output=T> + Float,
D: Fn(T) -> LogProb,
f64: From<T>
{
let mut probs = linspace(a, b, n)
.dropping(1)
.dropping_back(1)
.map(|v| LogProb(*density(v) + 2.0f64.ln()))
.collect_vec();
probs.push(density(a));
probs.push(density(b));
let width = f64::from(b - a);
LogProb(*Self::ln_sum_exp(&probs) + width.ln() - (2.0 * (n - 1) as f64).ln())
}
/// Integrate numerically stable over given log-space density in the interval [a, b]. Uses Simpson's rule with n (odd) grid points.
pub fn ln_simpsons_integrate_exp<T, D>(density: &D, a: T, b: T, n: usize) -> LogProb where
T: Copy + Add<Output=T> + Sub<Output=T> + Div<Output=T> + Mul<Output=T> + Float,
D: Fn(T) -> LogProb,
f64: From<T>
{
assert_eq!(n % 2, 1, "n must be odd");
let mut probs = linspace(a, b, n)
.enumerate()
.dropping(1)
.dropping_back(1)
.map(|(i, v)| {
let weight = (2 + (i % 2) * 2) as f64;
LogProb(*density(v) + weight.ln()) // factors alter between 2 and 4
})
.collect_vec();
probs.push(density(a));
probs.push(density(b));
let width = f64::from(b - a);
LogProb(*Self::ln_sum_exp(&probs) + width.ln() - ((n - 1) as f64).ln() - 3.0f64.ln())
}
fn scan_ln_add_exp(s: &mut LogProb, p: LogProb) -> Option<LogProb> {
*s = s.ln_add_exp(p);
Some(*s)
}
}
impl From<LogProb> for Prob {
fn from(p: LogProb) -> Prob {
Prob(p.exp())
}
}
impl From<PHREDProb> for Prob {
fn from(p: PHREDProb) -> Prob {
Prob(10.0f64.powf(-*p / 10.0))
}
}
impl From<Prob> for LogProb {
fn from(p: Prob) -> LogProb {
LogProb(p.ln())
}
}
impl From<PHREDProb> for LogProb {
fn from(p: PHREDProb) -> LogProb {
LogProb(*p * PHRED_TO_LOG_FACTOR)
}
}
impl From<Prob> for PHREDProb {
fn from(p: Prob) -> PHREDProb {
PHREDProb(-10.0 * p.log10())
}
}
impl From<LogProb> for PHREDProb {
fn from(p: LogProb) -> PHREDProb {
PHREDProb(*p * LOG_TO_PHRED_FACTOR)
}
}
impl Default for LogProb {
fn default() -> LogProb {
LogProb::ln_zero()
}
}
impl Default for PHREDProb {
fn default() -> PHREDProb {
PHREDProb::from(Prob(0.0))
}
}
quick_error! {
#[derive(Debug)]
pub enum ProbError {
InvalidProb(value: f64) {
description("invalid probability")
display("probabilty {} not in interval [0,1]", value)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use itertools::Itertools;
#[test]
fn test_sum() {
let probs = [LogProb::ln_zero(), LogProb::ln_one(), LogProb::ln_zero()];
assert_eq!(LogProb::ln_sum_exp(&probs), LogProb::ln_one());
}
#[test]
fn test_empty_sum() {
assert_eq!(LogProb::ln_sum_exp(&[]), LogProb::ln_zero());
}
#[test]
fn test_cumsum() {
let probs = vec![LogProb::ln_zero(), LogProb(0.01f64.ln()), LogProb(0.001f64.ln())];
assert_eq!(LogProb::ln_cumsum_exp(probs).collect_vec(),
[LogProb::ln_zero(), LogProb(0.01f64.ln()), LogProb(0.011f64.ln())]);
}
#[test]
fn test_sub() {
assert_eq!(LogProb::ln_one().ln_sub_exp(LogProb::ln_one()),
LogProb::ln_zero());
assert_relative_eq!(*LogProb::ln_one().ln_sub_exp(LogProb(0.5f64.ln())),
*LogProb(0.5f64.ln()));
}
#[test]
fn test_one_minus() {
assert_eq!(LogProb::ln_zero().ln_one_minus_exp(), LogProb::ln_one());
assert_eq!(LogProb::ln_one().ln_one_minus_exp(), LogProb::ln_zero());
}
#[test]
fn test_trapezoidal_integrate() {
let density = |_| LogProb(0.1f64.ln());
let prob = LogProb::ln_trapezoidal_integrate_exp(&density, 0.0, 10.0, 5);
assert_relative_eq!(*prob, *LogProb::ln_one(), epsilon = 0.0000001);
}
#[test]
fn test_simpsons_integrate() {
let density = |_| LogProb(0.1f64.ln());
let prob = LogProb::ln_simpsons_integrate_exp(&density, 0.0, 10.0, 5);
assert_relative_eq!(*prob, *LogProb::ln_one(), epsilon = 0.0000001);
}
}
|
// TODO use sum() once it has been stabilized: .sum::<usize>()
pmax +
LogProb((probs
.iter()
.enumerate()
.filter_map(|(i, p)| if i == imax {
None
} else {
Some((p - pmax).exp())
})
.fold(0.0, |s, e| s + e))
.ln_1p())
}
| conditional_block |
mod.rs | // Copyright 2014-2016 Johannes Köster.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! Handling log-probabilities.
pub mod cdf;
use std::mem;
use std::f64;
use std::iter;
use std::ops::{Add, Sub, Mul, Div};
use itertools_num::linspace;
use itertools::Itertools;
use num_traits::Float;
/// A factor to convert log-probabilities to PHRED-scale (phred = p * `LOG_TO_PHRED_FACTOR`).
const LOG_TO_PHRED_FACTOR: f64 = -4.3429448190325175; // -10 * 1 / ln(10)
/// A factor to convert PHRED-scale to log-probabilities (p = phred * `PHRED_TO_LOG_FACTOR`).
const PHRED_TO_LOG_FACTOR: f64 = -0.23025850929940456; // 1 / (-10 * log10(e))
/// Calculate log(1 - p) with p given in log space without loss of precision as described in
/// http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf.
fn ln_1m_exp(p: f64) -> f64 {
assert!(p <= 0.0);
if p < -0.693 {
(-p.exp()).ln_1p()
} else {
(-p.exp_m1()).ln()
}
}
custom_derive! {
/// A newtype for probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::Prob;
///
/// let p = Prob(0.5);
/// let q = Prob(0.2);
///
/// assert_relative_eq!(*(p + q), *Prob(0.7));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
NewtypeMul(*),
NewtypeDiv(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
Default,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct Prob(pub f64);
}
impl Prob {
pub fn checked(p: f64) -> Result<Self, ProbError> {
if p >= 0.0 && p <= 1.0 {
Ok(Prob(p))
} else {
Err(ProbError::InvalidProb(p))
}
}
}
custom_derive! {
/// A newtype for log-scale probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::{LogProb, Prob};
///
/// // convert from probability
/// let p = LogProb::from(Prob(0.5));
/// // convert manually
/// let q = LogProb(0.2f64.ln());
/// // obtain zero probability in log-space
/// let o = LogProb::ln_one();
///
/// assert_relative_eq!(*Prob::from(p.ln_add_exp(q) + o), *Prob(0.7));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct LogProb(pub f64);
}
custom_derive! {
/// A newtype for PHRED-scale probabilities.
///
/// # Example
///
/// ```
/// #[macro_use]
/// extern crate approx;
/// # extern crate bio;
/// # fn main() {
/// use bio::stats::{PHREDProb, Prob};
///
/// let p = PHREDProb::from(Prob(0.5));
///
/// assert_relative_eq!(*Prob::from(p), *Prob(0.5));
/// # }
/// ```
#[derive(
NewtypeFrom,
NewtypeDeref,
NewtypeAdd(*),
NewtypeSub(*),
PartialEq,
PartialOrd,
Copy,
Clone,
Debug,
RustcDecodable,
RustcEncodable
)]
#[derive(Serialize, Deserialize)]
pub struct PHREDProb(pub f64);
}
/// Iterator returned by scans over logprobs.
pub type ScanIter<I> = iter::Scan<<I as IntoIterator>::IntoIter,
LogProb,
fn(&mut LogProb, LogProb) -> Option<LogProb>>;
static LOGPROB_LN_ZERO: LogProb = LogProb(f64::NEG_INFINITY);
static LOGPROB_LN_ONE: LogProb = LogProb(0.0);
impl LogProb {
/// Log-space representation of Pr=0
pub fn ln_zero() -> LogProb {
LOGPROB_LN_ZERO
}
/// Log-space representation of Pr=1
pub fn ln_one() -> LogProb {
LOGPROB_LN_ONE
}
/// Numerically stable calculation of 1 - p in log-space.
pub fn ln_one_minus_exp(&self) -> LogProb {
LogProb(ln_1m_exp(**self))
}
/// Numerically stable sum of probabilities in log-space.
pub fn ln_sum_exp(probs: &[LogProb]) -> LogProb {
if probs.is_empty() {
Self::ln_zero()
} else {
let mut pmax = probs[0];
let mut imax = 0;
for (i, &p) in probs.iter().enumerate().skip(1) {
if p > pmax {
pmax = p;
imax = i;
}
}
if pmax == Self::ln_zero() {
Self::ln_zero()
} else if *pmax == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
// TODO use sum() once it has been stabilized:.sum::<usize>()
pmax +
LogProb((probs
.iter()
.enumerate()
.filter_map(|(i, p)| if i == imax {
None
} else {
Some((p - pmax).exp())
})
.fold(0.0, |s, e| s + e))
.ln_1p())
}
}
}
/// Numerically stable addition probabilities in log-space.
pub fn ln_add_exp(self, other: LogProb) -> LogProb {
let (mut p0, mut p1) = (self, other);
if p1 > p0 {
mem::swap(&mut p0, &mut p1);
}
if p0 == Self::ln_zero() {
Self::ln_zero()
} else if *p0 == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
p0 + LogProb((p1 - p0).exp().ln_1p())
}
}
/// Numerically stable subtraction of probabilities in log-space.
pub fn ln_sub_exp(self, other: LogProb) -> LogProb {
let (p0, p1) = (self, other);
assert!(p0 >= p1,
"Subtraction would lead to negative probability, which is undefined in log space.");
if relative_eq!(*p0, *p1) || p0 == Self::ln_zero() {
// the first case leads to zero,
// in the second case p0 and p1 are -inf, which is fine
Self::ln_zero()
} else if *p0 == f64::INFINITY {
LogProb(f64::INFINITY)
} else {
p0 + (p1 - p0).ln_one_minus_exp()
}
}
/// Calculate the cumulative sum of the given probabilities in a numerically stable way (Durbin 1998).
pub fn ln_cumsum_exp<I: IntoIterator<Item = LogProb>>(probs: I) -> ScanIter<I> {
probs
.into_iter()
.scan(Self::ln_zero(), Self::scan_ln_add_exp)
}
/// Integrate numerically stable over given log-space density in the interval [a, b]. Uses the trapezoidal rule with n grid points.
pub fn ln_trapezoidal_integrate_exp<T, D>(density: &D, a: T, b: T, n: usize) -> LogProb where
T: Copy + Add<Output=T> + Sub<Output=T> + Div<Output=T> + Mul<Output=T> + Float,
D: Fn(T) -> LogProb,
f64: From<T>
{
let mut probs = linspace(a, b, n)
.dropping(1)
.dropping_back(1)
.map(|v| LogProb(*density(v) + 2.0f64.ln()))
.collect_vec();
probs.push(density(a));
probs.push(density(b));
let width = f64::from(b - a);
LogProb(*Self::ln_sum_exp(&probs) + width.ln() - (2.0 * (n - 1) as f64).ln())
}
/// Integrate numerically stable over given log-space density in the interval [a, b]. Uses Simpson's rule with n (odd) grid points.
pub fn ln_simpsons_integrate_exp<T, D>(density: &D, a: T, b: T, n: usize) -> LogProb where
T: Copy + Add<Output=T> + Sub<Output=T> + Div<Output=T> + Mul<Output=T> + Float,
D: Fn(T) -> LogProb,
f64: From<T>
{
assert_eq!(n % 2, 1, "n must be odd");
let mut probs = linspace(a, b, n)
.enumerate()
.dropping(1)
.dropping_back(1)
.map(|(i, v)| {
let weight = (2 + (i % 2) * 2) as f64;
LogProb(*density(v) + weight.ln()) // factors alter between 2 and 4
})
.collect_vec();
probs.push(density(a));
probs.push(density(b));
let width = f64::from(b - a);
LogProb(*Self::ln_sum_exp(&probs) + width.ln() - ((n - 1) as f64).ln() - 3.0f64.ln())
}
fn scan_ln_add_exp(s: &mut LogProb, p: LogProb) -> Option<LogProb> {
*s = s.ln_add_exp(p);
Some(*s)
}
}
impl From<LogProb> for Prob {
fn from(p: LogProb) -> Prob {
Prob(p.exp())
}
}
impl From<PHREDProb> for Prob {
fn from(p: PHREDProb) -> Prob {
Prob(10.0f64.powf(-*p / 10.0))
}
}
impl From<Prob> for LogProb {
fn f | p: Prob) -> LogProb {
LogProb(p.ln())
}
}
impl From<PHREDProb> for LogProb {
fn from(p: PHREDProb) -> LogProb {
LogProb(*p * PHRED_TO_LOG_FACTOR)
}
}
impl From<Prob> for PHREDProb {
fn from(p: Prob) -> PHREDProb {
PHREDProb(-10.0 * p.log10())
}
}
impl From<LogProb> for PHREDProb {
fn from(p: LogProb) -> PHREDProb {
PHREDProb(*p * LOG_TO_PHRED_FACTOR)
}
}
impl Default for LogProb {
fn default() -> LogProb {
LogProb::ln_zero()
}
}
impl Default for PHREDProb {
fn default() -> PHREDProb {
PHREDProb::from(Prob(0.0))
}
}
quick_error! {
#[derive(Debug)]
pub enum ProbError {
InvalidProb(value: f64) {
description("invalid probability")
display("probabilty {} not in interval [0,1]", value)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use itertools::Itertools;
#[test]
fn test_sum() {
let probs = [LogProb::ln_zero(), LogProb::ln_one(), LogProb::ln_zero()];
assert_eq!(LogProb::ln_sum_exp(&probs), LogProb::ln_one());
}
#[test]
fn test_empty_sum() {
assert_eq!(LogProb::ln_sum_exp(&[]), LogProb::ln_zero());
}
#[test]
fn test_cumsum() {
let probs = vec![LogProb::ln_zero(), LogProb(0.01f64.ln()), LogProb(0.001f64.ln())];
assert_eq!(LogProb::ln_cumsum_exp(probs).collect_vec(),
[LogProb::ln_zero(), LogProb(0.01f64.ln()), LogProb(0.011f64.ln())]);
}
#[test]
fn test_sub() {
assert_eq!(LogProb::ln_one().ln_sub_exp(LogProb::ln_one()),
LogProb::ln_zero());
assert_relative_eq!(*LogProb::ln_one().ln_sub_exp(LogProb(0.5f64.ln())),
*LogProb(0.5f64.ln()));
}
#[test]
fn test_one_minus() {
assert_eq!(LogProb::ln_zero().ln_one_minus_exp(), LogProb::ln_one());
assert_eq!(LogProb::ln_one().ln_one_minus_exp(), LogProb::ln_zero());
}
#[test]
fn test_trapezoidal_integrate() {
let density = |_| LogProb(0.1f64.ln());
let prob = LogProb::ln_trapezoidal_integrate_exp(&density, 0.0, 10.0, 5);
assert_relative_eq!(*prob, *LogProb::ln_one(), epsilon = 0.0000001);
}
#[test]
fn test_simpsons_integrate() {
let density = |_| LogProb(0.1f64.ln());
let prob = LogProb::ln_simpsons_integrate_exp(&density, 0.0, 10.0, 5);
assert_relative_eq!(*prob, *LogProb::ln_one(), epsilon = 0.0000001);
}
}
| rom( | identifier_name |
cci_class_6.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod kitties {
pub struct cat<U> {
info : Vec<U>,
meows : uint,
pub how_hungry : int,
}
impl<U> cat<U> {
pub fn speak<T>(&mut self, stuff: Vec<T> ) {
self.meows += stuff.len();
}
pub fn meow_count(&mut self) -> uint { self.meows }
}
pub fn cat<U>(in_x : uint, in_y : int, in_info: Vec<U> ) -> cat<U> |
}
| {
cat {
meows: in_x,
how_hungry: in_y,
info: in_info
}
} | identifier_body |
cci_class_6.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod kitties { | info : Vec<U>,
meows : uint,
pub how_hungry : int,
}
impl<U> cat<U> {
pub fn speak<T>(&mut self, stuff: Vec<T> ) {
self.meows += stuff.len();
}
pub fn meow_count(&mut self) -> uint { self.meows }
}
pub fn cat<U>(in_x : uint, in_y : int, in_info: Vec<U> ) -> cat<U> {
cat {
meows: in_x,
how_hungry: in_y,
info: in_info
}
}
} |
pub struct cat<U> { | random_line_split |
cci_class_6.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod kitties {
pub struct | <U> {
info : Vec<U>,
meows : uint,
pub how_hungry : int,
}
impl<U> cat<U> {
pub fn speak<T>(&mut self, stuff: Vec<T> ) {
self.meows += stuff.len();
}
pub fn meow_count(&mut self) -> uint { self.meows }
}
pub fn cat<U>(in_x : uint, in_y : int, in_info: Vec<U> ) -> cat<U> {
cat {
meows: in_x,
how_hungry: in_y,
info: in_info
}
}
}
| cat | identifier_name |
issue-1362.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | let x: u32 = 20i32; //~ ERROR mismatched types
}
// NOTE: Do not add any extra lines as the line number the error is
// on is significant; an error later in the source file might not
// trigger the bug. | // except according to those terms.
// Regression test for issue #1362 - without that fix the span will be bogus
// no-reformat
fn main() { | random_line_split |
issue-1362.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for issue #1362 - without that fix the span will be bogus
// no-reformat
fn main() |
// NOTE: Do not add any extra lines as the line number the error is
// on is significant; an error later in the source file might not
// trigger the bug.
| {
let x: u32 = 20i32; //~ ERROR mismatched types
} | identifier_body |
issue-1362.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for issue #1362 - without that fix the span will be bogus
// no-reformat
fn | () {
let x: u32 = 20i32; //~ ERROR mismatched types
}
// NOTE: Do not add any extra lines as the line number the error is
// on is significant; an error later in the source file might not
// trigger the bug.
| main | identifier_name |
pdf.rs | // Copyright 2018-2019, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <https://opensource.org/licenses/MIT>
use std::convert::TryFrom;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::mem;
use std::ops::Deref;
use std::path::Path;
use std::ptr;
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
use enums::{PdfMetadata, PdfOutline};
use enums::{PdfVersion, SurfaceType};
use error::Error;
use ffi;
use surface::Surface;
use utils::status_to_result;
#[cfg(feature = "use_glib")]
use glib::translate::*;
impl PdfVersion {
pub fn as_str(self) -> Option<&'static str> {
unsafe {
let res = ffi::cairo_pdf_version_to_string(self.into());
res.as_ref()
.and_then(|cstr| CStr::from_ptr(cstr as _).to_str().ok())
}
}
}
declare_surface!(PdfSurface, SurfaceType::Pdf);
impl PdfSurface {
pub fn new<P: AsRef<Path>>(width: f64, height: f64, path: P) -> Result<Self, Error> {
let path = path.as_ref().to_string_lossy().into_owned();
let path = CString::new(path).unwrap();
unsafe { Self::from_raw_full(ffi::cairo_pdf_surface_create(path.as_ptr(), width, height)) }
}
for_stream_constructors!(cairo_pdf_surface_create_for_stream);
pub fn get_versions() -> impl Iterator<Item = PdfVersion> {
let vers_slice = unsafe {
let mut vers_ptr = ptr::null_mut();
let mut num_vers = mem::MaybeUninit::uninit();
ffi::cairo_pdf_get_versions(&mut vers_ptr, num_vers.as_mut_ptr());
std::slice::from_raw_parts(vers_ptr, num_vers.assume_init() as _)
};
vers_slice.iter().map(|v| PdfVersion::from(*v))
}
pub fn restrict(&self, version: PdfVersion) -> Result<(), Error> {
unsafe {
ffi::cairo_pdf_surface_restrict_to_version(self.0.to_raw_none(), version.into());
}
self.status()
}
pub fn set_size(&self, width: f64, height: f64) -> Result<(), Error> {
unsafe {
ffi::cairo_pdf_surface_set_size(self.0.to_raw_none(), width, height);
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn set_metadata(&self, metadata: PdfMetadata, value: &str) -> Result<(), Error> {
let value = CString::new(value).unwrap();
unsafe {
ffi::cairo_pdf_surface_set_metadata(
self.0.to_raw_none(),
metadata.into(),
value.as_ptr(),
);
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn set_page_label(&self, label: &str) -> Result<(), Error> {
let label = CString::new(label).unwrap();
unsafe {
ffi::cairo_pdf_surface_set_page_label(self.0.to_raw_none(), label.as_ptr());
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn set_thumbnail_size(&self, width: i32, height: i32) -> Result<(), Error> {
unsafe {
ffi::cairo_pdf_surface_set_thumbnail_size(
self.0.to_raw_none(),
width as _,
height as _,
);
}
self.status()
}
| pub fn add_outline(
&self,
parent_id: i32,
name: &str,
link_attribs: &str,
flags: PdfOutline,
) -> Result<i32, Error> {
let name = CString::new(name).unwrap();
let link_attribs = CString::new(link_attribs).unwrap();
let res = unsafe {
ffi::cairo_pdf_surface_add_outline(
self.0.to_raw_none(),
parent_id,
name.as_ptr(),
link_attribs.as_ptr(),
flags.bits() as _,
) as _
};
self.status()?;
Ok(res)
}
fn status(&self) -> Result<(), Error> {
let status = unsafe { ffi::cairo_surface_status(self.to_raw_none()) };
status_to_result(status)
}
}
#[cfg(test)]
mod test {
use super::*;
use context::*;
use tempfile::tempfile;
fn draw(surface: &Surface) {
let cr = Context::new(surface);
cr.set_line_width(25.0);
cr.set_source_rgba(1.0, 0.0, 0.0, 0.5);
cr.line_to(0., 0.);
cr.line_to(100., 100.);
cr.stroke();
cr.set_source_rgba(0.0, 0.0, 1.0, 0.5);
cr.line_to(0., 100.);
cr.line_to(100., 0.);
cr.stroke();
}
fn draw_in_buffer() -> Vec<u8> {
let buffer: Vec<u8> = vec![];
let surface = PdfSurface::for_stream(100., 100., buffer).unwrap();
draw(&surface);
*surface.finish_output_stream().unwrap().downcast().unwrap()
}
#[test]
fn versions() {
assert!(PdfSurface::get_versions().any(|v| v == PdfVersion::_1_4));
}
#[test]
fn version_string() {
let ver_str = PdfVersion::_1_4.as_str().unwrap();
assert_eq!(ver_str, "PDF 1.4");
}
#[test]
#[cfg(unix)]
fn file() {
let surface = PdfSurface::new(100., 100., "/dev/null").unwrap();
draw(&surface);
surface.finish();
}
#[test]
fn writer() {
let file = tempfile().expect("tempfile failed");
let surface = PdfSurface::for_stream(100., 100., file).unwrap();
draw(&surface);
let stream = surface.finish_output_stream().unwrap();
let file = stream.downcast::<std::fs::File>().unwrap();
let buffer = draw_in_buffer();
let file_size = file.metadata().unwrap().len();
assert_eq!(file_size, buffer.len() as u64);
}
#[test]
fn ref_writer() {
let mut file = tempfile().expect("tempfile failed");
let surface = unsafe { PdfSurface::for_raw_stream(100., 100., &mut file).unwrap() };
draw(&surface);
surface.finish_output_stream().unwrap();
drop(file);
}
#[test]
fn buffer() {
let buffer = draw_in_buffer();
let header = b"%PDF-1.5";
assert_eq!(&buffer[..header.len()], header);
}
#[test]
fn custom_writer() {
struct CustomWriter(usize);
impl io::Write for CustomWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0 += buf.len();
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
let custom_writer = CustomWriter(0);
let surface = PdfSurface::for_stream(20., 20., custom_writer).unwrap();
surface.set_size(100., 100.).unwrap();
draw(&surface);
let stream = surface.finish_output_stream().unwrap();
let custom_writer = stream.downcast::<CustomWriter>().unwrap();
let buffer = draw_in_buffer();
assert_eq!(custom_writer.0, buffer.len());
}
fn with_panicky_stream() -> PdfSurface {
struct PanicWriter;
impl io::Write for PanicWriter {
fn write(&mut self, _buf: &[u8]) -> io::Result<usize> {
panic!("panic in writer");
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
let surface = PdfSurface::for_stream(20., 20., PanicWriter).unwrap();
surface.finish();
surface
}
#[test]
#[should_panic]
fn finish_stream_propagates_panic() {
let _ = with_panicky_stream().finish_output_stream();
}
} | #[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))] | random_line_split |
pdf.rs | // Copyright 2018-2019, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <https://opensource.org/licenses/MIT>
use std::convert::TryFrom;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::mem;
use std::ops::Deref;
use std::path::Path;
use std::ptr;
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
use enums::{PdfMetadata, PdfOutline};
use enums::{PdfVersion, SurfaceType};
use error::Error;
use ffi;
use surface::Surface;
use utils::status_to_result;
#[cfg(feature = "use_glib")]
use glib::translate::*;
impl PdfVersion {
pub fn as_str(self) -> Option<&'static str> {
unsafe {
let res = ffi::cairo_pdf_version_to_string(self.into());
res.as_ref()
.and_then(|cstr| CStr::from_ptr(cstr as _).to_str().ok())
}
}
}
declare_surface!(PdfSurface, SurfaceType::Pdf);
impl PdfSurface {
pub fn new<P: AsRef<Path>>(width: f64, height: f64, path: P) -> Result<Self, Error> {
let path = path.as_ref().to_string_lossy().into_owned();
let path = CString::new(path).unwrap();
unsafe { Self::from_raw_full(ffi::cairo_pdf_surface_create(path.as_ptr(), width, height)) }
}
for_stream_constructors!(cairo_pdf_surface_create_for_stream);
pub fn get_versions() -> impl Iterator<Item = PdfVersion> {
let vers_slice = unsafe {
let mut vers_ptr = ptr::null_mut();
let mut num_vers = mem::MaybeUninit::uninit();
ffi::cairo_pdf_get_versions(&mut vers_ptr, num_vers.as_mut_ptr());
std::slice::from_raw_parts(vers_ptr, num_vers.assume_init() as _)
};
vers_slice.iter().map(|v| PdfVersion::from(*v))
}
pub fn restrict(&self, version: PdfVersion) -> Result<(), Error> {
unsafe {
ffi::cairo_pdf_surface_restrict_to_version(self.0.to_raw_none(), version.into());
}
self.status()
}
pub fn set_size(&self, width: f64, height: f64) -> Result<(), Error> {
unsafe {
ffi::cairo_pdf_surface_set_size(self.0.to_raw_none(), width, height);
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn set_metadata(&self, metadata: PdfMetadata, value: &str) -> Result<(), Error> {
let value = CString::new(value).unwrap();
unsafe {
ffi::cairo_pdf_surface_set_metadata(
self.0.to_raw_none(),
metadata.into(),
value.as_ptr(),
);
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn set_page_label(&self, label: &str) -> Result<(), Error> {
let label = CString::new(label).unwrap();
unsafe {
ffi::cairo_pdf_surface_set_page_label(self.0.to_raw_none(), label.as_ptr());
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn set_thumbnail_size(&self, width: i32, height: i32) -> Result<(), Error> {
unsafe {
ffi::cairo_pdf_surface_set_thumbnail_size(
self.0.to_raw_none(),
width as _,
height as _,
);
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn add_outline(
&self,
parent_id: i32,
name: &str,
link_attribs: &str,
flags: PdfOutline,
) -> Result<i32, Error> {
let name = CString::new(name).unwrap();
let link_attribs = CString::new(link_attribs).unwrap();
let res = unsafe {
ffi::cairo_pdf_surface_add_outline(
self.0.to_raw_none(),
parent_id,
name.as_ptr(),
link_attribs.as_ptr(),
flags.bits() as _,
) as _
};
self.status()?;
Ok(res)
}
fn status(&self) -> Result<(), Error> {
let status = unsafe { ffi::cairo_surface_status(self.to_raw_none()) };
status_to_result(status)
}
}
#[cfg(test)]
mod test {
use super::*;
use context::*;
use tempfile::tempfile;
fn draw(surface: &Surface) {
let cr = Context::new(surface);
cr.set_line_width(25.0);
cr.set_source_rgba(1.0, 0.0, 0.0, 0.5);
cr.line_to(0., 0.);
cr.line_to(100., 100.);
cr.stroke();
cr.set_source_rgba(0.0, 0.0, 1.0, 0.5);
cr.line_to(0., 100.);
cr.line_to(100., 0.);
cr.stroke();
}
fn draw_in_buffer() -> Vec<u8> {
let buffer: Vec<u8> = vec![];
let surface = PdfSurface::for_stream(100., 100., buffer).unwrap();
draw(&surface);
*surface.finish_output_stream().unwrap().downcast().unwrap()
}
#[test]
fn versions() {
assert!(PdfSurface::get_versions().any(|v| v == PdfVersion::_1_4));
}
#[test]
fn version_string() {
let ver_str = PdfVersion::_1_4.as_str().unwrap();
assert_eq!(ver_str, "PDF 1.4");
}
#[test]
#[cfg(unix)]
fn file() {
let surface = PdfSurface::new(100., 100., "/dev/null").unwrap();
draw(&surface);
surface.finish();
}
#[test]
fn writer() {
let file = tempfile().expect("tempfile failed");
let surface = PdfSurface::for_stream(100., 100., file).unwrap();
draw(&surface);
let stream = surface.finish_output_stream().unwrap();
let file = stream.downcast::<std::fs::File>().unwrap();
let buffer = draw_in_buffer();
let file_size = file.metadata().unwrap().len();
assert_eq!(file_size, buffer.len() as u64);
}
#[test]
fn ref_writer() {
let mut file = tempfile().expect("tempfile failed");
let surface = unsafe { PdfSurface::for_raw_stream(100., 100., &mut file).unwrap() };
draw(&surface);
surface.finish_output_stream().unwrap();
drop(file);
}
#[test]
fn buffer() {
let buffer = draw_in_buffer();
let header = b"%PDF-1.5";
assert_eq!(&buffer[..header.len()], header);
}
#[test]
fn custom_writer() {
struct CustomWriter(usize);
impl io::Write for CustomWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0 += buf.len();
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
let custom_writer = CustomWriter(0);
let surface = PdfSurface::for_stream(20., 20., custom_writer).unwrap();
surface.set_size(100., 100.).unwrap();
draw(&surface);
let stream = surface.finish_output_stream().unwrap();
let custom_writer = stream.downcast::<CustomWriter>().unwrap();
let buffer = draw_in_buffer();
assert_eq!(custom_writer.0, buffer.len());
}
fn with_panicky_stream() -> PdfSurface {
struct | ;
impl io::Write for PanicWriter {
fn write(&mut self, _buf: &[u8]) -> io::Result<usize> {
panic!("panic in writer");
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
let surface = PdfSurface::for_stream(20., 20., PanicWriter).unwrap();
surface.finish();
surface
}
#[test]
#[should_panic]
fn finish_stream_propagates_panic() {
let _ = with_panicky_stream().finish_output_stream();
}
}
| PanicWriter | identifier_name |
pdf.rs | // Copyright 2018-2019, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <https://opensource.org/licenses/MIT>
use std::convert::TryFrom;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::mem;
use std::ops::Deref;
use std::path::Path;
use std::ptr;
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
use enums::{PdfMetadata, PdfOutline};
use enums::{PdfVersion, SurfaceType};
use error::Error;
use ffi;
use surface::Surface;
use utils::status_to_result;
#[cfg(feature = "use_glib")]
use glib::translate::*;
impl PdfVersion {
pub fn as_str(self) -> Option<&'static str> {
unsafe {
let res = ffi::cairo_pdf_version_to_string(self.into());
res.as_ref()
.and_then(|cstr| CStr::from_ptr(cstr as _).to_str().ok())
}
}
}
declare_surface!(PdfSurface, SurfaceType::Pdf);
impl PdfSurface {
pub fn new<P: AsRef<Path>>(width: f64, height: f64, path: P) -> Result<Self, Error> {
let path = path.as_ref().to_string_lossy().into_owned();
let path = CString::new(path).unwrap();
unsafe { Self::from_raw_full(ffi::cairo_pdf_surface_create(path.as_ptr(), width, height)) }
}
for_stream_constructors!(cairo_pdf_surface_create_for_stream);
pub fn get_versions() -> impl Iterator<Item = PdfVersion> {
let vers_slice = unsafe {
let mut vers_ptr = ptr::null_mut();
let mut num_vers = mem::MaybeUninit::uninit();
ffi::cairo_pdf_get_versions(&mut vers_ptr, num_vers.as_mut_ptr());
std::slice::from_raw_parts(vers_ptr, num_vers.assume_init() as _)
};
vers_slice.iter().map(|v| PdfVersion::from(*v))
}
pub fn restrict(&self, version: PdfVersion) -> Result<(), Error> {
unsafe {
ffi::cairo_pdf_surface_restrict_to_version(self.0.to_raw_none(), version.into());
}
self.status()
}
pub fn set_size(&self, width: f64, height: f64) -> Result<(), Error> {
unsafe {
ffi::cairo_pdf_surface_set_size(self.0.to_raw_none(), width, height);
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn set_metadata(&self, metadata: PdfMetadata, value: &str) -> Result<(), Error> {
let value = CString::new(value).unwrap();
unsafe {
ffi::cairo_pdf_surface_set_metadata(
self.0.to_raw_none(),
metadata.into(),
value.as_ptr(),
);
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn set_page_label(&self, label: &str) -> Result<(), Error> {
let label = CString::new(label).unwrap();
unsafe {
ffi::cairo_pdf_surface_set_page_label(self.0.to_raw_none(), label.as_ptr());
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn set_thumbnail_size(&self, width: i32, height: i32) -> Result<(), Error> {
unsafe {
ffi::cairo_pdf_surface_set_thumbnail_size(
self.0.to_raw_none(),
width as _,
height as _,
);
}
self.status()
}
#[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))]
pub fn add_outline(
&self,
parent_id: i32,
name: &str,
link_attribs: &str,
flags: PdfOutline,
) -> Result<i32, Error> {
let name = CString::new(name).unwrap();
let link_attribs = CString::new(link_attribs).unwrap();
let res = unsafe {
ffi::cairo_pdf_surface_add_outline(
self.0.to_raw_none(),
parent_id,
name.as_ptr(),
link_attribs.as_ptr(),
flags.bits() as _,
) as _
};
self.status()?;
Ok(res)
}
fn status(&self) -> Result<(), Error> {
let status = unsafe { ffi::cairo_surface_status(self.to_raw_none()) };
status_to_result(status)
}
}
#[cfg(test)]
mod test {
use super::*;
use context::*;
use tempfile::tempfile;
fn draw(surface: &Surface) {
let cr = Context::new(surface);
cr.set_line_width(25.0);
cr.set_source_rgba(1.0, 0.0, 0.0, 0.5);
cr.line_to(0., 0.);
cr.line_to(100., 100.);
cr.stroke();
cr.set_source_rgba(0.0, 0.0, 1.0, 0.5);
cr.line_to(0., 100.);
cr.line_to(100., 0.);
cr.stroke();
}
fn draw_in_buffer() -> Vec<u8> {
let buffer: Vec<u8> = vec![];
let surface = PdfSurface::for_stream(100., 100., buffer).unwrap();
draw(&surface);
*surface.finish_output_stream().unwrap().downcast().unwrap()
}
#[test]
fn versions() {
assert!(PdfSurface::get_versions().any(|v| v == PdfVersion::_1_4));
}
#[test]
fn version_string() {
let ver_str = PdfVersion::_1_4.as_str().unwrap();
assert_eq!(ver_str, "PDF 1.4");
}
#[test]
#[cfg(unix)]
fn file() {
let surface = PdfSurface::new(100., 100., "/dev/null").unwrap();
draw(&surface);
surface.finish();
}
#[test]
fn writer() {
let file = tempfile().expect("tempfile failed");
let surface = PdfSurface::for_stream(100., 100., file).unwrap();
draw(&surface);
let stream = surface.finish_output_stream().unwrap();
let file = stream.downcast::<std::fs::File>().unwrap();
let buffer = draw_in_buffer();
let file_size = file.metadata().unwrap().len();
assert_eq!(file_size, buffer.len() as u64);
}
#[test]
fn ref_writer() {
let mut file = tempfile().expect("tempfile failed");
let surface = unsafe { PdfSurface::for_raw_stream(100., 100., &mut file).unwrap() };
draw(&surface);
surface.finish_output_stream().unwrap();
drop(file);
}
#[test]
fn buffer() {
let buffer = draw_in_buffer();
let header = b"%PDF-1.5";
assert_eq!(&buffer[..header.len()], header);
}
#[test]
fn custom_writer() {
struct CustomWriter(usize);
impl io::Write for CustomWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0 += buf.len();
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
let custom_writer = CustomWriter(0);
let surface = PdfSurface::for_stream(20., 20., custom_writer).unwrap();
surface.set_size(100., 100.).unwrap();
draw(&surface);
let stream = surface.finish_output_stream().unwrap();
let custom_writer = stream.downcast::<CustomWriter>().unwrap();
let buffer = draw_in_buffer();
assert_eq!(custom_writer.0, buffer.len());
}
fn with_panicky_stream() -> PdfSurface |
#[test]
#[should_panic]
fn finish_stream_propagates_panic() {
let _ = with_panicky_stream().finish_output_stream();
}
}
| {
struct PanicWriter;
impl io::Write for PanicWriter {
fn write(&mut self, _buf: &[u8]) -> io::Result<usize> {
panic!("panic in writer");
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
let surface = PdfSurface::for_stream(20., 20., PanicWriter).unwrap();
surface.finish();
surface
} | identifier_body |
node_ops.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
#[cfg(feature = "simulated-payouts")]
use sn_data_types::Transfer;
use sn_data_types::{
ActorHistory, Blob, CreditAgreementProof, NodeAge, PublicKey, RewardAccumulation,
RewardProposal, SignedTransfer, TransferAgreementProof,
};
use sn_messaging::client::ClientMsg;
use sn_messaging::{
client::{
BlobRead, BlobWrite, ClientSigned, DataCmd, DataExchange, DataQuery, ProcessMsg,
ProcessingError, QueryResponse, SupportingInfo,
},
node::NodeMsg,
Aggregation, DstLocation, EndUser, MessageId, SrcLocation,
};
use sn_routing::Prefix;
use std::{
collections::{BTreeMap, BTreeSet},
fmt::{Debug, Formatter},
};
use xor_name::XorName;
/// Internal messages are what is passed along
/// within a node, between the entry point and
/// exit point of remote messages.
/// In other words, when communication from another
/// participant at the network arrives, it is mapped
/// to an internal message, that can
/// then be passed along to its proper processing module
/// at the node. At a node module, the result of such a call
/// is also an internal message.
/// Finally, an internal message might be destined for messaging
/// module, by which it leaves the process boundary of this node
/// and is sent on the wire to some other destination(s) on the network.
/// Vec of NodeDuty
pub type NodeDuties = Vec<NodeDuty>;
/// Common duties run by all nodes.
#[allow(clippy::large_enum_variant)]
pub enum | {
GetNodeWalletKey {
node_name: XorName,
msg_id: MessageId,
origin: SrcLocation,
},
PropagateTransfer {
proof: CreditAgreementProof,
msg_id: MessageId,
origin: SrcLocation,
},
SetNodeWallet {
wallet_id: PublicKey,
node_id: XorName,
},
GetTransferReplicaEvents {
msg_id: MessageId,
origin: SrcLocation,
},
/// Validate a transfer from a client
ValidateClientTransfer {
signed_transfer: SignedTransfer,
msg_id: MessageId,
origin: SrcLocation,
},
/// Register a transfer from a client
RegisterTransfer {
proof: TransferAgreementProof,
msg_id: MessageId,
origin: SrcLocation,
},
/// TEMP: Simulate a transfer from a client
SimulatePayout {
transfer: Transfer,
msg_id: MessageId,
origin: SrcLocation,
},
ReadChunk {
read: BlobRead,
msg_id: MessageId,
},
WriteChunk {
write: BlobWrite,
msg_id: MessageId,
client_signed: ClientSigned,
},
ProcessRepublish {
chunk: Blob,
msg_id: MessageId,
},
/// Run at data-section Elders on receiving the result of
/// read operations from Adults
RecordAdultReadLiveness {
response: QueryResponse,
correlation_id: MessageId,
src: XorName,
},
/// Get section elders.
GetSectionElders {
msg_id: MessageId,
origin: SrcLocation,
},
/// Get key transfers since specified version.
GetTransfersHistory {
/// The wallet key.
at: PublicKey,
/// The last version of transfers we know of.
since_version: usize,
msg_id: MessageId,
origin: SrcLocation,
},
/// Get Balance at a specific key
GetBalance {
at: PublicKey,
msg_id: MessageId,
origin: SrcLocation,
},
GetStoreCost {
/// Number of bytes to write.
bytes: u64,
msg_id: MessageId,
origin: SrcLocation,
},
/// Proposal of payout of rewards.
ReceiveRewardProposal(RewardProposal),
/// Accumulation of payout of rewards.
ReceiveRewardAccumulation(RewardAccumulation),
Genesis,
EldersChanged {
/// Our section prefix.
our_prefix: Prefix,
/// Our section public key.
our_key: PublicKey,
/// The new Elders.
new_elders: BTreeSet<XorName>,
/// Oldie or newbie?
newbie: bool,
},
AdultsChanged {
/// Remaining Adults in our section.
remaining: BTreeSet<XorName>,
/// New Adults in our section.
added: BTreeSet<XorName>,
/// Removed Adults in our section.
removed: BTreeSet<XorName>,
},
SectionSplit {
/// Our section prefix.
our_prefix: Prefix,
/// our section public key
our_key: PublicKey,
/// The new Elders of our section.
our_new_elders: BTreeSet<XorName>,
/// The new Elders of our sibling section.
their_new_elders: BTreeSet<XorName>,
/// The PK of the sibling section, as this event is fired during a split.
sibling_key: PublicKey,
/// oldie or newbie?
newbie: bool,
},
/// When demoted, node levels down
LevelDown,
/// Initiates the node with state from peers.
SynchState {
/// The registered wallet keys for nodes earning rewards
node_rewards: BTreeMap<XorName, (NodeAge, PublicKey)>,
/// The wallets of users on the network.
user_wallets: BTreeMap<PublicKey, ActorHistory>,
/// The metadata stored on Elders.
metadata: DataExchange,
},
/// As members are lost for various reasons
/// there are certain things nodes need
/// to do, to update for that.
ProcessLostMember {
name: XorName,
age: u8,
},
/// Storage reaching max capacity.
ReachingMaxCapacity,
/// Increment count of full nodes in the network
IncrementFullNodeCount {
/// Node ID of node that reached max capacity.
node_id: PublicKey,
},
/// Sets joining allowed to true or false.
SetNodeJoinsAllowed(bool),
/// Send a message to the specified dst.
Send(OutgoingMsg),
/// Send a lazy error as a result of a specific message.
/// The aim here is for the sender to respond with any missing state
SendError(OutgoingLazyError),
/// Send supporting info for a given processing error.
/// This should be any missing state required to proceed at the erring node.
SendSupport(OutgoingSupportingInfo),
/// Send the same request to each individual node.
SendToNodes {
msg: NodeMsg,
targets: BTreeSet<XorName>,
aggregation: Aggregation,
},
/// Process read of data
ProcessRead {
query: DataQuery,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
},
/// Process write of data
ProcessWrite {
cmd: DataCmd,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
},
/// Process Payment for a DataCmd
ProcessDataPayment {
msg: ProcessMsg,
origin: EndUser,
},
/// Receive a chunk that is being replicated.
/// This is run at an Adult (the new holder).
ReplicateChunk {
data: Blob,
msg_id: MessageId,
},
/// Create proposals to vote unresponsive nodes as offline
ProposeOffline(Vec<XorName>),
NoOp,
}
impl From<NodeDuty> for NodeDuties {
fn from(duty: NodeDuty) -> Self {
if matches!(duty, NodeDuty::NoOp) {
vec![]
} else {
vec![duty]
}
}
}
impl Debug for NodeDuty {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Genesis {.. } => write!(f, "Genesis"),
Self::GetNodeWalletKey {.. } => write!(f, "GetNodeWalletKey"),
Self::PropagateTransfer {.. } => write!(f, "PropagateTransfer"),
Self::SetNodeWallet {.. } => write!(f, "SetNodeWallet"),
Self::GetTransferReplicaEvents {.. } => write!(f, "GetTransferReplicaEvents"),
Self::ValidateClientTransfer {.. } => write!(f, "ValidateClientTransfer"),
Self::RegisterTransfer {.. } => write!(f, "RegisterTransfer"),
Self::GetBalance {.. } => write!(f, "GetBalance"),
Self::GetStoreCost {.. } => write!(f, "GetStoreCost"),
Self::SimulatePayout {.. } => write!(f, "SimulatePayout"),
Self::GetTransfersHistory {.. } => write!(f, "GetTransfersHistory"),
Self::ReadChunk {.. } => write!(f, "ReadChunk"),
Self::WriteChunk {.. } => write!(f, "WriteChunk"),
Self::ProcessRepublish {.. } => write!(f, "ProcessRepublish"),
Self::RecordAdultReadLiveness {
correlation_id,
response,
src,
} => write!(
f,
"RecordAdultReadLiveness {{ correlation_id: {}, response: {:?}, src: {} }}",
correlation_id, response, src
),
Self::ReceiveRewardProposal {.. } => write!(f, "ReceiveRewardProposal"),
Self::ReceiveRewardAccumulation {.. } => write!(f, "ReceiveRewardAccumulation"),
// ------
Self::LevelDown => write!(f, "LevelDown"),
Self::SynchState {.. } => write!(f, "SynchState"),
Self::EldersChanged {.. } => write!(f, "EldersChanged"),
Self::AdultsChanged {.. } => write!(f, "AdultsChanged"),
Self::SectionSplit {.. } => write!(f, "SectionSplit"),
Self::GetSectionElders {.. } => write!(f, "GetSectionElders"),
Self::NoOp => write!(f, "No op."),
Self::ReachingMaxCapacity => write!(f, "ReachingMaxCapacity"),
Self::ProcessLostMember {.. } => write!(f, "ProcessLostMember"),
//Self::ProcessRelocatedMember {.. } => write!(f, "ProcessRelocatedMember"),
Self::IncrementFullNodeCount {.. } => write!(f, "IncrementFullNodeCount"),
Self::SetNodeJoinsAllowed(_) => write!(f, "SetNodeJoinsAllowed"),
Self::Send(msg) => write!(f, "Send [ msg: {:?} ]", msg),
Self::SendError(msg) => write!(f, "SendError [ msg: {:?} ]", msg),
Self::SendSupport(msg) => write!(f, "SendSupport [ msg: {:?} ]", msg),
Self::SendToNodes {
msg,
targets,
aggregation,
} => write!(
f,
"SendToNodes [ msg: {:?}, targets: {:?}, aggregation: {:?} ]",
msg, targets, aggregation
),
Self::ProcessRead {.. } => write!(f, "ProcessRead"),
Self::ProcessWrite {.. } => write!(f, "ProcessWrite"),
Self::ProcessDataPayment {.. } => write!(f, "ProcessDataPayment"),
Self::ReplicateChunk {.. } => write!(f, "ReplicateChunk"),
Self::ProposeOffline(nodes) => write!(f, "ProposeOffline({:?})", nodes),
}
}
}
// --------------- Messaging ---------------
#[derive(Debug, Clone)]
pub struct OutgoingMsg {
pub msg: MsgType,
pub dst: DstLocation,
pub section_source: bool,
pub aggregation: Aggregation,
}
#[derive(Debug, Clone)]
#[allow(clippy::large_enum_variant)]
pub enum MsgType {
Node(NodeMsg),
Client(ClientMsg),
}
#[derive(Debug, Clone)]
pub struct OutgoingLazyError {
pub msg: ProcessingError,
pub dst: DstLocation,
}
#[derive(Debug, Clone)]
pub struct OutgoingSupportingInfo {
pub msg: SupportingInfo,
pub dst: DstLocation,
}
| NodeDuty | identifier_name |
node_ops.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
#[cfg(feature = "simulated-payouts")]
use sn_data_types::Transfer;
use sn_data_types::{
ActorHistory, Blob, CreditAgreementProof, NodeAge, PublicKey, RewardAccumulation,
RewardProposal, SignedTransfer, TransferAgreementProof,
};
use sn_messaging::client::ClientMsg;
use sn_messaging::{
client::{
BlobRead, BlobWrite, ClientSigned, DataCmd, DataExchange, DataQuery, ProcessMsg,
ProcessingError, QueryResponse, SupportingInfo,
},
node::NodeMsg,
Aggregation, DstLocation, EndUser, MessageId, SrcLocation,
};
use sn_routing::Prefix;
use std::{
collections::{BTreeMap, BTreeSet},
fmt::{Debug, Formatter},
};
use xor_name::XorName;
/// Internal messages are what is passed along
/// within a node, between the entry point and
/// exit point of remote messages.
/// In other words, when communication from another
/// participant at the network arrives, it is mapped
/// to an internal message, that can
/// then be passed along to its proper processing module
/// at the node. At a node module, the result of such a call
/// is also an internal message.
/// Finally, an internal message might be destined for messaging
/// module, by which it leaves the process boundary of this node
/// and is sent on the wire to some other destination(s) on the network.
/// Vec of NodeDuty
pub type NodeDuties = Vec<NodeDuty>;
/// Common duties run by all nodes.
#[allow(clippy::large_enum_variant)]
pub enum NodeDuty {
GetNodeWalletKey {
node_name: XorName,
msg_id: MessageId,
origin: SrcLocation,
},
PropagateTransfer {
proof: CreditAgreementProof,
msg_id: MessageId,
origin: SrcLocation,
},
SetNodeWallet {
wallet_id: PublicKey,
node_id: XorName,
},
GetTransferReplicaEvents {
msg_id: MessageId,
origin: SrcLocation,
},
/// Validate a transfer from a client
ValidateClientTransfer {
signed_transfer: SignedTransfer,
msg_id: MessageId,
origin: SrcLocation,
},
/// Register a transfer from a client
RegisterTransfer {
proof: TransferAgreementProof,
msg_id: MessageId,
origin: SrcLocation,
},
/// TEMP: Simulate a transfer from a client
SimulatePayout {
transfer: Transfer,
msg_id: MessageId,
origin: SrcLocation,
},
ReadChunk {
read: BlobRead,
msg_id: MessageId,
},
WriteChunk {
write: BlobWrite,
msg_id: MessageId,
client_signed: ClientSigned,
},
ProcessRepublish {
chunk: Blob,
msg_id: MessageId,
},
/// Run at data-section Elders on receiving the result of
/// read operations from Adults
RecordAdultReadLiveness {
response: QueryResponse,
correlation_id: MessageId,
src: XorName,
},
/// Get section elders.
GetSectionElders {
msg_id: MessageId,
origin: SrcLocation,
},
/// Get key transfers since specified version.
GetTransfersHistory {
/// The wallet key.
at: PublicKey,
/// The last version of transfers we know of.
since_version: usize,
msg_id: MessageId,
origin: SrcLocation,
},
/// Get Balance at a specific key
GetBalance {
at: PublicKey,
msg_id: MessageId,
origin: SrcLocation,
},
GetStoreCost {
/// Number of bytes to write.
bytes: u64,
msg_id: MessageId,
origin: SrcLocation,
},
/// Proposal of payout of rewards.
ReceiveRewardProposal(RewardProposal),
/// Accumulation of payout of rewards.
ReceiveRewardAccumulation(RewardAccumulation),
Genesis,
EldersChanged {
/// Our section prefix.
our_prefix: Prefix,
/// Our section public key.
our_key: PublicKey,
/// The new Elders.
new_elders: BTreeSet<XorName>,
/// Oldie or newbie?
newbie: bool,
},
AdultsChanged {
/// Remaining Adults in our section.
remaining: BTreeSet<XorName>,
/// New Adults in our section.
added: BTreeSet<XorName>,
/// Removed Adults in our section.
removed: BTreeSet<XorName>,
},
SectionSplit {
/// Our section prefix.
our_prefix: Prefix,
/// our section public key
our_key: PublicKey,
/// The new Elders of our section.
our_new_elders: BTreeSet<XorName>,
/// The new Elders of our sibling section.
their_new_elders: BTreeSet<XorName>,
/// The PK of the sibling section, as this event is fired during a split.
sibling_key: PublicKey,
/// oldie or newbie?
newbie: bool,
},
/// When demoted, node levels down
LevelDown,
/// Initiates the node with state from peers.
SynchState {
/// The registered wallet keys for nodes earning rewards
node_rewards: BTreeMap<XorName, (NodeAge, PublicKey)>,
/// The wallets of users on the network.
user_wallets: BTreeMap<PublicKey, ActorHistory>,
/// The metadata stored on Elders.
metadata: DataExchange,
},
/// As members are lost for various reasons
/// there are certain things nodes need
/// to do, to update for that.
ProcessLostMember {
name: XorName,
age: u8,
},
/// Storage reaching max capacity.
ReachingMaxCapacity,
/// Increment count of full nodes in the network
IncrementFullNodeCount {
/// Node ID of node that reached max capacity.
node_id: PublicKey,
},
/// Sets joining allowed to true or false.
SetNodeJoinsAllowed(bool),
/// Send a message to the specified dst.
Send(OutgoingMsg),
/// Send a lazy error as a result of a specific message.
/// The aim here is for the sender to respond with any missing state
SendError(OutgoingLazyError),
/// Send supporting info for a given processing error.
/// This should be any missing state required to proceed at the erring node.
SendSupport(OutgoingSupportingInfo),
/// Send the same request to each individual node.
SendToNodes {
msg: NodeMsg,
targets: BTreeSet<XorName>,
aggregation: Aggregation,
},
/// Process read of data
ProcessRead {
query: DataQuery,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
},
/// Process write of data
ProcessWrite {
cmd: DataCmd,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
},
/// Process Payment for a DataCmd
ProcessDataPayment {
msg: ProcessMsg,
origin: EndUser,
},
/// Receive a chunk that is being replicated.
/// This is run at an Adult (the new holder).
ReplicateChunk {
data: Blob,
msg_id: MessageId,
},
/// Create proposals to vote unresponsive nodes as offline
ProposeOffline(Vec<XorName>),
NoOp,
}
impl From<NodeDuty> for NodeDuties {
fn from(duty: NodeDuty) -> Self |
}
impl Debug for NodeDuty {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Genesis {.. } => write!(f, "Genesis"),
Self::GetNodeWalletKey {.. } => write!(f, "GetNodeWalletKey"),
Self::PropagateTransfer {.. } => write!(f, "PropagateTransfer"),
Self::SetNodeWallet {.. } => write!(f, "SetNodeWallet"),
Self::GetTransferReplicaEvents {.. } => write!(f, "GetTransferReplicaEvents"),
Self::ValidateClientTransfer {.. } => write!(f, "ValidateClientTransfer"),
Self::RegisterTransfer {.. } => write!(f, "RegisterTransfer"),
Self::GetBalance {.. } => write!(f, "GetBalance"),
Self::GetStoreCost {.. } => write!(f, "GetStoreCost"),
Self::SimulatePayout {.. } => write!(f, "SimulatePayout"),
Self::GetTransfersHistory {.. } => write!(f, "GetTransfersHistory"),
Self::ReadChunk {.. } => write!(f, "ReadChunk"),
Self::WriteChunk {.. } => write!(f, "WriteChunk"),
Self::ProcessRepublish {.. } => write!(f, "ProcessRepublish"),
Self::RecordAdultReadLiveness {
correlation_id,
response,
src,
} => write!(
f,
"RecordAdultReadLiveness {{ correlation_id: {}, response: {:?}, src: {} }}",
correlation_id, response, src
),
Self::ReceiveRewardProposal {.. } => write!(f, "ReceiveRewardProposal"),
Self::ReceiveRewardAccumulation {.. } => write!(f, "ReceiveRewardAccumulation"),
// ------
Self::LevelDown => write!(f, "LevelDown"),
Self::SynchState {.. } => write!(f, "SynchState"),
Self::EldersChanged {.. } => write!(f, "EldersChanged"),
Self::AdultsChanged {.. } => write!(f, "AdultsChanged"),
Self::SectionSplit {.. } => write!(f, "SectionSplit"),
Self::GetSectionElders {.. } => write!(f, "GetSectionElders"),
Self::NoOp => write!(f, "No op."),
Self::ReachingMaxCapacity => write!(f, "ReachingMaxCapacity"),
Self::ProcessLostMember {.. } => write!(f, "ProcessLostMember"),
//Self::ProcessRelocatedMember {.. } => write!(f, "ProcessRelocatedMember"),
Self::IncrementFullNodeCount {.. } => write!(f, "IncrementFullNodeCount"),
Self::SetNodeJoinsAllowed(_) => write!(f, "SetNodeJoinsAllowed"),
Self::Send(msg) => write!(f, "Send [ msg: {:?} ]", msg),
Self::SendError(msg) => write!(f, "SendError [ msg: {:?} ]", msg),
Self::SendSupport(msg) => write!(f, "SendSupport [ msg: {:?} ]", msg),
Self::SendToNodes {
msg,
targets,
aggregation,
} => write!(
f,
"SendToNodes [ msg: {:?}, targets: {:?}, aggregation: {:?} ]",
msg, targets, aggregation
),
Self::ProcessRead {.. } => write!(f, "ProcessRead"),
Self::ProcessWrite {.. } => write!(f, "ProcessWrite"),
Self::ProcessDataPayment {.. } => write!(f, "ProcessDataPayment"),
Self::ReplicateChunk {.. } => write!(f, "ReplicateChunk"),
Self::ProposeOffline(nodes) => write!(f, "ProposeOffline({:?})", nodes),
}
}
}
// --------------- Messaging ---------------
#[derive(Debug, Clone)]
pub struct OutgoingMsg {
pub msg: MsgType,
pub dst: DstLocation,
pub section_source: bool,
pub aggregation: Aggregation,
}
#[derive(Debug, Clone)]
#[allow(clippy::large_enum_variant)]
pub enum MsgType {
Node(NodeMsg),
Client(ClientMsg),
}
#[derive(Debug, Clone)]
pub struct OutgoingLazyError {
pub msg: ProcessingError,
pub dst: DstLocation,
}
#[derive(Debug, Clone)]
pub struct OutgoingSupportingInfo {
pub msg: SupportingInfo,
pub dst: DstLocation,
}
| {
if matches!(duty, NodeDuty::NoOp) {
vec![]
} else {
vec![duty]
}
} | identifier_body |
node_ops.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
#[cfg(feature = "simulated-payouts")]
use sn_data_types::Transfer;
use sn_data_types::{
ActorHistory, Blob, CreditAgreementProof, NodeAge, PublicKey, RewardAccumulation,
RewardProposal, SignedTransfer, TransferAgreementProof,
};
use sn_messaging::client::ClientMsg;
use sn_messaging::{
client::{
BlobRead, BlobWrite, ClientSigned, DataCmd, DataExchange, DataQuery, ProcessMsg,
ProcessingError, QueryResponse, SupportingInfo,
},
node::NodeMsg,
Aggregation, DstLocation, EndUser, MessageId, SrcLocation,
};
use sn_routing::Prefix;
use std::{
collections::{BTreeMap, BTreeSet},
fmt::{Debug, Formatter},
};
use xor_name::XorName;
/// Internal messages are what is passed along
/// within a node, between the entry point and
/// exit point of remote messages.
/// In other words, when communication from another
/// participant at the network arrives, it is mapped
/// to an internal message, that can
/// then be passed along to its proper processing module
/// at the node. At a node module, the result of such a call
/// is also an internal message.
/// Finally, an internal message might be destined for messaging
/// module, by which it leaves the process boundary of this node
/// and is sent on the wire to some other destination(s) on the network.
/// Vec of NodeDuty
pub type NodeDuties = Vec<NodeDuty>;
/// Common duties run by all nodes.
#[allow(clippy::large_enum_variant)]
pub enum NodeDuty {
GetNodeWalletKey {
node_name: XorName,
msg_id: MessageId,
origin: SrcLocation,
},
PropagateTransfer {
proof: CreditAgreementProof,
msg_id: MessageId,
origin: SrcLocation,
},
SetNodeWallet {
wallet_id: PublicKey,
node_id: XorName,
},
GetTransferReplicaEvents {
msg_id: MessageId,
origin: SrcLocation,
},
/// Validate a transfer from a client
ValidateClientTransfer {
signed_transfer: SignedTransfer,
msg_id: MessageId,
origin: SrcLocation,
},
/// Register a transfer from a client
RegisterTransfer {
proof: TransferAgreementProof,
msg_id: MessageId,
origin: SrcLocation,
},
/// TEMP: Simulate a transfer from a client
SimulatePayout {
transfer: Transfer,
msg_id: MessageId,
origin: SrcLocation,
},
ReadChunk {
read: BlobRead,
msg_id: MessageId,
},
WriteChunk {
write: BlobWrite,
msg_id: MessageId,
client_signed: ClientSigned,
},
ProcessRepublish {
chunk: Blob,
msg_id: MessageId,
},
/// Run at data-section Elders on receiving the result of
/// read operations from Adults
RecordAdultReadLiveness {
response: QueryResponse,
correlation_id: MessageId,
src: XorName,
},
/// Get section elders.
GetSectionElders {
msg_id: MessageId,
origin: SrcLocation,
},
/// Get key transfers since specified version.
GetTransfersHistory {
/// The wallet key.
at: PublicKey,
/// The last version of transfers we know of.
since_version: usize,
msg_id: MessageId,
origin: SrcLocation,
},
/// Get Balance at a specific key
GetBalance {
at: PublicKey,
msg_id: MessageId,
origin: SrcLocation,
},
GetStoreCost {
/// Number of bytes to write.
bytes: u64,
msg_id: MessageId,
origin: SrcLocation,
},
/// Proposal of payout of rewards.
ReceiveRewardProposal(RewardProposal),
/// Accumulation of payout of rewards.
ReceiveRewardAccumulation(RewardAccumulation),
Genesis,
EldersChanged {
/// Our section prefix.
our_prefix: Prefix,
/// Our section public key.
our_key: PublicKey,
/// The new Elders.
new_elders: BTreeSet<XorName>,
/// Oldie or newbie?
newbie: bool,
},
AdultsChanged {
/// Remaining Adults in our section.
remaining: BTreeSet<XorName>,
/// New Adults in our section.
added: BTreeSet<XorName>,
/// Removed Adults in our section.
removed: BTreeSet<XorName>,
},
SectionSplit {
/// Our section prefix.
our_prefix: Prefix,
/// our section public key
our_key: PublicKey,
/// The new Elders of our section.
our_new_elders: BTreeSet<XorName>,
/// The new Elders of our sibling section.
their_new_elders: BTreeSet<XorName>,
/// The PK of the sibling section, as this event is fired during a split.
sibling_key: PublicKey,
/// oldie or newbie?
newbie: bool,
},
/// When demoted, node levels down
LevelDown,
/// Initiates the node with state from peers.
SynchState {
/// The registered wallet keys for nodes earning rewards
node_rewards: BTreeMap<XorName, (NodeAge, PublicKey)>, | },
/// As members are lost for various reasons
/// there are certain things nodes need
/// to do, to update for that.
ProcessLostMember {
name: XorName,
age: u8,
},
/// Storage reaching max capacity.
ReachingMaxCapacity,
/// Increment count of full nodes in the network
IncrementFullNodeCount {
/// Node ID of node that reached max capacity.
node_id: PublicKey,
},
/// Sets joining allowed to true or false.
SetNodeJoinsAllowed(bool),
/// Send a message to the specified dst.
Send(OutgoingMsg),
/// Send a lazy error as a result of a specific message.
/// The aim here is for the sender to respond with any missing state
SendError(OutgoingLazyError),
/// Send supporting info for a given processing error.
/// This should be any missing state required to proceed at the erring node.
SendSupport(OutgoingSupportingInfo),
/// Send the same request to each individual node.
SendToNodes {
msg: NodeMsg,
targets: BTreeSet<XorName>,
aggregation: Aggregation,
},
/// Process read of data
ProcessRead {
query: DataQuery,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
},
/// Process write of data
ProcessWrite {
cmd: DataCmd,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
},
/// Process Payment for a DataCmd
ProcessDataPayment {
msg: ProcessMsg,
origin: EndUser,
},
/// Receive a chunk that is being replicated.
/// This is run at an Adult (the new holder).
ReplicateChunk {
data: Blob,
msg_id: MessageId,
},
/// Create proposals to vote unresponsive nodes as offline
ProposeOffline(Vec<XorName>),
NoOp,
}
impl From<NodeDuty> for NodeDuties {
fn from(duty: NodeDuty) -> Self {
if matches!(duty, NodeDuty::NoOp) {
vec![]
} else {
vec![duty]
}
}
}
impl Debug for NodeDuty {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Genesis {.. } => write!(f, "Genesis"),
Self::GetNodeWalletKey {.. } => write!(f, "GetNodeWalletKey"),
Self::PropagateTransfer {.. } => write!(f, "PropagateTransfer"),
Self::SetNodeWallet {.. } => write!(f, "SetNodeWallet"),
Self::GetTransferReplicaEvents {.. } => write!(f, "GetTransferReplicaEvents"),
Self::ValidateClientTransfer {.. } => write!(f, "ValidateClientTransfer"),
Self::RegisterTransfer {.. } => write!(f, "RegisterTransfer"),
Self::GetBalance {.. } => write!(f, "GetBalance"),
Self::GetStoreCost {.. } => write!(f, "GetStoreCost"),
Self::SimulatePayout {.. } => write!(f, "SimulatePayout"),
Self::GetTransfersHistory {.. } => write!(f, "GetTransfersHistory"),
Self::ReadChunk {.. } => write!(f, "ReadChunk"),
Self::WriteChunk {.. } => write!(f, "WriteChunk"),
Self::ProcessRepublish {.. } => write!(f, "ProcessRepublish"),
Self::RecordAdultReadLiveness {
correlation_id,
response,
src,
} => write!(
f,
"RecordAdultReadLiveness {{ correlation_id: {}, response: {:?}, src: {} }}",
correlation_id, response, src
),
Self::ReceiveRewardProposal {.. } => write!(f, "ReceiveRewardProposal"),
Self::ReceiveRewardAccumulation {.. } => write!(f, "ReceiveRewardAccumulation"),
// ------
Self::LevelDown => write!(f, "LevelDown"),
Self::SynchState {.. } => write!(f, "SynchState"),
Self::EldersChanged {.. } => write!(f, "EldersChanged"),
Self::AdultsChanged {.. } => write!(f, "AdultsChanged"),
Self::SectionSplit {.. } => write!(f, "SectionSplit"),
Self::GetSectionElders {.. } => write!(f, "GetSectionElders"),
Self::NoOp => write!(f, "No op."),
Self::ReachingMaxCapacity => write!(f, "ReachingMaxCapacity"),
Self::ProcessLostMember {.. } => write!(f, "ProcessLostMember"),
//Self::ProcessRelocatedMember {.. } => write!(f, "ProcessRelocatedMember"),
Self::IncrementFullNodeCount {.. } => write!(f, "IncrementFullNodeCount"),
Self::SetNodeJoinsAllowed(_) => write!(f, "SetNodeJoinsAllowed"),
Self::Send(msg) => write!(f, "Send [ msg: {:?} ]", msg),
Self::SendError(msg) => write!(f, "SendError [ msg: {:?} ]", msg),
Self::SendSupport(msg) => write!(f, "SendSupport [ msg: {:?} ]", msg),
Self::SendToNodes {
msg,
targets,
aggregation,
} => write!(
f,
"SendToNodes [ msg: {:?}, targets: {:?}, aggregation: {:?} ]",
msg, targets, aggregation
),
Self::ProcessRead {.. } => write!(f, "ProcessRead"),
Self::ProcessWrite {.. } => write!(f, "ProcessWrite"),
Self::ProcessDataPayment {.. } => write!(f, "ProcessDataPayment"),
Self::ReplicateChunk {.. } => write!(f, "ReplicateChunk"),
Self::ProposeOffline(nodes) => write!(f, "ProposeOffline({:?})", nodes),
}
}
}
// --------------- Messaging ---------------
#[derive(Debug, Clone)]
pub struct OutgoingMsg {
pub msg: MsgType,
pub dst: DstLocation,
pub section_source: bool,
pub aggregation: Aggregation,
}
#[derive(Debug, Clone)]
#[allow(clippy::large_enum_variant)]
pub enum MsgType {
Node(NodeMsg),
Client(ClientMsg),
}
#[derive(Debug, Clone)]
pub struct OutgoingLazyError {
pub msg: ProcessingError,
pub dst: DstLocation,
}
#[derive(Debug, Clone)]
pub struct OutgoingSupportingInfo {
pub msg: SupportingInfo,
pub dst: DstLocation,
} | /// The wallets of users on the network.
user_wallets: BTreeMap<PublicKey, ActorHistory>,
/// The metadata stored on Elders.
metadata: DataExchange, | random_line_split |
cabi.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::{llvm, ValueRef, Attribute, Void};
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::common::*;
use middle::trans::type_::Type;
use std::libc::c_uint;
use std::option;
pub trait ABIInfo {
fn compute_info(&self, atys: &[Type], rty: Type, ret_def: bool) -> FnType;
}
pub struct LLVMType {
cast: bool,
ty: Type
}
pub struct FnType {
arg_tys: ~[LLVMType],
ret_ty: LLVMType,
attrs: ~[option::Option<Attribute>],
sret: bool
}
impl FnType {
pub fn decl_fn(&self, decl: &fn(fnty: Type) -> ValueRef) -> ValueRef {
let atys = self.arg_tys.iter().transform(|t| t.ty).collect::<~[Type]>();
let rty = self.ret_ty.ty;
let fnty = Type::func(atys, &rty);
let llfn = decl(fnty); | match *a {
option::Some(attr) => {
unsafe {
let llarg = get_param(llfn, i);
llvm::LLVMAddAttribute(llarg, attr as c_uint);
}
}
_ => ()
}
}
return llfn;
}
pub fn build_shim_args(&self, bcx: block, arg_tys: &[Type], llargbundle: ValueRef)
-> ~[ValueRef] {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut llargvals = ~[];
let mut i = 0u;
let n = arg_tys.len();
if self.sret {
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
let llretloc = Load(bcx, llretptr);
llargvals = ~[llretloc];
atys = atys.tail();
attrs = attrs.tail();
}
while i < n {
let llargval = if atys[i].cast {
let arg_ptr = GEPi(bcx, llargbundle, [0u, i]);
let arg_ptr = BitCast(bcx, arg_ptr, atys[i].ty.ptr_to());
Load(bcx, arg_ptr)
} else if attrs[i].is_some() {
GEPi(bcx, llargbundle, [0u, i])
} else {
load_inbounds(bcx, llargbundle, [0u, i])
};
llargvals.push(llargval);
i += 1u;
}
return llargvals;
}
pub fn build_shim_ret(&self, bcx: block, arg_tys: &[Type], ret_def: bool,
llargbundle: ValueRef, llretval: ValueRef) {
for self.attrs.iter().enumerate().advance |(i, a)| {
match *a {
option::Some(attr) => {
unsafe {
llvm::LLVMAddInstrAttribute(llretval, (i + 1u) as c_uint, attr as c_uint);
}
}
_ => ()
}
}
if self.sret ||!ret_def {
return;
}
let n = arg_tys.len();
// R** llretptr = &args->r;
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
// R* llretloc = *llretptr; /* (args->r) */
let llretloc = Load(bcx, llretptr);
if self.ret_ty.cast {
let tmp_ptr = BitCast(bcx, llretloc, self.ret_ty.ty.ptr_to());
// *args->r = r;
Store(bcx, llretval, tmp_ptr);
} else {
// *args->r = r;
Store(bcx, llretval, llretloc);
};
}
pub fn build_wrap_args(&self, bcx: block, ret_ty: Type,
llwrapfn: ValueRef, llargbundle: ValueRef) {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut j = 0u;
let llretptr = if self.sret {
atys = atys.tail();
attrs = attrs.tail();
j = 1u;
get_param(llwrapfn, 0u)
} else if self.ret_ty.cast {
let retptr = alloca(bcx, self.ret_ty.ty, "");
BitCast(bcx, retptr, ret_ty.ptr_to())
} else {
alloca(bcx, ret_ty, "")
};
let mut i = 0u;
let n = atys.len();
while i < n {
let mut argval = get_param(llwrapfn, i + j);
if attrs[i].is_some() {
argval = Load(bcx, argval);
store_inbounds(bcx, argval, llargbundle, [0u, i]);
} else if atys[i].cast {
let argptr = GEPi(bcx, llargbundle, [0u, i]);
let argptr = BitCast(bcx, argptr, atys[i].ty.ptr_to());
Store(bcx, argval, argptr);
} else {
store_inbounds(bcx, argval, llargbundle, [0u, i]);
}
i += 1u;
}
store_inbounds(bcx, llretptr, llargbundle, [0u, n]);
}
pub fn build_wrap_ret(&self, bcx: block, arg_tys: &[Type], llargbundle: ValueRef) {
if self.ret_ty.ty.kind() == Void {
return;
}
if bcx.fcx.llretptr.is_some() {
let llretval = load_inbounds(bcx, llargbundle, [ 0, arg_tys.len() ]);
let llretval = if self.ret_ty.cast {
let retptr = BitCast(bcx, llretval, self.ret_ty.ty.ptr_to());
Load(bcx, retptr)
} else {
Load(bcx, llretval)
};
let llretptr = BitCast(bcx, bcx.fcx.llretptr.get(), self.ret_ty.ty.ptr_to());
Store(bcx, llretval, llretptr);
}
}
} |
for self.attrs.iter().enumerate().advance |(i, a)| { | random_line_split |
cabi.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::{llvm, ValueRef, Attribute, Void};
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::common::*;
use middle::trans::type_::Type;
use std::libc::c_uint;
use std::option;
pub trait ABIInfo {
fn compute_info(&self, atys: &[Type], rty: Type, ret_def: bool) -> FnType;
}
pub struct LLVMType {
cast: bool,
ty: Type
}
pub struct FnType {
arg_tys: ~[LLVMType],
ret_ty: LLVMType,
attrs: ~[option::Option<Attribute>],
sret: bool
}
impl FnType {
pub fn decl_fn(&self, decl: &fn(fnty: Type) -> ValueRef) -> ValueRef |
pub fn build_shim_args(&self, bcx: block, arg_tys: &[Type], llargbundle: ValueRef)
-> ~[ValueRef] {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut llargvals = ~[];
let mut i = 0u;
let n = arg_tys.len();
if self.sret {
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
let llretloc = Load(bcx, llretptr);
llargvals = ~[llretloc];
atys = atys.tail();
attrs = attrs.tail();
}
while i < n {
let llargval = if atys[i].cast {
let arg_ptr = GEPi(bcx, llargbundle, [0u, i]);
let arg_ptr = BitCast(bcx, arg_ptr, atys[i].ty.ptr_to());
Load(bcx, arg_ptr)
} else if attrs[i].is_some() {
GEPi(bcx, llargbundle, [0u, i])
} else {
load_inbounds(bcx, llargbundle, [0u, i])
};
llargvals.push(llargval);
i += 1u;
}
return llargvals;
}
pub fn build_shim_ret(&self, bcx: block, arg_tys: &[Type], ret_def: bool,
llargbundle: ValueRef, llretval: ValueRef) {
for self.attrs.iter().enumerate().advance |(i, a)| {
match *a {
option::Some(attr) => {
unsafe {
llvm::LLVMAddInstrAttribute(llretval, (i + 1u) as c_uint, attr as c_uint);
}
}
_ => ()
}
}
if self.sret ||!ret_def {
return;
}
let n = arg_tys.len();
// R** llretptr = &args->r;
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
// R* llretloc = *llretptr; /* (args->r) */
let llretloc = Load(bcx, llretptr);
if self.ret_ty.cast {
let tmp_ptr = BitCast(bcx, llretloc, self.ret_ty.ty.ptr_to());
// *args->r = r;
Store(bcx, llretval, tmp_ptr);
} else {
// *args->r = r;
Store(bcx, llretval, llretloc);
};
}
pub fn build_wrap_args(&self, bcx: block, ret_ty: Type,
llwrapfn: ValueRef, llargbundle: ValueRef) {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut j = 0u;
let llretptr = if self.sret {
atys = atys.tail();
attrs = attrs.tail();
j = 1u;
get_param(llwrapfn, 0u)
} else if self.ret_ty.cast {
let retptr = alloca(bcx, self.ret_ty.ty, "");
BitCast(bcx, retptr, ret_ty.ptr_to())
} else {
alloca(bcx, ret_ty, "")
};
let mut i = 0u;
let n = atys.len();
while i < n {
let mut argval = get_param(llwrapfn, i + j);
if attrs[i].is_some() {
argval = Load(bcx, argval);
store_inbounds(bcx, argval, llargbundle, [0u, i]);
} else if atys[i].cast {
let argptr = GEPi(bcx, llargbundle, [0u, i]);
let argptr = BitCast(bcx, argptr, atys[i].ty.ptr_to());
Store(bcx, argval, argptr);
} else {
store_inbounds(bcx, argval, llargbundle, [0u, i]);
}
i += 1u;
}
store_inbounds(bcx, llretptr, llargbundle, [0u, n]);
}
pub fn build_wrap_ret(&self, bcx: block, arg_tys: &[Type], llargbundle: ValueRef) {
if self.ret_ty.ty.kind() == Void {
return;
}
if bcx.fcx.llretptr.is_some() {
let llretval = load_inbounds(bcx, llargbundle, [ 0, arg_tys.len() ]);
let llretval = if self.ret_ty.cast {
let retptr = BitCast(bcx, llretval, self.ret_ty.ty.ptr_to());
Load(bcx, retptr)
} else {
Load(bcx, llretval)
};
let llretptr = BitCast(bcx, bcx.fcx.llretptr.get(), self.ret_ty.ty.ptr_to());
Store(bcx, llretval, llretptr);
}
}
}
| {
let atys = self.arg_tys.iter().transform(|t| t.ty).collect::<~[Type]>();
let rty = self.ret_ty.ty;
let fnty = Type::func(atys, &rty);
let llfn = decl(fnty);
for self.attrs.iter().enumerate().advance |(i, a)| {
match *a {
option::Some(attr) => {
unsafe {
let llarg = get_param(llfn, i);
llvm::LLVMAddAttribute(llarg, attr as c_uint);
}
}
_ => ()
}
}
return llfn;
} | identifier_body |
cabi.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::{llvm, ValueRef, Attribute, Void};
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::common::*;
use middle::trans::type_::Type;
use std::libc::c_uint;
use std::option;
pub trait ABIInfo {
fn compute_info(&self, atys: &[Type], rty: Type, ret_def: bool) -> FnType;
}
pub struct | {
cast: bool,
ty: Type
}
pub struct FnType {
arg_tys: ~[LLVMType],
ret_ty: LLVMType,
attrs: ~[option::Option<Attribute>],
sret: bool
}
impl FnType {
pub fn decl_fn(&self, decl: &fn(fnty: Type) -> ValueRef) -> ValueRef {
let atys = self.arg_tys.iter().transform(|t| t.ty).collect::<~[Type]>();
let rty = self.ret_ty.ty;
let fnty = Type::func(atys, &rty);
let llfn = decl(fnty);
for self.attrs.iter().enumerate().advance |(i, a)| {
match *a {
option::Some(attr) => {
unsafe {
let llarg = get_param(llfn, i);
llvm::LLVMAddAttribute(llarg, attr as c_uint);
}
}
_ => ()
}
}
return llfn;
}
pub fn build_shim_args(&self, bcx: block, arg_tys: &[Type], llargbundle: ValueRef)
-> ~[ValueRef] {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut llargvals = ~[];
let mut i = 0u;
let n = arg_tys.len();
if self.sret {
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
let llretloc = Load(bcx, llretptr);
llargvals = ~[llretloc];
atys = atys.tail();
attrs = attrs.tail();
}
while i < n {
let llargval = if atys[i].cast {
let arg_ptr = GEPi(bcx, llargbundle, [0u, i]);
let arg_ptr = BitCast(bcx, arg_ptr, atys[i].ty.ptr_to());
Load(bcx, arg_ptr)
} else if attrs[i].is_some() {
GEPi(bcx, llargbundle, [0u, i])
} else {
load_inbounds(bcx, llargbundle, [0u, i])
};
llargvals.push(llargval);
i += 1u;
}
return llargvals;
}
pub fn build_shim_ret(&self, bcx: block, arg_tys: &[Type], ret_def: bool,
llargbundle: ValueRef, llretval: ValueRef) {
for self.attrs.iter().enumerate().advance |(i, a)| {
match *a {
option::Some(attr) => {
unsafe {
llvm::LLVMAddInstrAttribute(llretval, (i + 1u) as c_uint, attr as c_uint);
}
}
_ => ()
}
}
if self.sret ||!ret_def {
return;
}
let n = arg_tys.len();
// R** llretptr = &args->r;
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
// R* llretloc = *llretptr; /* (args->r) */
let llretloc = Load(bcx, llretptr);
if self.ret_ty.cast {
let tmp_ptr = BitCast(bcx, llretloc, self.ret_ty.ty.ptr_to());
// *args->r = r;
Store(bcx, llretval, tmp_ptr);
} else {
// *args->r = r;
Store(bcx, llretval, llretloc);
};
}
pub fn build_wrap_args(&self, bcx: block, ret_ty: Type,
llwrapfn: ValueRef, llargbundle: ValueRef) {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut j = 0u;
let llretptr = if self.sret {
atys = atys.tail();
attrs = attrs.tail();
j = 1u;
get_param(llwrapfn, 0u)
} else if self.ret_ty.cast {
let retptr = alloca(bcx, self.ret_ty.ty, "");
BitCast(bcx, retptr, ret_ty.ptr_to())
} else {
alloca(bcx, ret_ty, "")
};
let mut i = 0u;
let n = atys.len();
while i < n {
let mut argval = get_param(llwrapfn, i + j);
if attrs[i].is_some() {
argval = Load(bcx, argval);
store_inbounds(bcx, argval, llargbundle, [0u, i]);
} else if atys[i].cast {
let argptr = GEPi(bcx, llargbundle, [0u, i]);
let argptr = BitCast(bcx, argptr, atys[i].ty.ptr_to());
Store(bcx, argval, argptr);
} else {
store_inbounds(bcx, argval, llargbundle, [0u, i]);
}
i += 1u;
}
store_inbounds(bcx, llretptr, llargbundle, [0u, n]);
}
pub fn build_wrap_ret(&self, bcx: block, arg_tys: &[Type], llargbundle: ValueRef) {
if self.ret_ty.ty.kind() == Void {
return;
}
if bcx.fcx.llretptr.is_some() {
let llretval = load_inbounds(bcx, llargbundle, [ 0, arg_tys.len() ]);
let llretval = if self.ret_ty.cast {
let retptr = BitCast(bcx, llretval, self.ret_ty.ty.ptr_to());
Load(bcx, retptr)
} else {
Load(bcx, llretval)
};
let llretptr = BitCast(bcx, bcx.fcx.llretptr.get(), self.ret_ty.ty.ptr_to());
Store(bcx, llretval, llretptr);
}
}
}
| LLVMType | identifier_name |
cabi.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::{llvm, ValueRef, Attribute, Void};
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::common::*;
use middle::trans::type_::Type;
use std::libc::c_uint;
use std::option;
pub trait ABIInfo {
fn compute_info(&self, atys: &[Type], rty: Type, ret_def: bool) -> FnType;
}
pub struct LLVMType {
cast: bool,
ty: Type
}
pub struct FnType {
arg_tys: ~[LLVMType],
ret_ty: LLVMType,
attrs: ~[option::Option<Attribute>],
sret: bool
}
impl FnType {
pub fn decl_fn(&self, decl: &fn(fnty: Type) -> ValueRef) -> ValueRef {
let atys = self.arg_tys.iter().transform(|t| t.ty).collect::<~[Type]>();
let rty = self.ret_ty.ty;
let fnty = Type::func(atys, &rty);
let llfn = decl(fnty);
for self.attrs.iter().enumerate().advance |(i, a)| {
match *a {
option::Some(attr) => {
unsafe {
let llarg = get_param(llfn, i);
llvm::LLVMAddAttribute(llarg, attr as c_uint);
}
}
_ => ()
}
}
return llfn;
}
pub fn build_shim_args(&self, bcx: block, arg_tys: &[Type], llargbundle: ValueRef)
-> ~[ValueRef] {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut llargvals = ~[];
let mut i = 0u;
let n = arg_tys.len();
if self.sret {
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
let llretloc = Load(bcx, llretptr);
llargvals = ~[llretloc];
atys = atys.tail();
attrs = attrs.tail();
}
while i < n {
let llargval = if atys[i].cast {
let arg_ptr = GEPi(bcx, llargbundle, [0u, i]);
let arg_ptr = BitCast(bcx, arg_ptr, atys[i].ty.ptr_to());
Load(bcx, arg_ptr)
} else if attrs[i].is_some() {
GEPi(bcx, llargbundle, [0u, i])
} else {
load_inbounds(bcx, llargbundle, [0u, i])
};
llargvals.push(llargval);
i += 1u;
}
return llargvals;
}
pub fn build_shim_ret(&self, bcx: block, arg_tys: &[Type], ret_def: bool,
llargbundle: ValueRef, llretval: ValueRef) {
for self.attrs.iter().enumerate().advance |(i, a)| {
match *a {
option::Some(attr) => {
unsafe {
llvm::LLVMAddInstrAttribute(llretval, (i + 1u) as c_uint, attr as c_uint);
}
}
_ => ()
}
}
if self.sret ||!ret_def {
return;
}
let n = arg_tys.len();
// R** llretptr = &args->r;
let llretptr = GEPi(bcx, llargbundle, [0u, n]);
// R* llretloc = *llretptr; /* (args->r) */
let llretloc = Load(bcx, llretptr);
if self.ret_ty.cast {
let tmp_ptr = BitCast(bcx, llretloc, self.ret_ty.ty.ptr_to());
// *args->r = r;
Store(bcx, llretval, tmp_ptr);
} else {
// *args->r = r;
Store(bcx, llretval, llretloc);
};
}
pub fn build_wrap_args(&self, bcx: block, ret_ty: Type,
llwrapfn: ValueRef, llargbundle: ValueRef) {
let mut atys: &[LLVMType] = self.arg_tys;
let mut attrs: &[option::Option<Attribute>] = self.attrs;
let mut j = 0u;
let llretptr = if self.sret {
atys = atys.tail();
attrs = attrs.tail();
j = 1u;
get_param(llwrapfn, 0u)
} else if self.ret_ty.cast {
let retptr = alloca(bcx, self.ret_ty.ty, "");
BitCast(bcx, retptr, ret_ty.ptr_to())
} else | ;
let mut i = 0u;
let n = atys.len();
while i < n {
let mut argval = get_param(llwrapfn, i + j);
if attrs[i].is_some() {
argval = Load(bcx, argval);
store_inbounds(bcx, argval, llargbundle, [0u, i]);
} else if atys[i].cast {
let argptr = GEPi(bcx, llargbundle, [0u, i]);
let argptr = BitCast(bcx, argptr, atys[i].ty.ptr_to());
Store(bcx, argval, argptr);
} else {
store_inbounds(bcx, argval, llargbundle, [0u, i]);
}
i += 1u;
}
store_inbounds(bcx, llretptr, llargbundle, [0u, n]);
}
pub fn build_wrap_ret(&self, bcx: block, arg_tys: &[Type], llargbundle: ValueRef) {
if self.ret_ty.ty.kind() == Void {
return;
}
if bcx.fcx.llretptr.is_some() {
let llretval = load_inbounds(bcx, llargbundle, [ 0, arg_tys.len() ]);
let llretval = if self.ret_ty.cast {
let retptr = BitCast(bcx, llretval, self.ret_ty.ty.ptr_to());
Load(bcx, retptr)
} else {
Load(bcx, llretval)
};
let llretptr = BitCast(bcx, bcx.fcx.llretptr.get(), self.ret_ty.ty.ptr_to());
Store(bcx, llretval, llretptr);
}
}
}
| {
alloca(bcx, ret_ty, "")
} | conditional_block |
subst.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type substitutions.
use middle::ty;
use util::ppaux::Repr;
///////////////////////////////////////////////////////////////////////////
// Public trait `Subst`
//
// Just call `foo.subst(tcx, substs)` to perform a substitution across
// `foo`.
pub trait Subst {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> Self;
}
///////////////////////////////////////////////////////////////////////////
// Substitution over types
//
// Because this is so common, we make a special optimization to avoid
// doing anything if `substs` is a no-op. I tried to generalize these
// to all subst methods but ran into trouble due to the limitations of
// our current method/trait matching algorithm. - Niko
trait EffectfulSubst {
fn effectfulSubst(&self, tcx: ty::ctxt, substs: &ty::substs) -> Self;
}
impl Subst for ty::t {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::t {
if ty::substs_is_noop(substs) {
return *self;
} else {
return self.effectfulSubst(tcx, substs);
}
}
}
impl EffectfulSubst for ty::t {
fn effectfulSubst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::t {
if!ty::type_needs_subst(*self) {
return *self;
}
match ty::get(*self).sty {
ty::ty_param(p) => {
substs.tps[p.idx]
}
ty::ty_self(_) => {
substs.self_ty.expect("ty_self not found in substs")
}
_ => {
ty::fold_regions_and_ty(
tcx, *self,
|r| r.subst(tcx, substs),
|t| t.effectfulSubst(tcx, substs),
|t| t.effectfulSubst(tcx, substs))
}
}
}
}
///////////////////////////////////////////////////////////////////////////
// Other types
impl<T:Subst> Subst for ~[T] {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ~[T] {
self.map(|t| t.subst(tcx, substs))
}
}
impl<T:Subst> Subst for @T {
fn | (&self, tcx: ty::ctxt, substs: &ty::substs) -> @T {
match self {
&@ref t => @t.subst(tcx, substs)
}
}
}
impl<T:Subst> Subst for Option<T> {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> Option<T> {
self.map(|t| t.subst(tcx, substs))
}
}
impl Subst for ty::TraitRef {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::TraitRef {
ty::TraitRef {
def_id: self.def_id,
substs: self.substs.subst(tcx, substs)
}
}
}
impl Subst for ty::substs {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::substs {
ty::substs {
self_r: self.self_r.subst(tcx, substs),
self_ty: self.self_ty.map(|typ| typ.subst(tcx, substs)),
tps: self.tps.map(|typ| typ.subst(tcx, substs))
}
}
}
impl Subst for ty::BareFnTy {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::BareFnTy {
ty::fold_bare_fn_ty(self, |t| t.subst(tcx, substs))
}
}
impl Subst for ty::ParamBounds {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::ParamBounds {
ty::ParamBounds {
builtin_bounds: self.builtin_bounds,
trait_bounds: self.trait_bounds.subst(tcx, substs)
}
}
}
impl Subst for ty::TypeParameterDef {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::TypeParameterDef {
ty::TypeParameterDef {
def_id: self.def_id,
bounds: self.bounds.subst(tcx, substs)
}
}
}
impl Subst for ty::Generics {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::Generics {
ty::Generics {
type_param_defs: self.type_param_defs.subst(tcx, substs),
region_param: self.region_param
}
}
}
impl Subst for ty::Region {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::Region {
// Note: This routine only handles the self region, because it
// is only concerned with substitutions of regions that appear
// in types. Region substitution of the bound regions that
// appear in a function signature is done using the
// specialized routine
// `middle::typeck::check::regionmanip::replace_bound_regions_in_fn_sig()`.
// As we transition to the new region syntax this distinction
// will most likely disappear.
match self {
&ty::re_bound(ty::br_self) => {
match substs.self_r {
None => {
tcx.sess.bug(
fmt!("ty::Region#subst(): \
Reference to self region when \
given substs with no self region: %s",
substs.repr(tcx)));
}
Some(self_r) => self_r
}
}
_ => *self
}
}
}
impl Subst for ty::ty_param_bounds_and_ty {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::ty_param_bounds_and_ty {
ty::ty_param_bounds_and_ty {
generics: self.generics.subst(tcx, substs),
ty: self.ty.subst(tcx, substs)
}
}
}
| subst | identifier_name |
subst.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type substitutions.
use middle::ty;
use util::ppaux::Repr;
///////////////////////////////////////////////////////////////////////////
// Public trait `Subst`
//
// Just call `foo.subst(tcx, substs)` to perform a substitution across
// `foo`.
pub trait Subst {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> Self;
}
///////////////////////////////////////////////////////////////////////////
// Substitution over types
//
// Because this is so common, we make a special optimization to avoid
// doing anything if `substs` is a no-op. I tried to generalize these
// to all subst methods but ran into trouble due to the limitations of
// our current method/trait matching algorithm. - Niko
trait EffectfulSubst {
fn effectfulSubst(&self, tcx: ty::ctxt, substs: &ty::substs) -> Self;
}
impl Subst for ty::t {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::t {
if ty::substs_is_noop(substs) {
return *self;
} else {
return self.effectfulSubst(tcx, substs);
}
}
}
impl EffectfulSubst for ty::t {
fn effectfulSubst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::t | }
}
///////////////////////////////////////////////////////////////////////////
// Other types
impl<T:Subst> Subst for ~[T] {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ~[T] {
self.map(|t| t.subst(tcx, substs))
}
}
impl<T:Subst> Subst for @T {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> @T {
match self {
&@ref t => @t.subst(tcx, substs)
}
}
}
impl<T:Subst> Subst for Option<T> {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> Option<T> {
self.map(|t| t.subst(tcx, substs))
}
}
impl Subst for ty::TraitRef {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::TraitRef {
ty::TraitRef {
def_id: self.def_id,
substs: self.substs.subst(tcx, substs)
}
}
}
impl Subst for ty::substs {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::substs {
ty::substs {
self_r: self.self_r.subst(tcx, substs),
self_ty: self.self_ty.map(|typ| typ.subst(tcx, substs)),
tps: self.tps.map(|typ| typ.subst(tcx, substs))
}
}
}
impl Subst for ty::BareFnTy {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::BareFnTy {
ty::fold_bare_fn_ty(self, |t| t.subst(tcx, substs))
}
}
impl Subst for ty::ParamBounds {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::ParamBounds {
ty::ParamBounds {
builtin_bounds: self.builtin_bounds,
trait_bounds: self.trait_bounds.subst(tcx, substs)
}
}
}
impl Subst for ty::TypeParameterDef {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::TypeParameterDef {
ty::TypeParameterDef {
def_id: self.def_id,
bounds: self.bounds.subst(tcx, substs)
}
}
}
impl Subst for ty::Generics {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::Generics {
ty::Generics {
type_param_defs: self.type_param_defs.subst(tcx, substs),
region_param: self.region_param
}
}
}
impl Subst for ty::Region {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::Region {
// Note: This routine only handles the self region, because it
// is only concerned with substitutions of regions that appear
// in types. Region substitution of the bound regions that
// appear in a function signature is done using the
// specialized routine
// `middle::typeck::check::regionmanip::replace_bound_regions_in_fn_sig()`.
// As we transition to the new region syntax this distinction
// will most likely disappear.
match self {
&ty::re_bound(ty::br_self) => {
match substs.self_r {
None => {
tcx.sess.bug(
fmt!("ty::Region#subst(): \
Reference to self region when \
given substs with no self region: %s",
substs.repr(tcx)));
}
Some(self_r) => self_r
}
}
_ => *self
}
}
}
impl Subst for ty::ty_param_bounds_and_ty {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::ty_param_bounds_and_ty {
ty::ty_param_bounds_and_ty {
generics: self.generics.subst(tcx, substs),
ty: self.ty.subst(tcx, substs)
}
}
}
| {
if !ty::type_needs_subst(*self) {
return *self;
}
match ty::get(*self).sty {
ty::ty_param(p) => {
substs.tps[p.idx]
}
ty::ty_self(_) => {
substs.self_ty.expect("ty_self not found in substs")
}
_ => {
ty::fold_regions_and_ty(
tcx, *self,
|r| r.subst(tcx, substs),
|t| t.effectfulSubst(tcx, substs),
|t| t.effectfulSubst(tcx, substs))
}
} | identifier_body |
subst.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type substitutions.
use middle::ty;
use util::ppaux::Repr;
///////////////////////////////////////////////////////////////////////////
// Public trait `Subst`
//
// Just call `foo.subst(tcx, substs)` to perform a substitution across
// `foo`.
pub trait Subst {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> Self;
}
///////////////////////////////////////////////////////////////////////////
// Substitution over types
//
// Because this is so common, we make a special optimization to avoid
// doing anything if `substs` is a no-op. I tried to generalize these
// to all subst methods but ran into trouble due to the limitations of
// our current method/trait matching algorithm. - Niko
trait EffectfulSubst {
fn effectfulSubst(&self, tcx: ty::ctxt, substs: &ty::substs) -> Self;
}
impl Subst for ty::t {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::t {
if ty::substs_is_noop(substs) {
return *self;
} else {
return self.effectfulSubst(tcx, substs);
}
}
}
impl EffectfulSubst for ty::t {
fn effectfulSubst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::t {
if!ty::type_needs_subst(*self) {
return *self;
}
match ty::get(*self).sty {
ty::ty_param(p) => {
substs.tps[p.idx]
}
ty::ty_self(_) => {
substs.self_ty.expect("ty_self not found in substs")
}
_ => {
ty::fold_regions_and_ty(
tcx, *self,
|r| r.subst(tcx, substs),
|t| t.effectfulSubst(tcx, substs),
|t| t.effectfulSubst(tcx, substs)) |
///////////////////////////////////////////////////////////////////////////
// Other types
impl<T:Subst> Subst for ~[T] {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ~[T] {
self.map(|t| t.subst(tcx, substs))
}
}
impl<T:Subst> Subst for @T {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> @T {
match self {
&@ref t => @t.subst(tcx, substs)
}
}
}
impl<T:Subst> Subst for Option<T> {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> Option<T> {
self.map(|t| t.subst(tcx, substs))
}
}
impl Subst for ty::TraitRef {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::TraitRef {
ty::TraitRef {
def_id: self.def_id,
substs: self.substs.subst(tcx, substs)
}
}
}
impl Subst for ty::substs {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::substs {
ty::substs {
self_r: self.self_r.subst(tcx, substs),
self_ty: self.self_ty.map(|typ| typ.subst(tcx, substs)),
tps: self.tps.map(|typ| typ.subst(tcx, substs))
}
}
}
impl Subst for ty::BareFnTy {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::BareFnTy {
ty::fold_bare_fn_ty(self, |t| t.subst(tcx, substs))
}
}
impl Subst for ty::ParamBounds {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::ParamBounds {
ty::ParamBounds {
builtin_bounds: self.builtin_bounds,
trait_bounds: self.trait_bounds.subst(tcx, substs)
}
}
}
impl Subst for ty::TypeParameterDef {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::TypeParameterDef {
ty::TypeParameterDef {
def_id: self.def_id,
bounds: self.bounds.subst(tcx, substs)
}
}
}
impl Subst for ty::Generics {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::Generics {
ty::Generics {
type_param_defs: self.type_param_defs.subst(tcx, substs),
region_param: self.region_param
}
}
}
impl Subst for ty::Region {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::Region {
// Note: This routine only handles the self region, because it
// is only concerned with substitutions of regions that appear
// in types. Region substitution of the bound regions that
// appear in a function signature is done using the
// specialized routine
// `middle::typeck::check::regionmanip::replace_bound_regions_in_fn_sig()`.
// As we transition to the new region syntax this distinction
// will most likely disappear.
match self {
&ty::re_bound(ty::br_self) => {
match substs.self_r {
None => {
tcx.sess.bug(
fmt!("ty::Region#subst(): \
Reference to self region when \
given substs with no self region: %s",
substs.repr(tcx)));
}
Some(self_r) => self_r
}
}
_ => *self
}
}
}
impl Subst for ty::ty_param_bounds_and_ty {
fn subst(&self, tcx: ty::ctxt, substs: &ty::substs) -> ty::ty_param_bounds_and_ty {
ty::ty_param_bounds_and_ty {
generics: self.generics.subst(tcx, substs),
ty: self.ty.subst(tcx, substs)
}
}
} | }
}
}
} | random_line_split |
volume.rs | use crate::{fov::SphereVolumeFov, Location, World};
use calx::HexFov;
use std::iter::FromIterator;
impl World {
pub fn sphere_volume(&self, origin: Location, radius: u32) -> Volume {
Volume::sphere(self, origin, radius)
}
}
/// `Volume` is a specific area of the game world.
pub struct Volume(pub Vec<Location>);
impl Volume {
/// Create a volume that consists of a single point.
pub fn | (loc: Location) -> Volume { Volume(vec![loc]) }
/// Construct a sphere volume that follows portals and is stopped by walls.
///
/// The stopping walls are terrain for which `blocks_shot` is true.
pub fn sphere(w: &World, origin: Location, radius: u32) -> Volume {
// TODO: Add stop predicate to API, allow passing through walls.
Volume(Vec::from_iter(
HexFov::new(SphereVolumeFov::new(w, radius, origin)).map(|(pos, a)| a.origin + pos),
))
}
}
| point | identifier_name |
volume.rs | use crate::{fov::SphereVolumeFov, Location, World};
use calx::HexFov;
use std::iter::FromIterator;
impl World {
pub fn sphere_volume(&self, origin: Location, radius: u32) -> Volume {
Volume::sphere(self, origin, radius)
}
}
/// `Volume` is a specific area of the game world.
pub struct Volume(pub Vec<Location>);
impl Volume {
/// Create a volume that consists of a single point. | ///
/// The stopping walls are terrain for which `blocks_shot` is true.
pub fn sphere(w: &World, origin: Location, radius: u32) -> Volume {
// TODO: Add stop predicate to API, allow passing through walls.
Volume(Vec::from_iter(
HexFov::new(SphereVolumeFov::new(w, radius, origin)).map(|(pos, a)| a.origin + pos),
))
}
} | pub fn point(loc: Location) -> Volume { Volume(vec![loc]) }
/// Construct a sphere volume that follows portals and is stopped by walls. | random_line_split |
drop_flag.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Drop flags.
//!
//! The [`Pin<P>`] guarantees state that if we have a `T` allocated somewhere,
//! and we construct a pinned reference to it such as a `Pin<&'a mut T>`, then
//! before that "somewhere" in memory is reused by another Rust object, `T`'s
//! destructor must run.
//!
//! Normally, this isn't a problem for Rust code, since the storage of an object
//! is destroyed immediately after it is destroyed. [`DerefMove`], however,
//! breaks this expectation: it separates the destructors from its storage and
//! contents into two separately destroyed objects: a [`DerefMove::Storage`]
//! and a [`MoveRef`]. If the [`MoveRef`] is [`mem::forget`]'ed, we lose: the
//! storage will potentially be re-used.
//!
//! Therefore, we must somehow detect that [`MoveRef`]s fail to be destroyed
//! when the destructor for the corresponding storage is run, and remediate it,
//! either by leaking heap storage or aborting if we would free stack storage
//! (a panic is insufficient, since that location can be reused if the panic is
//! caught).
//!
//! A [`DropFlag`] allows us to achieve this. It is a generalized, library-level
//! version of the Rust language's drop flags, which it uses to dynamically
//! determine whether to run destructors of stack-allocated values that might
//! have been moved from. Unlike Rust language drop flags, a [`DropFlag`] is
//! actually a counter, rather than a boolean. This allows storage that holds
//! many objects, like a vector, ensure that all contents have been properly
//! destroyed.
//!
//! This module also provides two helper types simplify safe creation and
//! management of drop flags.
//!
//! See the [Rustonomicon entry](https://doc.rust-lang.org/nomicon/drop-flags.html)
//! for the Rust language equivalent.
//!
//! # Safety
//!
//! No function in this module is `unsafe`: instead, functions that construct
//! [`MoveRef`]s out of [`DropFlag`]s are `unsafe`, and their callers are
//! responsible for ensuring that the passed-in [`DropFlag`] helps uphold the
//! relevant invariants.
use core::cell::Cell;
use core::mem;
use core::mem::ManuallyDrop;
use core::ops::Deref;
use core::ops::DerefMut;
#[cfg(doc)]
use {
crate::move_ref::{DerefMove, MoveRef},
alloc::boxed::Box,
core::pin::Pin,
};
/// A drop flag, for tracking successful destruction.
///
/// A `DropFlag` is a reference to a counter somewhere on the stack that lives
/// adjacent to storage for some value. It is just a counter: `unsafe` code is
/// expected to associate semantic meaning to it.
///
/// A flag with a value of zero is usually called "dead", and setting a flag to
/// the dead state is called clearing it.
///
/// See the [module documentation][self] for more information.
#[derive(Clone, Copy)]
pub struct DropFlag<'frame> {
counter: &'frame Cell<usize>,
}
impl DropFlag<'_> {
/// Increments the internal counter.
///
/// This function does not provide any overflow protection; `unsafe` code is
/// responsible for making sure that cannot happen.
#[inline]
pub fn inc(self) {
self.counter.set(self.counter.get() + 1)
}
/// Decrements the internal counter and returns true if it became zero.
///
/// This function will return `false` if the counter was already zero.
#[inline]
pub fn dec_and_check_if_died(self) -> bool {
if self.counter.get() == 0 {
return false;
}
self.counter.set(self.counter.get() - 1);
self.is_dead()
}
/// Returns whether the internal counter is zero.
#[inline]
pub fn is_dead(self) -> bool {
self.counter.get() == 0
}
/// Lengthens the lifetime of `self`.
#[inline]
#[allow(unused)]
pub(crate) unsafe fn longer_lifetime<'a>(self) -> DropFlag<'a> {
DropFlag {
counter: mem::transmute(self.counter),
}
}
}
/// A wrapper for managing when a value gets dropped via a [`DropFlag`].
///
/// This type tracks the destruction state of some value relative to another
/// value via its [`DropFlag`]: for example, it might be the storage of a value
/// wrapped up in a [`MoveRef`]. When a `DroppingFlag` is destroyed, it will
/// run the destructor for the wrapped value if and only if the [`DropFlag`]
/// is dead.
///
/// This type can be viewed as using a [`DropFlag`] to "complete" a
/// [`ManuallyDrop<T>`] by explicitly tracking whether it has been dropped. The
/// flag can be used to signal whether to destroy or leak the value, but the
/// destruction occurs lazily rather than immediately when the flag is flipped.
///
/// This is useful as a [`DerefMove::Storage`] type for types where the
/// storage should be leaked if the inner type was somehow not destroyed, such
/// as in the case of heap-allocated storage like [`Box<T>`].
pub struct DroppingFlag<T> {
value: ManuallyDrop<T>,
counter: Cell<usize>,
}
impl<T> DroppingFlag<T> {
/// Wraps a new value to have its drop state managed by a `DropFlag`.
///
/// The drop flag will start out dead and needs to be manually incremented.
pub fn new(value: T) -> Self {
Self {
value: ManuallyDrop::new(value),
counter: Cell::new(0),
}
}
/// Gets a reference to the drop flag.
///
/// This function is safe; the returned reference to the drop flag cannot be
/// used to make a previously dropped value live again.
pub fn flag(slot: &Self) -> DropFlag {
DropFlag {
counter: &slot.counter,
}
}
/// Splits this slot into a reference to the wrapped value plus a reference to
/// the drop flag.
///
/// This function is safe; the returned reference to the drop flag cannot be
/// used to make a previously dropped value live again, since the value is
/// not destroyed before the wrapper is.
pub fn as_parts(slot: &Self) -> (&T, DropFlag) {
(
&slot.value,
DropFlag {
counter: &slot.counter,
},
)
}
/// Splits this slot into a reference to the wrapped value plus a reference to
/// the drop flag.
///
/// This function is safe; the returned reference to the drop flag cannot be
/// used to make a previously dropped value live again, since the value is
/// not destroyed before the wrapper is.
pub fn | (slot: &mut Self) -> (&mut T, DropFlag) {
(
&mut slot.value,
DropFlag {
counter: &slot.counter,
},
)
}
}
impl<T> Deref for DroppingFlag<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.value
}
}
impl<T> DerefMut for DroppingFlag<T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
impl<T> Drop for DroppingFlag<T> {
fn drop(&mut self) {
if Self::flag(self).is_dead() {
unsafe {
ManuallyDrop::drop(&mut self.value);
}
}
}
}
/// An RAII trap that ensures a drop flag is correctly cleared.
///
/// This type is *similar* to a [`DroppingFlag`], except that it does not wrap
/// a value and rather than leaking memory aborts the program if its flag is
/// not cleared.
///
/// This type is useful for safely constructing [`MoveRef`]s.
pub struct TrappedFlag {
counter: Cell<usize>,
// In debug mode, we capture the location the trap is created at, to help
// connect an eventual failure to the matching storage.
#[cfg(debug_assertions)]
location: &'static core::panic::Location<'static>,
}
impl TrappedFlag {
/// Creates a new trap with a dead flag.
#[cfg(debug_assertions)]
#[track_caller]
pub fn new() -> Self {
Self {
counter: Cell::new(0),
location: core::panic::Location::caller(),
}
}
/// Creates a new trap with a dead flag.
#[cfg(not(debug_assertions))]
pub fn new() -> Self {
Self {
counter: Cell::new(0),
}
}
/// Returns a reference to the [`DropFlag`].
pub fn flag(&self) -> DropFlag {
DropFlag {
counter: &self.counter,
}
}
/// Preemptively checks that this flag has been cleared.
///
/// Aborts (rather than panicking!) if the assertion fails.
pub fn assert_cleared(&self) {
if self.flag().is_dead() {
return;
}
// We can force an abort by triggering a panic mid-unwind.
// This is the only way to force an LLVM abort from inside of `core`.
struct DoublePanic;
impl Drop for DoublePanic {
fn drop(&mut self) {
// In tests, we don't double-panic so that we can observe the
// failure correctly.
if cfg!(not(test)) {
panic!()
}
}
}
let _dp = DoublePanic;
#[cfg(debug_assertions)]
panic!("a critical drop flag at {} was not cleared!", self.location);
#[cfg(not(debug_assertions))]
panic!("a critical drop flag was not cleared!");
}
}
impl Default for TrappedFlag {
fn default() -> Self {
Self::new()
}
}
impl Drop for TrappedFlag {
fn drop(&mut self) {
self.assert_cleared();
}
}
/// A [`DropFlag`] source that doesn't do anything with it.
///
/// This is similar to `TrappedFlag`, but where it does not abort the program
/// if used incorrectly. This type is generally only useful when some separate
/// mechanism is ensuring that invariants are not violated.
pub struct QuietFlag {
counter: Cell<usize>,
}
impl QuietFlag {
/// Creates a new dead flag.
pub fn new() -> Self {
Self {
counter: Cell::new(0),
}
}
/// Returns a reference to the [`DropFlag`].
pub fn flag(&self) -> DropFlag {
DropFlag {
counter: &self.counter,
}
}
}
impl Default for QuietFlag {
fn default() -> Self {
Self::new()
}
}
| as_parts_mut | identifier_name |
drop_flag.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Drop flags.
//!
//! The [`Pin<P>`] guarantee states that if we have a `T` allocated somewhere,
//! and we construct a pinned reference to it such as a `Pin<&'a mut T>`, then
//! before that "somewhere" in memory is reused by another Rust object, `T`'s
//! destructor must run.
//!
//! Normally, this isn't a problem for Rust code, since the storage of an object
//! is destroyed immediately after the object itself is destroyed. [`DerefMove`], however,
//! breaks this expectation: it separates the destructors from its storage and
//! contents into two separately destroyed objects: a [`DerefMove::Storage`]
//! and a [`MoveRef`]. If the [`MoveRef`] is [`mem::forget`]'ed, we lose: the
//! storage will potentially be re-used.
//!
//! Therefore, we must somehow detect that [`MoveRef`]s fail to be destroyed
//! when the destructor for the corresponding storage is run, and remediate it,
//! either by leaking heap storage or aborting if we would free stack storage
//! (a panic is insufficient, since that location can be reused if the panic is
//! caught).
//!
//! A [`DropFlag`] allows us to achieve this. It is a generalized, library-level
//! version of the Rust language's drop flags, which it uses to dynamically
//! determine whether to run destructors of stack-allocated values that might
//! have been moved from. Unlike Rust language drop flags, a [`DropFlag`] is
//! actually a counter, rather than a boolean. This allows storage that holds
//! many objects, like a vector, to ensure that all contents have been properly
//! destroyed.
//!
//! This module also provides two helper types to simplify safe creation and
//! management of drop flags.
//!
//! See the [Rustonomicon entry](https://doc.rust-lang.org/nomicon/drop-flags.html)
//! for the Rust language equivalent.
//!
//! # Safety
//!
//! No function in this module is `unsafe`: instead, functions that construct
//! [`MoveRef`]s out of [`DropFlag`]s are `unsafe`, and their callers are
//! responsible for ensuring that the passed-in [`DropFlag`] helps uphold the
//! relevant invariants.
use core::cell::Cell;
use core::mem;
use core::mem::ManuallyDrop;
use core::ops::Deref;
use core::ops::DerefMut;
#[cfg(doc)]
use {
crate::move_ref::{DerefMove, MoveRef},
alloc::boxed::Box,
core::pin::Pin,
};
/// A drop flag, for tracking successful destruction.
///
/// A `DropFlag` is a reference to a counter somewhere on the stack that lives
/// adjacent to storage for some value. It is just a counter: `unsafe` code is
/// expected to associate semantic meaning to it.
///
/// A flag with a value of zero is usually called "dead", and setting a flag to
/// the dead state is called clearing it.
///
/// See the [module documentation][self] for more information.
#[derive(Clone, Copy)]
pub struct DropFlag<'frame> {
counter: &'frame Cell<usize>,
}
impl DropFlag<'_> {
/// Increments the internal counter.
///
/// This function does not provide any overflow protection; `unsafe` code is
/// responsible for making sure that cannot happen.
#[inline]
pub fn inc(self) {
self.counter.set(self.counter.get() + 1)
}
/// Decrements the internal counter and returns true if it became zero.
///
/// This function will return `false` if the counter was already zero.
#[inline]
pub fn dec_and_check_if_died(self) -> bool {
if self.counter.get() == 0 {
return false;
}
self.counter.set(self.counter.get() - 1);
self.is_dead()
}
/// Returns whether the internal counter is zero.
#[inline]
pub fn is_dead(self) -> bool {
self.counter.get() == 0
}
/// Lengthens the lifetime of `self`.
#[inline]
#[allow(unused)]
pub(crate) unsafe fn longer_lifetime<'a>(self) -> DropFlag<'a> {
DropFlag {
counter: mem::transmute(self.counter),
}
}
}
/// A wrapper for managing when a value gets dropped via a [`DropFlag`].
///
/// This type tracks the destruction state of some value relative to another
/// value via its [`DropFlag`]: for example, it might be the storage of a value
/// wrapped up in a [`MoveRef`]. When a `DroppingFlag` is destroyed, it will
/// run the destructor for the wrapped value if and only if the [`DropFlag`]
/// is dead.
///
/// This type can be viewed as using a [`DropFlag`] to "complete" a
/// [`ManuallyDrop<T>`] by explicitly tracking whether it has been dropped. The
/// flag can be used to signal whether to destroy or leak the value, but the
/// destruction occurs lazily rather than immediately when the flag is flipped.
///
/// This is useful as a [`DerefMove::Storage`] type for types where the
/// storage should be leaked if the inner type was somehow not destroyed, such
/// as in the case of heap-allocated storage like [`Box<T>`].
pub struct DroppingFlag<T> {
value: ManuallyDrop<T>,
counter: Cell<usize>,
}
impl<T> DroppingFlag<T> {
/// Wraps a new value to have its drop state managed by a `DropFlag`.
///
/// The drop flag will start out dead and needs to be manually incremented.
pub fn new(value: T) -> Self {
Self {
value: ManuallyDrop::new(value),
counter: Cell::new(0),
}
}
/// Gets a reference to the drop flag.
///
/// This function is safe; the returned reference to the drop flag cannot be
/// used to make a previously dropped value live again.
pub fn flag(slot: &Self) -> DropFlag {
DropFlag {
counter: &slot.counter,
}
}
/// Splits this slot into a reference to the wrapped value plus a reference to
/// the drop flag.
///
/// This function is safe; the returned reference to the drop flag cannot be
/// used to make a previously dropped value live again, since the value is
/// not destroyed before the wrapper is.
pub fn as_parts(slot: &Self) -> (&T, DropFlag) {
(
&slot.value,
DropFlag {
counter: &slot.counter,
},
)
}
/// Splits this slot into a reference to the wrapped value plus a reference to
/// the drop flag.
///
/// This function is safe; the returned reference to the drop flag cannot be
/// used to make a previously dropped value live again, since the value is
/// not destroyed before the wrapper is.
pub fn as_parts_mut(slot: &mut Self) -> (&mut T, DropFlag) {
(
&mut slot.value,
DropFlag {
counter: &slot.counter,
},
)
}
}
impl<T> Deref for DroppingFlag<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.value
}
}
impl<T> DerefMut for DroppingFlag<T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
impl<T> Drop for DroppingFlag<T> {
fn drop(&mut self) {
if Self::flag(self).is_dead() |
}
}
/// An RAII trap that ensures a drop flag is correctly cleared.
///
/// This type is *similar* to a [`DroppingFlag`], except that it does not wrap
/// a value and rather than leaking memory aborts the program if its flag is
/// not cleared.
///
/// This type is useful for safely constructing [`MoveRef`]s.
pub struct TrappedFlag {
counter: Cell<usize>,
// In debug mode, we capture the location the trap is created at, to help
// connect an eventual failure to the matching storage.
#[cfg(debug_assertions)]
location: &'static core::panic::Location<'static>,
}
impl TrappedFlag {
/// Creates a new trap with a dead flag.
#[cfg(debug_assertions)]
#[track_caller]
pub fn new() -> Self {
Self {
counter: Cell::new(0),
location: core::panic::Location::caller(),
}
}
/// Creates a new trap with a dead flag.
#[cfg(not(debug_assertions))]
pub fn new() -> Self {
Self {
counter: Cell::new(0),
}
}
/// Returns a reference to the [`DropFlag`].
pub fn flag(&self) -> DropFlag {
DropFlag {
counter: &self.counter,
}
}
/// Preemptively checks that this flag has been cleared.
///
/// Aborts (rather than panicking!) if the assertion fails.
pub fn assert_cleared(&self) {
if self.flag().is_dead() {
return;
}
// We can force an abort by triggering a panic mid-unwind.
// This is the only way to force an LLVM abort from inside of `core`.
struct DoublePanic;
impl Drop for DoublePanic {
fn drop(&mut self) {
// In tests, we don't double-panic so that we can observe the
// failure correctly.
if cfg!(not(test)) {
panic!()
}
}
}
let _dp = DoublePanic;
#[cfg(debug_assertions)]
panic!("a critical drop flag at {} was not cleared!", self.location);
#[cfg(not(debug_assertions))]
panic!("a critical drop flag was not cleared!");
}
}
impl Default for TrappedFlag {
fn default() -> Self {
Self::new()
}
}
impl Drop for TrappedFlag {
fn drop(&mut self) {
self.assert_cleared();
}
}
/// A [`DropFlag`] source that doesn't do anything with it.
///
/// This is similar to `TrappedFlag`, but where it does not abort the program
/// if used incorrectly. This type is generally only useful when some separate
/// mechanism is ensuring that invariants are not violated.
pub struct QuietFlag {
counter: Cell<usize>,
}
impl QuietFlag {
/// Creates a new dead flag.
pub fn new() -> Self {
Self {
counter: Cell::new(0),
}
}
/// Returns a reference to the [`DropFlag`].
pub fn flag(&self) -> DropFlag {
DropFlag {
counter: &self.counter,
}
}
}
impl Default for QuietFlag {
fn default() -> Self {
Self::new()
}
}
| {
unsafe {
ManuallyDrop::drop(&mut self.value);
}
} | conditional_block |
drop_flag.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Drop flags.
//!
//! The [`Pin<P>`] guarantee states that if we have a `T` allocated somewhere,
//! and we construct a pinned reference to it such as a `Pin<&'a mut T>`, then
//! before that "somewhere" in memory is reused by another Rust object, `T`'s
//! destructor must run.
//!
//! Normally, this isn't a problem for Rust code, since the storage of an object
//! is destroyed immediately after the object itself is destroyed. [`DerefMove`], however,
//! breaks this expectation: it separates the destructors from its storage and
//! contents into two separately destroyed objects: a [`DerefMove::Storage`]
//! and a [`MoveRef`]. If the [`MoveRef`] is [`mem::forget`]'ed, we lose: the
//! storage will potentially be re-used.
//!
//! Therefore, we must somehow detect that [`MoveRef`]s fail to be destroyed
//! when the destructor for the corresponding storage is run, and remediate it,
//! either by leaking heap storage or aborting if we would free stack storage
//! (a panic is insufficient, since that location can be reused if the panic is
//! caught).
//!
//! A [`DropFlag`] allows us to achieve this. It is a generalized, library-level
//! version of the Rust language's drop flags, which it uses to dynamically
//! determine whether to run destructors of stack-allocated values that might
//! have been moved from. Unlike Rust language drop flags, a [`DropFlag`] is
//! actually a counter, rather than a boolean. This allows storage that holds
//! many objects, like a vector, to ensure that all contents have been properly
//! destroyed.
//!
//! This module also provides two helper types to simplify safe creation and
//! management of drop flags.
//!
//! See the [Rustonomicon entry](https://doc.rust-lang.org/nomicon/drop-flags.html)
//! for the Rust language equivalent.
//!
//! # Safety
//!
//! No function in this module is `unsafe`: instead, functions that construct
//! [`MoveRef`]s out of [`DropFlag`]s are `unsafe`, and their callers are
//! responsible for ensuring that the passed-in [`DropFlag`] helps uphold the
//! relevant invariants.
use core::cell::Cell;
use core::mem;
use core::mem::ManuallyDrop;
use core::ops::Deref;
use core::ops::DerefMut;
#[cfg(doc)]
use {
crate::move_ref::{DerefMove, MoveRef},
alloc::boxed::Box,
core::pin::Pin,
};
/// A drop flag, for tracking successful destruction.
///
/// A `DropFlag` is a reference to a counter somewhere on the stack that lives
/// adjacent to storage for some value. It is just a counter: `unsafe` code is
/// expected to associate semantic meaning to it.
///
/// A flag with a value of zero is usually called "dead", and setting a flag to
/// the dead state is called clearing it.
///
/// See the [module documentation][self] for more information.
#[derive(Clone, Copy)]
pub struct DropFlag<'frame> {
  counter: &'frame Cell<usize>,
}
impl DropFlag<'_> {
  /// Bumps the internal counter by one.
  ///
  /// No overflow protection is provided; it is up to `unsafe` callers to
  /// guarantee that the counter can never wrap around.
  #[inline]
  pub fn inc(self) {
    let current = self.counter.get();
    self.counter.set(current + 1);
  }
  /// Drops the internal counter by one, reporting whether it reached zero.
  ///
  /// A counter that is already zero is left untouched, and `false` is
  /// returned in that case.
  #[inline]
  pub fn dec_and_check_if_died(self) -> bool {
    match self.counter.get() {
      0 => false,
      n => {
        self.counter.set(n - 1);
        // The counter just went from `n` to `n - 1`, so it died iff `n == 1`.
        n == 1
      }
    }
  }
  /// Reports whether the internal counter is currently zero.
  #[inline]
  pub fn is_dead(self) -> bool {
    self.counter.get() == 0
  }
  /// Extends the lifetime attached to `self` to an arbitrary `'a`.
  #[inline]
  #[allow(unused)]
  pub(crate) unsafe fn longer_lifetime<'a>(self) -> DropFlag<'a> {
    let counter: &'a Cell<usize> = mem::transmute(self.counter);
    DropFlag { counter }
  }
}
/// A wrapper for managing when a value gets dropped via a [`DropFlag`].
///
/// This type tracks the destruction state of some value relative to another
/// value via its [`DropFlag`]: for example, it might be the storage of a value
/// wrapped up in a [`MoveRef`]. When a `DroppingFlag` is destroyed, it will
/// run the destructor for the wrapped value if and only if the [`DropFlag`]
/// is dead.
///
/// This type can be viewed as using a [`DropFlag`] to "complete" a
/// [`ManuallyDrop<T>`] by explicitly tracking whether it has been dropped. The
/// flag can be used to signal whether to destroy or leak the value, but the
/// destruction occurs lazily rather than immediately when the flag is flipped.
///
/// This is useful as a [`DerefMove::Storage`] type for types where the
/// storage should be leaked if the inner type was somehow not destroyed, such
/// as in the case of heap-allocated storage like [`Box<T>`].
pub struct DroppingFlag<T> {
  // The wrapped value; its destructor runs in `Drop for DroppingFlag` only
  // when `counter` is zero at that point.
  value: ManuallyDrop<T>,
  // Counter handed out as a `DropFlag` through `flag()`/`as_parts()`.
  counter: Cell<usize>,
}
impl<T> DroppingFlag<T> {
  /// Wraps `value` so that its destruction is tracked via a [`DropFlag`].
  ///
  /// The associated flag starts out dead and must be incremented manually.
  pub fn new(value: T) -> Self {
    Self {
      counter: Cell::new(0),
      value: ManuallyDrop::new(value),
    }
  }
  /// Returns the [`DropFlag`] associated with this slot.
  ///
  /// This function is safe: the returned flag cannot be used to resurrect a
  /// value that has already been dropped.
  pub fn flag(slot: &Self) -> DropFlag {
    DropFlag {
      counter: &slot.counter,
    }
  }
  /// Splits `slot` into a shared reference to the value plus its [`DropFlag`].
  ///
  /// This function is safe: the flag cannot be used to bring a dropped value
  /// back to life, since the value is not destroyed before the wrapper is.
  pub fn as_parts(slot: &Self) -> (&T, DropFlag) {
    let value: &T = &slot.value;
    (value, Self::flag(slot))
  }
  /// Splits `slot` into a mutable reference to the value plus its
  /// [`DropFlag`].
  ///
  /// This function is safe for the same reasons as
  /// [`DroppingFlag::as_parts`].
  pub fn as_parts_mut(slot: &mut Self) -> (&mut T, DropFlag) {
    // Borrow the two fields disjointly: `counter` shared, `value` mutable.
    let flag = DropFlag {
      counter: &slot.counter,
    };
    (&mut slot.value, flag)
  }
}
impl<T> Deref for DroppingFlag<T> {
  type Target = T;
  #[inline]
  fn deref(&self) -> &T {
    // Transparently exposes the wrapped value (coerces through
    // `ManuallyDrop<T>`).
    &self.value
  }
}
impl<T> DerefMut for DroppingFlag<T> {
  #[inline]
  fn deref_mut(&mut self) -> &mut T {
    &mut self.value
  }
}
impl<T> Drop for DroppingFlag<T> {
  fn drop(&mut self) {
    // Run the wrapped value's destructor only if the flag is dead; per the
    // type's contract, a live (non-zero) flag signals that the value should
    // be leaked instead.
    if Self::flag(self).is_dead() {
      // SAFETY: this is the only place the wrapper itself drops `value`, and
      // `drop` runs at most once. `unsafe` users who manipulate the flag are
      // responsible for not having destroyed the contents already (see the
      // module docs).
      unsafe {
        ManuallyDrop::drop(&mut self.value);
      }
    }
  }
}
/// An RAII trap that ensures a drop flag is correctly cleared.
///
/// This type is *similar* to a [`DroppingFlag`], except that it does not wrap
/// a value and rather than leaking memory aborts the program if its flag is
/// not cleared.
///
/// This type is useful for safely constructing [`MoveRef`]s.
pub struct TrappedFlag {
  // Counter handed out through `flag()`; it must be back to zero by the time
  // the trap is dropped, or `assert_cleared` aborts the program.
  counter: Cell<usize>,
  // In debug mode, we capture the location the trap is created at, to help
  // connect an eventual failure to the matching storage.
  #[cfg(debug_assertions)]
  location: &'static core::panic::Location<'static>,
}
impl TrappedFlag {
  /// Creates a new trap with a dead flag.
  ///
  /// Debug builds record the caller's source location so a tripped trap can
  /// be traced back to the storage it guards.
  #[cfg(debug_assertions)]
  #[track_caller]
  pub fn new() -> Self {
    Self {
      counter: Cell::new(0),
      location: core::panic::Location::caller(),
    }
  }
  /// Creates a new trap with a dead flag.
  #[cfg(not(debug_assertions))]
  pub fn new() -> Self {
    Self {
      counter: Cell::new(0),
    }
  }
  /// Returns a reference to the [`DropFlag`].
  pub fn flag(&self) -> DropFlag {
    DropFlag {
      counter: &self.counter,
    }
  }
  /// Preemptively checks that this flag has been cleared.
  ///
  /// Aborts (rather than panicking!) if the assertion fails.
  pub fn assert_cleared(&self) {
    if self.flag().is_dead() {
      return;
    }
    // We can force an abort by triggering a panic mid-unwind.
    // This is the only way to force an LLVM abort from inside of `core`.
    struct DoublePanic;
    impl Drop for DoublePanic {
      fn drop(&mut self) {
        // In tests, we don't double-panic so that we can observe the
        // failure correctly.
        if cfg!(not(test)) {
          panic!()
        }
      }
    }
    // `_dp` is still live when the `panic!` below begins unwinding; its
    // destructor then panics again, turning the unwind into an abort.
    let _dp = DoublePanic;
    #[cfg(debug_assertions)]
    panic!("a critical drop flag at {} was not cleared!", self.location);
    #[cfg(not(debug_assertions))]
    panic!("a critical drop flag was not cleared!");
  }
}
impl Default for TrappedFlag {
fn default() -> Self |
}
impl Drop for TrappedFlag {
  fn drop(&mut self) {
    // RAII enforcement: leaving scope with a non-cleared flag aborts the
    // program (see `assert_cleared`).
    self.assert_cleared();
  }
}
/// A [`DropFlag`] source that doesn't do anything with it.
///
/// This is similar to `TrappedFlag`, but where it does not abort the program
/// if used incorrectly. This type is generally only useful when some separate
/// mechanism is ensuring that invariants are not violated.
pub struct QuietFlag {
  // Counter handed out through `flag()`; never inspected on drop.
  counter: Cell<usize>,
}
impl QuietFlag {
/// Creates a new dead flag.
pub fn new() -> Self {
Self {
counter: Cell::new(0),
}
}
/// Returns a reference to the [`DropFlag`].
pub fn flag(&self) -> DropFlag {
DropFlag {
counter: &self.counter,
}
}
}
impl Default for QuietFlag {
  fn default() -> Self {
    // Same as `new`: the flag starts out dead (counter == 0).
    Self::new()
  }
}
| {
Self::new()
} | identifier_body |
drop_flag.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Drop flags.
//!
//! The [`Pin<P>`] guarantee states that if we have a `T` allocated somewhere,
//! and we construct a pinned reference to it such as a `Pin<&'a mut T>`, then
//! before that "somewhere" in memory is reused by another Rust object, `T`'s
//! destructor must run.
//!
//! Normally, this isn't a problem for Rust code, since the storage of an object
//! is destroyed immediately after the object itself is destroyed. [`DerefMove`], however,
//! breaks this expectation: it separates the destructors from its storage and
//! contents into two separately destroyed objects: a [`DerefMove::Storage`]
//! and a [`MoveRef`]. If the [`MoveRef`] is [`mem::forget`]'ed, we lose: the
//! storage will potentially be re-used.
//!
//! Therefore, we must somehow detect that [`MoveRef`]s fail to be destroyed
//! when the destructor for the corresponding storage is run, and remediate it,
//! either by leaking heap storage or aborting if we would free stack storage
//! (a panic is insufficient, since that location can be reused if the panic is
//! caught).
//!
//! A [`DropFlag`] allows us to achieve this. It is a generalized, library-level
//! version of the Rust language's drop flags, which it uses to dynamically
//! determine whether to run destructors of stack-allocated values that might
//! have been moved from. Unlike Rust language drop flags, a [`DropFlag`] is
//! actually a counter, rather than a boolean. This allows storage that holds
//! many objects, like a vector, to ensure that all contents have been properly
//! destroyed.
//!
//! This module also provides two helper types to simplify safe creation and
//! management of drop flags.
//!
//! See the [Rustonomicon entry](https://doc.rust-lang.org/nomicon/drop-flags.html)
//! for the Rust language equivalent.
//!
//! # Safety
//!
//! No function in this module is `unsafe`: instead, functions that construct
//! [`MoveRef`]s out of [`DropFlag`]s are `unsafe`, and their callers are
//! responsible for ensuring that the passed-in [`DropFlag`] helps uphold the
//! relevant invariants.
use core::cell::Cell;
use core::mem;
use core::mem::ManuallyDrop;
use core::ops::Deref;
use core::ops::DerefMut;
#[cfg(doc)]
use {
crate::move_ref::{DerefMove, MoveRef},
alloc::boxed::Box,
core::pin::Pin,
};
/// A drop flag, for tracking successful destruction.
///
/// A `DropFlag` is a reference to a counter somewhere on the stack that lives
/// adjacent to storage for some value. It is just a counter: `unsafe` code is
/// expected to associate semantic meaning to it.
///
/// A flag with a value of zero is usually called "dead", and setting a flag to
/// the dead state is called clearing it.
///
/// See the [module documentation][self] for more information.
#[derive(Clone, Copy)]
pub struct DropFlag<'frame> {
counter: &'frame Cell<usize>,
}
impl DropFlag<'_> {
/// Increments the internal counter.
///
/// This function does not provide any overflow protection; `unsafe` code is
/// responsible for making sure that cannot happen.
#[inline]
pub fn inc(self) {
self.counter.set(self.counter.get() + 1)
}
/// Decrements the internal counter and returns true if it became zero.
///
/// This function will return `false` if the counter was already zero.
#[inline]
pub fn dec_and_check_if_died(self) -> bool {
if self.counter.get() == 0 {
return false;
}
self.counter.set(self.counter.get() - 1);
self.is_dead()
}
/// Returns whether the internal counter is zero.
#[inline]
pub fn is_dead(self) -> bool {
self.counter.get() == 0
}
/// Lengthens the lifetime of `self`.
#[inline]
#[allow(unused)]
pub(crate) unsafe fn longer_lifetime<'a>(self) -> DropFlag<'a> {
DropFlag {
counter: mem::transmute(self.counter),
}
}
}
| /// wrapped up in a [`MoveRef`]. When a `DroppingFlag` is destroyed, it will
/// run the destructor for the wrapped value if and only if the [`DropFlag`]
/// is dead.
///
/// This type can be viewed as using a [`DropFlag`] to "complete" a
/// [`ManuallyDrop<T>`] by explicitly tracking whether it has been dropped. The
/// flag can be used to signal whether to destroy or leak the value, but the
/// destruction occurs lazily rather than immediately when the flag is flipped.
///
/// This is useful as a [`DerefMove::Storage`] type for types where the
/// storage should be leaked if the inner type was somehow not destroyed, such
/// as in the case of heap-allocated storage like [`Box<T>`].
pub struct DroppingFlag<T> {
value: ManuallyDrop<T>,
counter: Cell<usize>,
}
impl<T> DroppingFlag<T> {
/// Wraps a new value to have its drop state managed by a `DropFlag`.
///
/// The drop flag will start out dead and needs to be manually incremented.
pub fn new(value: T) -> Self {
Self {
value: ManuallyDrop::new(value),
counter: Cell::new(0),
}
}
/// Gets a reference to the drop flag.
///
/// This function is safe; the returned reference to the drop flag cannot be
/// used to make a previously dropped value live again.
pub fn flag(slot: &Self) -> DropFlag {
DropFlag {
counter: &slot.counter,
}
}
/// Splits this slot into a reference to the wrapped value plus a reference to
/// the drop flag.
///
/// This function is safe; the returned reference to the drop flag cannot be
/// used to make a previously dropped value live again, since the value is
/// not destroyed before the wrapper is.
pub fn as_parts(slot: &Self) -> (&T, DropFlag) {
(
&slot.value,
DropFlag {
counter: &slot.counter,
},
)
}
/// Splits this slot into a reference to the wrapped value plus a reference to
/// the drop flag.
///
/// This function is safe; the returned reference to the drop flag cannot be
/// used to make a previously dropped value live again, since the value is
/// not destroyed before the wrapper is.
pub fn as_parts_mut(slot: &mut Self) -> (&mut T, DropFlag) {
(
&mut slot.value,
DropFlag {
counter: &slot.counter,
},
)
}
}
impl<T> Deref for DroppingFlag<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.value
}
}
impl<T> DerefMut for DroppingFlag<T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
impl<T> Drop for DroppingFlag<T> {
fn drop(&mut self) {
if Self::flag(self).is_dead() {
unsafe {
ManuallyDrop::drop(&mut self.value);
}
}
}
}
/// An RAII trap that ensures a drop flag is correctly cleared.
///
/// This type is *similar* to a [`DroppingFlag`], except that it does not wrap
/// a value and rather than leaking memory aborts the program if its flag is
/// not cleared.
///
/// This type is useful for safely constructing [`MoveRef`]s.
pub struct TrappedFlag {
counter: Cell<usize>,
// In debug mode, we capture the location the trap is created at, to help
// connect an eventual failure to the matching storage.
#[cfg(debug_assertions)]
location: &'static core::panic::Location<'static>,
}
impl TrappedFlag {
/// Creates a new trap with a dead flag.
///
/// `#[track_caller]` makes `Location::caller()` report the construction
/// site, so a later `assert_cleared` failure can name where the trap was
/// made.
#[cfg(debug_assertions)]
#[track_caller]
pub fn new() -> Self {
Self {
counter: Cell::new(0),
location: core::panic::Location::caller(),
}
}
/// Creates a new trap with a dead flag.
///
/// Release builds skip the caller-location capture entirely.
#[cfg(not(debug_assertions))]
pub fn new() -> Self {
Self {
counter: Cell::new(0),
}
}
/// Returns a reference to the [`DropFlag`].
pub fn flag(&self) -> DropFlag {
DropFlag {
counter: &self.counter,
}
}
/// Preemptively checks that this flag has been cleared.
///
/// Aborts (rather than panicking!) if the assertion fails.
pub fn assert_cleared(&self) {
if self.flag().is_dead() {
return;
}
// We can force an abort by triggering a panic mid-unwind.
// This is the only way to force an LLVM abort from inside of `core`.
struct DoublePanic;
impl Drop for DoublePanic {
fn drop(&mut self) {
// In tests, we don't double-panic so that we can observe the
// failure correctly.
if cfg!(not(test)) {
panic!()
}
}
}
// `_dp` is alive when the `panic!` below starts unwinding; its destructor
// then panics *during* that unwind, which the language defines to abort.
let _dp = DoublePanic;
#[cfg(debug_assertions)]
panic!("a critical drop flag at {} was not cleared!", self.location);
#[cfg(not(debug_assertions))]
panic!("a critical drop flag was not cleared!");
}
}
impl Default for TrappedFlag {
fn default() -> Self {
Self::new()
}
}
impl Drop for TrappedFlag {
    /// On destruction, verifies that the flag was cleared; aborts the
    /// process otherwise (see [`TrappedFlag::assert_cleared`]).
    fn drop(&mut self) {
        Self::assert_cleared(self);
    }
}
/// A [`DropFlag`] source that doesn't do anything with it.
///
/// This is similar to `TrappedFlag`, but where it does not abort the program
/// if used incorrectly. This type is generally only useful when some separate
/// mechanism is ensuring that invariants are not violated.
pub struct QuietFlag {
// Counter handed out through `flag()`; starts at zero (a "dead" flag).
// Unlike `TrappedFlag`, nothing inspects it on drop.
counter: Cell<usize>,
}
impl QuietFlag {
/// Creates a new dead flag.
pub fn new() -> Self {
Self {
counter: Cell::new(0),
}
}
/// Returns a reference to the [`DropFlag`].
pub fn flag(&self) -> DropFlag {
DropFlag {
counter: &self.counter,
}
}
}
impl Default for QuietFlag {
fn default() -> Self {
Self::new()
}
} | /// A wrapper for managing when a value gets dropped via a [`DropFlag`].
///
/// This type tracks the destruction state of some value relative to another
/// value via its [`DropFlag`]: for example, it might be the storage of a value | random_line_split |
stylesheet_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::sync::Arc;
use style::gecko_bindings::bindings::Gecko_LoadStyleSheet;
use style::gecko_bindings::structs::{Loader, ServoStyleSheet};
use style::gecko_bindings::sugar::ownership::HasArcFFI;
use style::media_queries::MediaList;
use style::shared_lock::Locked;
use style::stylesheets::{ImportRule, StylesheetLoader as StyleStylesheetLoader};
use style_traits::ToCss;
pub struct StylesheetLoader(*mut Loader, *mut ServoStyleSheet);
impl StylesheetLoader {
pub fn new(loader: *mut Loader, parent: *mut ServoStyleSheet) -> Self |
}
impl StyleStylesheetLoader for StylesheetLoader {
fn request_stylesheet(
&self,
media: MediaList,
make_import: &mut FnMut(MediaList) -> ImportRule,
make_arc: &mut FnMut(ImportRule) -> Arc<Locked<ImportRule>>,
) -> Arc<Locked<ImportRule>> {
// TODO(emilio): We probably want to share media representation with
// Gecko in Stylo.
//
// This also allows us to get rid of a bunch of extra work to evaluate
// and ensure parity, and shouldn't be much Gecko work given we always
// evaluate them on the main thread.
//
// Meanwhile, this works.
let media_string = media.to_css_string();
let import = make_import(media);
// After we get this raw pointer ImportRule will be moved into a lock and Arc
// and so the Arc<Url> pointer inside will also move,
// but the Url it points to or the allocating backing the String inside that Url won’t,
// so this raw pointer will still be valid.
let (spec_bytes, spec_len): (*const u8, usize) = import.url.as_slice_components()
.expect("Import only loads valid URLs");
let arc = make_arc(import);
unsafe {
Gecko_LoadStyleSheet(self.0,
self.1,
HasArcFFI::arc_as_borrowed(&arc),
spec_bytes,
spec_len as u32,
media_string.as_bytes().as_ptr(),
media_string.len() as u32);
}
arc
}
}
| {
StylesheetLoader(loader, parent)
} | identifier_body |
stylesheet_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
| use style::shared_lock::Locked;
use style::stylesheets::{ImportRule, StylesheetLoader as StyleStylesheetLoader};
use style_traits::ToCss;
pub struct StylesheetLoader(*mut Loader, *mut ServoStyleSheet);
impl StylesheetLoader {
pub fn new(loader: *mut Loader, parent: *mut ServoStyleSheet) -> Self {
StylesheetLoader(loader, parent)
}
}
impl StyleStylesheetLoader for StylesheetLoader {
fn request_stylesheet(
&self,
media: MediaList,
make_import: &mut FnMut(MediaList) -> ImportRule,
make_arc: &mut FnMut(ImportRule) -> Arc<Locked<ImportRule>>,
) -> Arc<Locked<ImportRule>> {
// TODO(emilio): We probably want to share media representation with
// Gecko in Stylo.
//
// This also allows us to get rid of a bunch of extra work to evaluate
// and ensure parity, and shouldn't be much Gecko work given we always
// evaluate them on the main thread.
//
// Meanwhile, this works.
let media_string = media.to_css_string();
let import = make_import(media);
// After we get this raw pointer ImportRule will be moved into a lock and Arc
// and so the Arc<Url> pointer inside will also move,
// but the Url it points to or the allocating backing the String inside that Url won’t,
// so this raw pointer will still be valid.
let (spec_bytes, spec_len): (*const u8, usize) = import.url.as_slice_components()
.expect("Import only loads valid URLs");
let arc = make_arc(import);
unsafe {
Gecko_LoadStyleSheet(self.0,
self.1,
HasArcFFI::arc_as_borrowed(&arc),
spec_bytes,
spec_len as u32,
media_string.as_bytes().as_ptr(),
media_string.len() as u32);
}
arc
}
} | use std::sync::Arc;
use style::gecko_bindings::bindings::Gecko_LoadStyleSheet;
use style::gecko_bindings::structs::{Loader, ServoStyleSheet};
use style::gecko_bindings::sugar::ownership::HasArcFFI;
use style::media_queries::MediaList; | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.