// watched_bitfield.rs
use crate::{BitField8, Error};
use std::{
fmt::{self, Display},
str::FromStr,
};
/// (De)Serializable field that tracks which videos have been watched
/// and the latest one watched.
///
/// This is a [`WatchedBitField`]-compatible field, (de)serialized
/// without knowledge of the `video_ids`.
///
/// `{anchor_video_id}:{anchor_length}:{bitfield8}`
///
/// # Examples
///
/// ```
/// use stremio_watched_bitfield::WatchedField;
///
/// // `tt2934286:1:5` - anchor video id
/// // `5` - anchor video length
/// // `eJyTZwAAAEAAIA==` - BitField8
///
/// let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==".parse::<WatchedField>().expect("Should parse");
/// ```
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedField {
/// The anchor video id
///
/// The id of the last video that was watched.
anchor_video: String,
/// The length from the beginning of the `BitField8` to the last
/// watched video.
anchor_length: usize,
bitfield: BitField8,
}
impl Display for WatchedField {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}:{}:{}",
self.anchor_video, self.anchor_length, self.bitfield
)
}
}
impl From<WatchedBitField> for WatchedField {
fn from(watched_bit_field: WatchedBitField) -> Self {
let last_id = watched_bit_field.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = watched_bit_field
.video_ids
.get(last_id)
.map_or_else(|| "undefined".to_string(), |id| id.clone());
Self {
anchor_video: last_video_id,
anchor_length: last_id + 1,
bitfield: watched_bit_field.bitfield,
}
}
}
impl FromStr for WatchedField {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
// serialized is formed by {id}:{len}:{serializedBuf}, but since {id} might contain : we have to pop gradually and then keep the rest
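// e.g. "tt2934286:1:5:5:eJyTZwAAAEAAIA==" pops "eJyTZwAAAEAAIA==" (buf) and "5" (len),
// leaving ["tt2934286", "1", "5"] to be re-joined into the id "tt2934286:1:5"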
let mut components = string.split(':').collect::<Vec<&str>>();
if components.len() < 3 {
return Err(Error("Not enough components".to_string()));
}
let bitfield_buf = components
.pop()
.ok_or("Cannot obtain the serialized data")?
.to_string();
let anchor_length = components
.pop()
.ok_or("Cannot obtain the length field")?
.parse::<usize>()?;
let anchor_video_id = components.join(":");
let bitfield = BitField8::try_from((bitfield_buf, None))?;
Ok(Self {
bitfield,
anchor_video: anchor_video_id,
anchor_length,
})
}
}
/// Tracks which videos have been watched.
///
/// Serialized in the format `{id}:{len}:{serializedBuf}` but since `{id}`
/// might contain `:` we pop gradually and then keep the rest.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedBitField {
bitfield: BitField8,
video_ids: Vec<String>,
}
impl WatchedBitField {
pub fn construct_from_array(arr: Vec<bool>, video_ids: Vec<String>) -> WatchedBitField {
let mut bitfield = BitField8::new(video_ids.len());
for (i, val) in arr.iter().enumerate() {
bitfield.set(i, *val);
}
WatchedBitField {
bitfield,
video_ids,
}
}
pub fn new(bitfield: BitField8, video_ids: Vec<String>) -> WatchedBitField {
Self {
bitfield,
video_ids,
}
}
pub fn construct_with_videos(
watched_field: WatchedField,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// We can shift the bitmap in any direction, as long as we can find the anchor video
if let Some(anchor_video_idx) = video_ids
.iter()
.position(|s| s == &watched_field.anchor_video)
{
// TODO: replace with `usize` and `checked_sub` when more tests are added for negative ids
let offset = watched_field.anchor_length as i32 - anchor_video_idx as i32 - 1;
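// e.g. anchor_length = 5 with the anchor now found at index 6 (two episodes
// were prepended) gives offset = 5 - 6 - 1 = -2, so new index i reads old
// index i - 2 below; offset = 0 means the indices still line up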
let bitfield =
BitField8::new_with_values(watched_field.bitfield.values, Some(video_ids.len()));
// in case of a previous empty array, this will be 0
if offset != 0 {
// Resize the buffer
let mut resized_wbf = WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids: video_ids.clone(),
};
// rewrite the old buf into the new one, applying the offset
for i in 0..video_ids.len() {
// TODO: Check what will happen if we change it to `usize`
let id_in_prev = i as i32 + offset;
if id_in_prev >= 0 && (id_in_prev as usize) < bitfield.length {
resized_wbf.set(i, bitfield.get(id_in_prev as usize));
}
}
Ok(resized_wbf)
} else {
Ok(WatchedBitField {
bitfield,
video_ids,
})
}
} else {
// videoId could not be found, return a totally blank buf
Ok(WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids,
})
}
}
pub fn construct_and_resize(
serialized: &str,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// note: videoIds.length can only be >= the serialized lastLength
// should we assert?
// we might also want to assert that the bitfield.length of the returned wb is the same as videoIds.length
let watched_field = serialized.parse()?;
Self::construct_with_videos(watched_field, video_ids)
}
pub fn get(&self, idx: usize) -> bool {
self.bitfield.get(idx)
}
pub fn get_video(&self, video_id: &str) -> bool {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.get(pos)
} else {
false
}
}
pub fn set(&mut self, idx: usize, v: bool) {
self.bitfield.set(idx, v);
}
pub fn set_video(&mut self, video_id: &str, v: bool) {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.set(pos, v);
}
}
}
impl fmt::Display for WatchedBitField {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let packed = String::try_from(&self.bitfield).expect("bitfield failed to compress");
let last_id = self.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = self
.video_ids
.get(last_id)
.map_or("undefined", |id| id.as_str());
write!(f, "{}:{}:{}", last_video_id, last_id + 1, packed)
}
}
impl From<WatchedBitField> for BitField8 {
fn from(watched: WatchedBitField) -> Self {
watched.bitfield
}
}
/// Module containing all the impls of the `serde` feature
#[cfg(feature = "serde")]
mod serde {
use std::str::FromStr;
use serde::{de, Serialize};
use super::WatchedField;
impl<'de> serde::Deserialize<'de> for WatchedField {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let serialized = String::deserialize(deserializer)?;
WatchedField::from_str(&serialized).map_err(de::Error::custom)
}
}
impl Serialize for WatchedField {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
}
#[cfg(test)]
mod tests {
use crate::{BitField8, WatchedBitField, WatchedField};
#[test]
fn parse_and_modify() {
let videos = [
"tt2934286:1:1",
"tt2934286:1:2",
"tt2934286:1:3",
"tt2934286:1:4",
"tt2934286:1:5",
"tt2934286:1:6",
"tt2934286:1:7",
"tt2934286:1:8",
"tt2934286:1:9",
];
let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==";
let mut wb = WatchedBitField::construct_and_resize(
watched,
videos.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
assert!(wb.get_video("tt2934286:1:5"));
assert!(!wb.get_video("tt2934286:1:6"));
assert_eq!(watched, wb.to_string());
wb.set_video("tt2934286:1:6", true);
assert!(wb.get_video("tt2934286:1:6"));
}
#[test]
fn construct_from_array() {
let arr = vec![false; 500];
let mut video_ids = vec![];
for i in 1..500 {
video_ids.push(format!("tt2934286:1:{}", i));
}
let mut wb = WatchedBitField::construct_from_array(arr, video_ids.clone());
// All should be false
for (i, val) in video_ids.iter().enumerate() {
assert!(!wb.get(i));
assert!(!wb.get_video(val));
}
// Set half to true
for (i, _val) in video_ids.iter().enumerate() {
wb.set(i, i % 2 == 0);
}
// Serialize and deserialize to new structure
let watched = wb.to_string();
let wb2 = WatchedBitField::construct_and_resize(
&watched,
video_ids.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
// Half should still be true
for (i, val) in video_ids.iter().enumerate() {
assert_eq!(wb2.get(i), i % 2 == 0);
assert_eq!(wb2.get_video(val), i % 2 == 0);
}
}
#[test]
fn to_string_empty() {
let watched = WatchedBitField::construct_from_array(vec![], vec![]);
let serialized = watched.to_string();
assert_eq!(serialized, "undefined:1:eJwDAAAAAAE=");
}
#[test]
#[cfg(feature = "serde")]
fn test_watched_field_de_serialize() {
let string = "tt7767422:3:8:24:eJz7//8/AAX9Av4=";
let json_value = serde_json::json!(string);
let expected = string.parse::<WatchedField>().expect("Should parse field");
let actual_from_json = serde_json::from_value::<WatchedField>(json_value.clone())
.expect("Should deserialize ");
assert_eq!(expected, actual_from_json);
assert_eq!("eJz7//8/AAX9Av4=", &actual_from_json.bitfield.to_string());
assert_eq!(24, actual_from_json.anchor_length);
assert_eq!("tt7767422:3:8", actual_from_json.anchor_video);
let actual_to_json = serde_json::to_value(&expected).expect("Should serialize");
assert_eq!(json_value, actual_to_json);
}
#[test]
fn deserialize_empty() {
let watched = WatchedBitField::construct_and_resize("undefined:1:eJwDAAAAAAE=", vec![]);
assert_eq!(
watched,
Ok(WatchedBitField {
bitfield: BitField8::new(0),
video_ids: vec![]
})
);
}
}
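// A hedged round-trip sketch of the resize path above: the episode ids are
// made up, and it assumes `BitField8` zero-fills the extra slots when a
// serialized field is re-attached to a longer video list.
#[cfg(test)]
mod resize_sketch {
use crate::WatchedBitField;
#[test]
fn keeps_watched_flags_when_video_list_grows() {
let old_ids: Vec<String> = (1..=5).map(|i| format!("tt0:1:{}", i)).collect();
let mut wb = WatchedBitField::construct_from_array(vec![false; 5], old_ids);
wb.set_video("tt0:1:3", true);
// serializes as `{anchor}:{len}:{buf}`, here "tt0:1:3:3:..."
let serialized = wb.to_string();
// two episodes are appended, so the anchor keeps its index (offset = 0)
let new_ids: Vec<String> = (1..=7).map(|i| format!("tt0:1:{}", i)).collect();
let resized = WatchedBitField::construct_and_resize(&serialized, new_ids).unwrap();
assert!(resized.get_video("tt0:1:3"));
assert!(!resized.get_video("tt0:1:6"));
}
}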
// main.rs
use std::{fmt::Display, ops::Index, str::FromStr};
use anyhow::{bail, Error};
use intcode::Computer;
static INPUT: &str = include_str!("input.txt");
struct View {
view: Vec<u8>,
width: usize,
height: usize,
}
impl FromStr for View {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut view = vec![];
let mut width = 0;
let mut computer: Computer = s.parse()?;
while let Some(o) = computer.wait_until_output() {
let o: u8 = o.try_into()?;
if o == 10 {
if width == 0 {
width = view.len();
}
} else {
view.push(o);
}
}
let height = view.len() / width;
Ok(View {
view,
width,
height,
})
}
}
impl Index<(usize, usize)> for View {
type Output = u8;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
&self.view[x + self.width * y]
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Orientation {
Up,
Down,
Left,
Right,
}
impl TryFrom<u8> for Orientation {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
b'^' => Ok(Orientation::Up),
b'v' => Ok(Orientation::Down),
b'<' => Ok(Orientation::Left),
b'>' => Ok(Orientation::Right),
_ => bail!("unknown orientation {value}"),
}
}
}
impl Orientation {
fn delta(&self) -> (isize, isize) {
match self {
Orientation::Up => (0, -1),
Orientation::Down => (0, 1),
Orientation::Left => (-1, 0),
Orientation::Right => (1, 0),
}
}
fn angle(&self) -> isize {
match self {
Orientation::Up => 90,
Orientation::Down => 270,
Orientation::Left => 180,
Orientation::Right => 0,
}
}
fn orient(&self, to: Orientation) -> Vec<Command> {
let diff_angle = to.angle() - self.angle();
let diff_angle = if diff_angle < 0 {
diff_angle + 360
} else {
diff_angle
};
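// Angles use the math convention (Right = 0, counter-clockwise positive), so a
// +90 difference is one left turn and +270 one right turn;
// e.g. Up (90) -> Right (0): 0 - 90 = -90, normalized to 270 => [Right].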
match diff_angle {
0 => vec![],
90 => vec![Command::Left],
180 => vec![Command::Left, Command::Left],
270 => vec![Command::Right],
_ => panic!("unrecognized angle: {diff_angle}"),
}
}
fn is_opposite(&self, to: Orientation) -> bool {
match self {
Orientation::Up => to == Orientation::Down,
Orientation::Down => to == Orientation::Up,
Orientation::Left => to == Orientation::Right,
Orientation::Right => to == Orientation::Left,
}
}
}
impl View {
fn neighbours(
&self,
pos: (usize, usize),
) -> impl Iterator<Item = (Orientation, (usize, usize))> {
let width = self.width;
let height = self.height;
[
Orientation::Up,
Orientation::Down,
Orientation::Left,
Orientation::Right,
]
.into_iter()
.filter_map(move |o| {
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (o, (x, y))))
})
.filter(move |(_, (x, y))| *x < width && *y < height)
}
fn forward(&self, pos: (usize, usize), o: Orientation) -> Option<(usize, usize)> {
let width = self.width;
let height = self.height;
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (x, y)))
.filter(move |(x, y)| *x < width && *y < height)
}
fn compute_route(&self) -> Result<Route, Error> {
let mut route = vec![];
let pos = self
.view
.iter()
.position(|c| *c != b'.' && *c != b'#')
.unwrap();
let mut cur_pos = (pos % self.width, pos / self.width);
let mut o = Orientation::try_from(self[cur_pos])?;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' {
route.append(&mut o.orient(n.0));
o = n.0;
break;
}
}
loop {
// move forward until either None or self[pos] != b'#'
let mut steps = 0;
while let Some(p) = self.forward(cur_pos, o) {
if self[p] != b'#' {
break;
}
steps += 1;
cur_pos = p;
}
route.push(Command::Forward(steps));
let mut found_turn = false;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' && !o.is_opposite(n.0) {
found_turn = true;
route.append(&mut o.orient(n.0));
o = n.0;
break;
}
}
if !found_turn {
break;
}
}
let route = Route(route);
Ok(route)
}
}
fn part_01(input: &str) -> Result<usize, Error> {
let mut aligment_parameters = 0;
let view: View = input.parse()?;
for y in 1..view.height - 1 {
for x in 1..view.width - 1 {
if view[(x, y)] == b'#'
&& view[(x - 1, y)] == b'#'
&& view[(x + 1, y)] == b'#'
&& view[(x, y - 1)] == b'#'
&& view[(x, y + 1)] == b'#'
{
aligment_parameters += x * y;
}
}
}
Ok(aligment_parameters)
}
impl Display for View {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for (idx, c) in self.view.iter().enumerate() {
if idx > 0 && idx % self.width == 0 {
writeln!(f)?;
}
write!(f, "{}", *c as char)?;
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Command {
Right,
Left,
Forward(usize),
}
impl Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Right => write!(f, "R"),
Command::Left => write!(f, "L"),
Command::Forward(d) => write!(f, "{d}"),
}
}
}
struct Route(Vec<Command>);
impl Display for Route {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(c) = self.0.first() {
write!(f, "{c}")?;
}
for c in self.0.iter().skip(1) {
write!(f, ",{c}")?;
}
Ok(())
}
}
fn strip_commas(mut input: &str) -> &str {
input = if let Some(i) = input.strip_prefix(',') {
i
} else {
input
};
if let Some(i) = input.strip_suffix(',') {
i
} else {
input
}
}
impl Route {
// return (main, A, B, C)
fn compress_route(&self) -> (String, String, String, String) {
// we need a pattern at the beginning,
// one at the end, and the rest
// for a, scan from start,
// then split string at a, and do same for b
// on remaining string
// if what's left are identical, call that c, and if the prog is
// compressed down to less than 20 chars, return that
// complexity due to commas
let complete = format!("{self}");
for a in complete
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = complete
.split(&a)
.map(strip_commas)
.filter(|f| !f.is_empty())
.collect();
// looking for a prefix in the first fragment:
if let Some(first) = fragments.first() {
for b in first
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = fragments
.iter()
.flat_map(|f| f.split(&b))
.map(strip_commas)
.filter(|f| !f.is_empty() && f != &",")
.collect();
if let Some(&c) = fragments.first() {
let c = c.to_owned();
if fragments
.iter()
.all(|f| f.split(&c).all(|f| f == "," || f.is_empty()))
{
let prog = complete.replace(&a, "A").replace(&b, "B").replace(&c, "C");
if prog.len() <= 20 {
return (prog, a, b, c);
}
}
}
}
}
}
panic!("compression not found")
}
}
fn part_02(input: &str) -> Result<i64, Error> {
let view: View = input.parse()?;
let route = view.compute_route()?;
let (prog, a, b, c) = route.compress_route();
// let data = format!("{route}");
// L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6,R,12,L,10,L,4,L,6,L,10,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6
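// One decomposition of the route above that fits all the 20-character limits
// (the scan may settle on an equivalent alternative):
// main: A,B,A,B,A,C,B,C,A,C
// A = L,6,R,12,L,6 B = R,12,L,10,L,4,L,6 C = L,10,L,10,L,4,L,6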
let mut computer: Computer = input.parse()?;
// switch to command mode
computer.set_at(0, 2);
for b in prog.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in a.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in b.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in c.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
computer.add_input(b'n' as i64);
computer.add_input(10);
let mut dust = 0;
while let Some(o) = computer.wait_until_output() {
if let Ok(b) = u8::try_from(o) {
print!("{}", b as char);
} else {
dust = o;
break;
}
}
Ok(dust)
}
fn main() -> Result<(), Error> {
println!("part 1: {}", part_01(INPUT)?);
println!("part 2: {}", part_02(INPUT)?);
Ok(())
}
#[cfg(test)]
mod tests {}
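// Hedged test sketches for the helpers above (the empty `tests` module
// suggests tests were planned); expected turns follow from the `angle` table.
#[cfg(test)]
mod helper_tests {
use super::{strip_commas, Command, Orientation};
#[test]
fn strip_commas_trims_both_ends() {
assert_eq!(strip_commas(",L,6,R,12,"), "L,6,R,12");
assert_eq!(strip_commas("L,6"), "L,6");
}
#[test]
fn orient_turns() {
assert_eq!(Orientation::Up.orient(Orientation::Right), vec![Command::Right]);
assert_eq!(Orientation::Up.orient(Orientation::Left), vec![Command::Left]);
assert_eq!(Orientation::Up.orient(Orientation::Down), vec![Command::Left, Command::Left]);
}
}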
// val.rs
//! A concrete implementation of `futures::Future`. It is similar in spirit to
//! `futures::Promise`, but is better suited for use with Tokio.
use futures::{Future, Poll, Task};
use std::mem;
use std::cell::Cell;
use std::sync::{Arc, Mutex};
use self::State::*;
/// A future representing the completion of an asynchronous computation.
///
/// This is created by the `pair` function.
pub struct Val<T, E> {
inner: Arc<Inner<T, E>>,
}
/// The `Complete` half of `Val` used to send the result of an asynchronous
/// computation to the consumer of `Val`.
///
/// This is created by the `pair` function.
pub struct Complete<T, E> {
inner: Arc<Inner<T, E>>,
cancellation: Cell<bool>,
}
/// A future representing the cancellation of interest by the consumer of
/// `Val`.
///
/// If a `Val` is dropped without ever attempting to read the value, then it
/// becomes impossible to ever receive the result of the underlying
/// computation. This indicates that there is no interest in the computation
/// and it may be cancelled.
///
/// In this case, this future will be completed. The asynchronous computation
/// is able to listen for this completion and abort work early.
pub struct Cancellation {
inner: Arc<SyncFuture>,
}
// Currently implemented with a mutex, but this is only to get something
// working. This should be rewritten to use a lock free strategy.
struct Inner<T, E> {
state: Mutex<State<T, E>>,
}
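// State machine: `Init` (waiting, callbacks registered) moves to `Completed`
// when the producer finishes, to `Cancelled` when the consumer drops its
// `Val`, and to `Consumed` once `poll` has handed the result out.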
enum State<T, E> {
Init {
consumer: Option<Callback>,
cancellation: Option<Callback>
},
Completed(Option<Result<T, E>>),
Cancelled,
Consumed,
}
type Callback = Box<FnBox>;
/// Create and return a new `Complete` / `Val` pair.
///
/// `Complete` is used to send the result of an asynchronous computation to the
/// consumer of `Val`.
pub fn pair<T, E>() -> (Complete<T, E>, Val<T, E>) {
let inner = Arc::new(Inner {
state: Mutex::new(State::Init {
consumer: None,
cancellation: None,
}),
});
let tx = Complete {
inner: inner.clone(),
cancellation: Cell::new(false),
};
let rx = Val {
inner: inner,
};
(tx, rx)
}
/*
*
* ===== Val =====
*
*/
impl<T, E> Future for Val<T, E>
where T: Send + 'static,
E: Send + 'static,
{
type Item = T;
type Error = E;
fn poll(&mut self, task: &mut Task) -> Poll<T, E> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
impl<T, E> Drop for Val<T, E> {
fn drop(&mut self) {
self.inner.cancel();
}
}
/*
*
* ===== Complete =====
*
*/
impl<T, E> Complete<T, E>
where T: Send + 'static,
E: Send + 'static,
{
/// Successfully complete the associated `Val` with the given value.
pub fn complete(self, val: T) {
self.inner.complete(Some(Ok(val)), false);
}
/// Complete the associated `Val` with the given error
pub fn error(self, err: E) {
self.inner.complete(Some(Err(err)), false);
}
/// Abort the computation. This will cause the associated `Val` to panic on
/// a call to `poll`.
pub fn abort(self) {
self.inner.complete(None, false);
}
/// Returns a `Future` representing the consuming end cancelling interest
/// in the future.
///
/// This function can only be called once.
///
/// # Panics
///
/// A second call to this function will result in a panic.
pub fn cancellation(&self) -> Cancellation {
if self.cancellation.get() {
panic!("cancellation future already obtained");
}
self.cancellation.set(true);
Cancellation { inner: self.inner.clone() }
}
}
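// Dropping a `Complete` without calling `complete`/`error` behaves like
// `abort`: the paired `Val` will panic in `poll` with "Complete dropped
// without producing a value". The `panic: true` flag only controls whether a
// poisoned mutex panics here instead of being ignored.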
impl<T, E> Drop for Complete<T, E> {
fn drop(&mut self) {
self.inner.complete(None, true);
}
}
/*
*
* ===== Cancellation =====
*
*/
impl Future for Cancellation {
type Item = bool;
type Error = ();
fn poll(&mut self, task: &mut Task) -> Poll<bool, ()> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
/*
*
* ===== Inner =====
*
*/
impl<T, E> Inner<T, E> {
/// Complete the future with the given result
fn complete(&self, res: Option<Result<T, E>>, panic: bool) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => {
if panic { panic!("failed to lock mutex") };
return;
}
};
let cb;
match state.take() {
Init { consumer,.. } => cb = consumer,
s => {
if res.is_some() {
panic!("attempting to complete already completed future");
} else {
*state = s;
return;
}
}
}
*state = Completed(res);
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Cancel interest in the future
fn cancel(&self) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => return,
};
let cb;
match state.take() {
Init { cancellation,.. } => cb = cancellation,
Completed(_) | Cancelled | Consumed => {
return; // Cannot cancel from these states
}
}
*state = Cancelled;
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Poll the inner state for a value
fn poll(&self, _: &mut Task) -> Poll<T, E> {
let mut state = self.state.lock().unwrap();
if state.is_complete() {
match state.take() {
Completed(Some(Ok(v))) => Poll::Ok(v),
Completed(Some(Err(e))) => Poll::Err(e),
Completed(None) => panic!("Complete dropped without producing a value"),
Consumed => panic!("Val already consumed"),
_ => unreachable!(),
}
} else {
Poll::NotReady
}
}
/// Associate completion with the given task
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_consumer_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
// A little hacky, but implementing Future on Inner allows losing the generics
// on Cancellation
trait SyncFuture: Send + Sync + 'static {
fn poll(&self, task: &mut Task) -> Poll<bool, ()>;
fn schedule(&self, task: &mut Task);
}
impl<T, E> SyncFuture for Inner<T, E>
where T: Send + 'static,
E: Send + 'static,
{
fn poll(&self, _: &mut Task) -> Poll<bool, ()> {
let state = self.state.lock().unwrap();
match *state {
Init {.. } => Poll::NotReady,
Cancelled => Poll::Ok(true),
_ => Poll::Ok(false),
}
}
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_cancellation_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
impl<T, E> State<T, E> {
fn in_flight(&self) -> bool {
match *self {
Init {.. } => true,
_ => false,
}
}
/// Returns true if in a completed state.
fn is_complete(&self) -> bool {
match *self {
Completed(_) | Consumed => true,
_ => false,
}
}
fn set_consumer_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut consumer,.. } => *consumer = Some(cb),
_ => panic!("unexpected state"),
}
}
fn set_cancellation_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut cancellation,.. } => *cancellation = Some(cb),
_ => panic!("unexpected state"),
}
}
/// Sets the current state to Consumed and returns the original value
fn take(&mut self) -> State<T, E> {
mem::replace(self, State::Consumed)
}
}
trait FnBox: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnBox for F
where F: FnOnce() + Send + 'static
{
fn call_box(self: Box<F>) {
(*self)()
}
}
#[cfg(test)]
mod test {
use super::*;
use futures::Future;
use std::sync::mpsc;
#[test]
fn test_complete_after_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
c.complete(123);
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_complete_before_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.complete(123);
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_polling_aborted_future_panics() {
use std::thread;
let res = thread::spawn(|| {
let (c, val) = pair::<u32, ()>();
val.then(move |res| {
println!("WAT: {:?}", res);
res
}).forget();
c.abort();
});
assert!(res.join().is_err());
}
#[test]
fn test_cancellation_future() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.cancellation().then(move |res| {
tx.send(123).unwrap();
res
}).forget();
assert!(rx.try_recv().is_err());
drop(val);
assert_eq!(123, rx.recv().unwrap());
}
}
val.rs | //! A concrete implementation of `futures::Future`. It is similar in spirit as
//! `futures::Promise`, but is better suited for use with Tokio.
use futures::{Future, Poll, Task};
use std::mem;
use std::cell::Cell;
use std::sync::{Arc, Mutex};
use self::State::*;
/// A future representing the completion of an asynchronous computation.
///
/// This is created by the `pair` function.
pub struct Val<T, E> {
inner: Arc<Inner<T, E>>,
}
/// The `Complete` half of `Val` used to send the result of an asynchronous
/// computation to the consumer of `Val`.
///
/// This is created by the `pair` function.
pub struct Complete<T, E> {
inner: Arc<Inner<T, E>>,
cancellation: Cell<bool>,
}
/// A future representing the cancellation in interest by the consumer of
/// `Val`.
///
/// If a `Val` is dropped without ever attempting to read the value, then it
/// becomes impossible to ever receive the result of the underlying
/// computation. This indicates that there is no interest in the computation
/// and it may be cancelled.
///
/// In this case, this future will be completed. The asynchronous computation
/// is able to listen for this completion and abort work early.
pub struct Cancellation {
inner: Arc<SyncFuture>,
}
// Currently implemented with a mutex, but this is only to get something
// working. This should be rewritten to use a lock free strategy.
struct Inner<T, E> {
state: Mutex<State<T, E>>,
}
enum State<T, E> {
Init {
consumer: Option<Callback>,
cancellation: Option<Callback>
},
Completed(Option<Result<T, E>>),
Cancelled,
Consumed,
}
type Callback = Box<FnBox>;
/// Create and return a new `Complete` / `Val` pair.
///
/// `Complete` is used to send the result of an asynchronous computation to the
/// consumer of `Val`.
pub fn pair<T, E>() -> (Complete<T, E>, Val<T, E>) {
let inner = Arc::new(Inner {
state: Mutex::new(State::Init {
consumer: None,
cancellation: None,
}),
});
let tx = Complete {
inner: inner.clone(),
cancellation: Cell::new(false),
};
let rx = Val {
inner: inner,
};
(tx, rx)
}
/*
*
* ===== Val =====
*
*/
impl<T, E> Future for Val<T, E>
where T: Send +'static,
E: Send +'static,
{
type Item = T;
type Error = E;
fn poll(&mut self, task: &mut Task) -> Poll<T, E> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
impl<T, E> Drop for Val<T, E> {
fn drop(&mut self) {
self.inner.cancel();
}
}
/*
*
* ===== Complete =====
*
*/
impl<T, E> Complete<T, E>
where T: Send +'static,
E: Send +'static,
{
/// Successfully complete the associated `Val` with the given value.
pub fn complete(self, val: T) {
self.inner.complete(Some(Ok(val)), false);
}
/// Complete the associated `Val` with the given error
pub fn error(self, err: E) {
self.inner.complete(Some(Err(err)), false);
}
/// Abort the computation. This will cause the associated `Val` to panic on
/// a call to `poll`.
pub fn abort(self) {
self.inner.complete(None, false);
}
/// Returns a `Future` representing the consuming end cancelling interest
/// in the future.
///
/// This function can only be called once.
///
/// # Panics
///
/// A second call to this function will result in a panic.
pub fn cancellation(&self) -> Cancellation {
if self.cancellation.get() {
panic!("cancellation future already obtained");
}
self.cancellation.set(true);
Cancellation { inner: self.inner.clone() }
}
}
impl<T, E> Drop for Complete<T, E> {
fn drop(&mut self) {
self.inner.complete(None, true);
}
}
/*
*
* ===== Cancellation =====
*
*/
impl Future for Cancellation {
type Item = bool;
type Error = ();
fn poll(&mut self, task: &mut Task) -> Poll<bool, ()> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
/*
*
* ===== Inner =====
*
*/
impl<T, E> Inner<T, E> {
/// Complete the future with the given result
fn complete(&self, res: Option<Result<T, E>>, panic: bool) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => {
if panic { panic!("failed to lock mutex") };
return;
}
};
let cb;
match state.take() {
Init { consumer,.. } => cb = consumer,
s => {
if res.is_some() {
panic!("attempting to complete already completed future");
} else {
*state = s;
return;
}
}
}
*state = Completed(res);
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Cancel interest in the future
fn cancel(&self) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => return,
};
let cb;
match state.take() {
Init { cancellation,.. } => cb = cancellation,
Completed(_) | Cancelled | Consumed => {
return; // Cannot cancel from these states
}
}
*state = Cancelled;
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Poll the inner state for a value
fn poll(&self, _: &mut Task) -> Poll<T, E> {
let mut state = self.state.lock().unwrap();
if state.is_complete() {
match state.take() {
Completed(Some(Ok(v))) => Poll::Ok(v),
Completed(Some(Err(e))) => Poll::Err(e),
Completed(None) => panic!("Complete dropped without producing a value"),
Consumed => panic!("Val already consumed"),
_ => unreachable!(),
}
} else {
Poll::NotReady
}
}
/// Associate completion with the given task
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_consumer_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
// A little hacky, but implementing Future on Inner allows losing the generics
// on Cancellation
trait SyncFuture: Send + Sync +'static {
fn poll(&self, task: &mut Task) -> Poll<bool, ()>;
fn schedule(&self, task: &mut Task);
}
impl<T, E> SyncFuture for Inner<T, E>
where T: Send +'static,
E: Send +'static,
{
fn poll(&self, _: &mut Task) -> Poll<bool, ()> {
let state = self.state.lock().unwrap();
match *state {
Init {.. } => Poll::NotReady,
Cancelled => Poll::Ok(true),
_ => Poll::Ok(false),
}
}
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_cancellation_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
impl<T, E> State<T, E> {
fn in_flight(&self) -> bool {
match *self {
Init {.. } => true,
_ => false,
}
}
/// Returns true if in a completed state.
fn is_complete(&self) -> bool {
match *self {
Completed(_) | Consumed => true,
_ => false,
}
}
fn set_consumer_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut consumer,.. } => *consumer = Some(cb),
_ => panic!("unexpected state"),
}
}
fn set_cancellation_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut cancellation,.. } => *cancellation = Some(cb),
_ => panic!("unexpected state"),
}
}
/// Sets the current state to Consumed and returns the original value
fn take(&mut self) -> State<T, E> {
mem::replace(self, State::Consumed)
}
}
trait FnBox: Send +'static {
fn call_box(self: Box<Self>);
}
impl<F> FnBox for F
where F: FnOnce() + Send +'static
{
fn call_box(self: Box<F>) {
(*self)()
}
}
#[cfg(test)]
mod test {
use super::*;
use futures::Future;
use std::sync::mpsc;
#[test]
fn test_complete_after_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
c.complete(123);
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_complete_before_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.complete(123); | }).forget();
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_polling_aborted_future_panics() {
use std::thread;
let res = thread::spawn(|| {
let (c, val) = pair::<u32, ()>();
val.then(move |res| {
println!("WAT: {:?}", res);
res
}).forget();
c.abort();
});
assert!(res.join().is_err());
}
#[test]
fn test_cancellation_future() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.cancellation().then(move |res| {
tx.send(123).unwrap();
res
}).forget();
assert!(rx.try_recv().is_err());
drop(val);
assert_eq!(123, rx.recv().unwrap());
}
} |
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res | random_line_split |
val.rs | //! A concrete implementation of `futures::Future`. It is similar in spirit as
//! `futures::Promise`, but is better suited for use with Tokio.
use futures::{Future, Poll, Task};
use std::mem;
use std::cell::Cell;
use std::sync::{Arc, Mutex};
use self::State::*;
/// A future representing the completion of an asynchronous computation.
///
/// This is created by the `pair` function.
pub struct Val<T, E> {
inner: Arc<Inner<T, E>>,
}
/// The `Complete` half of `Val` used to send the result of an asynchronous
/// computation to the consumer of `Val`.
///
/// This is created by the `pair` function.
pub struct Complete<T, E> {
inner: Arc<Inner<T, E>>,
cancellation: Cell<bool>,
}
/// A future representing the cancellation in interest by the consumer of
/// `Val`.
///
/// If a `Val` is dropped without ever attempting to read the value, then it
/// becomes impossible to ever receive the result of the underlying
/// computation. This indicates that there is no interest in the computation
/// and it may be cancelled.
///
/// In this case, this future will be completed. The asynchronous computation
/// is able to listen for this completion and abort work early.
pub struct Cancellation {
inner: Arc<SyncFuture>,
}
// Currently implemented with a mutex, but this is only to get something
// working. This should be rewritten to use a lock-free strategy.
struct Inner<T, E> {
state: Mutex<State<T, E>>,
}
enum State<T, E> {
Init {
consumer: Option<Callback>,
cancellation: Option<Callback>
},
Completed(Option<Result<T, E>>),
Cancelled,
Consumed,
}
type Callback = Box<FnBox>;
/// Create and return a new `Complete` / `Val` pair.
///
/// `Complete` is used to send the result of an asynchronous computation to the
/// consumer of `Val`.
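///
/// # Examples
///
/// A minimal sketch of the intended flow (illustrative only; uses just the
/// API defined in this file):
///
/// ```ignore
/// let (tx, rx) = pair::<u32, ()>();
/// // Hand `rx` to the consumer, then fulfil the computation:
/// tx.complete(42); // a consumer polling `rx` now observes Poll::Ok(42)
/// ```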
pub fn pair<T, E>() -> (Complete<T, E>, Val<T, E>) {
let inner = Arc::new(Inner {
state: Mutex::new(State::Init {
consumer: None,
cancellation: None,
}),
});
let tx = Complete {
inner: inner.clone(),
cancellation: Cell::new(false),
};
let rx = Val {
inner: inner,
};
(tx, rx)
}
/*
*
* ===== Val =====
*
*/
impl<T, E> Future for Val<T, E>
where T: Send + 'static,
E: Send + 'static,
{
type Item = T;
type Error = E;
fn poll(&mut self, task: &mut Task) -> Poll<T, E> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
impl<T, E> Drop for Val<T, E> {
fn drop(&mut self) {
self.inner.cancel();
}
}
/*
*
* ===== Complete =====
*
*/
impl<T, E> Complete<T, E>
where T: Send + 'static,
E: Send + 'static,
{
/// Successfully complete the associated `Val` with the given value.
pub fn complete(self, val: T) {
self.inner.complete(Some(Ok(val)), false);
}
/// Complete the associated `Val` with the given error
pub fn error(self, err: E) |
/// Abort the computation. This will cause the associated `Val` to panic on
/// a call to `poll`.
pub fn abort(self) {
self.inner.complete(None, false);
}
/// Returns a `Future` representing the consuming end cancelling interest
/// in the future.
///
/// This function can only be called once.
///
/// # Panics
///
/// A second call to this function will result in a panic.
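///
/// # Examples
///
/// A sketch mirroring `test_cancellation_future` below: dropping the `Val`
/// resolves the cancellation future with `true`.
///
/// ```ignore
/// let (tx, rx) = pair::<u32, ()>();
/// let cancelled = tx.cancellation();
/// drop(rx); // `cancelled` now polls as Poll::Ok(true)
/// ```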
pub fn cancellation(&self) -> Cancellation {
if self.cancellation.get() {
panic!("cancellation future already obtained");
}
self.cancellation.set(true);
Cancellation { inner: self.inner.clone() }
}
}
impl<T, E> Drop for Complete<T, E> {
fn drop(&mut self) {
self.inner.complete(None, true);
}
}
/*
*
* ===== Cancellation =====
*
*/
impl Future for Cancellation {
type Item = bool;
type Error = ();
fn poll(&mut self, task: &mut Task) -> Poll<bool, ()> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
/*
*
* ===== Inner =====
*
*/
impl<T, E> Inner<T, E> {
/// Complete the future with the given result
fn complete(&self, res: Option<Result<T, E>>, panic: bool) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => {
if panic { panic!("failed to lock mutex") };
return;
}
};
let cb;
match state.take() {
Init { consumer, .. } => cb = consumer,
s => {
if res.is_some() {
panic!("attempting to complete already completed future");
} else {
*state = s;
return;
}
}
}
*state = Completed(res);
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Cancel interest in the future
fn cancel(&self) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => return,
};
let cb;
match state.take() {
Init { cancellation, .. } => cb = cancellation,
Completed(_) | Cancelled | Consumed => {
return; // Cannot cancel from these states
}
}
*state = Cancelled;
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Poll the inner state for a value
fn poll(&self, _: &mut Task) -> Poll<T, E> {
let mut state = self.state.lock().unwrap();
if state.is_complete() {
match state.take() {
Completed(Some(Ok(v))) => Poll::Ok(v),
Completed(Some(Err(e))) => Poll::Err(e),
Completed(None) => panic!("Complete dropped without producing a value"),
Consumed => panic!("Val already consumed"),
_ => unreachable!(),
}
} else {
Poll::NotReady
}
}
/// Associate completion with the given task
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_consumer_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
// A little hacky, but implementing Future on Inner allows losing the generics
// on Cancellation
trait SyncFuture: Send + Sync + 'static {
fn poll(&self, task: &mut Task) -> Poll<bool, ()>;
fn schedule(&self, task: &mut Task);
}
impl<T, E> SyncFuture for Inner<T, E>
where T: Send + 'static,
E: Send + 'static,
{
fn poll(&self, _: &mut Task) -> Poll<bool, ()> {
let state = self.state.lock().unwrap();
match *state {
Init { .. } => Poll::NotReady,
Cancelled => Poll::Ok(true),
_ => Poll::Ok(false),
}
}
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_cancellation_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
impl<T, E> State<T, E> {
fn in_flight(&self) -> bool {
match *self {
Init { .. } => true,
_ => false,
}
}
/// Returns true if in a completed state.
fn is_complete(&self) -> bool {
match *self {
Completed(_) | Consumed => true,
_ => false,
}
}
fn set_consumer_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut consumer, .. } => *consumer = Some(cb),
_ => panic!("unexpected state"),
}
}
fn set_cancellation_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut cancellation, .. } => *cancellation = Some(cb),
_ => panic!("unexpected state"),
}
}
/// Sets the current state to Consumed and returns the original value
fn take(&mut self) -> State<T, E> {
mem::replace(self, State::Consumed)
}
}
trait FnBox: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnBox for F
where F: FnOnce() + Send + 'static
{
fn call_box(self: Box<F>) {
(*self)()
}
}
#[cfg(test)]
mod test {
use super::*;
use futures::Future;
use std::sync::mpsc;
#[test]
fn test_complete_after_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
c.complete(123);
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_complete_before_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.complete(123);
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_polling_aborted_future_panics() {
use std::thread;
let res = thread::spawn(|| {
let (c, val) = pair::<u32, ()>();
val.then(move |res| {
println!("WAT: {:?}", res);
res
}).forget();
c.abort();
});
assert!(res.join().is_err());
}
#[test]
fn test_cancellation_future() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.cancellation().then(move |res| {
tx.send(123).unwrap();
res
}).forget();
assert!(rx.try_recv().is_err());
drop(val);
assert_eq!(123, rx.recv().unwrap());
}
}
| {
self.inner.complete(Some(Err(err)), false);
} | identifier_body |
parser.rs | #[cfg(feature = "encoding")]
use encoding_rs::UTF_8;
use crate::encoding::Decoder;
use crate::errors::{Error, Result};
use crate::events::{BytesCData, BytesDecl, BytesEnd, BytesStart, BytesText, Event};
#[cfg(feature = "encoding")]
use crate::reader::EncodingRef;
use crate::reader::{is_whitespace, BangType, ParseState};
use memchr;
/// A struct that holds a current parse state and a parser configuration.
/// It is independent of the way data is read: the reader feeds data into it and
/// gets back the produced [`Event`]s.
#[derive(Clone)]
pub(super) struct Parser {
/// Number of bytes read from the source of data since the parser was created
pub offset: usize,
/// Defines how to process next byte
pub state: ParseState,
/// Expand empty element into an opening and closing element
pub expand_empty_elements: bool,
/// Trims leading whitespace in Text events, skipping the event if the text is empty
pub trim_text_start: bool,
/// Trims trailing whitespace in Text events.
pub trim_text_end: bool,
/// Trims trailing whitespaces from markup names in closing tags `</a >`
pub trim_markup_names_in_closing_tags: bool,
/// Check if [`Event::End`] nodes match last [`Event::Start`] node
pub check_end_names: bool,
/// Check whether comments contain `--` (false by default)
pub check_comments: bool,
/// All currently Started elements which didn't have a matching
/// End element yet.
///
/// For an XML
///
/// ```xml
/// <root><one/><inner attr="value">|<tag></inner></root>
/// ```
/// when cursor at the `|` position buffer contains:
///
/// ```text
/// rootinner
/// ^ ^
/// ```
///
/// The `^` symbols show which positions are stored in [`Self::opened_starts`]
/// (0 and 4 in that case).
opened_buffer: Vec<u8>,
/// Opened name start indexes into [`Self::opened_buffer`]. See documentation
/// for that field for details
opened_starts: Vec<usize>,
#[cfg(feature = "encoding")]
/// Reference to the encoding used to read an XML
pub encoding: EncodingRef,
}
impl Parser {
/// Trims whitespaces from `bytes`, if required, and returns a [`Text`] event.
///
/// # Parameters
/// - `bytes`: data from the start of stream to the first `<` or from `>` to `<`
///
/// [`Text`]: Event::Text
pub fn | <'b>(&mut self, bytes: &'b [u8]) -> Result<Event<'b>> {
let mut content = bytes;
if self.trim_text_end {
// Skip the ending '<'
let len = bytes
.iter()
.rposition(|&b| !is_whitespace(b))
.map_or_else(|| bytes.len(), |p| p + 1);
content = &bytes[..len];
}
Ok(Event::Text(BytesText::wrap(content, self.decoder())))
}
/// Reads a `BytesElement` starting with a `!` and
/// returns a `Comment`, `CData`, or `DocType` event
pub fn emit_bang<'b>(&mut self, bang_type: BangType, buf: &'b [u8]) -> Result<Event<'b>> {
let uncased_starts_with = |string: &[u8], prefix: &[u8]| {
string.len() >= prefix.len() && string[..prefix.len()].eq_ignore_ascii_case(prefix)
};
let len = buf.len();
match bang_type {
BangType::Comment if buf.starts_with(b"!--") => {
debug_assert!(buf.ends_with(b"--"));
if self.check_comments {
// make sure '--' does not occur inside the comment
if let Some(p) = memchr::memchr_iter(b'-', &buf[3..len - 2])
.position(|p| buf[3 + p + 1] == b'-')
{
self.offset += len - p;
return Err(Error::UnexpectedToken("--".to_string()));
}
}
Ok(Event::Comment(BytesText::wrap(
&buf[3..len - 2],
self.decoder(),
)))
}
BangType::CData if uncased_starts_with(buf, b"![CDATA[") => {
debug_assert!(buf.ends_with(b"]]"));
Ok(Event::CData(BytesCData::wrap(
&buf[8..len - 2],
self.decoder(),
)))
}
BangType::DocType if uncased_starts_with(buf, b"!DOCTYPE") => {
let start = buf[8..]
.iter()
.position(|b| !is_whitespace(*b))
.unwrap_or(len - 8);
if start + 8 >= len {
return Err(Error::EmptyDocType);
}
Ok(Event::DocType(BytesText::wrap(
&buf[8 + start..],
self.decoder(),
)))
}
_ => Err(bang_type.to_err()),
}
}
/// Wraps content of `buf` into the [`Event::End`] event. Does the check that
/// end name matches the last opened start name if `self.check_end_names` is set.
pub fn emit_end<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
// XML standard permits whitespaces after the markup name in closing tags.
// Let's strip them from the buffer before comparing tag names.
let name = if self.trim_markup_names_in_closing_tags {
if let Some(pos_end_name) = buf[1..].iter().rposition(|&b| !b.is_ascii_whitespace()) {
let (name, _) = buf[1..].split_at(pos_end_name + 1);
name
} else {
&buf[1..]
}
} else {
&buf[1..]
};
let decoder = self.decoder();
let mismatch_err = |expected: String, found: &[u8], offset: &mut usize| {
*offset -= buf.len();
Err(Error::EndEventMismatch {
expected,
found: decoder.decode(found).unwrap_or_default().into_owned(),
})
};
// Get the index in self.opened_buffer of the name of the last opened tag
match self.opened_starts.pop() {
Some(start) => {
if self.check_end_names {
let expected = &self.opened_buffer[start..];
if name != expected {
let expected = decoder.decode(expected).unwrap_or_default().into_owned();
// #513: In order to allow error recovery we should drop content of the buffer
self.opened_buffer.truncate(start);
return mismatch_err(expected, name, &mut self.offset);
}
}
self.opened_buffer.truncate(start);
}
None => {
if self.check_end_names {
return mismatch_err("".to_string(), &buf[1..], &mut self.offset);
}
}
}
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// Reads a `BytesElement` starting with a `?` and
/// returns a `Decl` or `PI` event
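///
/// For illustration (behavior inferred from the body below): a buffer of
/// `?xml version="1.0"?` yields `Decl`, while `?php echo 1; ?` yields `PI`.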
pub fn emit_question_mark<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
let len = buf.len();
if len > 2 && buf[len - 1] == b'?' {
if len > 5 && &buf[1..4] == b"xml" && is_whitespace(buf[4]) {
let event = BytesDecl::from_start(BytesStart::wrap(&buf[1..len - 1], 3));
// Try getting encoding from the declaration event
#[cfg(feature = "encoding")]
if self.encoding.can_be_refined() {
if let Some(encoding) = event.encoder() {
self.encoding = EncodingRef::XmlDetected(encoding);
}
}
Ok(Event::Decl(event))
} else {
Ok(Event::PI(BytesText::wrap(&buf[1..len - 1], self.decoder())))
}
} else {
self.offset -= len;
Err(Error::UnexpectedEof("XmlDecl".to_string()))
}
}
/// Converts content of a tag to a `Start` or an `Empty` event
///
/// # Parameters
/// - `content`: Content of a tag between `<` and `>`
pub fn emit_start<'b>(&mut self, content: &'b [u8]) -> Result<Event<'b>> {
let len = content.len();
let name_end = content
.iter()
.position(|&b| is_whitespace(b))
.unwrap_or(len);
if let Some(&b'/') = content.last() {
// This is a self-closing tag `<something/>`
let name_len = if name_end < len { name_end } else { len - 1 };
let event = BytesStart::wrap(&content[..len - 1], name_len);
if self.expand_empty_elements {
self.state = ParseState::Empty;
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_len]);
Ok(Event::Start(event))
} else {
Ok(Event::Empty(event))
}
} else {
// #514: Always store the name even when .check_end_names == false,
// because the checks can be temporarily disabled, and when they are
// re-enabled we should have that information
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_end]);
Ok(Event::Start(BytesStart::wrap(content, name_end)))
}
}
#[inline]
pub fn close_expanded_empty(&mut self) -> Result<Event<'static>> {
self.state = ParseState::ClosedTag;
let name = self
.opened_buffer
.split_off(self.opened_starts.pop().unwrap());
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// Get the decoder used to decode the bytes read by this reader into strings.
///
/// If `encoding` feature is enabled, the used encoding may change after
/// parsing the XML declaration, otherwise encoding is fixed to UTF-8.
///
/// If `encoding` feature is enabled and no encoding is specified in declaration,
/// defaults to UTF-8.
pub fn decoder(&self) -> Decoder {
Decoder {
#[cfg(feature = "encoding")]
encoding: self.encoding.encoding(),
}
}
}
impl Default for Parser {
fn default() -> Self {
Self {
offset: 0,
state: ParseState::Init,
expand_empty_elements: false,
trim_text_start: false,
trim_text_end: false,
trim_markup_names_in_closing_tags: true,
check_end_names: true,
check_comments: false,
opened_buffer: Vec::new(),
opened_starts: Vec::new(),
#[cfg(feature = "encoding")]
encoding: EncodingRef::Implicit(UTF_8),
}
}
}
| emit_text | identifier_name |
parser.rs | #[cfg(feature = "encoding")]
use encoding_rs::UTF_8;
use crate::encoding::Decoder;
use crate::errors::{Error, Result};
use crate::events::{BytesCData, BytesDecl, BytesEnd, BytesStart, BytesText, Event};
#[cfg(feature = "encoding")]
use crate::reader::EncodingRef;
use crate::reader::{is_whitespace, BangType, ParseState};
use memchr;
/// A struct that holds a current parse state and a parser configuration.
/// It is independent of the way data is read: the reader feeds data into it and consumers
/// get back produced [`Event`]s. | /// Defines how to process next byte
pub state: ParseState,
/// Expand empty element into an opening and closing element
pub expand_empty_elements: bool,
/// Trims leading whitespace in Text events, skipping the event if the text is empty
pub trim_text_start: bool,
/// Trims trailing whitespace in Text events.
pub trim_text_end: bool,
/// Trims trailing whitespaces from markup names in closing tags `</a >`
pub trim_markup_names_in_closing_tags: bool,
/// Check if [`Event::End`] nodes match last [`Event::Start`] node
pub check_end_names: bool,
/// Check whether comments contain `--` (false by default)
pub check_comments: bool,
/// All currently Started elements which didn't have a matching
/// End element yet.
///
/// For an XML
///
/// ```xml
/// <root><one/><inner attr="value">|<tag></inner></root>
/// ```
/// when cursor at the `|` position buffer contains:
///
/// ```text
/// rootinner
/// ^ ^
/// ```
///
/// The `^` symbols show which positions are stored in [`Self::opened_starts`]
/// (0 and 4 in that case).
opened_buffer: Vec<u8>,
/// Opened name start indexes into [`Self::opened_buffer`]. See documentation
/// for that field for details
opened_starts: Vec<usize>,
#[cfg(feature = "encoding")]
/// Reference to the encoding used to read an XML
pub encoding: EncodingRef,
}
impl Parser {
/// Trims whitespaces from `bytes`, if required, and returns a [`Text`] event.
///
/// # Parameters
/// - `bytes`: data from the start of stream to the first `<` or from `>` to `<`
///
/// [`Text`]: Event::Text
pub fn emit_text<'b>(&mut self, bytes: &'b [u8]) -> Result<Event<'b>> {
let mut content = bytes;
if self.trim_text_end {
// Skip the ending '<'
let len = bytes
.iter()
.rposition(|&b| !is_whitespace(b))
.map_or_else(|| bytes.len(), |p| p + 1);
content = &bytes[..len];
}
Ok(Event::Text(BytesText::wrap(content, self.decoder())))
}
/// Reads a `BytesElement` starting with a `!` and
/// returns a `Comment`, `CData`, or `DocType` event
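///
/// For illustration (behavior inferred from the body below): a buffer of
/// `!-- x --` yields a `Comment` with content ` x `, and `![CDATA[x]]`
/// yields a `CData` event with content `x`.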
pub fn emit_bang<'b>(&mut self, bang_type: BangType, buf: &'b [u8]) -> Result<Event<'b>> {
let uncased_starts_with = |string: &[u8], prefix: &[u8]| {
string.len() >= prefix.len() && string[..prefix.len()].eq_ignore_ascii_case(prefix)
};
let len = buf.len();
match bang_type {
BangType::Comment if buf.starts_with(b"!--") => {
debug_assert!(buf.ends_with(b"--"));
if self.check_comments {
// make sure '--' does not occur inside the comment
if let Some(p) = memchr::memchr_iter(b'-', &buf[3..len - 2])
.position(|p| buf[3 + p + 1] == b'-')
{
self.offset += len - p;
return Err(Error::UnexpectedToken("--".to_string()));
}
}
Ok(Event::Comment(BytesText::wrap(
&buf[3..len - 2],
self.decoder(),
)))
}
BangType::CData if uncased_starts_with(buf, b"![CDATA[") => {
debug_assert!(buf.ends_with(b"]]"));
Ok(Event::CData(BytesCData::wrap(
&buf[8..len - 2],
self.decoder(),
)))
}
BangType::DocType if uncased_starts_with(buf, b"!DOCTYPE") => {
let start = buf[8..]
.iter()
.position(|b| !is_whitespace(*b))
.unwrap_or(len - 8);
if start + 8 >= len {
return Err(Error::EmptyDocType);
}
Ok(Event::DocType(BytesText::wrap(
&buf[8 + start..],
self.decoder(),
)))
}
_ => Err(bang_type.to_err()),
}
}
/// Wraps content of `buf` into the [`Event::End`] event. Does the check that
/// end name matches the last opened start name if `self.check_end_names` is set.
pub fn emit_end<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
// XML standard permits whitespaces after the markup name in closing tags.
// Let's strip them from the buffer before comparing tag names.
let name = if self.trim_markup_names_in_closing_tags {
if let Some(pos_end_name) = buf[1..].iter().rposition(|&b| !b.is_ascii_whitespace()) {
let (name, _) = buf[1..].split_at(pos_end_name + 1);
name
} else {
&buf[1..]
}
} else {
&buf[1..]
};
let decoder = self.decoder();
let mismatch_err = |expected: String, found: &[u8], offset: &mut usize| {
*offset -= buf.len();
Err(Error::EndEventMismatch {
expected,
found: decoder.decode(found).unwrap_or_default().into_owned(),
})
};
// Get the index in self.opened_buffer of the name of the last opened tag
match self.opened_starts.pop() {
Some(start) => {
if self.check_end_names {
let expected = &self.opened_buffer[start..];
if name != expected {
let expected = decoder.decode(expected).unwrap_or_default().into_owned();
// #513: In order to allow error recovery we should drop content of the buffer
self.opened_buffer.truncate(start);
return mismatch_err(expected, name, &mut self.offset);
}
}
self.opened_buffer.truncate(start);
}
None => {
if self.check_end_names {
return mismatch_err("".to_string(), &buf[1..], &mut self.offset);
}
}
}
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// Reads a `BytesElement` starting with a `?` and
/// returns a `Decl` or `PI` event
pub fn emit_question_mark<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
let len = buf.len();
if len > 2 && buf[len - 1] == b'?' {
if len > 5 && &buf[1..4] == b"xml" && is_whitespace(buf[4]) {
let event = BytesDecl::from_start(BytesStart::wrap(&buf[1..len - 1], 3));
// Try getting encoding from the declaration event
#[cfg(feature = "encoding")]
if self.encoding.can_be_refined() {
if let Some(encoding) = event.encoder() {
self.encoding = EncodingRef::XmlDetected(encoding);
}
}
Ok(Event::Decl(event))
} else {
Ok(Event::PI(BytesText::wrap(&buf[1..len - 1], self.decoder())))
}
} else {
self.offset -= len;
Err(Error::UnexpectedEof("XmlDecl".to_string()))
}
}
/// Converts content of a tag to a `Start` or an `Empty` event
///
/// # Parameters
/// - `content`: Content of a tag between `<` and `>`
pub fn emit_start<'b>(&mut self, content: &'b [u8]) -> Result<Event<'b>> {
let len = content.len();
let name_end = content
.iter()
.position(|&b| is_whitespace(b))
.unwrap_or(len);
if let Some(&b'/') = content.last() {
// This is a self-closing tag `<something/>`
let name_len = if name_end < len { name_end } else { len - 1 };
let event = BytesStart::wrap(&content[..len - 1], name_len);
if self.expand_empty_elements {
self.state = ParseState::Empty;
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_len]);
Ok(Event::Start(event))
} else {
Ok(Event::Empty(event))
}
} else {
// #514: Always store the name even when .check_end_names == false,
// because the checks can be temporarily disabled, and when they are
// re-enabled we should have that information
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_end]);
Ok(Event::Start(BytesStart::wrap(content, name_end)))
}
}
#[inline]
pub fn close_expanded_empty(&mut self) -> Result<Event<'static>> {
self.state = ParseState::ClosedTag;
let name = self
.opened_buffer
.split_off(self.opened_starts.pop().unwrap());
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// Get the decoder used to decode the bytes read by this reader into strings.
///
/// If `encoding` feature is enabled, the used encoding may change after
/// parsing the XML declaration, otherwise encoding is fixed to UTF-8.
///
/// If `encoding` feature is enabled and no encoding is specified in declaration,
/// defaults to UTF-8.
pub fn decoder(&self) -> Decoder {
Decoder {
#[cfg(feature = "encoding")]
encoding: self.encoding.encoding(),
}
}
}
impl Default for Parser {
fn default() -> Self {
Self {
offset: 0,
state: ParseState::Init,
expand_empty_elements: false,
trim_text_start: false,
trim_text_end: false,
trim_markup_names_in_closing_tags: true,
check_end_names: true,
check_comments: false,
opened_buffer: Vec::new(),
opened_starts: Vec::new(),
#[cfg(feature = "encoding")]
encoding: EncodingRef::Implicit(UTF_8),
}
}
} | #[derive(Clone)]
pub(super) struct Parser {
/// Number of bytes read from the source of data since the parser was created
pub offset: usize, | random_line_split |
parser.rs | #[cfg(feature = "encoding")]
use encoding_rs::UTF_8;
use crate::encoding::Decoder;
use crate::errors::{Error, Result};
use crate::events::{BytesCData, BytesDecl, BytesEnd, BytesStart, BytesText, Event};
#[cfg(feature = "encoding")]
use crate::reader::EncodingRef;
use crate::reader::{is_whitespace, BangType, ParseState};
use memchr;
/// A struct that holds a current parse state and a parser configuration.
/// It is independent of the way data is read: the reader feeds data into it and
/// gets back the produced [`Event`]s.
#[derive(Clone)]
pub(super) struct Parser {
/// Number of bytes read from the source of data since the parser was created
pub offset: usize,
/// Defines how to process next byte
pub state: ParseState,
/// Expand empty element into an opening and closing element
pub expand_empty_elements: bool,
/// Trims leading whitespace in Text events, skipping the event if the text is empty
pub trim_text_start: bool,
/// Trims trailing whitespace in Text events.
pub trim_text_end: bool,
/// Trims trailing whitespaces from markup names in closing tags `</a >`
pub trim_markup_names_in_closing_tags: bool,
/// Check if [`Event::End`] nodes match last [`Event::Start`] node
pub check_end_names: bool,
/// Check whether comments contain `--` (false by default)
pub check_comments: bool,
/// All currently Started elements which didn't have a matching
/// End element yet.
///
/// For an XML
///
/// ```xml
/// <root><one/><inner attr="value">|<tag></inner></root>
/// ```
/// when cursor at the `|` position buffer contains:
///
/// ```text
/// rootinner
/// ^ ^
/// ```
///
/// The `^` symbols show which positions are stored in [`Self::opened_starts`]
/// (0 and 4 in that case).
opened_buffer: Vec<u8>,
/// Opened name start indexes into [`Self::opened_buffer`]. See documentation
/// for that field for details
opened_starts: Vec<usize>,
#[cfg(feature = "encoding")]
/// Reference to the encoding used to read an XML
pub encoding: EncodingRef,
}
impl Parser {
/// Trims whitespaces from `bytes`, if required, and returns a [`Text`] event.
///
/// # Parameters
/// - `bytes`: data from the start of stream to the first `<` or from `>` to `<`
///
/// [`Text`]: Event::Text
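///
/// For illustration (behavior inferred from the body below): with
/// `trim_text_end == true`, input `b"hello  "` produces a `Text` event
/// whose content is `b"hello"`.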
pub fn emit_text<'b>(&mut self, bytes: &'b [u8]) -> Result<Event<'b>> {
let mut content = bytes;
if self.trim_text_end {
// Skip the ending '<'
let len = bytes
.iter()
.rposition(|&b| !is_whitespace(b))
.map_or_else(|| bytes.len(), |p| p + 1);
content = &bytes[..len];
}
Ok(Event::Text(BytesText::wrap(content, self.decoder())))
}
/// Reads a `BytesElement` starting with a `!` and
/// returns a `Comment`, `CData`, or `DocType` event
pub fn emit_bang<'b>(&mut self, bang_type: BangType, buf: &'b [u8]) -> Result<Event<'b>> {
let uncased_starts_with = |string: &[u8], prefix: &[u8]| {
string.len() >= prefix.len() && string[..prefix.len()].eq_ignore_ascii_case(prefix)
};
let len = buf.len();
match bang_type {
BangType::Comment if buf.starts_with(b"!--") => {
debug_assert!(buf.ends_with(b"--"));
if self.check_comments {
// make sure '--' does not occur inside the comment
if let Some(p) = memchr::memchr_iter(b'-', &buf[3..len - 2])
.position(|p| buf[3 + p + 1] == b'-')
{
self.offset += len - p;
return Err(Error::UnexpectedToken("--".to_string()));
}
}
Ok(Event::Comment(BytesText::wrap(
&buf[3..len - 2],
self.decoder(),
)))
}
BangType::CData if uncased_starts_with(buf, b"![CDATA[") => {
debug_assert!(buf.ends_with(b"]]"));
Ok(Event::CData(BytesCData::wrap(
&buf[8..len - 2],
self.decoder(),
)))
}
BangType::DocType if uncased_starts_with(buf, b"!DOCTYPE") => {
let start = buf[8..]
.iter()
.position(|b| !is_whitespace(*b))
.unwrap_or(len - 8);
if start + 8 >= len {
return Err(Error::EmptyDocType);
}
Ok(Event::DocType(BytesText::wrap(
&buf[8 + start..],
self.decoder(),
)))
}
_ => Err(bang_type.to_err()),
}
}
/// Wraps content of `buf` into the [`Event::End`] event. Does the check that
/// end name matches the last opened start name if `self.check_end_names` is set.
pub fn emit_end<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
// XML standard permits whitespaces after the markup name in closing tags.
// Let's strip them from the buffer before comparing tag names.
let name = if self.trim_markup_names_in_closing_tags {
if let Some(pos_end_name) = buf[1..].iter().rposition(|&b| !b.is_ascii_whitespace()) {
let (name, _) = buf[1..].split_at(pos_end_name + 1);
name
} else {
&buf[1..]
}
} else {
&buf[1..]
};
let decoder = self.decoder();
let mismatch_err = |expected: String, found: &[u8], offset: &mut usize| {
*offset -= buf.len();
Err(Error::EndEventMismatch {
expected,
found: decoder.decode(found).unwrap_or_default().into_owned(),
})
};
// Get the index in self.opened_buffer of the name of the last opened tag
match self.opened_starts.pop() {
Some(start) => {
if self.check_end_names {
let expected = &self.opened_buffer[start..];
if name != expected {
let expected = decoder.decode(expected).unwrap_or_default().into_owned();
// #513: In order to allow error recovery we should drop content of the buffer
self.opened_buffer.truncate(start);
return mismatch_err(expected, name, &mut self.offset);
}
}
self.opened_buffer.truncate(start);
}
None => {
if self.check_end_names {
return mismatch_err("".to_string(), &buf[1..], &mut self.offset);
}
}
}
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// Reads a `BytesElement` starting with a `?` and
/// returns a `Decl` or `PI` event
pub fn emit_question_mark<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
let len = buf.len();
if len > 2 && buf[len - 1] == b'?' {
if len > 5 && &buf[1..4] == b"xml" && is_whitespace(buf[4]) {
let event = BytesDecl::from_start(BytesStart::wrap(&buf[1..len - 1], 3));
// Try getting encoding from the declaration event
#[cfg(feature = "encoding")]
if self.encoding.can_be_refined() {
if let Some(encoding) = event.encoder() {
self.encoding = EncodingRef::XmlDetected(encoding);
}
}
Ok(Event::Decl(event))
} else {
Ok(Event::PI(BytesText::wrap(&buf[1..len - 1], self.decoder())))
}
} else {
self.offset -= len;
Err(Error::UnexpectedEof("XmlDecl".to_string()))
}
}
/// Converts content of a tag to a `Start` or an `Empty` event
///
/// # Parameters
/// - `content`: Content of a tag between `<` and `>`
pub fn emit_start<'b>(&mut self, content: &'b [u8]) -> Result<Event<'b>> {
let len = content.len();
let name_end = content
.iter()
.position(|&b| is_whitespace(b))
.unwrap_or(len);
if let Some(&b'/') = content.last() {
// This is a self-closing tag `<something/>`
let name_len = if name_end < len { name_end } else { len - 1 };
let event = BytesStart::wrap(&content[..len - 1], name_len);
if self.expand_empty_elements {
self.state = ParseState::Empty;
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_len]);
Ok(Event::Start(event))
} else {
Ok(Event::Empty(event))
}
} else {
// #514: Always store the name even when .check_end_names == false,
// because the checks can be temporarily disabled, and when they are
// re-enabled we should have that information
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_end]);
Ok(Event::Start(BytesStart::wrap(content, name_end)))
}
}
#[inline]
pub fn close_expanded_empty(&mut self) -> Result<Event<'static>> {
self.state = ParseState::ClosedTag;
let name = self
.opened_buffer
.split_off(self.opened_starts.pop().unwrap());
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// Get the decoder used to decode the bytes read by this reader into strings.
///
/// If `encoding` feature is enabled, the used encoding may change after
/// parsing the XML declaration, otherwise encoding is fixed to UTF-8.
///
/// If `encoding` feature is enabled and no encoding is specified in declaration,
/// defaults to UTF-8.
pub fn decoder(&self) -> Decoder |
}
impl Default for Parser {
fn default() -> Self {
Self {
offset: 0,
state: ParseState::Init,
expand_empty_elements: false,
trim_text_start: false,
trim_text_end: false,
trim_markup_names_in_closing_tags: true,
check_end_names: true,
check_comments: false,
opened_buffer: Vec::new(),
opened_starts: Vec::new(),
#[cfg(feature = "encoding")]
encoding: EncodingRef::Implicit(UTF_8),
}
}
}
| {
Decoder {
#[cfg(feature = "encoding")]
encoding: self.encoding.encoding(),
}
} | identifier_body |
parser.rs | #[cfg(feature = "encoding")]
use encoding_rs::UTF_8;
use crate::encoding::Decoder;
use crate::errors::{Error, Result};
use crate::events::{BytesCData, BytesDecl, BytesEnd, BytesStart, BytesText, Event};
#[cfg(feature = "encoding")]
use crate::reader::EncodingRef;
use crate::reader::{is_whitespace, BangType, ParseState};
use memchr;
/// A struct that holds a current parse state and a parser configuration.
/// It is independent of the way data is read: the reader feeds data into it and
/// gets back the produced [`Event`]s.
#[derive(Clone)]
pub(super) struct Parser {
/// Number of bytes read from the source of data since the parser was created
pub offset: usize,
/// Defines how to process next byte
pub state: ParseState,
/// Expand empty element into an opening and closing element
pub expand_empty_elements: bool,
/// Trims leading whitespace in Text events, skipping the event if the text is empty
pub trim_text_start: bool,
/// Trims trailing whitespace in Text events.
pub trim_text_end: bool,
/// Trims trailing whitespaces from markup names in closing tags `</a >`
pub trim_markup_names_in_closing_tags: bool,
/// Check if [`Event::End`] nodes match last [`Event::Start`] node
pub check_end_names: bool,
/// Check whether comments contain `--` (false by default)
pub check_comments: bool,
/// All currently Started elements which didn't have a matching
/// End element yet.
///
/// For an XML
///
/// ```xml
/// <root><one/><inner attr="value">|<tag></inner></root>
/// ```
/// when cursor at the `|` position buffer contains:
///
/// ```text
/// rootinner
/// ^ ^
/// ```
///
/// The `^` symbols show which positions are stored in [`Self::opened_starts`]
/// (0 and 4 in that case).
opened_buffer: Vec<u8>,
/// Opened name start indexes into [`Self::opened_buffer`]. See documentation
/// for that field for details
opened_starts: Vec<usize>,
#[cfg(feature = "encoding")]
/// Reference to the encoding used to read an XML
pub encoding: EncodingRef,
}
impl Parser {
/// Trims whitespaces from `bytes`, if required, and returns a [`Text`] event.
///
/// # Parameters
/// - `bytes`: data from the start of stream to the first `<` or from `>` to `<`
///
/// [`Text`]: Event::Text
pub fn emit_text<'b>(&mut self, bytes: &'b [u8]) -> Result<Event<'b>> {
let mut content = bytes;
if self.trim_text_end {
// Skip the ending '<'
let len = bytes
.iter()
.rposition(|&b| !is_whitespace(b))
.map_or_else(|| bytes.len(), |p| p + 1);
content = &bytes[..len];
}
Ok(Event::Text(BytesText::wrap(content, self.decoder())))
}
/// Reads a `BytesElement` starting with a `!` and
/// returns a `Comment`, `CData`, or `DocType` event
pub fn emit_bang<'b>(&mut self, bang_type: BangType, buf: &'b [u8]) -> Result<Event<'b>> {
let uncased_starts_with = |string: &[u8], prefix: &[u8]| {
string.len() >= prefix.len() && string[..prefix.len()].eq_ignore_ascii_case(prefix)
};
let len = buf.len();
match bang_type {
BangType::Comment if buf.starts_with(b"!--") => {
debug_assert!(buf.ends_with(b"--"));
if self.check_comments {
// make sure '--' does not occur inside the comment
if let Some(p) = memchr::memchr_iter(b'-', &buf[3..len - 2])
.position(|p| buf[3 + p + 1] == b'-')
{
self.offset += len - p;
return Err(Error::UnexpectedToken("--".to_string()));
}
}
Ok(Event::Comment(BytesText::wrap(
&buf[3..len - 2],
self.decoder(),
)))
}
BangType::CData if uncased_starts_with(buf, b"![CDATA[") => {
debug_assert!(buf.ends_with(b"]]"));
Ok(Event::CData(BytesCData::wrap(
&buf[8..len - 2],
self.decoder(),
)))
}
BangType::DocType if uncased_starts_with(buf, b"!DOCTYPE") => {
let start = buf[8..]
.iter()
.position(|b| !is_whitespace(*b))
.unwrap_or(len - 8);
if start + 8 >= len {
return Err(Error::EmptyDocType);
}
Ok(Event::DocType(BytesText::wrap(
&buf[8 + start..],
self.decoder(),
)))
}
_ => Err(bang_type.to_err()),
}
}
/// Wraps content of `buf` into the [`Event::End`] event. Does the check that
/// end name matches the last opened start name if `self.check_end_names` is set.
pub fn emit_end<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
// XML standard permits whitespaces after the markup name in closing tags.
// Let's strip them from the buffer before comparing tag names.
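// For example, for `</root  >` the buffer here is `/root  ` and the
// extracted name is `root`.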
let name = if self.trim_markup_names_in_closing_tags {
if let Some(pos_end_name) = buf[1..].iter().rposition(|&b| !b.is_ascii_whitespace()) {
let (name, _) = buf[1..].split_at(pos_end_name + 1);
name
} else {
&buf[1..]
}
} else {
&buf[1..]
};
let decoder = self.decoder();
let mismatch_err = |expected: String, found: &[u8], offset: &mut usize| {
*offset -= buf.len();
Err(Error::EndEventMismatch {
expected,
found: decoder.decode(found).unwrap_or_default().into_owned(),
})
};
// Get the index in self.opened_buffer of the name of the last opened tag
match self.opened_starts.pop() {
Some(start) => {
if self.check_end_names {
let expected = &self.opened_buffer[start..];
if name != expected {
let expected = decoder.decode(expected).unwrap_or_default().into_owned();
// #513: In order to allow error recovery we should drop content of the buffer
self.opened_buffer.truncate(start);
return mismatch_err(expected, name, &mut self.offset);
}
}
self.opened_buffer.truncate(start);
}
None => {
if self.check_end_names {
return mismatch_err("".to_string(), &buf[1..], &mut self.offset);
}
}
}
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// Reads a `BytesElement` starting with a `?` and
/// returns a `Decl` or `PI` event
pub fn emit_question_mark<'b>(&mut self, buf: &'b [u8]) -> Result<Event<'b>> {
let len = buf.len();
if len > 2 && buf[len - 1] == b'?' {
if len > 5 && &buf[1..4] == b"xml" && is_whitespace(buf[4]) {
let event = BytesDecl::from_start(BytesStart::wrap(&buf[1..len - 1], 3));
// Try getting encoding from the declaration event
#[cfg(feature = "encoding")]
if self.encoding.can_be_refined() {
if let Some(encoding) = event.encoder() |
}
Ok(Event::Decl(event))
} else {
Ok(Event::PI(BytesText::wrap(&buf[1..len - 1], self.decoder())))
}
} else {
self.offset -= len;
Err(Error::UnexpectedEof("XmlDecl".to_string()))
}
}
/// Converts content of a tag to a `Start` or an `Empty` event
///
/// # Parameters
/// - `content`: Content of a tag between `<` and `>`
pub fn emit_start<'b>(&mut self, content: &'b [u8]) -> Result<Event<'b>> {
let len = content.len();
let name_end = content
.iter()
.position(|&b| is_whitespace(b))
.unwrap_or(len);
if let Some(&b'/') = content.last() {
// This is a self-closing tag `<something/>`
let name_len = if name_end < len { name_end } else { len - 1 };
let event = BytesStart::wrap(&content[..len - 1], name_len);
if self.expand_empty_elements {
self.state = ParseState::Empty;
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_len]);
Ok(Event::Start(event))
} else {
Ok(Event::Empty(event))
}
} else {
// #514: Always store the name even when .check_end_names == false,
// because the checks can be temporarily disabled, and when they are
// re-enabled we should have that information
self.opened_starts.push(self.opened_buffer.len());
self.opened_buffer.extend(&content[..name_end]);
Ok(Event::Start(BytesStart::wrap(content, name_end)))
}
}
#[inline]
pub fn close_expanded_empty(&mut self) -> Result<Event<'static>> {
self.state = ParseState::ClosedTag;
let name = self
.opened_buffer
.split_off(self.opened_starts.pop().unwrap());
Ok(Event::End(BytesEnd::wrap(name.into())))
}
/// Get the decoder used to decode the bytes read by this reader into strings.
///
/// If `encoding` feature is enabled, the used encoding may change after
/// parsing the XML declaration, otherwise encoding is fixed to UTF-8.
///
/// If `encoding` feature is enabled and no encoding is specified in declaration,
/// defaults to UTF-8.
pub fn decoder(&self) -> Decoder {
Decoder {
#[cfg(feature = "encoding")]
encoding: self.encoding.encoding(),
}
}
}
impl Default for Parser {
fn default() -> Self {
Self {
offset: 0,
state: ParseState::Init,
expand_empty_elements: false,
trim_text_start: false,
trim_text_end: false,
trim_markup_names_in_closing_tags: true,
check_end_names: true,
check_comments: false,
opened_buffer: Vec::new(),
opened_starts: Vec::new(),
#[cfg(feature = "encoding")]
encoding: EncodingRef::Implicit(UTF_8),
}
}
}
| {
self.encoding = EncodingRef::XmlDetected(encoding);
} | conditional_block |
context.rs | use super::{
branch_expander, data_expander, heap2stack, ir::*, normalizer, rewriter, simplifier, traverser,
};
use crate::ast;
use crate::module::ModuleSet;
use derive_new::new;
use if_chain::if_chain;
use std::collections::{hash_map, BTreeSet, HashMap};
use std::sync::Arc;
#[derive(Debug, Clone)]
pub struct Context {
ct_id_gen: CtIdGen,
rt_id_gen: RtIdGen,
ct_mapping: HashMap<CtKey, CtId>,
rt_mapping: HashMap<RtKey, RtId>,
ct_defs: HashMap<CtId, ContextCtDef>,
data_expansions: HashMap<CtId, data_expander::DataExpansion>,
next_generation: Generation,
}
impl Context {
pub fn new() -> Self {
Self {
ct_id_gen: CtIdGen::new(),
rt_id_gen: RtIdGen::new(),
ct_mapping: HashMap::new(),
rt_mapping: HashMap::new(),
ct_defs: HashMap::new(),
data_expansions: HashMap::new(),
next_generation: Generation(0),
}
}
pub fn def(&self, id: CtId) -> Option<&Arc<CtDef>> {
self.ct_defs
.get(&id)
.filter(|cdef| cdef.is_populated())
.map(|cdef| &cdef.def)
}
pub fn defs(&self) -> impl Iterator<Item = (CtId, &Arc<CtDef>)> {
self.ct_defs
.iter()
.filter(|(_, cdef)| cdef.is_populated())
.map(|(id, cdef)| (*id, &cdef.def))
}
pub fn populate<T>(
&mut self,
src: &T,
set: &impl ModuleSet,
) -> (T::Dest, impl Iterator<Item = (CtId, &Arc<CtDef>)>)
where
T: simplifier::Simplify,
T::Dest: traverser::Traverse + rewriter::Rewrite,
{
let mut dest = simplifier::simplify(src, &mut SimplifierContext::new(self, set));
normalizer::normalize(&mut dest, self);
let generation = self.next_generation;
self.next_generation.0 += 1;
// Assign generation information to each CtDef generated in this populate pass.
let mut generation_defs = GenerationCollector::collect(&dest, self);
for id in generation_defs.iter() {
self.ct_defs.get_mut(id).unwrap().generation = Some(generation);
}
DataExpansionComputetor::compute(generation, &mut generation_defs, self);
let mut target = CanonicalizeTarget::new(&generation_defs, &mut dest, &mut self.ct_defs);
data_expander::expand(&mut target, &self.data_expansions);
branch_expander::expand(
&mut target,
&mut MatchExpander::new(&mut self.rt_id_gen, &self.data_expansions),
);
heap2stack::run(&mut target);
// Possible optimizations that are not implemented:
// * Closure inlining: we can inline closure immediate calls like $<f>{<env>}(..)
// * More escape analysis to promote heap allocations to stack allocations
let ct_defs = &self.ct_defs;
let related_defs =
generation_defs
.into_iter()
.filter_map(move |id| match ct_defs.get(&id).unwrap() {
cdef if cdef.is_populated() => Some((id, &cdef.def)),
_ => None,
});
(dest, related_defs)
}
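// Memoizes one CtDef per CtKey. Note that the fresh id is registered in
// `ct_mapping` *before* `build` runs, so a recursive definition that asks
// for the same key again simply receives its own id.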
fn bind_ct(&mut self, key: CtKey, build: impl FnOnce(&mut Self) -> Option<CtDef>) -> CtId {
match self.ct_mapping.entry(key) {
hash_map::Entry::Occupied(e) => *e.get(),
hash_map::Entry::Vacant(e) => {
let id = self.ct_id_gen.next();
e.insert(id);
if let Some(def) = build(self) {
let phase = match def {
CtDef::Generic(_, _) => Phase::Generalized,
_ => Phase::Instantiated,
};
self.ct_defs
.insert(id, ContextCtDef::new(phase, None, Arc::new(def)));
}
id
}
}
}
}
impl normalizer::Env for Context {
fn instantiate(&mut self, id: CtId, args: Vec<Ct>) -> CtId {
self.bind_ct(CtKey::Inst(id, args.clone()), move |self_| {
let def = match self_.ct_defs.get(&id) {
Some(cdef) => &cdef.def,
None => panic!("Attempt to instantiate {}: which is not a definition", id),
};
match def.as_ref() {
CtDef::Generic(params, ct) => {
assert_eq!(params.len(), args.len());
let mut ct = ct.as_ref().clone();
rewriter::replace_ct(&mut ct, params.iter().copied().zip(args).collect());
Some(ct)
}
_ => panic!(
"Attempt to instantiate {}: which is not a generic definition",
id
),
}
})
}
fn get_processing_ct_def(&mut self, id: CtId) -> Option<normalizer::ProcessingCtDef> {
let cdef = self.ct_defs.get(&id)?;
let mut def = Arc::clone(&cdef.def);
let is_normalized = match cdef.phase {
Phase::Generalized => false,
Phase::Instantiated => {
self.ct_defs.get_mut(&id).unwrap().phase = Phase::Normalizing;
def = Arc::new({
let mut def = def.as_ref().clone();
normalizer::normalize(&mut def, self);
def
});
self.ct_defs.get_mut(&id).unwrap().phase = Phase::Normalized;
self.ct_defs.get_mut(&id).unwrap().def = Arc::clone(&def);
true
}
Phase::Normalizing => false,
Phase::Normalized => true,
};
Some(normalizer::ProcessingCtDef { is_normalized, def })
}
fn alloc_ct(&mut self) -> CtId {
self.ct_id_gen.next()
}
fn define_ct(&mut self, id: CtId, def: CtDef) {
let phase = match def {
CtDef::Generic(_, _) => Phase::Generalized,
_ => Phase::Instantiated,
};
let def = ContextCtDef::new(phase, None, Arc::new(def));
if self.ct_defs.insert(id, def).is_some() {
panic!("Duplicate definition of {}", id);
}
}
fn alloc_rt(&mut self) -> RtId {
self.rt_id_gen.next()
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Hash)]
enum CtKey {
Construct(ast::Construct),
Inst(CtId, Vec<Ct>),
}
type RtKey = ast::Construct;
#[derive(Debug, Clone, new)]
struct ContextCtDef {
phase: Phase,
generation: Option<Generation>,
def: Arc<CtDef>,
}
impl ContextCtDef {
fn is_populated(&self) -> bool {
self.phase == Phase::Normalized
&& self.generation.is_some()
&& !matches!(*self.def, CtDef::Data(_))
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
enum Phase {
Generalized,
Instantiated,
Normalizing,
Normalized,
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
struct Generation(usize);
#[derive(Debug, new)]
struct SimplifierContext<'a, 'm, M> {
context: &'a mut Context,
module_set: &'m M,
}
impl<'a, 'm, M: ModuleSet> simplifier::Env<'m> for SimplifierContext<'a, 'm, M> {
type ModuleSet = M;
fn module_set(&self) -> &'m Self::ModuleSet {
self.module_set
}
fn alloc_ct(&mut self) -> CtId {
self.context.ct_id_gen.next()
}
fn issue_ct(&mut self, construct: impl Into<ast::Construct>) -> CtId {
let module_set = self.module_set;
let construct = construct.into();
self.context.bind_ct(CtKey::Construct(construct), |ctx| {
SimplifierContext::new(ctx, module_set).simplify_def(construct)
})
}
fn alloc_rt(&mut self) -> RtId {
self.context.rt_id_gen.next()
}
fn issue_rt(&mut self, construct: impl Into<ast::Construct>) -> RtId {
match self.context.rt_mapping.entry(construct.into()) {
hash_map::Entry::Occupied(e) => *e.get(),
hash_map::Entry::Vacant(e) => {
let id = self.context.rt_id_gen.next();
e.insert(id);
id
}
}
}
}
#[derive(Debug, new)]
struct DataExpansionComputetor<'a> {
ct_id_gen: &'a mut CtIdGen,
data_expansions: &'a mut HashMap<CtId, data_expander::DataExpansion>,
#[new(default)]
defs: HashMap<CtId, CtDef>,
}
impl<'a> DataExpansionComputetor<'a> {
fn compute(generation: Generation, generation_defs: &mut BTreeSet<CtId>, ctx: &'a mut Context) {
let mut env = Self::new(&mut ctx.ct_id_gen, &mut ctx.data_expansions);
data_expander::compute(
{
let ct_defs = &ctx.ct_defs;
generation_defs
.iter()
.filter_map(move |id| match *ct_defs.get(id).unwrap().def {
CtDef::Data(ref data) => Some((*id, data)),
_ => None,
})
},
&mut env,
);
for (id, def) in env.defs {
let cdef = ContextCtDef::new(Phase::Normalized, Some(generation), Arc::new(def));
ctx.ct_defs.insert(id, cdef);
generation_defs.insert(id);
}
}
}
impl<'a> data_expander::Env for DataExpansionComputetor<'a> {
fn add_def(&mut self, def: CtDef) -> CtId {
let id = self.ct_id_gen.next();
self.defs.insert(id, def);
id
} | fn data_expansions(&mut self) -> &mut HashMap<CtId, data_expander::DataExpansion> {
self.data_expansions
}
}
#[derive(Debug, new)]
struct MatchExpander<'a> {
rt_id_gen: &'a mut RtIdGen,
data_expansions: &'a HashMap<CtId, data_expander::DataExpansion>,
}
impl<'a> branch_expander::Env for MatchExpander<'a> {
fn alloc_rt(&mut self) -> RtId {
self.rt_id_gen.next()
}
}
#[derive(Debug, new)]
struct GenerationCollector<'a> {
generation_defs: BTreeSet<CtId>,
context: &'a Context,
}
impl<'a> GenerationCollector<'a> {
fn collect(dest: &impl traverser::Traverse, ctx: &'a Context) -> BTreeSet<CtId> {
let mut collector = Self::new(BTreeSet::new(), ctx);
let _ = traverser::traverse(dest, &mut collector);
collector.generation_defs
}
}
impl traverser::Traverser for GenerationCollector<'_> {
type Error = ();
fn after_ct(&mut self, ct: &Ct) -> Result<(), ()> {
if_chain! {
if let Ct::Id(id) = ct;
if let Some(cdef) = self.context.ct_defs.get(id);
if cdef.generation.is_none() && self.generation_defs.insert(*id);
then {
self.traverse(cdef.def.as_ref())?;
}
}
Ok(())
}
}
#[derive(Debug, new)]
struct CanonicalizeTarget<'a, D> {
generation_defs: &'a BTreeSet<CtId>,
dest: &'a mut D,
ct_defs: &'a mut HashMap<CtId, ContextCtDef>,
}
impl<'a, D: rewriter::Rewrite> rewriter::Rewrite for CanonicalizeTarget<'a, D> {
fn rewrite<T: rewriter::Rewriter>(&mut self, rewriter: &mut T) -> Result<(), T::Error> {
rewriter.rewrite(self.dest)?;
for id in self.generation_defs {
let cdef = self.ct_defs.get_mut(id).unwrap();
rewriter.rewrite(Arc::make_mut(&mut cdef.def))?;
}
Ok(())
}
} | random_line_split |
|
context.rs | use super::{
branch_expander, data_expander, heap2stack, ir::*, normalizer, rewriter, simplifier, traverser,
};
use crate::ast;
use crate::module::ModuleSet;
use derive_new::new;
use if_chain::if_chain;
use std::collections::{hash_map, BTreeSet, HashMap};
use std::sync::Arc;
#[derive(Debug, Clone)]
pub struct Context {
ct_id_gen: CtIdGen,
rt_id_gen: RtIdGen,
ct_mapping: HashMap<CtKey, CtId>,
rt_mapping: HashMap<RtKey, RtId>,
ct_defs: HashMap<CtId, ContextCtDef>,
data_expansions: HashMap<CtId, data_expander::DataExpansion>,
next_generation: Generation,
}
impl Context {
pub fn new() -> Self {
Self {
ct_id_gen: CtIdGen::new(),
rt_id_gen: RtIdGen::new(),
ct_mapping: HashMap::new(),
rt_mapping: HashMap::new(),
ct_defs: HashMap::new(),
data_expansions: HashMap::new(),
next_generation: Generation(0),
}
}
pub fn def(&self, id: CtId) -> Option<&Arc<CtDef>> {
self.ct_defs
.get(&id)
.filter(|cdef| cdef.is_populated())
.map(|cdef| &cdef.def)
}
pub fn defs(&self) -> impl Iterator<Item = (CtId, &Arc<CtDef>)> {
self.ct_defs
.iter()
.filter(|(_, cdef)| cdef.is_populated())
.map(|(id, cdef)| (*id, &cdef.def))
}
pub fn populate<T>(
&mut self,
src: &T,
set: &impl ModuleSet,
) -> (T::Dest, impl Iterator<Item = (CtId, &Arc<CtDef>)>)
where
T: simplifier::Simplify,
T::Dest: traverser::Traverse + rewriter::Rewrite,
{
let mut dest = simplifier::simplify(src, &mut SimplifierContext::new(self, set));
normalizer::normalize(&mut dest, self);
let generation = self.next_generation;
self.next_generation.0 += 1;
// Assign generation information to each CtDef generated in this populate pass.
let mut generation_defs = GenerationCollector::collect(&dest, self);
for id in generation_defs.iter() {
self.ct_defs.get_mut(id).unwrap().generation = Some(generation);
}
DataExpansionComputetor::compute(generation, &mut generation_defs, self);
let mut target = CanonicalizeTarget::new(&generation_defs, &mut dest, &mut self.ct_defs);
data_expander::expand(&mut target, &self.data_expansions);
branch_expander::expand(
&mut target,
&mut MatchExpander::new(&mut self.rt_id_gen, &self.data_expansions),
);
heap2stack::run(&mut target);
// Possible optimizations that are not implemented:
// * Closure inlining: we can inline closure immediate calls like $<f>{<env>}(..)
// * More escape analysis to promote heap allocations to stack allocations
let ct_defs = &self.ct_defs;
let related_defs =
generation_defs
.into_iter()
.filter_map(move |id| match ct_defs.get(&id).unwrap() {
cdef if cdef.is_populated() => Some((id, &cdef.def)),
_ => None,
});
(dest, related_defs)
}
fn bind_ct(&mut self, key: CtKey, build: impl FnOnce(&mut Self) -> Option<CtDef>) -> CtId {
match self.ct_mapping.entry(key) {
hash_map::Entry::Occupied(e) => *e.get(),
hash_map::Entry::Vacant(e) => {
let id = self.ct_id_gen.next();
e.insert(id);
if let Some(def) = build(self) {
let phase = match def {
CtDef::Generic(_, _) => Phase::Generalized,
_ => Phase::Instantiated,
};
self.ct_defs
.insert(id, ContextCtDef::new(phase, None, Arc::new(def)));
}
id
}
}
}
}
impl normalizer::Env for Context {
fn instantiate(&mut self, id: CtId, args: Vec<Ct>) -> CtId {
self.bind_ct(CtKey::Inst(id, args.clone()), move |self_| {
let def = match self_.ct_defs.get(&id) {
Some(cdef) => &cdef.def,
None => panic!("Attempt to instantiate {}: which is not a definition", id),
};
match def.as_ref() {
CtDef::Generic(params, ct) => {
assert_eq!(params.len(), args.len());
let mut ct = ct.as_ref().clone();
rewriter::replace_ct(&mut ct, params.iter().copied().zip(args).collect());
Some(ct)
}
_ => panic!(
"Attempt to instantiate {}: which is not a generic definition",
id
),
}
})
}
fn get_processing_ct_def(&mut self, id: CtId) -> Option<normalizer::ProcessingCtDef> {
let cdef = self.ct_defs.get(&id)?;
let mut def = Arc::clone(&cdef.def);
let is_normalized = match cdef.phase {
Phase::Generalized => false,
Phase::Instantiated => {
self.ct_defs.get_mut(&id).unwrap().phase = Phase::Normalizing;
def = Arc::new({
let mut def = def.as_ref().clone();
normalizer::normalize(&mut def, self);
def
});
self.ct_defs.get_mut(&id).unwrap().phase = Phase::Normalized;
self.ct_defs.get_mut(&id).unwrap().def = Arc::clone(&def);
true
}
Phase::Normalizing => false,
Phase::Normalized => true,
};
Some(normalizer::ProcessingCtDef { is_normalized, def })
}
fn alloc_ct(&mut self) -> CtId {
self.ct_id_gen.next()
}
fn define_ct(&mut self, id: CtId, def: CtDef) {
let phase = match def {
CtDef::Generic(_, _) => Phase::Generalized,
_ => Phase::Instantiated,
};
let def = ContextCtDef::new(phase, None, Arc::new(def));
if self.ct_defs.insert(id, def).is_some() {
panic!("Duplicate definition of {}", id);
}
}
fn alloc_rt(&mut self) -> RtId {
self.rt_id_gen.next()
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Hash)]
enum CtKey {
Construct(ast::Construct),
Inst(CtId, Vec<Ct>),
}
type RtKey = ast::Construct;
#[derive(Debug, Clone, new)]
struct ContextCtDef {
phase: Phase,
generation: Option<Generation>,
def: Arc<CtDef>,
}
impl ContextCtDef {
fn is_populated(&self) -> bool {
self.phase == Phase::Normalized
&& self.generation.is_some()
&& !matches!(*self.def, CtDef::Data(_))
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
enum Phase {
Generalized,
Instantiated,
Normalizing,
Normalized,
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
struct Generation(usize);
#[derive(Debug, new)]
struct SimplifierContext<'a, 'm, M> {
context: &'a mut Context,
module_set: &'m M,
}
impl<'a, 'm, M: ModuleSet> simplifier::Env<'m> for SimplifierContext<'a, 'm, M> {
type ModuleSet = M;
fn module_set(&self) -> &'m Self::ModuleSet {
self.module_set
}
fn alloc_ct(&mut self) -> CtId {
self.context.ct_id_gen.next()
}
fn issue_ct(&mut self, construct: impl Into<ast::Construct>) -> CtId {
let module_set = self.module_set;
let construct = construct.into();
self.context.bind_ct(CtKey::Construct(construct), |ctx| {
SimplifierContext::new(ctx, module_set).simplify_def(construct)
})
}
fn alloc_rt(&mut self) -> RtId {
self.context.rt_id_gen.next()
}
fn issue_rt(&mut self, construct: impl Into<ast::Construct>) -> RtId {
match self.context.rt_mapping.entry(construct.into()) {
hash_map::Entry::Occupied(e) => *e.get(),
hash_map::Entry::Vacant(e) => {
let id = self.context.rt_id_gen.next();
e.insert(id);
id
}
}
}
}
#[derive(Debug, new)]
struct DataExpansionComputetor<'a> {
ct_id_gen: &'a mut CtIdGen,
data_expansions: &'a mut HashMap<CtId, data_expander::DataExpansion>,
#[new(default)]
defs: HashMap<CtId, CtDef>,
}
impl<'a> DataExpansionComputetor<'a> {
fn compute(generation: Generation, generation_defs: &mut BTreeSet<CtId>, ctx: &'a mut Context) {
let mut env = Self::new(&mut ctx.ct_id_gen, &mut ctx.data_expansions);
data_expander::compute(
{
let ct_defs = &ctx.ct_defs;
generation_defs
.iter()
.filter_map(move |id| match *ct_defs.get(id).unwrap().def {
CtDef::Data(ref data) => Some((*id, data)),
_ => None,
})
},
&mut env,
);
for (id, def) in env.defs {
let cdef = ContextCtDef::new(Phase::Normalized, Some(generation), Arc::new(def));
ctx.ct_defs.insert(id, cdef);
generation_defs.insert(id);
}
}
}
impl<'a> data_expander::Env for DataExpansionComputetor<'a> {
fn | (&mut self, def: CtDef) -> CtId {
let id = self.ct_id_gen.next();
self.defs.insert(id, def);
id
}
fn data_expansions(&mut self) -> &mut HashMap<CtId, data_expander::DataExpansion> {
self.data_expansions
}
}
#[derive(Debug, new)]
struct MatchExpander<'a> {
rt_id_gen: &'a mut RtIdGen,
data_expansions: &'a HashMap<CtId, data_expander::DataExpansion>,
}
impl<'a> branch_expander::Env for MatchExpander<'a> {
fn alloc_rt(&mut self) -> RtId {
self.rt_id_gen.next()
}
}
#[derive(Debug, new)]
struct GenerationCollector<'a> {
generation_defs: BTreeSet<CtId>,
context: &'a Context,
}
impl<'a> GenerationCollector<'a> {
fn collect(dest: &impl traverser::Traverse, ctx: &'a Context) -> BTreeSet<CtId> {
let mut collector = Self::new(BTreeSet::new(), ctx);
let _ = traverser::traverse(dest, &mut collector);
collector.generation_defs
}
}
impl traverser::Traverser for GenerationCollector<'_> {
type Error = ();
fn after_ct(&mut self, ct: &Ct) -> Result<(), ()> {
if_chain! {
if let Ct::Id(id) = ct;
if let Some(cdef) = self.context.ct_defs.get(id);
if cdef.generation.is_none() && self.generation_defs.insert(*id);
then {
self.traverse(cdef.def.as_ref())?;
}
}
Ok(())
}
}
#[derive(Debug, new)]
struct CanonicalizeTarget<'a, D> {
generation_defs: &'a BTreeSet<CtId>,
dest: &'a mut D,
ct_defs: &'a mut HashMap<CtId, ContextCtDef>,
}
impl<'a, D: rewriter::Rewrite> rewriter::Rewrite for CanonicalizeTarget<'a, D> {
fn rewrite<T: rewriter::Rewriter>(&mut self, rewriter: &mut T) -> Result<(), T::Error> {
rewriter.rewrite(self.dest)?;
for id in self.generation_defs {
let cdef = self.ct_defs.get_mut(id).unwrap();
rewriter.rewrite(Arc::make_mut(&mut cdef.def))?;
}
Ok(())
}
}
| add_def | identifier_name |
main.rs | extern crate fixedbitset;
extern crate getopts;
extern crate libc;
extern crate regex;
extern crate toml;
extern crate num_cpus;
use regex::Regex;
use std::fs;
use std::io;
use std::io::{Read, Write, BufRead};
use std::path;
use std::sync;
use std::sync::mpsc;
use std::thread;
#[cfg(not(test))]
fn main()
{
let args : Vec<_> = std::env::args().collect();
// cli opts
let mut opts = getopts::Options::new();
opts.optmulti("f", "filters", "TOML file(s) with filter specifications", "specs.toml");
opts.optflag("h", "help", "print this help message and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!("{}", f) }
};
if matches.opt_present("h") {
let brief = format!("Usage: {} [options] [logfile]", args[0]);
println!("{}", opts.usage(&brief[..]));
println!("\nIf `logfile` is not given, the standard input will be used.\n\nWhen no filter spec options are provided,\nlines containing the word \"error\" are selected.");
return;
}
// parse toml config files
let mut specs: Vec<Spec> = vec![];
for filename in matches.opt_strs("f") {
consume_specs_toml(&filename[..], &mut specs);
}
if specs.len() == 0 {
let mut spec = Spec::new();
spec.start = Regex::new(r"\berror\b").ok();
specs.push(spec);
}
// perform
let lines = match matches.free.len() {
0 => |
1 => {
let path = path::Path::new(&matches.free[0]);
match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", matches.free[0], why.to_string()) },
Ok(ref mut f) => { read_lines(f) },
}
}
_ => { panic!("too many filename arguments ({}), expected just one", matches.free.len()) }
};
logselect(specs, lines, &mut io::stdout())
}
fn read_lines(reader: &mut io::Read) -> Vec<String>
{
let mut rv = Vec::new();
for line_res in io::BufReader::new(reader).lines() {
rv.push(line_res.unwrap());
}
return rv
}
fn logselect(specs: Vec<Spec>, lines: Vec<String>, writer: &mut io::Write)
{
let work = Work { lines : lines, specs : specs, index : sync::Mutex::new(0) };
let work = sync::Arc::new(work);
let (sender, receiver) = mpsc::channel();
let num_cpus = num_cpus::get();
let mut threads = Vec::with_capacity(num_cpus);
for _ in 0..threads.capacity() {
let sender = sender.clone();
let work = work.clone();
threads.push(thread::spawn(move || {
loop {
let portion_size = 100;
let i = {
let mut p = work.index.lock().unwrap();
let rv = *p;
*p += portion_size as isize;
rv as usize
};
if i >= work.lines.len() {
sender.send( (-1, -1) ).unwrap();
break;
}
for line_index in i..i+portion_size {
if line_index >= work.lines.len() { break }
for spec in &work.specs {
process_spec(&spec, line_index, &work.lines, &sender);
}
}
}
}));
}
let mut selected_indexes = fixedbitset::FixedBitSet::with_capacity(work.lines.len());
let mut num_finished = 0;
while num_finished < threads.len() {
match receiver.recv().unwrap() {
(-1,-1) => { num_finished += 1 }
(a,b) => for i in a..b {
selected_indexes.set(i as usize, true);
}
}
}
// output
let mut prev_index = 0;
for index in 0..work.lines.len() {
if selected_indexes[index] {
if prev_index > 0 {
if index + 1 - prev_index > 1 {
writer.write(b"\n.........\n\n").unwrap();
}
}
writer.write(work.lines[index].as_bytes()).unwrap();
writer.write(b"\n").unwrap();
prev_index = index + 1;
}
}
}
struct Work
{
lines: Vec<String>,
specs: Vec<Spec>,
index: sync::Mutex<isize>,
}
fn process_spec(spec: &Spec, line_index: usize, lines: &Vec<String>, sender: &mpsc::Sender<(isize, isize)>)
{
if let Some(ref rx) = spec.start {
if rx.is_match(&lines[line_index][..]) {
let sel_range = if spec.stop.is_some() || spec.whale.is_some() { try_select(&spec, lines, line_index as isize) } else { Some((line_index as isize,line_index as isize)) };
if let Some((a0,b0)) = sel_range {
let (a, b) = (a0 + spec.start_offset, b0 + spec.stop_offset);
// std::cmp should have this function
fn clamp<T>(a: T, x: T, b: T) -> T where T: Ord { std::cmp::min(std::cmp::max(a, x), b) }
let last_index = (lines.len() - 1) as isize;
let (a, b) = (clamp(0, a, last_index), clamp(0, b, last_index));
// if after applying offsets the range remains nonempty
if a0 <= b0 {
sender.send( (a, b+1) ).unwrap()
} else {
sender.send( (b, a+1) ).unwrap()
}
}
}
}
}
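// A minimal sketch exercising process_spec directly (not part of the original
// test suite): stop_offset shifts the selected range, and the result is sent
// in the half-open (start, stop+1) form used above.
#[test]
fn process_spec_applies_offsets() {
    let mut spec = Spec::new();
    spec.start = Regex::new(r"begin").ok();
    spec.stop = Regex::new(r"end").ok();
    spec.stop_offset = -1; // select up to the line *before* the stop match
    let lines: Vec<String> = ["begin", "body", "end"].iter().map(|s| s.to_string()).collect();
    let (tx, rx) = mpsc::channel();
    process_spec(&spec, 0, &lines, &tx);
    // lines 0..=1 are selected, sent as the half-open pair (0, 2)
    assert_eq!(rx.recv().unwrap(), (0, 2));
}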
fn consume_specs_toml(filename: &str, specs: &mut Vec<Spec>)
{
let path = path::Path::new(filename);
let mut file = match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", filename, why.to_string()) }
Ok(f) => f
};
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
let table = match content.parse::<toml::Value>() {
Ok(toml::Value::Table(t)) => { t }
Ok(_) => { panic!("parse error in {}: root value is not a table", filename) }
Err(toml_err) => { panic!("parse error in {}: {}", filename, toml_err) }
};
consume_specs_toml_table(&table, specs);
}
#[derive(Clone)]
struct Spec
{
disable: bool,
start: Option<Regex>,
start_offset: isize,
stop: Option<Regex>,
stop_offset: isize,
whale: Option<Regex>,
backward: bool,
limit: isize,
}
impl Spec
{
fn new() -> Self
{
Spec { disable: false, start: None, start_offset: 0, stop: None, stop_offset: 0, whale: None, backward: false, limit: 1000 }
}
}
fn consume_specs_toml_table(table: &toml::value::Table, specs: &mut Vec<Spec>)
{
use toml::Value::*;
let mut spec = Spec::new();
for (key, value) in table {
match &key[..] {
"disable" => {
match *value {
Boolean(x) => { spec.disable = x }
_ => { panic!("`disable` key must be boolean") }
}
}
"start" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.start = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`start` key must be regex string") }
}
}
"start_offset" => { match *value {
Integer(ofs) => { spec.start_offset = ofs as isize; }
_ => { panic!("`start_offset` must be integer") }
} }
"stop" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.stop = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`stop` key must be regex string") }
}
}
"stop_offset" => { match *value {
Integer(ofs) => { spec.stop_offset = ofs as isize; }
_ => { panic!("`stop_offset` must be integer") }
} }
"while" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.whale = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`while` key must be regex string") }
}
}
"direction" => {
match *value {
String(ref s) => { match &s[..] {
"forward" | "fwd" | "down" => { spec.backward = false }
"backward" | "backwards" | "back" | "up" => { spec.backward = true }
ss => { panic!("`direction` value '{}' unrecognized (must be 'forward' or 'backward')", ss) }
} }
_ => { panic!("`direction` must be a string") }
}
}
"limit" => { match *value {
Integer(lim) if lim > 0 => { spec.limit = lim as isize; }
_ => { panic!("`limit` must be a positive integer") }
} }
_ => { match *value {
Table(ref t) => { consume_specs_toml_table(&t, specs) }
_ => { panic!("unrecognized key: {}", key) }
} }
}
}
if !spec.disable && spec.start.is_some() {
specs.push(spec);
}
}
fn try_select(spec: &Spec, lines: &Vec<String>, index: isize) -> Option<(isize, isize)>
{
let step = if spec.backward { -1 } else { 1 };
let mut cursor = index + step;
while (cursor >= 0) && (cursor < lines.len() as isize) && (cursor - index).abs() <= spec.limit {
match spec.stop {
Some(ref rx) if rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor)) }
_ => {}
};
match spec.whale {
Some(ref rx) if !rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor-step)) }
_ => {}
};
cursor += step;
}
match spec.whale {
Some(_) => { return Some((index, cursor-step)) }
_ => { return None }
};
}
#[test]
fn test_all()
{
let sample_lines = read_lines(&mut fs::File::open(&path::Path::new("tests/data/sample.txt")).unwrap());
let mut failed_files = Vec::<String>::new();
println!(""); // cargo test prepends tab to the first line, but not the rest
for entry in std::fs::read_dir(&path::Path::new("tests/data")).unwrap() {
let entry_path = entry.unwrap().path();
if entry_path.extension().unwrap().to_str().unwrap() == "toml" {
let mut specs: Vec<Spec> = vec![];
let toml_path_s = entry_path.clone().into_os_string().into_string().unwrap();
print!("testing {}... ", toml_path_s);
let _ = io::stdout().flush();
consume_specs_toml(&toml_path_s[..], &mut specs);
let expected_content_path = entry_path.with_extension("txt");
let expected_content_path_str = expected_content_path.clone().into_os_string().into_string().unwrap();
let mut expected_content = String::new();
match fs::File::open(&expected_content_path) {
Err(err) => { panic!("{}: can not open file {}: {}", toml_path_s, expected_content_path_str, err); }
Ok(ref mut f) => { f.read_to_string(&mut expected_content).unwrap(); }
};
let mut output = Vec::<u8>::new();
logselect(specs.clone(), sample_lines.clone(), &mut output);
if expected_content.as_bytes() == &output[..] {
println!("+");
} else {
failed_files.push(toml_path_s);
println!("fail\n\t{} spec(s) recognized\n--- expected ---\n{}\n--- actual ---", specs.len(), &expected_content[..]);
println!("{}", std::str::from_utf8(&output).unwrap());
println!("--- end ---");
}
}
}
if failed_files.len() > 0 {
println!("Summary of failed files:");
for ffn in failed_files { println!(" {}", ffn); }
panic!();
}
}
| { read_lines(&mut io::stdin()) } | conditional_block |
main.rs | extern crate fixedbitset;
extern crate getopts;
extern crate libc;
extern crate regex;
extern crate toml;
extern crate num_cpus;
use regex::Regex;
use std::fs;
use std::io;
use std::io::{Read, Write, BufRead};
use std::path;
use std::sync;
use std::sync::mpsc;
use std::thread;
#[cfg(not(test))]
fn main()
{
let args : Vec<_> = std::env::args().collect();
// cli opts
let mut opts = getopts::Options::new();
opts.optmulti("f", "filters", "TOML file(s) with filter specifications", "specs.toml");
opts.optflag("h", "help", "print this help message and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!("{}", f) }
};
if matches.opt_present("h") {
let brief = format!("Usage: {} [options] [logfile]", args[0]);
println!("{}", opts.usage(&brief[..]));
println!("\nIf `logfile` is not given, the standard input will be used.\n\nWhen no filter spec options are provided,\nlines containing the word \"error\" are selected.");
return;
}
// parse toml config files
let mut specs: Vec<Spec> = vec![];
for filename in matches.opt_strs("f") {
consume_specs_toml(&filename[..], &mut specs);
}
if specs.len() == 0 {
let mut spec = Spec::new();
spec.start = Regex::new(r"\berror\b").ok();
specs.push(spec);
}
// perform
let lines = match matches.free.len() {
0 => { read_lines(&mut io::stdin()) }
1 => {
let path = path::Path::new(&matches.free[0]);
match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", matches.free[0], why.to_string()) },
Ok(ref mut f) => { read_lines(f) },
}
}
_ => { panic!("too many filename arguments ({}), expected just one", matches.free.len()) }
};
logselect(specs, lines, &mut io::stdout())
}
fn read_lines(reader: &mut io::Read) -> Vec<String>
{
let mut rv = Vec::new();
for line_res in io::BufReader::new(reader).lines() {
rv.push(line_res.unwrap());
}
return rv
}
fn logselect(specs: Vec<Spec>, lines: Vec<String>, writer: &mut io::Write)
{
let work = Work { lines : lines, specs : specs, index : sync::Mutex::new(0) };
let work = sync::Arc::new(work);
let (sender, receiver) = mpsc::channel();
let num_cpus = num_cpus::get();
let mut threads = Vec::with_capacity(num_cpus);
for _ in 0..threads.capacity() {
let sender = sender.clone();
let work = work.clone();
threads.push(thread::spawn(move || {
loop {
let portion_size = 100;
let i = {
let mut p = work.index.lock().unwrap();
let rv = *p;
*p += portion_size as isize;
rv as usize
};
if i >= work.lines.len() {
sender.send( (-1, -1) ).unwrap();
break;
}
for line_index in i..i+portion_size {
if line_index >= work.lines.len() { break }
for spec in &work.specs {
process_spec(&spec, line_index, &work.lines, &sender);
}
}
}
}));
}
let mut selected_indexes = fixedbitset::FixedBitSet::with_capacity(work.lines.len());
let mut num_finished = 0;
while num_finished < threads.len() {
match receiver.recv().unwrap() {
(-1,-1) => { num_finished += 1 }
(a,b) => for i in a..b {
selected_indexes.set(i as usize, true);
}
}
}
// output
let mut prev_index = 0;
for index in 0..work.lines.len() {
if selected_indexes[index] {
if prev_index > 0 {
if index + 1 - prev_index > 1 {
writer.write(b"\n.........\n\n").unwrap();
}
}
writer.write(work.lines[index].as_bytes()).unwrap();
writer.write(b"\n").unwrap();
prev_index = index + 1;
}
}
}
struct Work
{
lines: Vec<String>,
specs: Vec<Spec>,
index: sync::Mutex<isize>,
}
fn | (spec: &Spec, line_index: usize, lines: &Vec<String>, sender: &mpsc::Sender<(isize, isize)>)
{
if let Some(ref rx) = spec.start {
if rx.is_match(&lines[line_index][..]) {
let sel_range = if spec.stop.is_some() || spec.whale.is_some() { try_select(&spec, lines, line_index as isize) } else { Some((line_index as isize,line_index as isize)) };
if let Some((a0,b0)) = sel_range {
let (a, b) = (a0 + spec.start_offset, b0 + spec.stop_offset);
// std::cmp should have this function
fn clamp<T>(a: T, x: T, b: T) -> T where T: Ord { std::cmp::min(std::cmp::max(a, x), b) }
let last_index = (lines.len() - 1) as isize;
let (a, b) = (clamp(0, a, last_index), clamp(0, b, last_index));
// if after applying offsets the range remains nonempty
if a0 <= b0 {
sender.send( (a, b+1) ).unwrap()
} else {
sender.send( (b, a+1) ).unwrap()
}
}
}
}
}
fn consume_specs_toml(filename: &str, specs: &mut Vec<Spec>)
{
let path = path::Path::new(filename);
let mut file = match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", filename, why.to_string()) }
Ok(f) => f
};
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
let table = match content.parse::<toml::Value>() {
Ok(toml::Value::Table(t)) => { t }
Ok(_) => { panic!("parse error in {}: root value is not a table", filename) }
Err(toml_err) => { panic!("parse error in {}: {}", filename, toml_err) }
};
consume_specs_toml_table(&table, specs);
}
#[derive(Clone)]
struct Spec
{
disable: bool,
start: Option<Regex>,
start_offset: isize,
stop: Option<Regex>,
stop_offset: isize,
whale: Option<Regex>,
backward: bool,
limit: isize,
}
impl Spec
{
fn new() -> Self
{
Spec { disable: false, start: None, start_offset: 0, stop: None, stop_offset: 0, whale: None, backward: false, limit: 1000 }
}
}
fn consume_specs_toml_table(table: &toml::value::Table, specs: &mut Vec<Spec>)
{
use toml::Value::*;
let mut spec = Spec::new();
for (key, value) in table {
match &key[..] {
"disable" => {
match *value {
Boolean(x) => { spec.disable = x }
_ => { panic!("`disable` key must be boolean") }
}
}
"start" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.start = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`start` key must be regex string") }
}
}
"start_offset" => { match *value {
Integer(ofs) => { spec.start_offset = ofs as isize; }
_ => { panic!("`start_offset` must be integer") }
} }
"stop" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.stop = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`stop` key must be regex string") }
}
}
"stop_offset" => { match *value {
Integer(ofs) => { spec.stop_offset = ofs as isize; }
_ => { panic!("`stop_offset` must be integer") }
} }
"while" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.whale = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`while` key must be regex string") }
}
}
"direction" => {
match *value {
String(ref s) => { match &s[..] {
"forward" | "fwd" | "down" => { spec.backward = false }
"backward" | "backwards" | "back" | "up" => { spec.backward = true }
ss => { panic!("`direction` value '{}' unrecognized (must be 'forward' or 'backward')", ss) }
} }
_ => { panic!("`direction` must be a string") }
}
}
"limit" => { match *value {
Integer(lim) if lim > 0 => { spec.limit = lim as isize; }
_ => { panic!("`limit` must be a positive integer") }
} }
_ => { match *value {
Table(ref t) => { consume_specs_toml_table(&t, specs) }
_ => { panic!("unrecognized key: {}", key) }
} }
}
}
if !spec.disable && spec.start.is_some() {
specs.push(spec);
}
}
fn try_select(spec: &Spec, lines: &Vec<String>, index: isize) -> Option<(isize, isize)>
{
let step = if spec.backward { -1 } else { 1 };
let mut cursor = index + step;
while (cursor >= 0) && (cursor < lines.len() as isize) && (cursor - index).abs() <= spec.limit {
match spec.stop {
Some(ref rx) if rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor)) }
_ => {}
};
match spec.whale {
Some(ref rx) if !rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor-step)) }
_ => {}
};
cursor += step;
}
match spec.whale {
Some(_) => { return Some((index, cursor-step)) }
_ => { return None }
};
}
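// A minimal sketch (not part of the original test suite): in `while` mode,
// try_select walks forward from the anchor line for as long as the regex
// keeps matching; the first non-matching line ends the range.
#[test]
fn try_select_while_mode() {
    let mut spec = Spec::new();
    spec.whale = Regex::new(r"^cont").ok();
    let lines: Vec<String> = ["start", "cont 1", "cont 2", "other"]
        .iter().map(|s| s.to_string()).collect();
    assert_eq!(try_select(&spec, &lines, 0), Some((0, 2)));
}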
#[test]
fn test_all()
{
let sample_lines = read_lines(&mut fs::File::open(&path::Path::new("tests/data/sample.txt")).unwrap());
let mut failed_files = Vec::<String>::new();
println!(""); // cargo test prepends tab to the first line, but not the rest
for entry in std::fs::read_dir(&path::Path::new("tests/data")).unwrap() {
let entry_path = entry.unwrap().path();
if entry_path.extension().unwrap().to_str().unwrap() == "toml" {
let mut specs: Vec<Spec> = vec![];
let toml_path_s = entry_path.clone().into_os_string().into_string().unwrap();
print!("testing {}... ", toml_path_s);
let _ = io::stdout().flush();
consume_specs_toml(&toml_path_s[..], &mut specs);
let expected_content_path = entry_path.with_extension("txt");
let expected_content_path_str = expected_content_path.clone().into_os_string().into_string().unwrap();
let mut expected_content = String::new();
match fs::File::open(&expected_content_path) {
Err(err) => { panic!("{}: can not open file {}: {}", toml_path_s, expected_content_path_str, err); }
Ok(ref mut f) => { f.read_to_string(&mut expected_content).unwrap(); }
};
let mut output = Vec::<u8>::new();
logselect(specs.clone(), sample_lines.clone(), &mut output);
if expected_content.as_bytes() == &output[..] {
println!("+");
} else {
failed_files.push(toml_path_s);
println!("fail\n\t{} spec(s) recognized\n--- expected ---\n{}\n--- actual ---", specs.len(), &expected_content[..]);
println!("{}", std::str::from_utf8(&output).unwrap());
println!("--- end ---");
}
}
}
if failed_files.len() > 0 {
println!("Summary of failed files:");
for ffn in failed_files { println!(" {}", ffn); }
panic!();
}
}
| process_spec | identifier_name |
main.rs | extern crate fixedbitset;
extern crate getopts;
extern crate libc;
extern crate regex;
extern crate toml;
extern crate num_cpus;
use regex::Regex;
use std::fs;
use std::io;
use std::io::{Read, Write, BufRead};
use std::path;
use std::sync;
use std::sync::mpsc;
use std::thread;
#[cfg(not(test))]
fn main()
{
let args : Vec<_> = std::env::args().collect();
// cli opts
let mut opts = getopts::Options::new();
opts.optmulti("f", "filters", "TOML file(s) with filter specifications", "specs.toml");
opts.optflag("h", "help", "print this help message and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!("{}", f) }
};
if matches.opt_present("h") {
let brief = format!("Usage: {} [options] [logfile]", args[0]);
println!("{}", opts.usage(&brief[..]));
println!("\nIf `logfile` is not given, the standard input will be used.\n\nWhen no filter spec options are provided,\nlines containing the word \"error\" are selected.");
return;
}
// parse toml config files
let mut specs: Vec<Spec> = vec![];
for filename in matches.opt_strs("f") {
consume_specs_toml(&filename[..], &mut specs);
}
if specs.len() == 0 {
let mut spec = Spec::new();
spec.start = Regex::new(r"\berror\b").ok();
specs.push(spec);
}
// perform
let lines = match matches.free.len() {
0 => { read_lines(&mut io::stdin()) }
1 => {
let path = path::Path::new(&matches.free[0]);
match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", matches.free[0], why.to_string()) },
Ok(ref mut f) => { read_lines(f) },
}
}
_ => { panic!("too many filename arguments ({}), expected just one", matches.free.len()) }
};
logselect(specs, lines, &mut io::stdout())
}
fn read_lines(reader: &mut io::Read) -> Vec<String>
{
let mut rv = Vec::new();
for line_res in io::BufReader::new(reader).lines() {
rv.push(line_res.unwrap());
}
return rv
}
fn logselect(specs: Vec<Spec>, lines: Vec<String>, writer: &mut io::Write)
{
let work = Work { lines : lines, specs : specs, index : sync::Mutex::new(0) };
let work = sync::Arc::new(work);
let (sender, receiver) = mpsc::channel();
let num_cpus = num_cpus::get();
let mut threads = Vec::with_capacity(num_cpus);
for _ in 0..threads.capacity() {
let sender = sender.clone();
let work = work.clone();
threads.push(thread::spawn(move || {
loop {
let portion_size = 100;
let i = {
let mut p = work.index.lock().unwrap();
let rv = *p;
*p += portion_size as isize;
rv as usize
};
if i >= work.lines.len() {
sender.send( (-1, -1) ).unwrap();
break;
}
for line_index in i..i+portion_size {
if line_index >= work.lines.len() { break }
for spec in &work.specs {
process_spec(&spec, line_index, &work.lines, &sender);
}
}
}
}));
}
let mut selected_indexes = fixedbitset::FixedBitSet::with_capacity(work.lines.len());
let mut num_finished = 0;
while num_finished < threads.len() {
match receiver.recv().unwrap() {
(-1,-1) => { num_finished += 1 }
(a,b) => for i in a..b {
selected_indexes.set(i as usize, true);
}
}
}
// output
let mut prev_index = 0;
for index in 0..work.lines.len() {
if selected_indexes[index] {
if prev_index > 0 {
if index + 1 - prev_index > 1 {
writer.write(b"\n.........\n\n").unwrap();
}
}
writer.write(work.lines[index].as_bytes()).unwrap();
writer.write(b"\n").unwrap();
prev_index = index + 1;
}
}
}
struct Work
{
lines: Vec<String>,
specs: Vec<Spec>,
index: sync::Mutex<isize>,
}
fn process_spec(spec: &Spec, line_index: usize, lines: &Vec<String>, sender: &mpsc::Sender<(isize, isize)>)
{
if let Some(ref rx) = spec.start {
if rx.is_match(&lines[line_index][..]) {
let sel_range = if spec.stop.is_some() || spec.whale.is_some() { try_select(&spec, lines, line_index as isize) } else { Some((line_index as isize,line_index as isize)) };
if let Some((a0,b0)) = sel_range {
let (a, b) = (a0 + spec.start_offset, b0 + spec.stop_offset);
// std::cmp should have this function
fn clamp<T>(a: T, x: T, b: T) -> T where T: Ord { std::cmp::min(std::cmp::max(a, x), b) }
let last_index = (lines.len() - 1) as isize;
let (a, b) = (clamp(0, a, last_index), clamp(0, b, last_index));
// if after applying offsets the range remains nonempty
if a0 <= b0 {
sender.send( (a, b+1) ).unwrap()
} else {
sender.send( (b, a+1) ).unwrap()
}
}
}
}
}
fn consume_specs_toml(filename: &str, specs: &mut Vec<Spec>)
|
#[derive(Clone)]
struct Spec
{
disable: bool,
start: Option<Regex>,
start_offset: isize,
stop: Option<Regex>,
stop_offset: isize,
whale: Option<Regex>,
backward: bool,
limit: isize,
}
impl Spec
{
fn new() -> Self
{
Spec { disable: false, start: None, start_offset: 0, stop: None, stop_offset: 0, whale: None, backward: false, limit: 1000 }
}
}
fn consume_specs_toml_table(table: &toml::value::Table, specs: &mut Vec<Spec>)
{
use toml::Value::*;
let mut spec = Spec::new();
for (key, value) in table {
match &key[..] {
"disable" => {
match *value {
Boolean(x) => { spec.disable = x }
_ => { panic!("`disable` key must be boolean") }
}
}
"start" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.start = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`start` key must be regex string") }
}
}
"start_offset" => { match *value {
Integer(ofs) => { spec.start_offset = ofs as isize; }
_ => { panic!("`start_offset` must be integer") }
} }
"stop" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.stop = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`stop` key must be regex string") }
}
}
"stop_offset" => { match *value {
Integer(ofs) => { spec.stop_offset = ofs as isize; }
_ => { panic!("`stop_offset` must be integer") }
} }
"while" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.whale = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`while` key must be regex string") }
}
}
"direction" => {
match *value {
String(ref s) => { match &s[..] {
"forward" | "fwd" | "down" => { spec.backward = false }
"backward" | "backwards" | "back" | "up" => { spec.backward = true }
ss => { panic!("`direction` value '{}' unrecognized (must be 'forward' or 'backward')", ss) }
} }
_ => { panic!("`direction` must be a string") }
}
}
"limit" => { match *value {
Integer(lim) if lim > 0 => { spec.limit = lim as isize; }
_ => { panic!("`limit` must be a positive integer") }
} }
_ => { match *value {
Table(ref t) => { consume_specs_toml_table(&t, specs) }
_ => { panic!("unrecognized key: {}", key) }
} }
}
}
if !spec.disable && spec.start.is_some() {
specs.push(spec);
}
}
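// A minimal sketch, assuming the same toml crate behavior relied on by
// consume_specs_toml above: every nested table becomes its own Spec via the
// recursive branch, while the root spec is kept only if it has a `start`.
#[test]
fn nested_tables_become_specs() {
    let doc = r#"
[errors]
start = '\berror\b'

[timeouts]
start = 'timeout'
direction = "up"
limit = 5
"#;
    let table = match doc.parse::<toml::Value>() {
        Ok(toml::Value::Table(t)) => t,
        _ => panic!("expected a table"),
    };
    let mut specs: Vec<Spec> = vec![];
    consume_specs_toml_table(&table, &mut specs);
    assert_eq!(specs.len(), 2);
    assert!(specs.iter().any(|s| s.backward && s.limit == 5));
}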
fn try_select(spec: &Spec, lines: &Vec<String>, index: isize) -> Option<(isize, isize)>
{
let step = if spec.backward { -1 } else { 1 };
let mut cursor = index + step;
while (cursor >= 0) && (cursor < lines.len() as isize) && (cursor - index).abs() <= spec.limit {
match spec.stop {
Some(ref rx) if rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor)) }
_ => {}
};
match spec.whale {
Some(ref rx) if !rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor-step)) }
_ => {}
};
cursor += step;
}
match spec.whale {
Some(_) => { return Some((index, cursor-step)) }
_ => { return None }
};
}
#[test]
fn test_all()
{
let sample_lines = read_lines(&mut fs::File::open(&path::Path::new("tests/data/sample.txt")).unwrap());
let mut failed_files = Vec::<String>::new();
println!(""); // cargo test prepends tab to the first line, but not the rest
for entry in std::fs::read_dir(&path::Path::new("tests/data")).unwrap() {
let entry_path = entry.unwrap().path();
if entry_path.extension().unwrap().to_str().unwrap() == "toml" {
let mut specs: Vec<Spec> = vec![];
let toml_path_s = entry_path.clone().into_os_string().into_string().unwrap();
print!("testing {}... ", toml_path_s);
let _ = io::stdout().flush();
consume_specs_toml(&toml_path_s[..], &mut specs);
let expected_content_path = entry_path.with_extension("txt");
let expected_content_path_str = expected_content_path.clone().into_os_string().into_string().unwrap();
let mut expected_content = String::new();
match fs::File::open(&expected_content_path) {
Err(err) => { panic!("{}: can not open file {}: {}", toml_path_s, expected_content_path_str, err); }
Ok(ref mut f) => { f.read_to_string(&mut expected_content).unwrap(); }
};
let mut output = Vec::<u8>::new();
logselect(specs.clone(), sample_lines.clone(), &mut output);
if expected_content.as_bytes() == &output[..] {
println!("+");
} else {
failed_files.push(toml_path_s);
println!("fail\n\t{} spec(s) recognized\n--- expected ---\n{}\n--- actual ---", specs.len(), &expected_content[..]);
println!("{}", std::str::from_utf8(&output).unwrap());
println!("--- end ---");
}
}
}
if failed_files.len() > 0 {
println!("Summary of failed files:");
for ffn in failed_files { println!(" {}", ffn); }
panic!();
}
}
| {
let path = path::Path::new(filename);
let mut file = match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", filename, why.to_string()) }
Ok(f) => f
};
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
let table = match content.parse::<toml::Value>() {
Ok(toml::Value::Table(t)) => { t }
Ok(_) => { panic!("parse error in {}: root value is not a table", filename) }
Err(toml_err) => { panic!("parse error in {}: {}", filename, toml_err) }
};
consume_specs_toml_table(&table, specs);
} | identifier_body |
main.rs | extern crate fixedbitset;
extern crate getopts;
extern crate libc;
extern crate regex;
extern crate toml;
extern crate num_cpus;
use regex::Regex;
use std::fs;
use std::io;
use std::io::{Read, Write, BufRead};
use std::path;
use std::sync;
use std::sync::mpsc;
use std::thread;
#[cfg(not(test))]
fn main()
{
let args : Vec<_> = std::env::args().collect();
// cli opts
let mut opts = getopts::Options::new();
opts.optmulti("f", "filters", "TOML file(s) with filter specifications", "specs.toml");
opts.optflag("h", "help", "print this help message and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!("{}", f) }
};
if matches.opt_present("h") {
let brief = format!("Usage: {} [options] [logfile]", args[0]);
println!("{}", opts.usage(&brief[..]));
println!("\nIf `logfile` is not given, the standard input will be used.\n\nWhen no filter spec options are provided,\nlines containing the word \"error\" are selected.");
return;
}
// parse toml config files
let mut specs: Vec<Spec> = vec![];
for filename in matches.opt_strs("f") {
consume_specs_toml(&filename[..], &mut specs);
}
if specs.len() == 0 {
let mut spec = Spec::new();
spec.start = Regex::new(r"\berror\b").ok();
specs.push(spec);
}
// perform
let lines = match matches.free.len() {
0 => { read_lines(&mut io::stdin()) }
1 => {
let path = path::Path::new(&matches.free[0]);
match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", matches.free[0], why.to_string()) },
Ok(ref mut f) => { read_lines(f) },
}
}
_ => { panic!("too many filename arguments ({}), expected just one", matches.free.len()) }
};
logselect(specs, lines, &mut io::stdout())
}
fn read_lines(reader: &mut io::Read) -> Vec<String>
{
let mut rv = Vec::new();
for line_res in io::BufReader::new(reader).lines() {
rv.push(line_res.unwrap());
}
return rv
}
fn logselect(specs: Vec<Spec>, lines: Vec<String>, writer: &mut io::Write)
{
let work = Work { lines : lines, specs : specs, index : sync::Mutex::new(0) };
let work = sync::Arc::new(work);
let (sender, receiver) = mpsc::channel();
let num_cpus = num_cpus::get();
let mut threads = Vec::with_capacity(num_cpus);
for _ in 0..threads.capacity() {
let sender = sender.clone();
let work = work.clone();
threads.push(thread::spawn(move || {
loop {
let portion_size = 100;
let i = {
let mut p = work.index.lock().unwrap();
let rv = *p;
*p += portion_size as isize;
rv as usize
};
if i >= work.lines.len() {
sender.send( (-1, -1) ).unwrap();
break;
}
for line_index in i..i+portion_size {
if line_index >= work.lines.len() { break }
for spec in &work.specs {
process_spec(&spec, line_index, &work.lines, &sender);
}
}
}
}));
}
let mut selected_indexes = fixedbitset::FixedBitSet::with_capacity(work.lines.len());
let mut num_finished = 0;
while num_finished < threads.len() {
match receiver.recv().unwrap() {
(-1,-1) => { num_finished += 1 }
(a,b) => for i in a..b {
selected_indexes.set(i as usize, true);
}
}
}
// output
let mut prev_index = 0;
for index in 0..work.lines.len() {
if selected_indexes[index] {
if prev_index > 0 {
if index + 1 - prev_index > 1 {
writer.write(b"\n.........\n\n").unwrap();
}
}
writer.write(work.lines[index].as_bytes()).unwrap();
writer.write(b"\n").unwrap();
prev_index = index + 1;
}
}
}
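// A minimal end-to-end sketch (not part of the original test suite):
// logselect writes selected lines to any io::Write, so collecting into a
// Vec<u8> is enough to observe its output deterministically.
#[test]
fn logselect_writes_selected_lines() {
    let mut spec = Spec::new();
    spec.start = Regex::new(r"\berror\b").ok();
    let lines: Vec<String> = ["ok", "an error here", "ok"]
        .iter().map(|s| s.to_string()).collect();
    let mut out = Vec::new();
    logselect(vec![spec], lines, &mut out);
    assert_eq!(std::str::from_utf8(&out).unwrap(), "an error here\n");
}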
struct Work
{
lines: Vec<String>,
specs: Vec<Spec>,
index: sync::Mutex<isize>,
}
fn process_spec(spec: &Spec, line_index: usize, lines: &Vec<String>, sender: &mpsc::Sender<(isize, isize)>)
{
if let Some(ref rx) = spec.start {
if rx.is_match(&lines[line_index][..]) {
let sel_range = if spec.stop.is_some() || spec.whale.is_some() { try_select(&spec, lines, line_index as isize) } else { Some((line_index as isize,line_index as isize)) };
if let Some((a0,b0)) = sel_range {
let (a, b) = (a0 + spec.start_offset, b0 + spec.stop_offset);
// std::cmp should have this function
fn clamp<T>(a: T, x: T, b: T) -> T where T: Ord { std::cmp::min(std::cmp::max(a, x), b) }
let last_index = (lines.len() - 1) as isize;
let (a, b) = (clamp(0, a, last_index), clamp(0, b, last_index));
// if after applying offsets the range remains nonempty
if a0 <= b0 {
sender.send( (a, b+1) ).unwrap()
} else {
sender.send( (b, a+1) ).unwrap()
}
}
}
}
}
fn consume_specs_toml(filename: &str, specs: &mut Vec<Spec>)
{
let path = path::Path::new(filename);
let mut file = match fs::File::open(&path) {
Err(why) => { panic!("can't open {}: {}", filename, why.to_string()) }
Ok(f) => f
};
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
let table = match content.parse::<toml::Value>() {
Ok(toml::Value::Table(t)) => { t }
Ok(_) => { panic!("parse error in {}: root value is not a table", filename) }
Err(toml_err) => { panic!("parse error in {}: {}", filename, toml_err) }
};
consume_specs_toml_table(&table, specs);
}
#[derive(Clone)]
struct Spec
{
disable: bool,
start: Option<Regex>,
start_offset: isize,
stop: Option<Regex>,
stop_offset: isize,
whale: Option<Regex>,
backward: bool,
limit: isize,
}
impl Spec
{
fn new() -> Self
{
Spec { disable: false, start: None, start_offset: 0, stop: None, stop_offset: 0, whale: None, backward: false, limit: 1000 }
}
}
fn consume_specs_toml_table(table: &toml::value::Table, specs: &mut Vec<Spec>)
{
use toml::Value::*;
let mut spec = Spec::new();
for (key, value) in table {
match &key[..] {
"disable" => {
match *value {
Boolean(x) => { spec.disable = x }
_ => { panic!("`disable` key must be boolean") }
}
}
"start" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.start = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`start` key must be regex string") }
}
}
"start_offset" => { match *value {
Integer(ofs) => { spec.start_offset = ofs as isize; }
_ => { panic!("`start_offset` must be integer") }
} }
"stop" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.stop = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`stop` key must be regex string") }
}
}
"stop_offset" => { match *value {
Integer(ofs) => { spec.stop_offset = ofs as isize; }
_ => { panic!("`stop_offset` must be integer") }
} }
"while" => {
match *value {
String(ref rxs) => {
match Regex::new(&rxs[..]) {
Ok(rx) => { spec.whale = Some(rx) }
Err(why) => { panic!("cant compile regex: {}", why.to_string()); }
}
}
_ => { panic!("`while` key must be regex string") }
}
}
"direction" => {
match *value {
String(ref s) => { match &s[..] {
"forward" | "fwd" | "down" => { spec.backward = false }
"backward" | "backwards" | "back" | "up" => { spec.backward = true }
ss => { panic!("`direction` value '{}' unrecognized (must be 'forward' or 'backward')", ss) }
} }
_ => { panic!("`direction` must be a string") }
}
}
"limit" => { match *value {
Integer(lim) if lim > 0 => { spec.limit = lim as isize; }
_ => { panic!("`limit` must be a positive integer") }
} }
_ => { match *value {
Table(ref t) => { consume_specs_toml_table(&t, specs) }
_ => { panic!("unrecognized key: {}", key) }
} }
}
}
if !spec.disable && spec.start.is_some() {
specs.push(spec);
}
}
fn try_select(spec: &Spec, lines: &Vec<String>, index: isize) -> Option<(isize, isize)>
{
let step = if spec.backward { -1 } else { 1 };
let mut cursor = index + step;
while (cursor >= 0) && (cursor < lines.len() as isize) && (cursor - index).abs() <= spec.limit {
match spec.stop {
Some(ref rx) if rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor)) }
_ => {}
};
match spec.whale {
Some(ref rx) if !rx.is_match(&lines[cursor as usize][..]) => { return Some((index, cursor-step)) }
_ => {}
};
cursor += step;
}
match spec.whale {
Some(_) => { return Some((index, cursor-step)) }
_ => { return None }
};
}
#[test]
fn test_all()
{
let sample_lines = read_lines(&mut fs::File::open(&path::Path::new("tests/data/sample.txt")).unwrap());
let mut failed_files = Vec::<String>::new();
println!(""); // cargo test prepends tab to the first line, but not the rest
for entry in std::fs::read_dir(&path::Path::new("tests/data")).unwrap() {
let entry_path = entry.unwrap().path();
if entry_path.extension().unwrap().to_str().unwrap() == "toml" {
let mut specs: Vec<Spec> = vec![];
let toml_path_s = entry_path.clone().into_os_string().into_string().unwrap();
print!("testing {}... ", toml_path_s);
let _ = io::stdout().flush();
consume_specs_toml(&toml_path_s[..], &mut specs);
let expected_content_path = entry_path.with_extension("txt");
let expected_content_path_str = expected_content_path.clone().into_os_string().into_string().unwrap();
let mut expected_content = String::new();
match fs::File::open(&expected_content_path) {
Err(err) => { panic!("{}: can not open file {}: {}", toml_path_s, expected_content_path_str, err); }
Ok(ref mut f) => { f.read_to_string(&mut expected_content).unwrap(); }
};
let mut output = Vec::<u8>::new();
logselect(specs.clone(), sample_lines.clone(), &mut output);
if expected_content.as_bytes() == &output[..] {
println!("+"); | }
}
}
if failed_files.len() > 0 {
println!("Summary of failed files:");
for ffn in failed_files { println!(" {}", ffn); }
panic!();
}
} | } else {
failed_files.push(toml_path_s);
println!("fail\n\t{} spec(s) recognized\n--- expected ---\n{}\n--- actual ---", specs.len(), &expected_content[..]);
println!("{}", std::str::from_utf8(&output).unwrap());
println!("--- end ---"); | random_line_split |
slice_test.rs | use std::mem;
fn print_slice(slice : &[i32]) {
for i in slice {
println!("{}", i);
}
}
fn move_in_array(mut arr : [i32; 8]) {
for i in &mut arr {
*i = *i + 1;
println!("{} ", i);
}
}
pub fn slice_size_len() {
println!("{}", mem::size_of::<&[i32]>());
// println!("{}", mem::size_of::<[i32]>());
let sth = [1,2,3];
let slice = &sth[0..3];
println!("slice.len");
println!("{}",slice.len());
assert!(slice.first() == Some(&1));
}
pub fn slice_split_first() {
let slice = &mut [1,2,3,4,5][..];
slice.split_first().map(|(fst_elem, rest_slice)| {
println!("{}", fst_elem);
println!("rest");
print_slice(rest_slice);
});
}
pub fn empty_slice() {
let empty_slice : &[i32] = &[];
println!("{}", empty_slice.len());
}
pub fn bracket_operator() {
let array_boxed = [Box::new(1), Box::new(2), Box::new(3)];
let slice = &array_boxed[0..3];
let v1 = &slice[2];
// why deref doesn't work here
assert!(&**v1 == &3);
}
pub fn swap() {
println!("swap");
let mut sth = [1,2,3,4,5];
let mut_slice = &mut sth[0..3];
mut_slice.swap(0, 2);
print_slice(&mut_slice);
}
pub fn reverse() {
println!("reverse");
let mut array = [1,2,3,4,5,6,7,8];
move_in_array(array);
drop(array);
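// Note: [i32; 8] is Copy, so both the call above and drop() operate on
// copies; the original `array` remains usable below.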
let slice = &mut array[0..];
slice.reverse();
print_slice(&slice);
}
pub fn array_iteration() {
println!("1");
let array = [2,3,4,5,6,7];
let mut iter = array.iter();
while let Some(wtf) = iter.next() {
println!("{}", wtf);
}
println!("2");
let mut array = [2,3,4,5,6,7];
let mut iter = array.iter_mut();
while let Some(wtf) = iter.next() {
*wtf = *wtf + 1;
println!("{}", wtf);
}
println!("3");
let array = [2,3,4,5,6,7];
for i in &array {
println!("{}", i);
}
println!("4");
let mut array = [2,3,4,5,6,7];
for i in &mut array {
*i = *i + 1;
println!("{}", i);
}
println!("5");
let array = [2,3,4,5,6,7];
let slice = & array[..];
for i in slice {
println!("{}", i);
}
println!("6");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice {
*i = *i + 1;
println!("{}", i);
}
println!("7");
let array = [2,3,4,5,6,7];
let slice = &array[..];
for i in slice.iter() {
println!("{}", i);
}
println!("8");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice.iter_mut() {
*i = *i + 1;
println!("{}", i);
}
println!("9");
let array = [2,3,4,5,6,7];
let slice = & array[..];
let mut iter = slice.iter();
while let Some(i) = iter.next() {
println!("{}", i);
}
println!("10");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
while let Some(i) = iter_mut.next() {
*i = *i + 1;
println!("{}", i);
}
}
pub fn slice_iterator_lifetime() | // without errors. Here, the Rust borrow checker allows i1 and i2
// to exist simultaneously. Hence, it is important for the API designer
// to ensure that i1 and i2 do not refer to the same content in the original
// slice
println!("{}", i1);
println!("{}", i2);
// if `i` borrowed from the generated iter_mut temporary instead of from
// slice, the following code would not compile
let i = slice.iter_mut().next().unwrap();
*i = *i + 1;
}
pub fn window() {
// Why can't we implement windows_mut?
// If we implemented windows_mut, we could get two
// mutable slices that share a portion of the elements
// in the original slice, which Rust's aliasing rules forbid.
let slice = &[1,2,3,4,5][..];
let wins = slice.windows(3);
for win in wins {
println!("start of a window");
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
// assert!(wins.next() == None);
let slice = &[1,2,3,4,5][..];
let mut wins = slice.windows(3);
for win in &mut wins {
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
assert!(wins.next() == None);
}
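// A hedged workaround sketch (assumes Rust 1.37+): one safe stand-in for the
// missing windows_mut is to view the slice through Cells, which permits
// overlapping windows because a Cell never hands out references to its data.
pub fn windows_of_cells() {
    let slice = &mut [1, 2, 3, 4, 5][..];
    let cells = std::cell::Cell::from_mut(slice).as_slice_of_cells();
    for win in cells.windows(3) {
        // overlapping "windows" are fine: mutation goes through get/set
        win[0].set(win[0].get() + 1);
    }
    println!("{:?}", slice);
}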
pub fn chunks() {
let slice = &[1,2,3,4,5][..];
let chunks = slice.chunks(3);
for chunk in chunks {
println!("a new chunk");
for i in chunk.iter() {
println!("{}", i);
}
}
}
pub fn chunks_mut() {
let slice = &mut [1,2,3,4,5][..];
let mut chunks_mut = slice.chunks_mut(3);
// chunk1 and chunk2 do not borrow from chunks_mut.
// Instead, both of them can be viewed as special (I don't know how to characterize them)
// borrows of slice that extend the lifetime of the slice.
// Let me try to explain why the following code works:
// Rust does not prevent you from creating mutable borrows; it only examines
// whether the thing being borrowed has been borrowed before.
// I suspect that Rust only inspects whether the object on the right-hand side
// of an assignment has been borrowed before, and does no additional checking.
// In the following two lines of code, Rust only checks whether chunks_mut has
// been borrowed before. Since the lifetimes of chunk1 and chunk2 are not tied to
// chunks_mut, chunks_mut is never considered borrowed.
// chunk1 and chunk2 actually borrow from slice (chunk1 is used on the third line).
// How is it possible to also borrow chunk2 from slice? Because Rust doesn't check it:
// when creating chunk2, Rust only examines whether chunks_mut has been borrowed; it
// does not inspect whether slice has been borrowed.
// Finally, successfully created mutable references can be freely used anywhere.
let chunk1 = chunks_mut.next().unwrap();
let chunk2 = chunks_mut.next().unwrap();
chunk1[0] = chunk1[0] + 1;
// if we try to borrow from slice, it won't pass compilation
// let sth = slice.windows(3);
chunk2[0] = chunk2[0] + 1;
for i in chunk1.iter() {
println!("{}", i);
}
for i in chunk2.iter() {
println!("{}", i);
}
let chunk = slice.chunks_mut(3).next().unwrap();
// drop(slice);
// let chunk1 = slice.chunks_mut(3).next().unwrap();
for i in chunk {
*i += 1;
println!("{}", i);
}
// I cannot do something like this: the temporary [1,2,3,4,5] would be dropped too early
// let slice_mut = (&mut [1,2,3,4,5][..]).chunks_mut(3).next().unwrap();
// for i in slice_mut {
// *i = *i + 1;
// println!("{}", i)
// }
// I can do something like this
let slice_mut = &mut [1,2,3,4,5][..];
// Note that chunk1 borrow from slice_mut, not from chunks
let chunk1 = slice_mut.chunks_mut(3).next().unwrap();
// Note that an attempt to create another chunk will fail, because
// the borrow checker knows that slice_mut has been borrowed before.
// Borrows can be transitive (I don't know if this interpretation is correct.)
// let chunk2 = slice_mut.chunks_mut(3).next().unwrap();
for i in chunk1 {
*i += 1;
println!("{}", i);
}
}
pub fn split_at_mut() {
let slice = &mut [1,2,3,4,5][..];
let (fst, snd) = slice.split_at_mut(2);
// We can do neither of these, because slice is still
// mutably borrowed through fst and snd, and that borrow
// extends to their last use below.
// let (haha, hehe) = slice.split_at_mut(2);
// slice[0] = slice[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
print_slice(slice);
slice[0] = slice[0] + 1;
}
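// A hedged sketch of how a split_at_mut-style function is typically
// implemented: the two halves are rebuilt from disjoint raw-pointer ranges,
// which is what lets a safe API return two &mut views into one slice.
fn my_split_at_mut<T>(s: &mut [T], mid: usize) -> (&mut [T], &mut [T]) {
    let len = s.len();
    assert!(mid <= len);
    let ptr = s.as_mut_ptr();
    unsafe {
        (
            std::slice::from_raw_parts_mut(ptr, mid),
            std::slice::from_raw_parts_mut(ptr.add(mid), len - mid),
        )
    }
}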
pub fn split_mut() {
let slice = &mut [1,2,3,4,5][..];
let split = slice.split_mut(|i| {
*i == 3
});
for chunk in split {
println!("a new chunk");
for i in chunk {
*i += 1;
println!("{}", i);
}
}
}
pub fn sort_and_search() {
let mut vec = vec!(2,1,2,4,3,2,3,2,1,3,2,3,4,5);
println!("{:?}", &vec);
let slice = &mut vec[..];
slice.sort_unstable();
let res = slice.binary_search(&3);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => {
println!("please insert 3 at index {}", i);
}
}
let res = slice.binary_search(&109);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => {
println!("please insert 109 at index {}", i);
vec.insert(i, 109);
}
}
let slice = &mut vec[..];
println!("printing the vec after insertion");
for i in slice {
println!("{}", i);
}
}
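// A hedged aside (assumes Rust 1.52+): partition_point computes the same
// insertion index directly, without matching on binary_search's Err.
pub fn insertion_point_demo() {
    let slice = &[1, 2, 3, 3, 5][..];
    let idx = slice.partition_point(|&x| x < 4);
    assert_eq!(idx, 4); // a 4 would be inserted just before the 5
}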
pub fn rotate_left() {
let slice = &mut [1,2,3,4,5][..];
slice.rotate_left(1);
println!("{:?}", slice);
}
fn manual_clone_from_slice<T : Clone>(dst : &mut [T], src : &[T]) {
assert!(dst.len() == src.len());
let len = dst.len();
for i in 0..len {
dst[i].clone_from(&src[i]);
}
}
pub fn clone_from_slice() {
let slice = &mut [1,2,3,4,5][..];
let another = &[2,3,4,5,6][..];
// slice.clone_from_slice(another);
manual_clone_from_slice(slice, another);
println!("{:?}", slice);
}
fn manual_copy_from_slice<T : Copy>(dst : &mut [T], src : &[T]) {
assert!(dst.len() == src.len());
let len = dst.len();
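// SAFETY: dst and src have the same length (asserted above), and as
// coexisting &mut and & borrows they cannot overlap, which is exactly
// what copy_nonoverlapping requires.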
unsafe {
std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), len);
}
}
pub fn copy_from_slice() {
let slice = &mut [1,2,3,4,5][..];
let another = &[2,3,4,5,6][..];
// slice.copy_from_slice(another);
manual_copy_from_slice(slice, another);
println!("{:?}", slice);
}
pub fn align_to() {
let vec : [u8 ; 7] = [1,2,3,4,5,6,7];
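// align_to splits the byte slice into an unaligned prefix, a maximal middle
// reinterpreted as u16, and an unaligned tail; where the splits fall depends
// on the runtime alignment of the buffer.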
let (one, two, three) = unsafe {(&vec[..]).align_to::<u16>()};
println!("one");
for i in one {
println!("{}", i);
}
println!("two");
for i in two {
println!("{}", i);
}
println!("three");
for i in three {
println!("{}", i);
}
} | {
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
// i1 does not borrow from iter_mut, it is treated as a
// borrow of slice, and extends the lifetime of slice
let i1 = iter_mut.next().unwrap();
// i2 is similar as i1
let i2 = iter_mut.next().unwrap();
*i1 = *i1 + 1;
// We can not borrow slice because i1 and i2 are still in use
// let mut another_iter_mut = slice.iter_mut();
*i2 = *i2 + 1;
// due to the previous reasons, we are free to modify i1 and i2 | identifier_body |
slice_test.rs | use std::mem;
fn print_slice(slice : &[i32]) {
for i in slice {
println!("{}", i);
}
}
fn move_in_array(mut arr : [i32; 8]) {
for i in &mut arr {
*i = *i + 1;
println!("{} ", i);
}
}
pub fn slice_size_len() {
println!("{}", mem::size_of::<&[i32]>());
// println!("{}", mem::size_of::<[i32]>());
let sth = [1,2,3];
let slice = &sth[0..3];
println!("slice.len");
println!("{}",slice.len());
assert!(slice.first() == Some(&1));
}
pub fn slice_split_first() {
let slice = &mut [1,2,3,4,5][..];
slice.split_first().map(|(fst_elem, rest_slice)| {
println!("{}", fst_elem);
println!("rest");
print_slice(rest_slice);
});
}
pub fn empty_slice() {
let empty_slice : &[i32] = &[];
println!("{}", empty_slice.len());
}
pub fn bracket_operator() {
let array_boxed = [Box::new(1), Box::new(2), Box::new(3)];
let slice = &array_boxed[0..3];
let v1 = &slice[2];
// why deref doesn't work here
assert!(&**v1 == &3);
}
pub fn swap() {
println!("swap");
let mut sth = [1,2,3,4,5];
let mut_slice = &mut sth[0..3];
mut_slice.swap(0, 2);
print_slice(&mut_slice);
}
pub fn reverse() {
println!("reverse");
let mut array = [1,2,3,4,5,6,7,8];
move_in_array(array);
drop(array);
let slice = &mut array[0..];
slice.reverse();
print_slice(&slice);
}
pub fn array_iteration() {
println!("1");
let array = [2,3,4,5,6,7];
let mut iter = array.iter();
while let Some(wtf) = iter.next() {
println!("{}", wtf);
}
println!("2");
let mut array = [2,3,4,5,6,7];
let mut iter = array.iter_mut();
while let Some(wtf) = iter.next() {
*wtf = *wtf + 1;
println!("{}", wtf);
}
println!("3");
let array = [2,3,4,5,6,7];
for i in &array {
println!("{}", i);
}
println!("4");
let mut array = [2,3,4,5,6,7];
for i in &mut array {
*i = *i + 1;
println!("{}", i);
}
println!("5");
let array = [2,3,4,5,6,7];
let slice = & array[..];
for i in slice {
println!("{}", i);
}
println!("6");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice {
*i = *i + 1;
println!("{}", i);
}
println!("7");
let array = [2,3,4,5,6,7];
let slice = &array[..];
for i in slice.iter() {
println!("{}", i);
}
println!("8");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice.iter_mut() {
*i = *i + 1;
println!("{}", i);
}
println!("9");
let array = [2,3,4,5,6,7];
let slice = & array[..];
let mut iter = slice.iter();
while let Some(i) = iter.next() {
println!("{}", i);
}
println!("10");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
while let Some(i) = iter_mut.next() {
*i = *i + 1;
println!("{}", i);
}
}
pub fn slice_iterator_lifetime() {
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
// i1 does not borrow from iter_mut, it is treated as a
// borrow of slice, and extends the lifetime of slice
let i1 = iter_mut.next().unwrap();
// i2 is similar as i1
let i2 = iter_mut.next().unwrap();
*i1 = *i1 + 1;
// We can not borrow slice because i1 and i2 are still in use
// let mut another_iter_mut = slice.iter_mut();
*i2 = *i2 + 1;
// due to the previous reasons, we are free to modify i1 and i2
// without errors. Here, the Rust borrow checker allows i1 and i2
// to exist simultaneously. Hence, it is important for the API designer
// to ensure that i1 and i2 do not refer to the same content in the original
// slice
println!("{}", i1);
println!("{}", i2);
// if `i` borrowed from the generated iter_mut temporary instead of from
// slice, the following code would not compile
let i = slice.iter_mut().next().unwrap();
*i = *i + 1;
}
pub fn window() {
// Why can't we implement windows_mut?
// If we implemented windows_mut, we could get two
// mutable slices that share a portion of the elements
// in the original slice, which Rust's aliasing rules forbid.
let slice = &[1,2,3,4,5][..];
let wins = slice.windows(3);
for win in wins {
println!("start of a window");
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
// assert!(wins.next() == None);
let slice = &[1,2,3,4,5][..];
let mut wins = slice.windows(3);
for win in &mut wins {
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
assert!(wins.next() == None);
}
pub fn chunks() {
let slice = &[1,2,3,4,5][..];
let chunks = slice.chunks(3);
for chunk in chunks {
println!("a new chunk");
for i in chunk.iter() {
println!("{}", i);
}
}
}
pub fn chunks_mut() {
let slice = &mut [1,2,3,4,5][..];
let mut chunks_mut = slice.chunks_mut(3);
// chunk1 and chunk2 do not borrow from chunks_mut.
// Instead, both of them can be viewed as special (I don't know how to characterize them)
// borrows of slice that extend the lifetime of the slice.
// Let me try to explain why the following code works:
// Rust does not prevent you from creating mutable borrows; it only examines
// whether the thing being borrowed has been borrowed before.
// I suspect that Rust only inspects whether the object on the right-hand side
// of an assignment has been borrowed before, and does no additional checking.
// In the following two lines of code, Rust only checks whether chunks_mut has
// been borrowed before. Since the lifetimes of chunk1 and chunk2 are not tied to
// chunks_mut, chunks_mut is never considered borrowed.
// chunk1 and chunk2 actually borrow from slice (chunk1 is used on the third line).
// How is it possible to also borrow chunk2 from slice? Because Rust doesn't check it:
// when creating chunk2, Rust only examines whether chunks_mut has been borrowed; it
// does not inspect whether slice has been borrowed.
// Finally, successfully created mutable references can be freely used anywhere.
let chunk1 = chunks_mut.next().unwrap();
let chunk2 = chunks_mut.next().unwrap();
chunk1[0] = chunk1[0] + 1;
// if we try to borrow from slice, it won't pass compilation
// let sth = slice.windows(3);
chunk2[0] = chunk2[0] + 1;
for i in chunk1.iter() {
println!("{}", i);
}
for i in chunk2.iter() {
println!("{}", i);
}
let chunk = slice.chunks_mut(3).next().unwrap();
// drop(slice);
// let chunk1 = slice.chunks_mut(3).next().unwrap();
for i in chunk {
*i += 1;
println!("{}", i);
}
// I cannot do something like this: the temporary [1,2,3,4,5] would be dropped too early
// let slice_mut = (&mut [1,2,3,4,5][..]).chunks_mut(3).next().unwrap();
// for i in slice_mut {
// *i = *i + 1;
// println!("{}", i)
// }
// I can do something like this
let slice_mut = &mut [1,2,3,4,5][..];
// Note that chunk1 borrow from slice_mut, not from chunks
let chunk1 = slice_mut.chunks_mut(3).next().unwrap();
// Note that an attempt to create another chunk will fail, because
// the borrow checker knows that slice_mut has been borrowed before.
// Borrows can be transitive (I don't know if this interpretation is correct.)
// let chunk2 = slice_mut.chunks_mut(3).next().unwrap();
for i in chunk1 {
*i += 1;
println!("{}", i);
}
}
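// A hedged sketch of the point made above: ChunksMut can hand out several
// live &mut items because its Item type is &'a mut [T], tied to the original
// slice's lifetime 'a rather than to &mut self, and the chunks are
// guaranteed disjoint.
fn first_two_chunks<'a>(s: &'a mut [i32]) -> (&'a mut [i32], &'a mut [i32]) {
    let mut it = s.chunks_mut(2);
    (it.next().unwrap(), it.next().unwrap())
}
pub fn chunks_mut_coexist() {
    let data = &mut [1, 2, 3, 4][..];
    let (a, b) = first_two_chunks(data);
    a[0] += 1;
    b[0] += 1;
    println!("{:?} {:?}", a, b);
}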
pub fn split_at_mut() {
let slice = &mut [1,2,3,4,5][..];
let (fst, snd) = slice.split_at_mut(2);
// We can do neither of these, because slice is still
// mutably borrowed through fst and snd, and that borrow
// extends to their last use below.
// let (haha, hehe) = slice.split_at_mut(2);
// slice[0] = slice[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
print_slice(slice);
slice[0] = slice[0] + 1;
}
pub fn split_mut() {
let slice = &mut [1,2,3,4,5][..];
let split = slice.split_mut(|i| {
*i == 3
});
for chunk in split {
println!("a new chunk");
for i in chunk {
*i += 1;
println!("{}", i);
}
}
}
pub fn sort_and_search() {
let mut vec = vec!(2,1,2,4,3,2,3,2,1,3,2,3,4,5);
println!("{:?}", &vec);
let slice = &mut vec[..];
slice.sort_unstable();
let res = slice.binary_search(&3);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => {
println!("please insert 3 at index {}", i);
}
}
let res = slice.binary_search(&109);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => |
}
let slice = &mut vec[..];
println!("printing the vec after insertion");
for i in slice {
println!("{}", i);
}
}
pub fn rotate_left() {
let slice = &mut [1,2,3,4,5][..];
slice.rotate_left(1);
println!("{:?}", slice);
}
fn manual_clone_from_slice<T : Clone>(dst : &mut [T], src : &[T]) {
assert!(dst.len() == src.len());
let len = dst.len();
for i in 0..len {
dst[i].clone_from(&src[i]);
}
}
pub fn clone_from_slice() {
let slice = &mut [1,2,3,4,5][..];
let another = &[2,3,4,5,6][..];
// slice.clone_from_slice(another);
manual_clone_from_slice(slice, another);
println!("{:?}", slice);
}
fn manual_copy_from_slice<T : Copy>(dst : &mut [T], src : &[T]) {
assert!(dst.len() == src.len());
let len = dst.len();
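// SAFETY: the assert guarantees equal lengths, and the borrow checker
// guarantees the &mut dst and &src regions are disjoint, which is what
// copy_nonoverlapping requires.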
unsafe {
std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), len);
}
}
pub fn copy_from_slice() {
let slice = &mut [1,2,3,4,5][..];
let another = &[2,3,4,5,6][..];
// slice.copy_from_slice(another);
manual_copy_from_slice(slice, another);
println!("{:?}", slice);
}
pub fn align_to() {
let vec : [u8 ; 7] = [1,2,3,4,5,6,7];
let (one, two, three) = unsafe {(&vec[..]).align_to::<u16>()};
println!("one");
for i in one {
println!("{}", i);
}
println!("two");
for i in two {
println!("{}", i);
}
println!("three");
for i in three {
println!("{}", i);
}
} | {
println!("please insert 109 at index {}", i);
vec.insert(i, 109);
} | conditional_block |
slice_test.rs | use std::mem;
fn print_slice(slice : &[i32]) {
for i in slice {
println!("{}", i);
}
}
fn move_in_array(mut arr : [i32; 8]) {
for i in &mut arr {
*i = *i + 1;
println!("{} ", i);
}
}
pub fn slice_size_len() {
println!("{}", mem::size_of::<&[i32]>());
// println!("{}", mem::size_of::<[i32]>());
let sth = [1,2,3];
let slice = &sth[0..3];
println!("slice.len");
println!("{}",slice.len());
assert!(slice.first() == Some(&1));
}
pub fn slice_split_first() {
let slice = &mut [1,2,3,4,5][..];
slice.split_first().map(|(fst_elem, rest_slice)| {
println!("{}", fst_elem);
println!("rest");
print_slice(rest_slice);
});
}
pub fn empty_slice() {
let empty_slice : &[i32] = &[];
println!("{}", empty_slice.len());
}
pub fn bracket_operator() {
let array_boxed = [Box::new(1), Box::new(2), Box::new(3)];
let slice = &array_boxed[0..3];
let v1 = &slice[2];
// a single deref isn't enough here: v1 is a &Box<i32>, so it takes
// ** to reach the i32 before we can compare against &3
assert!(&**v1 == &3);
}
pub fn swap() {
println!("swap");
let mut sth = [1,2,3,4,5];
let mut_slice = &mut sth[0..3];
mut_slice.swap(0, 2);
print_slice(&mut_slice);
}
pub fn reverse() {
println!("reverse");
let mut array = [1,2,3,4,5,6,7,8];
move_in_array(array);
drop(array);
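// array is still usable below: [i32; 8] is Copy, so move_in_array and
// drop each consumed their own copy of it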
let slice = &mut array[0..];
slice.reverse();
print_slice(&slice);
}
pub fn array_iteration() {
println!("1");
let array = [2,3,4,5,6,7];
let mut iter = array.iter();
while let Some(wtf) = iter.next(){
println!("{}", wtf);
}
println!("2");
let mut array = [2,3,4,5,6,7];
let mut iter = array.iter_mut();
while let Some(wtf) = iter.next() {
*wtf = *wtf + 1;
println!("{}", wtf);
}
println!("3");
let array = [2,3,4,5,6,7];
for i in &array {
println!("{}", i);
}
println!("4");
let mut array = [2,3,4,5,6,7];
for i in &mut array {
*i = *i + 1;
println!("{}", i);
}
println!("5");
let array = [2,3,4,5,6,7];
let slice = & array[..];
for i in slice {
println!("{}", i);
}
println!("6");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice {
*i = *i + 1;
println!("{}", i);
}
println!("7");
let array = [2,3,4,5,6,7];
let slice = &array[..];
for i in slice.iter() {
println!("{}", i);
}
println!("8");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice.iter_mut() {
*i = *i + 1;
println!("{}", i);
}
println!("9");
let array = [2,3,4,5,6,7];
let slice = & array[..];
let mut iter = slice.iter();
while let Some(i) = iter.next() {
println!("{}", i);
}
println!("10");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
while let Some(i) = iter_mut.next() {
*i = *i + 1;
println!("{}", i);
}
}
pub fn slice_iterator_lifetime() {
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
// i1 does not borrow from iter_mut; it is treated as
// a borrow of slice, and extends the lifetime of slice
let i1 = iter_mut.next().unwrap();
// i2 is similar as i1
let i2 = iter_mut.next().unwrap();
*i1 = *i1 + 1;
// We cannot borrow slice because i1 and i2 are still in use
// let mut another_iter_mut = slice.iter_mut();
*i2 = *i2 + 1;
// for the previous reasons, we are free to modify i1 and i2
// without errors. Here, the Rust borrow checker allows i1 and i2
// to exist simultaneously. Hence, it is important for the API designer
// to ensure that i1 and i2 do not refer to the same content in the original
// slice
println!("{}", i1);
println!("{}", i2);
// if i borrowed from the generated iter_mut temporary variable,
// the following code would not compile
let i = slice.iter_mut().next().unwrap();
*i = *i + 1;
}
pub fn window() {
// Why we can't implement windows_mut?
// if we implement windows_mut, we can get two
// mutable slices that share a portion of the elements
// in the original slice.
let slice = &[1,2,3,4,5][..];
let wins = slice.windows(3);
for win in wins {
println!("start of a window");
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
// assert!(wins.next() == None);
let slice = &[1,2,3,4,5][..];
let mut wins = slice.windows(3);
for win in &mut wins {
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
assert!(wins.next() == None);
}
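// Hedged sketch, not part of the original file: overlapping &mut windows
// would alias, so the closest safe emulation reborrows one window at a
// time by index; each borrow ends before the next iteration starts.
pub fn windows_mut_emulation() {
let slice = &mut [1,2,3,4,5][..];
let win_len = 3; // assumed to be <= slice.len()
for start in 0..=(slice.len() - win_len) {
// this reborrow is dropped at the end of the iteration
let win = &mut slice[start..start + win_len];
win[0] += 1;
println!("{:?}", win);
}
}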
pub fn chunks() {
let slice = &[1,2,3,4,5][..];
let chunks = slice.chunks(3);
for chunk in chunks {
println!("a new chunk");
for i in chunk.iter() {
println!("{}", i);
}
}
}
pub fn chunks_mut() {
let slice = &mut [1,2,3,4,5][..];
let mut chunks_mut = slice.chunks_mut(3);
// chunk1 and chunk2 do not borrow from chunks_mut.
// instead, both of them can be viewed as special (I don't know how to characterize them)
// borrows from slice, and they extend the lifetime of the slice.
// Let me try to explain why the following code works:
// Rust does not prevent you from creating mutable borrows; Rust only examines
// whether the thing being borrowed has been borrowed before.
// I suspect that Rust only inspects whether the object on the right-hand side
// of an assignment has been borrowed before, and it will not bother doing additional
// checking.
// In the following two lines of code, Rust only tries to inspect whether chunks_mut has
// been borrowed before. Since the lifetimes of both chunk1 and chunk2 are not tied to
// chunks_mut, chunks_mut is never considered borrowed.
// chunk1 and chunk2 actually borrow from slice, since chunk1 is used by the third line.
// how is it possible to also borrow chunk2 from slice? Because Rust doesn't check it. When creating
// the chunk2 variable, Rust only examines whether chunks_mut has been borrowed; it will not
// inspect whether slice has been borrowed.
// Finally, a successfully created mutable reference can be freely used anywhere.
let chunk1 = chunks_mut.next().unwrap();
let chunk2 = chunks_mut.next().unwrap();
chunk1[0] = chunk1[0] + 1;
// if we try to borrow from slice, it won't pass compilation
// let sth = slice.windows(3);
chunk2[0] = chunk2[0] + 1;
for i in chunk1.iter() {
println!("{}", i);
}
for i in chunk2.iter() {
println!("{}", i);
}
let chunk = slice.chunks_mut(3).next().unwrap();
// drop(slice);
// let chunk1 = slice.chunks_mut(3).next().unwrap();
for i in chunk {
*i += 1;
println!("{}", i);
}
// I cannot do something like this: the temporary [1,2,3,4,5] will be dropped
// let slice_mut = (&mut [1,2,3,4,5][..]).chunks_mut(3).next().unwrap();
// for i in slice_mut {
// *i = *i + 1;
// println!("{}", i)
// }
// I can do something like this
let slice_mut = &mut [1,2,3,4,5][..];
// Note that chunk1 borrows from slice_mut, not from the chunks iterator
let chunk1 = slice_mut.chunks_mut(3).next().unwrap();
// Note that an attempt to create another chunk will fail, because
// the borrow checker knows that slice_mut is still borrowed.
// the reborrow seems to be transitive (I don't know if this interpretation is correct)
// let chunk2 = slice_mut.chunks_mut(3).next().unwrap();
for i in chunk1 {
*i += 1;
println!("{}", i);
}
}
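// Hedged follow-up sketch: each yielded chunk borrows from the original
// slice rather than from the iterator, which is why all chunks can be
// collected and kept alive at the same time.
pub fn chunks_mut_collect() {
let slice = &mut [1,2,3,4,5][..];
let chunks: Vec<&mut [i32]> = slice.chunks_mut(3).collect();
for chunk in chunks {
chunk[0] += 1;
println!("{:?}", chunk);
}
}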
pub fn split_at_mut() {
let slice = &mut [1,2,3,4,5][..];
let (fst, snd) = slice.split_at_mut(2);
// We can't do either of these here, because slice stays borrowed
// by fst and snd; the borrow lasts until their final use below
// (not to the end of the function, since NLL).
// let (haha, hehe) = slice.split_at_mut(2);
// slice[0] = slice[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
print_slice(slice);
slice[0] = slice[0] + 1;
}
pub fn split_mut() {
let slice = &mut [1,2,3,4,5][..];
let split = slice.split_mut(|i| {
*i == 3
});
for chunk in split {
println!("a new chunk");
for i in chunk {
*i += 1;
println!("{}", i);
}
}
}
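// Hedged extra sketch: splitn_mut caps the number of pieces at n, and the
// last piece keeps the remainder of the slice, later matches and all.
pub fn splitn_mut_demo() {
let slice = &mut [1,2,3,1,2,3][..];
for part in slice.splitn_mut(2, |i| *i == 3) {
if let Some(first) = part.first_mut() {
*first += 10;
}
println!("{:?}", part);
}
}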
pub fn | () {
let mut vec = vec!(2,1,2,4,3,2,3,2,1,3,2,3,4,5);
println!("{:?}", &vec);
let slice = &mut vec[..];
slice.sort_unstable();
let res = slice.binary_search(&3);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => {
println!("please insert 3 at index {}", i);
}
}
let res = slice.binary_search(&109);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => {
println!("please insert 109 at index {}", i);
vec.insert(i, 109);
}
}
let slice = &mut vec[..];
println!("printing the vec after insertion");
for i in slice {
println!("{}", i);
}
}
pub fn rotate_left() {
let slice = &mut [1,2,3,4,5][..];
slice.rotate_left(1);
println!("{:?}", slice);
}
fn manual_clone_from_slice<T : Clone>(dst : &mut [T], src : &[T]) {
assert!(dst.len() == src.len());
let len = dst.len();
for i in 0..len {
dst[i].clone_from(&src[i]);
}
}
pub fn clone_from_slice() {
let slice = &mut [1,2,3,4,5][..];
let another = &[2,3,4,5,6][..];
// slice.clone_from_slice(another);
manual_clone_from_slice(slice, another);
println!("{:?}", slice);
}
fn manual_copy_from_slice<T : Copy>(dst : &mut [T], src : &[T]) {
assert!(dst.len() == src.len());
let len = dst.len();
unsafe {
std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), len);
}
}
pub fn copy_from_slice() {
let slice = &mut [1,2,3,4,5][..];
let another = &[2,3,4,5,6][..];
// slice.copy_from_slice(another);
manual_copy_from_slice(slice, another);
println!("{:?}", slice);
}
pub fn align_to() {
let vec : [u8 ; 7] = [1,2,3,4,5,6,7];
let (one, two, three) = unsafe {(&vec[..]).align_to::<u16>()};
println!("one");
for i in one {
println!("{}", i);
}
println!("two");
for i in two {
println!("{}", i);
}
println!("three");
for i in three {
println!("{}", i);
}
} | sort_and_search | identifier_name |
slice_test.rs | use std::mem;
fn print_slice(slice : &[i32]) {
for i in slice{
println!("{}", i);
}
}
fn move_in_array(mut arr : [i32; 8]) {
for i in &mut arr { | }
pub fn slice_size_len() {
println!("{}", mem::size_of::<&[i32]>());
// println!("{}", mem::size_of::<[i32]>());
let sth = [1,2,3];
let slice = &sth[0..3];
println!("slice.len");
println!("{}",slice.len());
assert!(slice.first() == Some(&1));
}
pub fn slice_split_first() {
let slice = &mut [1,2,3,4,5][..];
slice.split_first().map(|(fst_elem, rest_slice)| {
println!("{}", fst_elem);
println!("rest");
print_slice(rest_slice);
});
}
pub fn empty_slice() {
let empty_slice : &[i32] = &[];
println!("{}", empty_slice.len());
}
pub fn bracket_operator() {
let array_boxed = [Box::new(1), Box::new(2), Box::new(3)];
let slice = &array_boxed[0..3];
let v1 = &slice[2];
// a single deref isn't enough here: v1 is a &Box<i32>, so it takes
// ** to reach the i32 before we can compare against &3
assert!(&**v1 == &3);
}
pub fn swap() {
println!("swap");
let mut sth = [1,2,3,4,5];
let mut_slice = &mut sth[0..3];
mut_slice.swap(0, 2);
print_slice(&mut_slice);
}
pub fn reverse() {
println!("reverse");
let mut array = [1,2,3,4,5,6,7,8];
move_in_array(array);
drop(array);
let slice = &mut array[0..];
slice.reverse();
print_slice(&slice);
}
pub fn array_iteration() {
println!("1");
let array = [2,3,4,5,6,7];
let mut iter = array.iter();
while let Some(wtf) = iter.next(){
println!("{}", wtf);
}
println!("2");
let mut array = [2,3,4,5,6,7];
let mut iter = array.iter_mut();
while let Some(wtf) = iter.next() {
*wtf = *wtf + 1;
println!("{}", wtf);
}
println!("3");
let array = [2,3,4,5,6,7];
for i in &array {
println!("{}", i);
}
println!("4");
let mut array = [2,3,4,5,6,7];
for i in &mut array {
*i = *i + 1;
println!("{}", i);
}
println!("5");
let array = [2,3,4,5,6,7];
let slice = & array[..];
for i in slice {
println!("{}", i);
}
println!("6");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice {
*i = *i + 1;
println!("{}", i);
}
println!("7");
let array = [2,3,4,5,6,7];
let slice = &array[..];
for i in slice.iter() {
println!("{}", i);
}
println!("8");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
for i in slice.iter_mut() {
*i = *i + 1;
println!("{}", i);
}
println!("9");
let array = [2,3,4,5,6,7];
let slice = & array[..];
let mut iter = slice.iter();
while let Some(i) = iter.next() {
println!("{}", i);
}
println!("10");
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
while let Some(i) = iter_mut.next() {
*i = *i + 1;
println!("{}", i);
}
}
pub fn slice_iterator_lifetime() {
let mut array = [2,3,4,5,6,7];
let slice = &mut array[..];
let mut iter_mut = slice.iter_mut();
// i1 does not borrow from iter_mut; it is treated as
// a borrow of slice, and extends the lifetime of slice
let i1 = iter_mut.next().unwrap();
// i2 is similar as i1
let i2 = iter_mut.next().unwrap();
*i1 = *i1 + 1;
// We cannot borrow slice because i1 and i2 are still in use
// let mut another_iter_mut = slice.iter_mut();
*i2 = *i2 + 1;
// for the previous reasons, we are free to modify i1 and i2
// without errors. Here, the Rust borrow checker allows i1 and i2
// to exist simultaneously. Hence, it is important for the API designer
// to ensure that i1 and i2 do not refer to the same content in the original
// slice
println!("{}", i1);
println!("{}", i2);
// if i borrowed from the generated iter_mut temporary variable,
// the following code would not compile
let i = slice.iter_mut().next().unwrap();
*i = *i + 1;
}
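// Hedged sketch: split_first_mut hands back two disjoint mutable pieces
// at once, so the API itself guarantees head and tail never alias.
pub fn split_first_mut_demo() {
let slice = &mut [2,3,4][..];
if let Some((first, rest)) = slice.split_first_mut() {
*first += 1;
rest[0] += 1;
println!("{} {:?}", first, rest);
}
}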
pub fn window() {
// Why we can't implement windows_mut?
// if we implement windows_mut, we can get two
// mutable slices that share a portion of the elements
// in the original slice.
let slice = &[1,2,3,4,5][..];
let wins = slice.windows(3);
for win in wins {
println!("start of a window");
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
// assert!(wins.next() == None);
let slice = &[1,2,3,4,5][..];
let mut wins = slice.windows(3);
for win in &mut wins {
for i in win {
println!("{}", i);
}
println!("{}", &win[0]);
}
assert!(wins.next() == None);
}
pub fn chunks() {
let slice = &[1,2,3,4,5][..];
let chunks = slice.chunks(3);
for chunk in chunks {
println!("a new chunk");
for i in chunk.iter() {
println!("{}", i);
}
}
}
pub fn chunks_mut() {
let slice = &mut [1,2,3,4,5][..];
let mut chunks_mut = slice.chunks_mut(3);
// chunk1 and chunk2 do not borrow from chunks_mut.
// instead, both of them can be viewed as special (I don't know how to characterize them)
// borrows from slice, and they extend the lifetime of the slice.
// Let me try to explain why the following code works:
// Rust does not prevent you from creating mutable borrows; Rust only examines
// whether the thing being borrowed has been borrowed before.
// I suspect that Rust only inspects whether the object on the right-hand side
// of an assignment has been borrowed before, and it will not bother doing additional
// checking.
// In the following two lines of code, Rust only tries to inspect whether chunks_mut has
// been borrowed before. Since the lifetimes of both chunk1 and chunk2 are not tied to
// chunks_mut, chunks_mut is never considered borrowed.
// chunk1 and chunk2 actually borrow from slice, since chunk1 is used by the third line.
// how is it possible to also borrow chunk2 from slice? Because Rust doesn't check it. When creating
// the chunk2 variable, Rust only examines whether chunks_mut has been borrowed; it will not
// inspect whether slice has been borrowed.
// Finally, a successfully created mutable reference can be freely used anywhere.
let chunk1 = chunks_mut.next().unwrap();
let chunk2 = chunks_mut.next().unwrap();
chunk1[0] = chunk1[0] + 1;
// if we try to borrow from slice, it won't pass compilation
// let sth = slice.windows(3);
chunk2[0] = chunk2[0] + 1;
for i in chunk1.iter() {
println!("{}", i);
}
for i in chunk2.iter() {
println!("{}", i);
}
let chunk = slice.chunks_mut(3).next().unwrap();
// drop(slice);
// let chunk1 = slice.chunks_mut(3).next().unwrap();
for i in chunk {
*i += 1;
println!("{}", i);
}
// I cannot do something like this: the temporary [1,2,3,4,5] will be dropped
// let slice_mut = (&mut [1,2,3,4,5][..]).chunks_mut(3).next().unwrap();
// for i in slice_mut {
// *i = *i + 1;
// println!("{}", i)
// }
// I can do something like this
let slice_mut = &mut [1,2,3,4,5][..];
// Note that chunk1 borrows from slice_mut, not from the chunks iterator
let chunk1 = slice_mut.chunks_mut(3).next().unwrap();
// Note that an attempt to create another chunk will fail, because
// the borrow checker knows that slice_mut is still borrowed.
// the reborrow seems to be transitive (I don't know if this interpretation is correct)
// let chunk2 = slice_mut.chunks_mut(3).next().unwrap();
for i in chunk1 {
*i += 1;
println!("{}", i);
}
}
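// Hedged extra (assumes the chunks_exact API from the standard library):
// the short tail is skipped during iteration and exposed via remainder().
pub fn chunks_exact_demo() {
let slice = &[1,2,3,4,5][..];
let mut it = slice.chunks_exact(2);
for chunk in &mut it {
println!("{:?}", chunk);
}
println!("remainder: {:?}", it.remainder());
}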
pub fn split_at_mut() {
let slice = &mut [1,2,3,4,5][..];
let (fst, snd) = slice.split_at_mut(2);
// We can't do either of these here, because slice stays borrowed
// by fst and snd; the borrow lasts until their final use below
// (not to the end of the function, since NLL).
// let (haha, hehe) = slice.split_at_mut(2);
// slice[0] = slice[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
fst[0] = fst[0] + 1;
snd[0] = snd[0] + 1;
print_slice(slice);
slice[0] = slice[0] + 1;
}
pub fn split_mut() {
let slice = &mut [1,2,3,4,5][..];
let split = slice.split_mut(|i| {
*i == 3
});
for chunk in split {
println!("a new chunk");
for i in chunk {
*i += 1;
println!("{}", i);
}
}
}
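// Hedged extra: rsplit_mut yields the same pieces as split_mut but walks
// them from the back of the slice toward the front.
pub fn rsplit_mut_demo() {
let slice = &mut [1,2,3,1,2,3][..];
for part in slice.rsplit_mut(|i| *i == 3) {
println!("{:?}", part); // the empty piece after a trailing match prints as []
}
}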
pub fn sort_and_search() {
let mut vec = vec!(2,1,2,4,3,2,3,2,1,3,2,3,4,5);
println!("{:?}", &vec);
let slice = &mut vec[..];
slice.sort_unstable();
let res = slice.binary_search(&3);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => {
println!("please insert 3 at index {}", i);
}
}
let res = slice.binary_search(&109);
match res {
Ok(i) => {
println!("found {} from index {}", slice[i], i);
},
Err(i) => {
println!("please insert 109 at index {}", i);
vec.insert(i, 109);
}
}
let slice = &mut vec[..];
println!("printing the vec after insertion");
for i in slice {
println!("{}", i);
}
}
pub fn rotate_left() {
let slice = &mut [1,2,3,4,5][..];
slice.rotate_left(1);
println!("{:?}", slice);
}
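// Hedged companion: rotate_right(k) is equivalent to rotate_left(len - k);
// no element is lost, only the starting point moves.
pub fn rotate_right_demo() {
let slice = &mut [1,2,3,4,5][..];
slice.rotate_right(1);
println!("{:?}", slice); // expected: [5, 1, 2, 3, 4]
}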
fn manual_clone_from_slice<T : Clone>(dst : &mut [T], src : &[T]) {
assert!(dst.len() == src.len());
let len = dst.len();
for i in 0..len {
dst[i].clone_from(&src[i]);
}
}
pub fn clone_from_slice() {
let slice = &mut [1,2,3,4,5][..];
let another = &[2,3,4,5,6][..];
// slice.clone_from_slice(another);
manual_clone_from_slice(slice, another);
println!("{:?}", slice);
}
fn manual_copy_from_slice<T : Copy>(dst : &mut [T], src : &[T]) {
assert!(dst.len() == src.len());
let len = dst.len();
unsafe {
std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), len);
}
}
pub fn copy_from_slice() {
let slice = &mut [1,2,3,4,5][..];
let another = &[2,3,4,5,6][..];
// slice.copy_from_slice(another);
manual_copy_from_slice(slice, another);
println!("{:?}", slice);
}
pub fn align_to() {
let vec : [u8 ; 7] = [1,2,3,4,5,6,7];
let (one, two, three) = unsafe {(&vec[..]).align_to::<u16>()};
println!("one");
for i in one {
println!("{}", i);
}
println!("two");
for i in two {
println!("{}", i);
}
println!("three");
for i in three {
println!("{}", i);
}
} | *i = *i + 1;
println!("{} ", i);
} | random_line_split |
session.rs | use chrono::{DateTime, Duration, Utc, Timelike, TimeZone};
use clacks_crypto::CSRNG;
use clacks_crypto::symm::AuthKey;
use clacks_mtproto::{AnyBoxedSerialize, BareSerialize, BoxedSerialize, ConstructorNumber, IntoBoxed, mtproto};
use clacks_mtproto::mtproto::wire::outbound_encrypted::OutboundEncrypted;
use clacks_mtproto::mtproto::wire::outbound_raw::OutboundRaw;
use byteorder::{LittleEndian, ByteOrder, ReadBytesExt};
use either::Either;
use rand::Rng;
use std::{cmp, io, mem};
use Result;
#[derive(Debug, Fail)]
#[fail(display = "telegram error code {}", _0)]
pub struct ErrorCode(i32);
#[derive(Debug, Fail)]
pub enum SessionFailure {
#[fail(display = "an auth key was needed, but none had been adopted")]
NoAuthKey,
#[fail(display = "a salt was needed, but none were available")]
NoSalts,
#[fail(display = "incorrect session ID received")]
BadSessionId,
#[fail(display = "incorrect salt received")]
BadSalt,
}
fn next_message_id() -> i64 {
let time = Utc::now();
let timestamp = time.timestamp() as i64;
let nano = time.nanosecond() as i64;
((timestamp << 32) | (nano & 0x_7fff_fffc))
}
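// Hedged illustration, not part of the original API: undoing the packing
// above, the high 32 bits hold the unix seconds and the low bits hold the
// nanosecond count masked to a multiple of four.
#[allow(dead_code)]
fn unpack_message_id(message_id: i64) -> (i64, i64) {
(message_id >> 32, message_id & 0x_7fff_fffc)
}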
#[derive(Debug, Clone)]
pub struct AppId {
pub api_id: i32,
pub api_hash: String,
}
#[derive(Debug, Clone)]
struct Salt {
valid_since: DateTime<Utc>,
valid_until: DateTime<Utc>,
salt: i64,
}
impl From<mtproto::FutureSalt> for Salt {
fn from(fs: mtproto::FutureSalt) -> Self {
Salt {
valid_since: Utc.timestamp(*fs.valid_since() as i64, 0),
valid_until: Utc.timestamp(*fs.valid_until() as i64, 0),
salt: *fs.salt(),
}
}
}
#[derive(Debug, Clone)]
pub struct Session {
session_id: i64,
temp_session_id: Option<i64>,
server_salts: Vec<Salt>,
seq_no: i32,
auth_key: Option<AuthKey>,
to_ack: Vec<i64>,
app_id: AppId,
}
#[derive(Debug, Default)]
pub struct PlainPayload {
dummy: (),
}
#[derive(Debug, Default)]
pub struct EncryptedPayload {
session_id: Option<i64>,
}
pub struct MessageBuilder<P> {
message_id: i64,
payload: mtproto::TLObject,
payload_opts: P,
}
pub type EitherMessageBuilder = MessageBuilder<Either<PlainPayload, EncryptedPayload>>;
impl<PO: Default> MessageBuilder<PO> {
fn with_message_id<P>(message_id: i64, payload: P) -> Self
where P: AnyBoxedSerialize,
{
let payload = mtproto::TLObject::new(payload).into();
MessageBuilder {
message_id, payload,
payload_opts: Default::default(),
}
}
pub fn new<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
Self::with_message_id(next_message_id(), payload)
}
}
impl<PO> MessageBuilder<PO> {
pub fn message_id(&self) -> i64 {
self.message_id
}
pub fn constructor(&self) -> ConstructorNumber {
self.payload.serialize_boxed().0
}
fn seq_no_from<SNF>(&self, seq_no_func: SNF) -> i32
where SNF: FnOnce(bool) -> i32
{
seq_no_func(is_content_message(self.constructor()))
}
pub fn into_basic_message<SNF>(self, seq_no_func: SNF) -> mtproto::manual::basic_message::BasicMessage
where SNF: FnOnce(bool) -> i32,
{
mtproto::manual::basic_message::BasicMessage {
msg_id: self.message_id,
seqno: self.seq_no_from(seq_no_func),
body: self.payload.into(),
}
}
}
impl MessageBuilder<PlainPayload> {
pub fn into_outbound_raw(self) -> OutboundRaw {
OutboundRaw {
auth_key_id: 0,
message_id: self.message_id,
payload: self.payload.into(),
}
}
pub fn lift(self) -> EitherMessageBuilder {
MessageBuilder {
message_id: self.message_id,
payload: self.payload,
payload_opts: Either::Left(self.payload_opts),
}
}
}
impl MessageBuilder<EncryptedPayload> {
pub fn with_session_id(mut self, session_id: i64) -> Self {
self.payload_opts.session_id = Some(session_id);
self
}
pub fn into_outbound_encrypted<SNF>(self, salt: i64, session_id: i64, seq_no_func: SNF) -> OutboundEncrypted
where SNF: FnOnce(bool) -> i32,
{
OutboundEncrypted {
salt,
session_id: self.payload_opts.session_id.unwrap_or(session_id),
message_id: self.message_id,
seq_no: self.seq_no_from(seq_no_func),
payload: self.payload.into(),
}
}
pub fn lift(self) -> EitherMessageBuilder {
MessageBuilder {
message_id: self.message_id,
payload: self.payload,
payload_opts: Either::Right(self.payload_opts),
}
}
}
impl EitherMessageBuilder {
pub fn plain<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
MessageBuilder::<PlainPayload>::new(payload).lift()
}
pub fn encrypted<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
MessageBuilder::<EncryptedPayload>::new(payload).lift()
}
pub fn separate(self) -> Either<MessageBuilder<PlainPayload>, MessageBuilder<EncryptedPayload>> {
let MessageBuilder { message_id, payload, payload_opts: e } = self;
match e {
Either::Left(payload_opts) => Either::Left(MessageBuilder { message_id, payload, payload_opts }),
Either::Right(payload_opts) => Either::Right(MessageBuilder { message_id, payload, payload_opts }),
}
}
}
fn is_content_message(n: ConstructorNumber) -> bool {
// XXX: there has to be a better way
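// (0x62d6b459 and 0x73f1f8dc appear to be the MTProto msgs_ack and
// msg_container constructors, which are not content-related)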
match n {
ConstructorNumber(0x62d6b459) |
ConstructorNumber(0x73f1f8dc) => false,
_ => true,
}
}
#[derive(Debug, Clone)]
pub struct InboundMessage {
pub message_id: i64,
pub payload: Vec<u8>,
pub was_encrypted: bool,
pub seq_no: Option<i32>,
}
impl Session {
pub fn new(app_id: AppId) -> Session {
Session {
app_id,
session_id: CSRNG.gen(),
temp_session_id: None,
server_salts: vec![],
seq_no: 0,
auth_key: None,
to_ack: vec![],
}
}
fn | (&mut self) -> i32 {
let ret = self.seq_no | 1;
self.seq_no += 2;
ret
}
fn next_seq_no(&mut self, content_message: bool) -> i32 {
if content_message {
self.next_content_seq_no()
} else {
self.seq_no
}
}
fn latest_server_salt(&mut self) -> Result<i64> {
let time = {
let last_salt = match self.server_salts.last() {
Some(s) => s,
None => Err(SessionFailure::NoSalts)?,
};
// Make sure at least one salt is retained.
cmp::min(Utc::now(), last_salt.valid_until.clone())
};
self.server_salts.retain(|s| &s.valid_until >= &time);
Ok(self.server_salts.first().unwrap().salt)
}
pub fn add_server_salts<I>(&mut self, salts: I)
where I: IntoIterator<Item = mtproto::FutureSalt>,
{
self.server_salts.extend(salts.into_iter().map(Into::into));
self.server_salts.sort_by(|a, b| a.valid_since.cmp(&b.valid_since));
}
pub fn adopt_key(&mut self, authorization_key: AuthKey) {
self.auth_key = Some(authorization_key);
}
pub fn ack_id(&mut self, id: i64) {
self.to_ack.push(id);
}
fn pack_message_container<PO, I>(&mut self, payloads: I) -> mtproto::manual::MessageContainer
where I: IntoIterator<Item = MessageBuilder<PO>>,
{
let messages: Vec<_> = payloads.into_iter()
.map(|m| m.into_basic_message(|c| self.next_seq_no(c)))
.collect();
mtproto::manual::msg_container::MsgContainer {
messages: messages.into(),
}.into_boxed()
}
fn fresh_auth_key(&self) -> Result<AuthKey> {
match self.auth_key {
Some(ref key) => Ok(key.clone()),
None => Err(SessionFailure::NoAuthKey.into()),
}
}
fn pack_payload_with_acks<PO: Default>(&mut self, payload: MessageBuilder<PO>) -> MessageBuilder<PO> {
if self.to_ack.is_empty() {
return payload;
};
let acks = MessageBuilder::new(mtproto::msgs_ack::MsgsAck {
msg_ids: mem::replace(&mut self.to_ack, vec![]).into(),
}.into_boxed());
MessageBuilder::new(self.pack_message_container(vec![payload, acks]))
}
pub fn serialize_plain_message(&mut self, message: MessageBuilder<PlainPayload>) -> Result<Vec<u8>> {
Ok(message.into_outbound_raw().bare_serialized_bytes()?)
}
pub fn serialize_encrypted_message(&mut self, message: MessageBuilder<EncryptedPayload>) -> Result<Vec<u8>> {
let key = self.fresh_auth_key()?;
let message = self.pack_payload_with_acks(message)
.into_outbound_encrypted(
self.latest_server_salt()?, self.session_id,
|c| self.next_seq_no(c));
Ok(key.encrypt_message(message)?)
}
pub fn serialize_message(&mut self, message: EitherMessageBuilder) -> Result<Vec<u8>> {
match message.separate() {
Either::Left(m) => self.serialize_plain_message(m),
Either::Right(m) => self.serialize_encrypted_message(m),
}
}
pub fn bind_auth_key(&mut self, perm_key: AuthKey, expires_in: Duration) -> Result<MessageBuilder<EncryptedPayload>> {
let temp_key = self.fresh_auth_key()?;
let message_id = next_message_id();
let (session_id, bind_message) = perm_key.bind_temp_auth_key(&temp_key, expires_in, message_id)?;
self.temp_session_id = Some(session_id);
Ok({
MessageBuilder::with_message_id(message_id, bind_message)
.with_session_id(session_id)
})
}
pub fn process_message(&self, message: &[u8]) -> Result<InboundMessage> {
if message.len() == 4 {
Err(ErrorCode(LittleEndian::read_i32(&message)))?
} else if message.len() < 8 {
Err(format_err!("strange message length: {:?}", message))?
}
let mut cursor = io::Cursor::new(message);
let auth_key_id = cursor.read_i64::<LittleEndian>()?;
if auth_key_id != 0 {
cursor.into_inner();
return self.decrypt_message(message);
}
let message_id = cursor.read_i64::<LittleEndian>()?;
let len = cursor.read_i32::<LittleEndian>()? as usize;
let pos = cursor.position() as usize;
cursor.into_inner();
if message.len() < pos + len {
Err(::clacks_crypto::symm::AuthenticationFailure::BadLength)?
}
let payload = &message[pos..pos+len];
Ok(InboundMessage {
message_id: message_id,
payload: payload.into(),
was_encrypted: false,
seq_no: None,
})
}
fn decrypt_message(&self, message: &[u8]) -> Result<InboundMessage> {
let (inbound, payload) = self.fresh_auth_key()?.decrypt_and_verify_message(message)?;
if inbound.session_id != self.session_id && Some(inbound.session_id) != self.temp_session_id {
Err(SessionFailure::BadSessionId)?
}
if !self.server_salts.iter().any(|s| s.salt == inbound.salt) {
Err(SessionFailure::BadSalt)?
}
Ok(InboundMessage {
payload,
message_id: inbound.message_id,
was_encrypted: true,
seq_no: Some(inbound.seq_no),
})
}
}
pub fn future_salt_from_negotiated_salt(salt: i64) -> mtproto::FutureSalt {
let time = Utc::now();
mtproto::future_salt::FutureSalt {
salt,
valid_since: time.timestamp() as i32,
valid_until: (time + Duration::minutes(10)).timestamp() as i32,
}.into_boxed()
}
| next_content_seq_no | identifier_name |
session.rs | use chrono::{DateTime, Duration, Utc, Timelike, TimeZone};
use clacks_crypto::CSRNG;
use clacks_crypto::symm::AuthKey;
use clacks_mtproto::{AnyBoxedSerialize, BareSerialize, BoxedSerialize, ConstructorNumber, IntoBoxed, mtproto};
use clacks_mtproto::mtproto::wire::outbound_encrypted::OutboundEncrypted;
use clacks_mtproto::mtproto::wire::outbound_raw::OutboundRaw;
use byteorder::{LittleEndian, ByteOrder, ReadBytesExt};
use either::Either;
use rand::Rng;
use std::{cmp, io, mem};
use Result;
#[derive(Debug, Fail)]
#[fail(display = "telegram error code {}", _0)]
pub struct ErrorCode(i32);
#[derive(Debug, Fail)]
pub enum SessionFailure {
#[fail(display = "an auth key was needed, but none had been adopted")]
NoAuthKey,
#[fail(display = "a salt was needed, but none were available")]
NoSalts,
#[fail(display = "incorrect session ID received")]
BadSessionId,
#[fail(display = "incorrect salt received")]
BadSalt,
}
fn next_message_id() -> i64 {
let time = Utc::now();
let timestamp = time.timestamp() as i64;
let nano = time.nanosecond() as i64;
((timestamp << 32) | (nano & 0x_7fff_fffc))
}
#[derive(Debug, Clone)]
pub struct AppId {
pub api_id: i32,
pub api_hash: String,
}
#[derive(Debug, Clone)]
struct Salt {
valid_since: DateTime<Utc>,
valid_until: DateTime<Utc>,
salt: i64,
}
impl From<mtproto::FutureSalt> for Salt {
fn from(fs: mtproto::FutureSalt) -> Self {
Salt {
valid_since: Utc.timestamp(*fs.valid_since() as i64, 0),
valid_until: Utc.timestamp(*fs.valid_until() as i64, 0),
salt: *fs.salt(),
}
}
}
#[derive(Debug, Clone)]
pub struct Session {
session_id: i64,
temp_session_id: Option<i64>,
server_salts: Vec<Salt>,
seq_no: i32,
auth_key: Option<AuthKey>,
to_ack: Vec<i64>,
app_id: AppId,
}
#[derive(Debug, Default)]
pub struct PlainPayload {
dummy: (),
}
#[derive(Debug, Default)]
pub struct EncryptedPayload {
session_id: Option<i64>,
}
pub struct MessageBuilder<P> {
message_id: i64,
payload: mtproto::TLObject,
payload_opts: P,
}
pub type EitherMessageBuilder = MessageBuilder<Either<PlainPayload, EncryptedPayload>>;
impl<PO: Default> MessageBuilder<PO> {
fn with_message_id<P>(message_id: i64, payload: P) -> Self
where P: AnyBoxedSerialize,
{
let payload = mtproto::TLObject::new(payload).into();
MessageBuilder {
message_id, payload,
payload_opts: Default::default(),
}
}
pub fn new<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
Self::with_message_id(next_message_id(), payload)
}
}
impl<PO> MessageBuilder<PO> {
pub fn message_id(&self) -> i64 {
self.message_id
}
pub fn constructor(&self) -> ConstructorNumber {
self.payload.serialize_boxed().0
}
fn seq_no_from<SNF>(&self, seq_no_func: SNF) -> i32
where SNF: FnOnce(bool) -> i32
{
seq_no_func(is_content_message(self.constructor()))
}
pub fn into_basic_message<SNF>(self, seq_no_func: SNF) -> mtproto::manual::basic_message::BasicMessage
where SNF: FnOnce(bool) -> i32,
{
mtproto::manual::basic_message::BasicMessage {
msg_id: self.message_id,
seqno: self.seq_no_from(seq_no_func),
body: self.payload.into(),
}
}
}
impl MessageBuilder<PlainPayload> {
pub fn into_outbound_raw(self) -> OutboundRaw {
OutboundRaw {
auth_key_id: 0,
message_id: self.message_id,
payload: self.payload.into(),
}
}
pub fn lift(self) -> EitherMessageBuilder {
MessageBuilder {
message_id: self.message_id,
payload: self.payload,
payload_opts: Either::Left(self.payload_opts),
}
}
}
impl MessageBuilder<EncryptedPayload> {
pub fn with_session_id(mut self, session_id: i64) -> Self {
self.payload_opts.session_id = Some(session_id);
self
}
pub fn into_outbound_encrypted<SNF>(self, salt: i64, session_id: i64, seq_no_func: SNF) -> OutboundEncrypted
where SNF: FnOnce(bool) -> i32,
{
OutboundEncrypted {
salt,
session_id: self.payload_opts.session_id.unwrap_or(session_id),
message_id: self.message_id,
seq_no: self.seq_no_from(seq_no_func),
payload: self.payload.into(),
}
}
pub fn lift(self) -> EitherMessageBuilder {
MessageBuilder {
message_id: self.message_id,
payload: self.payload,
payload_opts: Either::Right(self.payload_opts),
}
}
}
impl EitherMessageBuilder {
pub fn plain<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
MessageBuilder::<PlainPayload>::new(payload).lift()
}
pub fn encrypted<P>(payload: P) -> Self
where P: AnyBoxedSerialize,
{
MessageBuilder::<EncryptedPayload>::new(payload).lift()
}
pub fn separate(self) -> Either<MessageBuilder<PlainPayload>, MessageBuilder<EncryptedPayload>> {
let MessageBuilder { message_id, payload, payload_opts: e } = self;
match e {
Either::Left(payload_opts) => Either::Left(MessageBuilder { message_id, payload, payload_opts }),
Either::Right(payload_opts) => Either::Right(MessageBuilder { message_id, payload, payload_opts }),
}
}
}
fn is_content_message(n: ConstructorNumber) -> bool {
// XXX: there has to be a better way
match n {
ConstructorNumber(0x62d6b459) |
ConstructorNumber(0x73f1f8dc) => false,
_ => true,
}
}
#[derive(Debug, Clone)]
pub struct InboundMessage {
pub message_id: i64,
pub payload: Vec<u8>,
pub was_encrypted: bool,
pub seq_no: Option<i32>,
}
impl Session {
pub fn new(app_id: AppId) -> Session {
Session {
app_id,
session_id: CSRNG.gen(),
temp_session_id: None,
server_salts: vec![],
seq_no: 0,
auth_key: None,
to_ack: vec![],
}
}
fn next_content_seq_no(&mut self) -> i32 {
let ret = self.seq_no | 1;
self.seq_no += 2;
ret
}
fn next_seq_no(&mut self, content_message: bool) -> i32 {
if content_message {
self.next_content_seq_no()
} else {
self.seq_no
}
}
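// Hedged reading of the two methods above: content-related messages take
// the next odd seq_no and advance the counter by two, while service
// messages (acks and the like) reuse the current even value unchanged.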
fn latest_server_salt(&mut self) -> Result<i64> {
let time = {
let last_salt = match self.server_salts.last() {
Some(s) => s,
None => Err(SessionFailure::NoSalts)?,
};
// Make sure at least one salt is retained.
cmp::min(Utc::now(), last_salt.valid_until.clone())
};
self.server_salts.retain(|s| &s.valid_until >= &time);
Ok(self.server_salts.first().unwrap().salt)
}
pub fn add_server_salts<I>(&mut self, salts: I)
where I: IntoIterator<Item = mtproto::FutureSalt>,
{
self.server_salts.extend(salts.into_iter().map(Into::into));
self.server_salts.sort_by(|a, b| a.valid_since.cmp(&b.valid_since));
}
pub fn adopt_key(&mut self, authorization_key: AuthKey) {
self.auth_key = Some(authorization_key);
}
pub fn ack_id(&mut self, id: i64) {
self.to_ack.push(id);
}
fn pack_message_container<PO, I>(&mut self, payloads: I) -> mtproto::manual::MessageContainer
where I: IntoIterator<Item = MessageBuilder<PO>>,
{
let messages: Vec<_> = payloads.into_iter()
.map(|m| m.into_basic_message(|c| self.next_seq_no(c)))
.collect();
mtproto::manual::msg_container::MsgContainer {
messages: messages.into(),
}.into_boxed()
}
fn fresh_auth_key(&self) -> Result<AuthKey> {
match self.auth_key {
Some(ref key) => Ok(key.clone()),
None => Err(SessionFailure::NoAuthKey.into()),
}
}
fn pack_payload_with_acks<PO: Default>(&mut self, payload: MessageBuilder<PO>) -> MessageBuilder<PO> {
if self.to_ack.is_empty() {
return payload;
};
let acks = MessageBuilder::new(mtproto::msgs_ack::MsgsAck {
msg_ids: mem::replace(&mut self.to_ack, vec![]).into(),
}.into_boxed());
MessageBuilder::new(self.pack_message_container(vec![payload, acks]))
}
pub fn serialize_plain_message(&mut self, message: MessageBuilder<PlainPayload>) -> Result<Vec<u8>> {
Ok(message.into_outbound_raw().bare_serialized_bytes()?)
}
pub fn serialize_encrypted_message(&mut self, message: MessageBuilder<EncryptedPayload>) -> Result<Vec<u8>> {
let key = self.fresh_auth_key()?;
let message = self.pack_payload_with_acks(message)
.into_outbound_encrypted(
self.latest_server_salt()?, self.session_id,
|c| self.next_seq_no(c));
Ok(key.encrypt_message(message)?)
}
pub fn serialize_message(&mut self, message: EitherMessageBuilder) -> Result<Vec<u8>> {
match message.separate() { | Either::Right(m) => self.serialize_encrypted_message(m),
}
}
pub fn bind_auth_key(&mut self, perm_key: AuthKey, expires_in: Duration) -> Result<MessageBuilder<EncryptedPayload>> {
let temp_key = self.fresh_auth_key()?;
let message_id = next_message_id();
let (session_id, bind_message) = perm_key.bind_temp_auth_key(&temp_key, expires_in, message_id)?;
self.temp_session_id = Some(session_id);
Ok({
MessageBuilder::with_message_id(message_id, bind_message)
.with_session_id(session_id)
})
}
pub fn process_message(&self, message: &[u8]) -> Result<InboundMessage> {
if message.len() == 4 {
Err(ErrorCode(LittleEndian::read_i32(&message)))?
} else if message.len() < 8 {
Err(format_err!("strange message length: {:?}", message))?
}
let mut cursor = io::Cursor::new(message);
let auth_key_id = cursor.read_i64::<LittleEndian>()?;
if auth_key_id != 0 {
cursor.into_inner();
return self.decrypt_message(message);
}
let message_id = cursor.read_i64::<LittleEndian>()?;
let len = cursor.read_i32::<LittleEndian>()? as usize;
let pos = cursor.position() as usize;
cursor.into_inner();
if message.len() < pos + len {
Err(::clacks_crypto::symm::AuthenticationFailure::BadLength)?
}
let payload = &message[pos..pos+len];
Ok(InboundMessage {
message_id: message_id,
payload: payload.into(),
was_encrypted: false,
seq_no: None,
})
}
fn decrypt_message(&self, message: &[u8]) -> Result<InboundMessage> {
let (inbound, payload) = self.fresh_auth_key()?.decrypt_and_verify_message(message)?;
if inbound.session_id != self.session_id && Some(inbound.session_id) != self.temp_session_id {
Err(SessionFailure::BadSessionId)?
}
if !self.server_salts.iter().any(|s| s.salt == inbound.salt) {
Err(SessionFailure::BadSalt)?
}
Ok(InboundMessage {
payload,
message_id: inbound.message_id,
was_encrypted: true,
seq_no: Some(inbound.seq_no),
})
}
}
pub fn future_salt_from_negotiated_salt(salt: i64) -> mtproto::FutureSalt {
let time = Utc::now();
mtproto::future_salt::FutureSalt {
salt,
valid_since: time.timestamp() as i32,
valid_until: (time + Duration::minutes(10)).timestamp() as i32,
}.into_boxed()
} | Either::Left(m) => self.serialize_plain_message(m), | random_line_split |
typechecking.rs | use std::cell::RefCell;
use std::rc::Rc;
use std::collections::HashMap;
use std::fmt;
use std::fmt::Write;
/*
use std::collections::hash_set::Union;
use std::iter::Iterator;
use itertools::Itertools;
*/
use ast;
use util::ScopeStack;
use symbol_table::{SymbolSpec, SymbolTable};
pub type TypeName = Rc<String>;
type TypeResult<T> = Result<T, String>;
#[derive(Debug, PartialEq, Clone)]
enum Type {
Const(TConst),
Var(TypeName),
Func(Vec<Type>),
}
#[derive(Debug, PartialEq, Clone)]
enum TConst {
Unit,
Nat,
StringT,
//Custom(String)
}
#[derive(Debug, PartialEq, Clone)]
struct Scheme {
names: Vec<TypeName>,
ty: Type,
}
impl fmt::Display for Scheme {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "∀{:?}. {:?}", self.names, self.ty)
}
}
#[derive(Debug, PartialEq, Clone)]
struct Substitution(HashMap<TypeName, Type>);
impl Substitution {
fn empty() -> Substitution {
Substitution(HashMap::new())
}
}
#[derive(Debug, PartialEq, Clone)]
struct TypeEnv(HashMap<TypeName, Scheme>);
impl TypeEnv {
fn default() -> TypeEnv {
TypeEnv(HashMap::new())
}
fn populate_from_symbols(&mut self, symbol_table: &SymbolTable) {
for (name, symbol) in symbol_table.values.iter() {
if let SymbolSpec::Func(ref type_names) = symbol.spec {
let mut ch: char = 'a';
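// hedged note: this hands out one fresh single-letter type variable per
// parameter; past 26 parameters ch would walk beyond 'z' into punctuation.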
let mut names = vec![];
for _ in type_names.iter() {
names.push(Rc::new(format!("{}", ch)));
ch = ((ch as u8) + 1) as char;
}
let sigma = Scheme {
names: names.clone(),
ty: Type::Func(names.into_iter().map(|n| Type::Var(n)).collect())
};
self.0.insert(name.clone(), sigma);
}
}
}
}
pub struct TypeContext<'a> {
values: ScopeStack<'a, TypeName, Type>,
symbol_table_handle: Rc<RefCell<SymbolTable>>,
global_env: TypeEnv
}
impl<'a> TypeContext<'a> {
pub fn new(symbol_table_handle: Rc<RefCell<SymbolTable>>) -> TypeContext<'static> {
TypeContext { values: ScopeStack::new(None), global_env: TypeEnv::default(), symbol_table_handle }
}
pub fn debug_types(&self) -> String {
let mut output = format!("Type environment\n");
for (name, scheme) in &self.global_env.0 {
write!(output, "{} -> {}\n", name, scheme).unwrap();
}
output
}
pub fn type_check_ast(&mut self, input: &ast::AST) -> Result<String, String> {
let ref symbol_table = self.symbol_table_handle.borrow();
self.global_env.populate_from_symbols(symbol_table);
let output = self.global_env.infer_block(&input.0)?;
Ok(format!("{:?}", output))
}
}
impl TypeEnv {
fn instantiate(&mut self, sigma: Scheme) -> Type {
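// hedged note: a complete instantiation would substitute fresh type
// variables for the bound names; this version just unwraps the body type.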
match sigma {
Scheme { ty, .. } => ty,
}
}
fn generate(&mut self, ty: Type) -> Scheme {
Scheme {
names: vec![], //TODO incomplete
ty
}
}
fn infer_block(&mut self, block: &Vec<ast::Statement>) -> TypeResult<Type> {
let mut output = Type::Const(TConst::Unit);
for statement in block {
output = self.infer_statement(statement)?;
}
Ok(output)
}
fn infer_statement(&mut self, statement: &ast::Statement) -> TypeResult<Type> {
match statement {
ast::Statement::ExpressionStatement(expr) => self.infer_expr(expr),
ast::Statement::Declaration(decl) => self.infer_decl(decl)
}
}
fn infer_decl(&mut self, decl: &ast::Declaration) -> TypeResult<Type> {
use ast::Declaration::*;
match decl {
Binding { name, expr, .. } => {
let ty = self.infer_expr(expr)?;
let sigma = self.generate(ty);
self.0.insert(name.clone(), sigma);
},
_ => (),
}
Ok(Type::Const(TConst::Unit))
}
fn infer_expr(&mut self, expr: &ast::Expression) -> TypeResult<Type> {
match expr {
ast::Expression(expr, Some(anno)) => {
self.infer_exprtype(expr)
},
ast::Expression(expr, None) => {
self.infer_exprtype(expr)
}
}
}
fn infer_exprtype(&mut self, expr: &ast::ExpressionType) -> TypeResult<Type> {
use self::TConst::*;
use ast::ExpressionType::*;
Ok(match expr {
NatLiteral(_) => Type::Const(Nat),
StringLiteral(_) => Type::Const(StringT),
BinExp(op, lhs, rhs) => {
return Err(format!("NOTDONE"))
},
Call { f, arguments } => {
return Err(format!("NOTDONE"))
},
Value(name) => {
let s = match self.0.get(name) {
Some(sigma) => sigma.clone(),
None => return Err(format!("Unknown variable: {}", name))
};
self.instantiate(s)
},
_ => Type::Const(Unit)
})
}
}
/* GIANT TODO - use the rust im crate, unless I make this code way less haskell-ish after it's done
*/
/*
pub type TypeResult<T> = Result<T, String>;
*/
/* TODO this should just check the name against a map, and that map should be pre-populated with
* types */
/*
impl parsing::TypeName {
fn to_type(&self) -> TypeResult<Type> {
use self::parsing::TypeSingletonName;
use self::parsing::TypeName::*;
use self::Type::*; use self::TConstOld::*;
Ok(match self {
Tuple(_) => return Err(format!("Tuples not yet implemented")),
Singleton(name) => match name {
TypeSingletonName { name,.. } => match &name[..] {
/*
"Nat" => Const(Nat),
"Int" => Const(Int),
"Float" => Const(Float),
"Bool" => Const(Bool),
"String" => Const(StringT),
*/
n => Const(Custom(n.to_string()))
}
}
})
}
}
*/
/*
impl TypeContext {
pub fn type_check_ast(&mut self, ast: &parsing::AST) -> TypeResult<String> {
let ref block = ast.0;
let mut infer = Infer::default();
let env = TypeEnvironment::default();
let output = infer.infer_block(block, &env);
match output {
Ok(s) => Ok(format!("{:?}", s)),
Err(s) => Err(format!("Error: {:?}", s))
}
}
}
// this is the equivalent of the Haskell Infer monad
#[derive(Debug, Default)]
struct Infer {
_idents: u32,
}
#[derive(Debug)]
enum InferError {
CannotUnify(MonoType, MonoType),
OccursCheckFailed(Rc<String>, MonoType),
UnknownIdentifier(Rc<String>),
Custom(String),
}
type InferResult<T> = Result<T, InferError>;
impl Infer {
fn fresh(&mut self) -> MonoType {
let i = self._idents;
self._idents += 1;
let name = Rc::new(format!("{}", ('a' as u8 + i as u8) as char)); // use the counter so names actually vary
MonoType::Var(name)
}
fn unify(&mut self, a: MonoType, b: MonoType) -> InferResult<Substitution> {
use self::InferError::*; use self::MonoType::*;
Ok(match (a, b) {
(Const(ref a), Const(ref b)) if a == b => Substitution::new(),
(Var(ref name), ref var) => Substitution::bind_variable(name, var),
(ref var, Var(ref name)) => Substitution::bind_variable(name, var),
(Function(box a1, box b1), Function(box a2, box b2)) => {
let s1 = self.unify(a1, a2)?;
let s2 = self.unify(b1.apply_substitution(&s1), b2.apply_substitution(&s1))?;
s1.merge(s2)
},
(a, b) => return Err(CannotUnify(a, b))
})
}
fn infer_block(&mut self, block: &Vec<parsing::Statement>, env: &TypeEnvironment) -> InferResult<MonoType> {
use self::parsing::Statement;
let mut ret = MonoType::Const(TypeConst::Unit);
for statement in block.iter() {
ret = match statement {
Statement::ExpressionStatement(expr) => {
let (sub, ty) = self.infer_expr(expr, env)?;
//TODO handle substitution monadically
ty
}
Statement::Declaration(decl) => MonoType::Const(TypeConst::Unit),
}
}
Ok(ret)
}
fn infer_expr(&mut self, expr: &parsing::Expression, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::Expression;
match expr {
Expression(e, Some(anno)) => self.infer_annotated_expr(e, anno, env),
/*
let anno_ty = anno.to_type()?;
let ty = self.infer_exprtype(&e)?;
self.unify(ty, anno_ty)
},
*/
Expression(e, None) => self.infer_exprtype(e, env)
}
}
fn infer_annotated_expr(&mut self, expr: &parsing::ExpressionType, anno: &parsing::TypeName, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
Err(InferError::Custom(format!("exprtype not done: {:?}", expr)))
}
fn infer_exprtype(&mut self, expr: &parsing::ExpressionType, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::ExpressionType::*;
use self::TypeConst::*;
Ok(match expr {
NatLiteral(_) => (Substitution::new(), MonoType::Const(Nat)),
FloatLiteral(_) => (Substitution::new(), MonoType::Const(Float)),
StringLiteral(_) => (Substitution::new(), MonoType::Const(StringT)),
BoolLiteral(_) => (Substitution::new(), MonoType::Const(Bool)),
Value(name) => match env.lookup(name) {
Some(sigma) => {
let tau = self.instantiate(&sigma);
(Substitution::new(), tau)
},
None => return Err(InferError::UnknownIdentifier(name.clone())),
},
e => return Err(InferError::Custom(format!("Type inference for {:?} not done", e)))
})
}
fn instantiate(&mut self, sigma: &PolyType) -> MonoType {
let ref ty: MonoType = sigma.1;
let mut subst = Substitution::new();
for name in sigma.0.iter() {
let fresh_mvar = self.fresh();
let new = Substitution::bind_variable(name, &fresh_mvar);
subst = subst.merge(new);
}
ty.apply_substitution(&subst)
}
}
*/
/* OLD STUFF DOWN HERE */
/*
impl TypeContext {
fn infer_block(&mut self, statements: &Vec<parsing::Statement>) -> TypeResult<Type> {
let mut ret_type = Type::Const(TConst::Unit);
for statement in statements {
ret_type = self.infer_statement(statement)?;
}
Ok(ret_type)
}
fn infer_statement(&mut self, statement: &parsing::Statement) -> TypeResult<Type> {
use self::parsing::Statement::*;
match statement {
ExpressionStatement(expr) => self.infer(expr),
Declaration(decl) => self.add_declaration(decl),
}
}
fn add_declaration(&mut self, decl: &parsing::Declaration) -> TypeResult<Type> { | Binding { name, expr,.. } => {
let ty = self.infer(expr)?;
self.bindings.insert(name.clone(), ty);
},
_ => return Err(format!("other formats not done"))
}
Ok(Void)
}
fn infer(&mut self, expr: &parsing::Expression) -> TypeResult<Type> {
use self::parsing::Expression;
match expr {
Expression(e, Some(anno)) => {
let anno_ty = anno.to_type()?;
let ty = self.infer_exprtype(&e)?;
self.unify(ty, anno_ty)
},
Expression(e, None) => self.infer_exprtype(e)
}
}
fn infer_exprtype(&mut self, expr: &parsing::ExpressionType) -> TypeResult<Type> {
use self::parsing::ExpressionType::*;
use self::Type::*; use self::TConst::*;
match expr {
NatLiteral(_) => Ok(Const(Nat)),
FloatLiteral(_) => Ok(Const(Float)),
StringLiteral(_) => Ok(Const(StringT)),
BoolLiteral(_) => Ok(Const(Bool)),
BinExp(op, lhs, rhs) => { /* remember there are both the haskell convention talk and the write you a haskell ways to do this! */
match op.get_type()? {
Func(box t1, box Func(box t2, box t3)) => {
let lhs_ty = self.infer(lhs)?;
let rhs_ty = self.infer(rhs)?;
self.unify(t1, lhs_ty)?;
self.unify(t2, rhs_ty)?;
Ok(t3)
},
other => Err(format!("{:?} is not a binary function type", other))
}
},
PrefixExp(op, expr) => match op.get_type()? {
Func(box t1, box t2) => {
let expr_ty = self.infer(expr)?;
self.unify(t1, expr_ty)?;
Ok(t2)
},
other => Err(format!("{:?} is not a prefix op function type", other))
},
Value(name) => {
match self.bindings.get(name) {
Some(ty) => Ok(ty.clone()),
None => Err(format!("No binding found for variable: {}", name)),
}
},
Call { f, arguments } => {
let mut tf = self.infer(f)?;
for arg in arguments.iter() {
match tf {
Func(box t, box rest) => {
let t_arg = self.infer(arg)?;
self.unify(t, t_arg)?;
tf = rest;
},
other => return Err(format!("Function call failed to unify; last type: {:?}", other)),
}
}
Ok(tf)
},
TupleLiteral(expressions) => {
let mut types = vec![];
for expr in expressions {
types.push(self.infer(expr)?);
}
Ok(Sum(types))
},
_ => Err(format!("Type not yet implemented"))
}
}
fn unify(&mut self, t1: Type, t2: Type) -> TypeResult<Type> {
use self::Type::*;// use self::TConst::*;
match (t1, t2) {
(Const(ref a), Const(ref b)) if a == b => Ok(Const(a.clone())),
(a, b) => Err(format!("Types {:?} and {:?} don't unify", a, b))
}
}
}
*/
#[cfg(test)]
mod tests {
/*
use super::{Type, TConst, TypeContext};
use super::Type::*;
use super::TConst::*;
use std::rc::Rc;
use std::cell::RefCell;
macro_rules! type_test {
($input:expr, $correct:expr) => {
{
let symbol_table = Rc::new(RefCell::new(SymbolTable::new()));
let mut tc = TypeContext::new(symbol_table);
let ast = ::ast::parse(::tokenizing::tokenize($input)).0.unwrap() ;
//tc.add_symbols(&ast);
assert_eq!($correct, tc.infer_block(&ast.0).unwrap())
}
}
}
#[test]
fn basic_inference() {
type_test!("30", Const(Nat));
//type_test!("fn x(a: Int): Bool {}; x(1)", TConst(Boolean));
}
*/
} | use self::parsing::Declaration::*;
use self::Type::*;
match decl { | random_line_split |
typechecking.rs | use std::cell::RefCell;
use std::rc::Rc;
use std::collections::HashMap;
use std::fmt;
use std::fmt::Write;
/*
use std::collections::hash_set::Union;
use std::iter::Iterator;
use itertools::Itertools;
*/
use ast;
use util::ScopeStack;
use symbol_table::{SymbolSpec, SymbolTable};
pub type TypeName = Rc<String>;
type TypeResult<T> = Result<T, String>;
#[derive(Debug, PartialEq, Clone)]
enum Type {
Const(TConst),
Var(TypeName),
Func(Vec<Type>),
}
#[derive(Debug, PartialEq, Clone)]
enum TConst {
Unit,
Nat,
StringT,
//Custom(String)
}
#[derive(Debug, PartialEq, Clone)]
struct Scheme {
names: Vec<TypeName>,
ty: Type,
}
impl fmt::Display for Scheme {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "∀{:?}. {:?}", self.names, self.ty)
}
}
#[derive(Debug, PartialEq, Clone)]
struct Substitution(HashMap<TypeName, Type>);
impl Substitution {
fn empty() -> Substitution {
Substitution(HashMap::new())
}
}
#[derive(Debug, PartialEq, Clone)]
struct TypeEnv(HashMap<TypeName, Scheme>);
impl TypeEnv {
fn default() -> TypeEnv {
TypeEnv(HashMap::new())
}
fn populate_from_symbols(&mut self, symbol_table: &SymbolTable) {
for (name, symbol) in symbol_table.values.iter() {
if let SymbolSpec::Func(ref type_names) = symbol.spec {
let mut ch: char = 'a';
let mut names = vec![];
for _ in type_names.iter() {
names.push(Rc::new(format!("{}", ch)));
ch = ((ch as u8) + 1) as char;
}
let sigma = Scheme {
names: names.clone(),
ty: Type::Func(names.into_iter().map(|n| Type::Var(n)).collect())
};
self.0.insert(name.clone(), sigma);
}
}
}
}
pub struct TypeContext<'a> {
values: ScopeStack<'a, TypeName, Type>,
symbol_table_handle: Rc<RefCell<SymbolTable>>,
global_env: TypeEnv
}
impl<'a> TypeContext<'a> {
pub fn new(symbol_table_handle: Rc<RefCell<SymbolTable>>) -> TypeContext<'static> {
TypeContext { values: ScopeStack::new(None), global_env: TypeEnv::default(), symbol_table_handle }
}
pub fn debug_types(&self) -> String {
let mut output = format!("Type environment\n");
for (name, scheme) in &self.global_env.0 {
write!(output, "{} -> {}\n", name, scheme).unwrap();
}
output
}
pub fn type_check_ast(&mut self, input: &ast::AST) -> Result<String, String> {
let ref symbol_table = self.symbol_table_handle.borrow();
self.global_env.populate_from_symbols(symbol_table);
let output = self.global_env.infer_block(&input.0)?;
Ok(format!("{:?}", output))
}
}
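// Hedged usage sketch (SymbolTable and AST construction assumed from the
// surrounding crate):
// let table = Rc::new(RefCell::new(SymbolTable::new()));
// let mut ctx = TypeContext::new(table);
// let report = ctx.type_check_ast(&ast);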
impl TypeEnv {
fn instantiate(&mut self, sigma: Scheme) -> Type {
match sigma {
Scheme { ty, .. } => ty,
}
}
fn generate(&mut self, ty: Type) -> Scheme {
Scheme {
names: vec![], //TODO incomplete
ty
}
}
fn infer_block(&mut self, block: &Vec<ast::Statement>) -> TypeResult<Type> {
let mut output = Type::Const(TConst::Unit);
for statement in block {
output = self.infer_statement(statement)?;
}
Ok(output)
}
fn infer_statement(&mut self, statement: &ast::Statement) -> TypeResult<Type> {
match statement {
ast::Statement::ExpressionStatement(expr) => self.infer_expr(expr),
ast::Statement::Declaration(decl) => self.infer_decl(decl)
}
}
fn infer_decl(&mut self, decl: &ast::Declaration) -> TypeResult<Type> {
use ast::Declaration::*;
match decl {
Binding { name, expr, .. } => {
let ty = self.infer_expr(expr)?;
let sigma = self.generate(ty);
self.0.insert(name.clone(), sigma);
},
_ => (),
}
Ok(Type::Const(TConst::Unit))
}
fn infer_expr(&mut self, expr: &ast::Expression) -> TypeResult<Type> {
match expr {
ast::Expression(expr, Some(anno)) => {
self.infer_exprtype(expr)
},
ast::Expression(expr, None) => {
self.infer_exprtype(expr)
}
}
}
fn infer_exprtype(&mut self, expr: &ast::ExpressionType) -> TypeResult<Type> {
use self::TConst::*;
use ast::ExpressionType::*;
Ok(match expr {
NatLiteral(_) => Type::Const(Nat),
StringLiteral(_) => Type::Const(StringT),
BinExp(op, lhs, rhs) => {
return Err(format!("NOTDONE"))
},
Call { f, arguments } => {
| Value(name) => {
let s = match self.0.get(name) {
Some(sigma) => sigma.clone(),
None => return Err(format!("Unknown variable: {}", name))
};
self.instantiate(s)
},
_ => Type::Const(Unit)
})
}
}
/* GIANT TODO - use the rust im crate, unless I make this code way less haskell-ish after it's done
*/
/*
pub type TypeResult<T> = Result<T, String>;
*/
/* TODO this should just check the name against a map, and that map should be pre-populated with
* types */
/*
impl parsing::TypeName {
fn to_type(&self) -> TypeResult<Type> {
use self::parsing::TypeSingletonName;
use self::parsing::TypeName::*;
use self::Type::*; use self::TConstOld::*;
Ok(match self {
Tuple(_) => return Err(format!("Tuples not yet implemented")),
Singleton(name) => match name {
TypeSingletonName { name,.. } => match &name[..] {
/*
"Nat" => Const(Nat),
"Int" => Const(Int),
"Float" => Const(Float),
"Bool" => Const(Bool),
"String" => Const(StringT),
*/
n => Const(Custom(n.to_string()))
}
}
})
}
}
*/
/*
impl TypeContext {
pub fn type_check_ast(&mut self, ast: &parsing::AST) -> TypeResult<String> {
let ref block = ast.0;
let mut infer = Infer::default();
let env = TypeEnvironment::default();
let output = infer.infer_block(block, &env);
match output {
Ok(s) => Ok(format!("{:?}", s)),
Err(s) => Err(format!("Error: {:?}", s))
}
}
}
// this is the equivalent of the Haskell Infer monad
#[derive(Debug, Default)]
struct Infer {
_idents: u32,
}
#[derive(Debug)]
enum InferError {
CannotUnify(MonoType, MonoType),
OccursCheckFailed(Rc<String>, MonoType),
UnknownIdentifier(Rc<String>),
Custom(String),
}
type InferResult<T> = Result<T, InferError>;
impl Infer {
fn fresh(&mut self) -> MonoType {
let i = self._idents;
self._idents += 1;
let name = Rc::new(format!("{}", ('a' as u8 + i as u8) as char)); // use the counter so names actually vary
MonoType::Var(name)
}
fn unify(&mut self, a: MonoType, b: MonoType) -> InferResult<Substitution> {
use self::InferError::*; use self::MonoType::*;
Ok(match (a, b) {
(Const(ref a), Const(ref b)) if a == b => Substitution::new(),
(Var(ref name), ref var) => Substitution::bind_variable(name, var),
(ref var, Var(ref name)) => Substitution::bind_variable(name, var),
(Function(box a1, box b1), Function(box a2, box b2)) => {
let s1 = self.unify(a1, a2)?;
let s2 = self.unify(b1.apply_substitution(&s1), b2.apply_substitution(&s1))?;
s1.merge(s2)
},
(a, b) => return Err(CannotUnify(a, b))
})
}
fn infer_block(&mut self, block: &Vec<parsing::Statement>, env: &TypeEnvironment) -> InferResult<MonoType> {
use self::parsing::Statement;
let mut ret = MonoType::Const(TypeConst::Unit);
for statement in block.iter() {
ret = match statement {
Statement::ExpressionStatement(expr) => {
let (sub, ty) = self.infer_expr(expr, env)?;
//TODO handle substitution monadically
ty
}
Statement::Declaration(decl) => MonoType::Const(TypeConst::Unit),
}
}
Ok(ret)
}
fn infer_expr(&mut self, expr: &parsing::Expression, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::Expression;
match expr {
Expression(e, Some(anno)) => self.infer_annotated_expr(e, anno, env),
/*
let anno_ty = anno.to_type()?;
let ty = self.infer_exprtype(&e)?;
self.unify(ty, anno_ty)
},
*/
Expression(e, None) => self.infer_exprtype(e, env)
}
}
fn infer_annotated_expr(&mut self, expr: &parsing::ExpressionType, anno: &parsing::TypeName, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
Err(InferError::Custom(format!("exprtype not done: {:?}", expr)))
}
fn infer_exprtype(&mut self, expr: &parsing::ExpressionType, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::ExpressionType::*;
use self::TypeConst::*;
Ok(match expr {
NatLiteral(_) => (Substitution::new(), MonoType::Const(Nat)),
FloatLiteral(_) => (Substitution::new(), MonoType::Const(Float)),
StringLiteral(_) => (Substitution::new(), MonoType::Const(StringT)),
BoolLiteral(_) => (Substitution::new(), MonoType::Const(Bool)),
Value(name) => match env.lookup(name) {
Some(sigma) => {
let tau = self.instantiate(&sigma);
(Substitution::new(), tau)
},
None => return Err(InferError::UnknownIdentifier(name.clone())),
},
e => return Err(InferError::Custom(format!("Type inference for {:?} not done", e)))
})
}
fn instantiate(&mut self, sigma: &PolyType) -> MonoType {
let ref ty: MonoType = sigma.1;
let mut subst = Substitution::new();
for name in sigma.0.iter() {
let fresh_mvar = self.fresh();
let new = Substitution::bind_variable(name, &fresh_mvar);
subst = subst.merge(new);
}
ty.apply_substitution(&subst)
}
}
*/
/* OLD STUFF DOWN HERE */
/*
impl TypeContext {
fn infer_block(&mut self, statements: &Vec<parsing::Statement>) -> TypeResult<Type> {
let mut ret_type = Type::Const(TConst::Unit);
for statement in statements {
ret_type = self.infer_statement(statement)?;
}
Ok(ret_type)
}
fn infer_statement(&mut self, statement: &parsing::Statement) -> TypeResult<Type> {
use self::parsing::Statement::*;
match statement {
ExpressionStatement(expr) => self.infer(expr),
Declaration(decl) => self.add_declaration(decl),
}
}
fn add_declaration(&mut self, decl: &parsing::Declaration) -> TypeResult<Type> {
use self::parsing::Declaration::*;
use self::Type::*;
match decl {
Binding { name, expr, .. } => {
let ty = self.infer(expr)?;
self.bindings.insert(name.clone(), ty);
},
_ => return Err(format!("other formats not done"))
}
Ok(Void)
}
fn infer(&mut self, expr: &parsing::Expression) -> TypeResult<Type> {
use self::parsing::Expression;
match expr {
Expression(e, Some(anno)) => {
let anno_ty = anno.to_type()?;
let ty = self.infer_exprtype(&e)?;
self.unify(ty, anno_ty)
},
Expression(e, None) => self.infer_exprtype(e)
}
}
fn infer_exprtype(&mut self, expr: &parsing::ExpressionType) -> TypeResult<Type> {
use self::parsing::ExpressionType::*;
use self::Type::*; use self::TConst::*;
match expr {
NatLiteral(_) => Ok(Const(Nat)),
FloatLiteral(_) => Ok(Const(Float)),
StringLiteral(_) => Ok(Const(StringT)),
BoolLiteral(_) => Ok(Const(Bool)),
BinExp(op, lhs, rhs) => { /* remember there are both the haskell convention talk and the write you a haskell ways to do this! */
match op.get_type()? {
Func(box t1, box Func(box t2, box t3)) => {
let lhs_ty = self.infer(lhs)?;
let rhs_ty = self.infer(rhs)?;
self.unify(t1, lhs_ty)?;
self.unify(t2, rhs_ty)?;
Ok(t3)
},
other => Err(format!("{:?} is not a binary function type", other))
}
},
PrefixExp(op, expr) => match op.get_type()? {
Func(box t1, box t2) => {
let expr_ty = self.infer(expr)?;
self.unify(t1, expr_ty)?;
Ok(t2)
},
other => Err(format!("{:?} is not a prefix op function type", other))
},
Value(name) => {
match self.bindings.get(name) {
Some(ty) => Ok(ty.clone()),
None => Err(format!("No binding found for variable: {}", name)),
}
},
Call { f, arguments } => {
let mut tf = self.infer(f)?;
for arg in arguments.iter() {
match tf {
Func(box t, box rest) => {
let t_arg = self.infer(arg)?;
self.unify(t, t_arg)?;
tf = rest;
},
other => return Err(format!("Function call failed to unify; last type: {:?}", other)),
}
}
Ok(tf)
},
TupleLiteral(expressions) => {
let mut types = vec![];
for expr in expressions {
types.push(self.infer(expr)?);
}
Ok(Sum(types))
},
_ => Err(format!("Type not yet implemented"))
}
}
fn unify(&mut self, t1: Type, t2: Type) -> TypeResult<Type> {
use self::Type::*; // use self::TConst::*;
match (t1, t2) {
(Const(ref a), Const(ref b)) if a == b => Ok(Const(a.clone())),
(a, b) => Err(format!("Types {:?} and {:?} don't unify", a, b))
}
}
}
*/
#[cfg(test)]
mod tests {
/*
use super::{Type, TConst, TypeContext};
use super::Type::*;
use super::TConst::*;
use std::rc::Rc;
use std::cell::RefCell;
macro_rules! type_test {
($input:expr, $correct:expr) => {
{
let symbol_table = Rc::new(RefCell::new(SymbolTable::new()));
let mut tc = TypeContext::new(symbol_table);
let ast = ::ast::parse(::tokenizing::tokenize($input)).0.unwrap() ;
//tc.add_symbols(&ast);
assert_eq!($correct, tc.infer_block(&ast.0).unwrap())
}
}
}
#[test]
fn basic_inference() {
type_test!("30", Const(Nat));
//type_test!("fn x(a: Int): Bool {}; x(1)", TConst(Boolean));
}
*/
}
|
return Err(format!("NOTDONE"))
},
| conditional_block |
typechecking.rs | use std::cell::RefCell;
use std::rc::Rc;
use std::collections::HashMap;
use std::fmt;
use std::fmt::Write;
/*
use std::collections::hash_set::Union;
use std::iter::Iterator;
use itertools::Itertools;
*/
use ast;
use util::ScopeStack;
use symbol_table::{SymbolSpec, SymbolTable};
pub type TypeName = Rc<String>;
type TypeResult<T> = Result<T, String>;
#[derive(Debug, PartialEq, Clone)]
enum Type {
Const(TConst),
Var(TypeName),
Func(Vec<Type>),
}
#[derive(Debug, PartialEq, Clone)]
enum TConst {
Unit,
Nat,
StringT,
//Custom(String)
}
#[derive(Debug, PartialEq, Clone)]
struct Scheme {
names: Vec<TypeName>,
ty: Type,
}
impl fmt::Display for Scheme {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "∀{:?}. {:?}", self.names, self.ty)
}
}
#[derive(Debug, PartialEq, Clone)]
struct Su | ashMap<TypeName, Type>);
impl Substitution {
fn empty() -> Substitution {
Substitution(HashMap::new())
}
}
#[derive(Debug, PartialEq, Clone)]
struct TypeEnv(HashMap<TypeName, Scheme>);
impl TypeEnv {
fn default() -> TypeEnv {
TypeEnv(HashMap::new())
}
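// Added note (not in the original source): for each function symbol we mint
// one fresh type variable per declared argument, named 'a', 'b', 'c', ... in
// order, and store a scheme quantified over all of them. These are
// placeholder signatures; the argument types themselves are not yet consulted.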
fn populate_from_symbols(&mut self, symbol_table: &SymbolTable) {
for (name, symbol) in symbol_table.values.iter() {
if let SymbolSpec::Func(ref type_names) = symbol.spec {
let mut ch: char = 'a';
let mut names = vec![];
for _ in type_names.iter() {
names.push(Rc::new(format!("{}", ch)));
ch = ((ch as u8) + 1) as char;
}
let sigma = Scheme {
names: names.clone(),
ty: Type::Func(names.into_iter().map(|n| Type::Var(n)).collect())
};
self.0.insert(name.clone(), sigma);
}
}
}
}
pub struct TypeContext<'a> {
values: ScopeStack<'a, TypeName, Type>,
symbol_table_handle: Rc<RefCell<SymbolTable>>,
global_env: TypeEnv
}
impl<'a> TypeContext<'a> {
pub fn new(symbol_table_handle: Rc<RefCell<SymbolTable>>) -> TypeContext<'static> {
TypeContext { values: ScopeStack::new(None), global_env: TypeEnv::default(), symbol_table_handle }
}
pub fn debug_types(&self) -> String {
let mut output = format!("Type environment\n");
for (name, scheme) in &self.global_env.0 {
write!(output, "{} -> {}\n", name, scheme).unwrap();
}
output
}
pub fn type_check_ast(&mut self, input: &ast::AST) -> Result<String, String> {
let ref symbol_table = self.symbol_table_handle.borrow();
self.global_env.populate_from_symbols(symbol_table);
let output = self.global_env.infer_block(&input.0)?;
Ok(format!("{:?}", output))
}
}
impl TypeEnv {
fn instantiate(&mut self, sigma: Scheme) -> Type {
match sigma {
Scheme { ty, .. } => ty,
}
}
fn generate(&mut self, ty: Type) -> Scheme {
Scheme {
names: vec![], //TODO incomplete
ty
}
}
fn infer_block(&mut self, block: &Vec<ast::Statement>) -> TypeResult<Type> {
let mut output = Type::Const(TConst::Unit);
for statement in block {
output = self.infer_statement(statement)?;
}
Ok(output)
}
fn infer_statement(&mut self, statement: &ast::Statement) -> TypeResult<Type> {
match statement {
ast::Statement::ExpressionStatement(expr) => self.infer_expr(expr),
ast::Statement::Declaration(decl) => self.infer_decl(decl)
}
}
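// Added note (not in the original source): this is the classic "let" rule.
// The type inferred for the bound expression is generalized into a scheme
// before being stored, so each later use can be instantiated independently.
// (generate below is still a stub that quantifies over no variables.)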
fn infer_decl(&mut self, decl: &ast::Declaration) -> TypeResult<Type> {
use ast::Declaration::*;
match decl {
Binding { name, expr, .. } => {
let ty = self.infer_expr(expr)?;
let sigma = self.generate(ty);
self.0.insert(name.clone(), sigma);
},
_ => (),
}
Ok(Type::Const(TConst::Unit))
}
fn infer_expr(&mut self, expr: &ast::Expression) -> TypeResult<Type> {
match expr {
ast::Expression(expr, Some(anno)) => {
self.infer_exprtype(expr)
},
ast::Expression(expr, None) => {
self.infer_exprtype(expr)
}
}
}
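// Added note (not in the original source): infer_expr above currently
// ignores the annotation; both arms defer to infer_exprtype. Unifying the
// inferred type against the annotated one (as the commented-out code later
// in this file sketches) remains to be done.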
fn infer_exprtype(&mut self, expr: &ast::ExpressionType) -> TypeResult<Type> {
use self::TConst::*;
use ast::ExpressionType::*;
Ok(match expr {
NatLiteral(_) => Type::Const(Nat),
StringLiteral(_) => Type::Const(StringT),
BinExp(op, lhs, rhs) => {
return Err(format!("NOTDONE"))
},
Call { f, arguments } => {
return Err(format!("NOTDONE"))
},
Value(name) => {
let s = match self.0.get(name) {
Some(sigma) => sigma.clone(),
None => return Err(format!("Unknown variable: {}", name))
};
self.instantiate(s)
},
_ => Type::Const(Unit)
})
}
}
/* GIANT TODO - use the rust im crate, unless I make this code way less haskell-ish after it's done
*/
/*
pub type TypeResult<T> = Result<T, String>;
*/
/* TODO this should just check the name against a map, and that map should be pre-populated with
* types */
/*
impl parsing::TypeName {
fn to_type(&self) -> TypeResult<Type> {
use self::parsing::TypeSingletonName;
use self::parsing::TypeName::*;
use self::Type::*; use self::TConstOld::*;
Ok(match self {
Tuple(_) => return Err(format!("Tuples not yet implemented")),
Singleton(name) => match name {
TypeSingletonName { name, .. } => match &name[..] {
/*
"Nat" => Const(Nat),
"Int" => Const(Int),
"Float" => Const(Float),
"Bool" => Const(Bool),
"String" => Const(StringT),
*/
n => Const(Custom(n.to_string()))
}
}
})
}
}
*/
/*
impl TypeContext {
pub fn type_check_ast(&mut self, ast: &parsing::AST) -> TypeResult<String> {
let ref block = ast.0;
let mut infer = Infer::default();
let env = TypeEnvironment::default();
let output = infer.infer_block(block, &env);
match output {
Ok(s) => Ok(format!("{:?}", s)),
Err(s) => Err(format!("Error: {:?}", s))
}
}
}
// this is the equivalent of the Haskell Infer monad
#[derive(Debug, Default)]
struct Infer {
_idents: u32,
}
#[derive(Debug)]
enum InferError {
CannotUnify(MonoType, MonoType),
OccursCheckFailed(Rc<String>, MonoType),
UnknownIdentifier(Rc<String>),
Custom(String),
}
type InferResult<T> = Result<T, InferError>;
impl Infer {
fn fresh(&mut self) -> MonoType {
let i = self._idents;
self._idents += 1;
let name = Rc::new(format!("{}", ('a' as u8 + i as u8) as char));
MonoType::Var(name)
}
fn unify(&mut self, a: MonoType, b: MonoType) -> InferResult<Substitution> {
use self::InferError::*; use self::MonoType::*;
Ok(match (a, b) {
(Const(ref a), Const(ref b)) if a == b => Substitution::new(),
(Var(ref name), ref var) => Substitution::bind_variable(name, var),
(ref var, Var(ref name)) => Substitution::bind_variable(name, var),
(Function(box a1, box b1), Function(box a2, box b2)) => {
let s1 = self.unify(a1, a2)?;
let s2 = self.unify(b1.apply_substitution(&s1), b2.apply_substitution(&s1))?;
s1.merge(s2)
},
(a, b) => return Err(CannotUnify(a, b))
})
}
fn infer_block(&mut self, block: &Vec<parsing::Statement>, env: &TypeEnvironment) -> InferResult<MonoType> {
use self::parsing::Statement;
let mut ret = MonoType::Const(TypeConst::Unit);
for statement in block.iter() {
ret = match statement {
Statement::ExpressionStatement(expr) => {
let (sub, ty) = self.infer_expr(expr, env)?;
//TODO handle substitution monadically
ty
}
Statement::Declaration(decl) => MonoType::Const(TypeConst::Unit),
}
}
Ok(ret)
}
fn infer_expr(&mut self, expr: &parsing::Expression, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::Expression;
match expr {
Expression(e, Some(anno)) => self.infer_annotated_expr(e, anno, env),
/*
let anno_ty = anno.to_type()?;
let ty = self.infer_exprtype(&e)?;
self.unify(ty, anno_ty)
},
*/
Expression(e, None) => self.infer_exprtype(e, env)
}
}
fn infer_annotated_expr(&mut self, expr: &parsing::ExpressionType, anno: &parsing::TypeName, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
Err(InferError::Custom(format!("exprtype not done: {:?}", expr)))
}
fn infer_exprtype(&mut self, expr: &parsing::ExpressionType, env: &TypeEnvironment) -> InferResult<(Substitution, MonoType)> {
use self::parsing::ExpressionType::*;
use self::TypeConst::*;
Ok(match expr {
NatLiteral(_) => (Substitution::new(), MonoType::Const(Nat)),
FloatLiteral(_) => (Substitution::new(), MonoType::Const(Float)),
StringLiteral(_) => (Substitution::new(), MonoType::Const(StringT)),
BoolLiteral(_) => (Substitution::new(), MonoType::Const(Bool)),
Value(name) => match env.lookup(name) {
Some(sigma) => {
let tau = self.instantiate(&sigma);
(Substitution::new(), tau)
},
None => return Err(InferError::UnknownIdentifier(name.clone())),
},
e => return Err(InferError::Custom(format!("Type inference for {:?} not done", e)))
})
}
fn instantiate(&mut self, sigma: &PolyType) -> MonoType {
let ref ty: MonoType = sigma.1;
let mut subst = Substitution::new();
for name in sigma.0.iter() {
let fresh_mvar = self.fresh();
let new = Substitution::bind_variable(name, &fresh_mvar);
subst = subst.merge(new);
}
ty.apply_substitution(&subst)
}
}
*/
/* OLD STUFF DOWN HERE */
/*
impl TypeContext {
fn infer_block(&mut self, statements: &Vec<parsing::Statement>) -> TypeResult<Type> {
let mut ret_type = Type::Const(TConst::Unit);
for statement in statements {
ret_type = self.infer_statement(statement)?;
}
Ok(ret_type)
}
fn infer_statement(&mut self, statement: &parsing::Statement) -> TypeResult<Type> {
use self::parsing::Statement::*;
match statement {
ExpressionStatement(expr) => self.infer(expr),
Declaration(decl) => self.add_declaration(decl),
}
}
fn add_declaration(&mut self, decl: &parsing::Declaration) -> TypeResult<Type> {
use self::parsing::Declaration::*;
use self::Type::*;
match decl {
Binding { name, expr, .. } => {
let ty = self.infer(expr)?;
self.bindings.insert(name.clone(), ty);
},
_ => return Err(format!("other formats not done"))
}
Ok(Void)
}
fn infer(&mut self, expr: &parsing::Expression) -> TypeResult<Type> {
use self::parsing::Expression;
match expr {
Expression(e, Some(anno)) => {
let anno_ty = anno.to_type()?;
let ty = self.infer_exprtype(&e)?;
self.unify(ty, anno_ty)
},
Expression(e, None) => self.infer_exprtype(e)
}
}
fn infer_exprtype(&mut self, expr: &parsing::ExpressionType) -> TypeResult<Type> {
use self::parsing::ExpressionType::*;
use self::Type::*; use self::TConst::*;
match expr {
NatLiteral(_) => Ok(Const(Nat)),
FloatLiteral(_) => Ok(Const(Float)),
StringLiteral(_) => Ok(Const(StringT)),
BoolLiteral(_) => Ok(Const(Bool)),
BinExp(op, lhs, rhs) => { /* remember there are both the haskell convention talk and the write you a haskell ways to do this! */
match op.get_type()? {
Func(box t1, box Func(box t2, box t3)) => {
let lhs_ty = self.infer(lhs)?;
let rhs_ty = self.infer(rhs)?;
self.unify(t1, lhs_ty)?;
self.unify(t2, rhs_ty)?;
Ok(t3)
},
other => Err(format!("{:?} is not a binary function type", other))
}
},
PrefixExp(op, expr) => match op.get_type()? {
Func(box t1, box t2) => {
let expr_ty = self.infer(expr)?;
self.unify(t1, expr_ty)?;
Ok(t2)
},
other => Err(format!("{:?} is not a prefix op function type", other))
},
Value(name) => {
match self.bindings.get(name) {
Some(ty) => Ok(ty.clone()),
None => Err(format!("No binding found for variable: {}", name)),
}
},
Call { f, arguments } => {
let mut tf = self.infer(f)?;
for arg in arguments.iter() {
match tf {
Func(box t, box rest) => {
let t_arg = self.infer(arg)?;
self.unify(t, t_arg)?;
tf = rest;
},
other => return Err(format!("Function call failed to unify; last type: {:?}", other)),
}
}
Ok(tf)
},
TupleLiteral(expressions) => {
let mut types = vec![];
for expr in expressions {
types.push(self.infer(expr)?);
}
Ok(Sum(types))
},
_ => Err(format!("Type not yet implemented"))
}
}
fn unify(&mut self, t1: Type, t2: Type) -> TypeResult<Type> {
use self::Type::*; // use self::TConst::*;
match (t1, t2) {
(Const(ref a), Const(ref b)) if a == b => Ok(Const(a.clone())),
(a, b) => Err(format!("Types {:?} and {:?} don't unify", a, b))
}
}
}
*/
#[cfg(test)]
mod tests {
/*
use super::{Type, TConst, TypeContext};
use super::Type::*;
use super::TConst::*;
use std::rc::Rc;
use std::cell::RefCell;
macro_rules! type_test {
($input:expr, $correct:expr) => {
{
let symbol_table = Rc::new(RefCell::new(SymbolTable::new()));
let mut tc = TypeContext::new(symbol_table);
let ast = ::ast::parse(::tokenizing::tokenize($input)).0.unwrap() ;
//tc.add_symbols(&ast);
assert_eq!($correct, tc.infer_block(&ast.0).unwrap())
}
}
}
#[test]
fn basic_inference() {
type_test!("30", Const(Nat));
//type_test!("fn x(a: Int): Bool {}; x(1)", TConst(Boolean));
}
*/
}
| bstitution(H | identifier_name |
lib.rs | /*!
This crate provides a robust regular expression parser.
This crate defines two primary types:
* [`Ast`](ast::Ast) is the abstract syntax of a regular expression.
An abstract syntax corresponds to a *structured representation* of the
concrete syntax of a regular expression, where the concrete syntax is the
pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it
can be converted back to the original concrete syntax (modulo some details,
like whitespace). To a first approximation, the abstract syntax is complex
and difficult to analyze.
* [`Hir`](hir::Hir) is the high-level intermediate representation
("HIR" or "high-level IR" for short) of regular expression. It corresponds to
an intermediate state of a regular expression that sits between the abstract
syntax and the low level compiled opcodes that are eventually responsible for
executing a regular expression search. Given some high-level IR, it is not
possible to produce the original concrete syntax (although it is possible to
produce an equivalent concrete syntax, it will likely scarcely resemble
the original pattern). To a first approximation, the high-level IR is simple
and easy to analyze.
These two types come with conversion routines:
* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an
[`Ast`](ast::Ast).
* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a
[`Hir`](hir::Hir).
As a convenience, the above two conversion routines are combined into one via
the top-level [`Parser`] type. This `Parser` will first convert your pattern to
an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level
[`parse`] free function.
# Example
This example shows how to parse a pattern string into its HIR:
```
use regex_syntax::{hir::Hir, parse};
let hir = parse("a|b")?;
assert_eq!(hir, Hir::alternation(vec![
Hir::literal("a".as_bytes()),
Hir::literal("b".as_bytes()),
]));
# Ok::<(), Box<dyn std::error::Error>>(())
```
# Concrete syntax supported
The concrete syntax is documented as part of the public API of the
[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax).
# Input safety
A key feature of this library is that it is safe to use with end user facing
input. This plays a significant role in the internal implementation. In
particular:
1. Parsers provide a `nest_limit` option that permits callers to control how
deeply nested a regular expression is allowed to be. This makes it possible
to do case analysis over an `Ast` or an `Hir` using recursion without
worrying about stack overflow. (A usage sketch of this option follows the list.)
2. Since relying on a particular stack size is brittle, this crate goes to
great lengths to ensure that all interactions with both the `Ast` and the
`Hir` do not use recursion. Namely, they use constant stack space and heap
space proportional to the size of the original pattern string (in bytes).
This includes the type's corresponding destructors. (One exception to this
is literal extraction, but this will eventually get fixed.)
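For the `nest_limit` option mentioned in item 1, here is a minimal sketch
(added for illustration; it is not part of the original docs):
```
let _hir = regex_syntax::ParserBuilder::new()
    .nest_limit(50)
    .build()
    .parse("(a(b))|c")?;
# Ok::<(), regex_syntax::Error>(())
```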
# Error reporting
The `Display` implementations on all `Error` types exposed in this library
provide nice human readable errors that are suitable for showing to end users
in a monospace font.
# Literal extraction
This crate provides limited support for [literal extraction from `Hir`
values](hir::literal). Be warned that literal extraction uses recursion, and
therefore, stack size proportional to the size of the `Hir`.
The purpose of literal extraction is to speed up searches. That is, if you
know a regular expression must match a prefix or suffix literal, then it is
often quicker to search for instances of that literal, and then confirm or deny
the match using the full regular expression engine. These optimizations are
done automatically in the `regex` crate.
# Crate features
An important feature provided by this crate is its Unicode support. This
includes things like case folding, boolean properties, general categories,
scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`.
However, a downside of this support is that it requires bundling several
Unicode data tables that are substantial in size.
A fair number of use cases do not require full Unicode support. For this
reason, this crate exposes a number of features to control which Unicode
data is available.
If a regular expression attempts to use a Unicode feature that is not available
because the corresponding crate feature was disabled, then translating that
regular expression to an `Hir` will return an error. (It is still possible to
construct an `Ast` for such a regular expression, since Unicode data is not
used until translation to an `Hir`.) Stated differently, enabling or disabling
any of the features below can only add or subtract from the total set of valid
regular expressions. Enabling or disabling a feature will never modify the
match semantics of a regular expression.
The following features are available:
* **std** -
Enables support for the standard library. This feature is enabled by default.
When disabled, only `core` and `alloc` are used. Otherwise, enabling `std`
generally just enables `std::error::Error` trait impls for the various error
types.
* **unicode** -
Enables all Unicode features. This feature is enabled by default, and will
always cover all Unicode features, even if more are added in the future.
* **unicode-age** -
Provide the data for the
[Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
This makes it possible to use classes like `\p{Age:6.0}` to refer to all
codepoints first introduced in Unicode 6.0.
* **unicode-bool** -
Provide the data for numerous Unicode boolean properties. The full list
is not included here, but contains properties like `Alphabetic`, `Emoji`,
`Lowercase`, `Math`, `Uppercase` and `White_Space`.
* **unicode-case** -
Provide the data for case insensitive matching using
[Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
* **unicode-gencat** -
Provide the data for
[Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
This includes, but is not limited to, `Decimal_Number`, `Letter`,
`Math_Symbol`, `Number` and `Punctuation`.
* **unicode-perl** -
Provide the data for supporting the Unicode-aware Perl character classes,
corresponding to `\w`, `\s` and `\d`. This is also necessary for using
Unicode-aware word boundary assertions. Note that if this feature is
disabled, the `\s` and `\d` character classes are still available if the
`unicode-bool` and `unicode-gencat` features are enabled, respectively.
* **unicode-script** -
Provide the data for
[Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
`Latin` and `Thai`.
* **unicode-segment** -
Provide the data necessary to provide the properties used to implement the
[Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
`\p{sb=ATerm}`.
* **arbitrary** -
Enabling this feature introduces a public dependency on the
[`arbitrary`](https://crates.io/crates/arbitrary)
crate. Namely, it implements the `Arbitrary` trait from that crate for the
[`Ast`](crate::ast::Ast) type. This feature is disabled by default.
*/
#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// MSRV(1.62): Allow unused warnings. Needed for the 'allow' below,
// since the warning is no longer triggered in newer Rust releases.
// Once the 'allow(mutable_borrow_reservation_conflict)' can be
// removed, we can remove the 'allow(renamed_and_removed_lints)' too.
#![allow(renamed_and_removed_lints)]
// MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV
// is Rust 1.60 at the time of writing, a warning is displayed. But
// the lang team decided the code pattern flagged by this warning is
// OK, so the warning is innocuous. We can remove this explicit allow
// once we get to a Rust release where the warning is no longer
// triggered. I believe that's Rust 1.62.
#![allow(mutable_borrow_reservation_conflict)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(test, feature = "std"))]
extern crate std;
extern crate alloc;
pub use crate::{
error::Error,
parser::{parse, Parser, ParserBuilder},
unicode::UnicodeWordError,
};
use alloc::string::String;
pub mod ast;
mod debug;
mod either;
mod error;
pub mod hir;
mod parser;
mod rank;
mod unicode;
mod unicode_tables;
pub mod utf8;
/// Escapes all regular expression meta characters in `text`.
///
/// The string returned may be safely used as a literal in a regular
/// expression.
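///
/// # Example
///
/// ```
/// // A usage sketch added for illustration (not in the original docs).
/// use regex_syntax::escape;
///
/// assert_eq!(escape("a.b+c"), r"a\.b\+c");
/// ```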
pub fn escape(text: &str) -> String {
let mut quoted = String::new();
escape_into(text, &mut quoted);
quoted
}
/// Escapes all meta characters in `text` and writes the result into `buf`.
///
/// This will append escape characters into the given buffer. The characters
/// that are appended are safe to use as a literal in a regular expression.
pub fn escape_into(text: &str, buf: &mut String) {
buf.reserve(text.len());
for c in text.chars() {
if is_meta_character(c) {
buf.push('\\');
}
buf.push(c);
}
}
/// Returns true if the given character has significance in a regex.
///
/// Generally speaking, these are the only characters which _must_ be escaped
/// in order to match their literal meaning. For example, to match a literal
/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For
/// example, `-` is treated as a meta character because of its significance
/// for writing ranges inside of character classes, but the regex `-` will
/// match a literal `-` because `-` has no special meaning outside of character
/// classes.
///
/// In order to determine whether a character may be escaped at all, the
/// [`is_escapeable_character`] routine should be used. The difference between
/// `is_meta_character` and `is_escapeable_character` is that the latter will
/// return true for some characters that are _not_ meta characters. For
/// example, `%` and `\%` both match a literal `%` in all contexts. In other
/// words, `is_escapeable_character` includes "superfluous" escapes.
///
/// Note that the set of characters for which this function returns `true` or
/// `false` is fixed and won't change in a semver compatible release. (In this
/// case, "semver compatible release" actually refers to the `regex` crate
/// itself, since reducing or expanding the set of meta characters would be a
/// breaking change for not just `regex-syntax` but also `regex` itself.)
///
/// # Example
///
/// ```
/// use regex_syntax::is_meta_character;
///
/// assert!(is_meta_character('?'));
/// assert!(is_meta_character('-'));
/// assert!(is_meta_character('&'));
/// assert!(is_meta_character('#'));
///
/// assert!(!is_meta_character('%'));
/// assert!(!is_meta_character('/'));
/// assert!(!is_meta_character('!'));
/// assert!(!is_meta_character('"'));
/// assert!(!is_meta_character('e'));
/// ```
pub fn is_meta_character(c: char) -> bool {
match c {
'\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{'
| '}' | '^' | '$' | '#' | '&' | '-' | '~' => true,
_ => false,
}
}
/// Returns true if the given character can be escaped in a regex.
///
/// This returns true in all cases that `is_meta_character` returns true, but
/// also returns true in some cases where `is_meta_character` returns false.
/// For example, `%` is not a meta character, but it is escapeable. That is,
/// `%` and `\%` both match a literal `%` in all contexts.
///
/// The purpose of this routine is to provide knowledge about what characters
/// may be escaped. Namely, most regex engines permit "superfluous" escapes
/// where characters without any special significance may be escaped even
/// though there is no actual _need_ to do so.
///
/// This will return false for some characters. For example, `e` is not
/// escapeable. Therefore, `\e` will either result in a parse error (which is
/// true today), or it could backwards compatibly evolve into a new construct
/// with its own meaning. Indeed, that is the purpose of banning _some_
/// superfluous escapes: it provides a way to evolve the syntax in a compatible
/// manner.
///
/// # Example
///
/// ```
/// use regex_syntax::is_escapeable_character;
///
/// assert!(is_escapeable_character('?'));
/// assert!(is_escapeable_character('-'));
/// assert!(is_escapeable_character('&'));
/// assert!(is_escapeable_character('#'));
/// assert!(is_escapeable_character('%'));
/// assert!(is_escapeable_character('/'));
/// assert!(is_escapeable_character('!'));
/// assert!(is_escapeable_character('"'));
///
/// assert!(!is_escapeable_character('e'));
/// ```
pub fn is_escapeable_character(c: char) -> bool {
// Certainly escapeable if it's a meta character.
if is_meta_character(c) {
return true;
}
// Any character that isn't ASCII is definitely not escapeable. There's
// no real need to allow things like \☃ right?
if !c.is_ascii() {
return false;
}
// Otherwise, we basically say that everything is escapeable unless it's a
// letter or digit. Things like \3 are either octal (when enabled) or an
// error, and we should keep it that way. Otherwise, letters are reserved
// for adding new syntax in a backwards compatible way.
match c {
'0'..='9' | 'A'..='Z' | 'a'..='z' => false,
// While not currently supported, we keep these as not escapeable to
// give us some flexibility with respect to supporting the \< and
// \> word boundary assertions in the future. By rejecting them as
// escapeable, \< and \> will result in a parse error. Thus, we can
// turn them into something else in the future without it being a
// backwards incompatible change.
'<' | '>' => false,
_ => true,
}
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Panics
///
/// If the `unicode-perl` feature is not enabled, then this function
/// panics. For this reason, it is recommended that callers use
/// [`try_is_word_character`] instead.
pub fn is_word_character(c: char) -> bool {
try_is_word_character(c).expect("unicode-perl feature must be enabled")
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Errors
///
/// If the `unicode-perl` feature is not enabled, then this function always
/// returns an error.
pub fn try_is_word_character(
c: char,
) -> core::result::Result<bool, UnicodeWordError> {
| /// Returns true if and only if the given character is an ASCII word character.
///
/// An ASCII word character is defined by the following character class:
/// `[_0-9a-zA-Z]`.
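///
/// # Example
///
/// ```
/// // A usage sketch added for illustration (not in the original docs).
/// use regex_syntax::is_word_byte;
///
/// assert!(is_word_byte(b'_'));
/// assert!(!is_word_byte(b'-'));
/// ```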
pub fn is_word_byte(c: u8) -> bool {
match c {
b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use super::*;
#[test]
fn escape_meta() {
assert_eq!(
escape(r"\.+*?()|[]{}^$#&-~"),
r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string()
);
}
#[test]
fn word_byte() {
assert!(is_word_byte(b'a'));
assert!(!is_word_byte(b'-'));
}
#[test]
#[cfg(feature = "unicode-perl")]
fn word_char() {
assert!(is_word_character('a'), "ASCII");
assert!(is_word_character('à'), "Latin-1");
assert!(is_word_character('β'), "Greek");
assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)");
assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)");
assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)");
assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)");
assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)");
assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)");
assert!(!is_word_character('-'));
assert!(!is_word_character('☃'));
}
#[test]
#[should_panic]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_panic() {
assert!(is_word_character('a'));
}
#[test]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_error() {
assert!(try_is_word_character('a').is_err());
}
}
| unicode::is_word_character(c)
}
| identifier_body |
lib.rs | /*!
This crate provides a robust regular expression parser.
This crate defines two primary types:
* [`Ast`](ast::Ast) is the abstract syntax of a regular expression.
An abstract syntax corresponds to a *structured representation* of the
concrete syntax of a regular expression, where the concrete syntax is the
pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it
can be converted back to the original concrete syntax (modulo some details,
like whitespace). To a first approximation, the abstract syntax is complex
and difficult to analyze.
* [`Hir`](hir::Hir) is the high-level intermediate representation
("HIR" or "high-level IR" for short) of regular expression. It corresponds to
an intermediate state of a regular expression that sits between the abstract
syntax and the low level compiled opcodes that are eventually responsible for
executing a regular expression search. Given some high-level IR, it is not
possible to produce the original concrete syntax (although it is possible to
produce an equivalent concrete syntax, it will likely scarcely resemble
the original pattern). To a first approximation, the high-level IR is simple
and easy to analyze.
These two types come with conversion routines:
* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an
[`Ast`](ast::Ast).
* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a
[`Hir`](hir::Hir).
As a convenience, the above two conversion routines are combined into one via
the top-level [`Parser`] type. This `Parser` will first convert your pattern to
an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level
[`parse`] free function.
# Example
This example shows how to parse a pattern string into its HIR:
```
use regex_syntax::{hir::Hir, parse};
let hir = parse("a|b")?;
assert_eq!(hir, Hir::alternation(vec![
Hir::literal("a".as_bytes()),
Hir::literal("b".as_bytes()),
]));
# Ok::<(), Box<dyn std::error::Error>>(())
```
# Concrete syntax supported
The concrete syntax is documented as part of the public API of the
[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax).
# Input safety
A key feature of this library is that it is safe to use with end user facing
input. This plays a significant role in the internal implementation. In
particular:
1. Parsers provide a `nest_limit` option that permits callers to control how
deeply nested a regular expression is allowed to be. This makes it possible
to do case analysis over an `Ast` or an `Hir` using recursion without
worrying about stack overflow.
2. Since relying on a particular stack size is brittle, this crate goes to
great lengths to ensure that all interactions with both the `Ast` and the
`Hir` do not use recursion. Namely, they use constant stack space and heap
space proportional to the size of the original pattern string (in bytes).
This includes the type's corresponding destructors. (One exception to this
is literal extraction, but this will eventually get fixed.)
# Error reporting
The `Display` implementations on all `Error` types exposed in this library
provide nice human readable errors that are suitable for showing to end users
in a monospace font.
# Literal extraction
This crate provides limited support for [literal extraction from `Hir`
values](hir::literal). Be warned that literal extraction uses recursion, and
therefore, stack size proportional to the size of the `Hir`.
The purpose of literal extraction is to speed up searches. That is, if you
know a regular expression must match a prefix or suffix literal, then it is
often quicker to search for instances of that literal, and then confirm or deny
the match using the full regular expression engine. These optimizations are
done automatically in the `regex` crate.
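Here is a sketch of extracting prefix literals with the default
[`hir::literal::Extractor`] (this example is an addition, not part of the
original docs):
```
let hir = regex_syntax::parse("foo|far")?;
let prefixes = regex_syntax::hir::literal::Extractor::new().extract(&hir);
assert!(prefixes.is_finite());
# Ok::<(), Box<dyn std::error::Error>>(())
```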
# Crate features
An important feature provided by this crate is its Unicode support. This
includes things like case folding, boolean properties, general categories,
scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`.
However, a downside of this support is that it requires bundling several
Unicode data tables that are substantial in size.
A fair number of use cases do not require full Unicode support. For this
reason, this crate exposes a number of features to control which Unicode
data is available.
If a regular expression attempts to use a Unicode feature that is not available
because the corresponding crate feature was disabled, then translating that
regular expression to an `Hir` will return an error. (It is still possible to
construct an `Ast` for such a regular expression, since Unicode data is not
used until translation to an `Hir`.) Stated differently, enabling or disabling
any of the features below can only add or subtract from the total set of valid
regular expressions. Enabling or disabling a feature will never modify the
match semantics of a regular expression.
The following features are available:
* **std** -
Enables support for the standard library. This feature is enabled by default.
When disabled, only `core` and `alloc` are used. Otherwise, enabling `std`
generally just enables `std::error::Error` trait impls for the various error
types.
* **unicode** -
Enables all Unicode features. This feature is enabled by default, and will
always cover all Unicode features, even if more are added in the future.
* **unicode-age** -
Provide the data for the
[Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
This makes it possible to use classes like `\p{Age:6.0}` to refer to all
codepoints first introduced in Unicode 6.0.
* **unicode-bool** -
Provide the data for numerous Unicode boolean properties. The full list
is not included here, but contains properties like `Alphabetic`, `Emoji`,
`Lowercase`, `Math`, `Uppercase` and `White_Space`.
* **unicode-case** -
Provide the data for case insensitive matching using
[Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
* **unicode-gencat** -
Provide the data for
[Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
This includes, but is not limited to, `Decimal_Number`, `Letter`,
`Math_Symbol`, `Number` and `Punctuation`.
* **unicode-perl** -
Provide the data for supporting the Unicode-aware Perl character classes,
corresponding to `\w`, `\s` and `\d`. This is also necessary for using
Unicode-aware word boundary assertions. Note that if this feature is
disabled, the `\s` and `\d` character classes are still available if the
`unicode-bool` and `unicode-gencat` features are enabled, respectively.
* **unicode-script** -
Provide the data for
[Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
`Latin` and `Thai`.
* **unicode-segment** -
Provide the data necessary to provide the properties used to implement the
[Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
`\p{sb=ATerm}`.
* **arbitrary** -
Enabling this feature introduces a public dependency on the
[`arbitrary`](https://crates.io/crates/arbitrary)
crate. Namely, it implements the `Arbitrary` trait from that crate for the
[`Ast`](crate::ast::Ast) type. This feature is disabled by default.
*/
#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// MSRV(1.62): Allow unused warnings. Needed for the 'allow' below,
// since the warning is no longer triggered in newer Rust releases.
// Once the 'allow(mutable_borrow_reservation_conflict)' can be
// removed, we can remove the 'allow(renamed_and_removed_lints)' too.
#![allow(renamed_and_removed_lints)]
// MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV
// is Rust 1.60 at the time of writing, a warning is displayed. But
// the lang team decided the code pattern flagged by this warning is
// OK, so the warning is innocuous. We can remove this explicit allow
// once we get to a Rust release where the warning is no longer
// triggered. I believe that's Rust 1.62.
#![allow(mutable_borrow_reservation_conflict)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(test, feature = "std"))]
extern crate std;
extern crate alloc;
pub use crate::{
error::Error,
parser::{parse, Parser, ParserBuilder},
unicode::UnicodeWordError,
};
use alloc::string::String;
pub mod ast;
mod debug;
mod either;
mod error;
pub mod hir;
mod parser;
mod rank;
mod unicode;
mod unicode_tables;
pub mod utf8;
/// Escapes all regular expression meta characters in `text`.
///
/// The string returned may be safely used as a literal in a regular
/// expression.
pub fn escape(text: &str) -> String {
let mut quoted = String::new();
escape_into(text, &mut quoted);
quoted
}
/// Escapes all meta characters in `text` and writes the result into `buf`.
///
/// This will append escape characters into the given buffer. The characters
/// that are appended are safe to use as a literal in a regular expression.
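///
/// # Example
///
/// ```
/// // A usage sketch added for illustration (not in the original docs).
/// use regex_syntax::escape_into;
///
/// let mut buf = String::from("^");
/// escape_into("a|b", &mut buf);
/// assert_eq!(buf, r"^a\|b");
/// ```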
pub fn escape_into(text: &str, buf: &mut String) {
buf.reserve(text.len());
for c in text.chars() {
if is_meta_character(c) {
buf.push('\\');
}
buf.push(c);
}
}
/// Returns true if the given character has significance in a regex.
///
/// Generally speaking, these are the only characters which _must_ be escaped
/// in order to match their literal meaning. For example, to match a literal
/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For
/// example, `-` is treated as a meta character because of its significance
/// for writing ranges inside of character classes, but the regex `-` will
/// match a literal `-` because `-` has no special meaning outside of character
/// classes.
///
/// In order to determine whether a character may be escaped at all, the
/// [`is_escapeable_character`] routine should be used. The difference between
/// `is_meta_character` and `is_escapeable_character` is that the latter will
/// return true for some characters that are _not_ meta characters. For
/// example, `%` and `\%` both match a literal `%` in all contexts. In other
/// words, `is_escapeable_character` includes "superfluous" escapes.
///
/// Note that the set of characters for which this function returns `true` or
/// `false` is fixed and won't change in a semver compatible release. (In this
/// case, "semver compatible release" actually refers to the `regex` crate
/// itself, since reducing or expanding the set of meta characters would be a
/// breaking change for not just `regex-syntax` but also `regex` itself.)
///
/// # Example
///
/// ```
/// use regex_syntax::is_meta_character;
///
/// assert!(is_meta_character('?'));
/// assert!(is_meta_character('-'));
/// assert!(is_meta_character('&'));
/// assert!(is_meta_character('#'));
///
/// assert!(!is_meta_character('%'));
/// assert!(!is_meta_character('/'));
/// assert!(!is_meta_character('!'));
/// assert!(!is_meta_character('"'));
/// assert!(!is_meta_character('e'));
/// ```
pub fn | (c: char) -> bool {
match c {
'\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{'
| '}' | '^' | '$' | '#' | '&' | '-' | '~' => true,
_ => false,
}
}
/// Returns true if the given character can be escaped in a regex.
///
/// This returns true in all cases that `is_meta_character` returns true, but
/// also returns true in some cases where `is_meta_character` returns false.
/// For example, `%` is not a meta character, but it is escapeable. That is,
/// `%` and `\%` both match a literal `%` in all contexts.
///
/// The purpose of this routine is to provide knowledge about what characters
/// may be escaped. Namely, most regex engines permit "superfluous" escapes
/// where characters without any special significance may be escaped even
/// though there is no actual _need_ to do so.
///
/// This will return false for some characters. For example, `e` is not
/// escapeable. Therefore, `\e` will either result in a parse error (which is
/// true today), or it could backwards compatibly evolve into a new construct
/// with its own meaning. Indeed, that is the purpose of banning _some_
/// superfluous escapes: it provides a way to evolve the syntax in a compatible
/// manner.
///
/// # Example
///
/// ```
/// use regex_syntax::is_escapeable_character;
///
/// assert!(is_escapeable_character('?'));
/// assert!(is_escapeable_character('-'));
/// assert!(is_escapeable_character('&'));
/// assert!(is_escapeable_character('#'));
/// assert!(is_escapeable_character('%'));
/// assert!(is_escapeable_character('/'));
/// assert!(is_escapeable_character('!'));
/// assert!(is_escapeable_character('"'));
///
/// assert!(!is_escapeable_character('e'));
/// ```
pub fn is_escapeable_character(c: char) -> bool {
// Certainly escapeable if it's a meta character.
if is_meta_character(c) {
return true;
}
// Any character that isn't ASCII is definitely not escapeable. There's
// no real need to allow things like \☃ right?
if !c.is_ascii() {
return false;
}
// Otherwise, we basically say that everything is escapeable unless it's a
// letter or digit. Things like \3 are either octal (when enabled) or an
// error, and we should keep it that way. Otherwise, letters are reserved
// for adding new syntax in a backwards compatible way.
match c {
'0'..='9' | 'A'..='Z' | 'a'..='z' => false,
// While not currently supported, we keep these as not escapeable to
// give us some flexibility with respect to supporting the \< and
// \> word boundary assertions in the future. By rejecting them as
// escapeable, \< and \> will result in a parse error. Thus, we can
// turn them into something else in the future without it being a
// backwards incompatible change.
'<' | '>' => false,
_ => true,
}
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Panics
///
/// If the `unicode-perl` feature is not enabled, then this function
/// panics. For this reason, it is recommended that callers use
/// [`try_is_word_character`] instead.
pub fn is_word_character(c: char) -> bool {
try_is_word_character(c).expect("unicode-perl feature must be enabled")
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Errors
///
/// If the `unicode-perl` feature is not enabled, then this function always
/// returns an error.
pub fn try_is_word_character(
c: char,
) -> core::result::Result<bool, UnicodeWordError> {
unicode::is_word_character(c)
}
/// Returns true if and only if the given character is an ASCII word character.
///
/// An ASCII word character is defined by the following character class:
/// `[_0-9a-zA-Z]`.
pub fn is_word_byte(c: u8) -> bool {
match c {
b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use super::*;
#[test]
fn escape_meta() {
assert_eq!(
escape(r"\.+*?()|[]{}^$#&-~"),
r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string()
);
}
#[test]
fn word_byte() {
assert!(is_word_byte(b'a'));
assert!(!is_word_byte(b'-'));
}
#[test]
#[cfg(feature = "unicode-perl")]
fn word_char() {
assert!(is_word_character('a'), "ASCII");
assert!(is_word_character('à'), "Latin-1");
assert!(is_word_character('β'), "Greek");
assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)");
assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)");
assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)");
assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)");
assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)");
assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)");
assert!(!is_word_character('-'));
assert!(!is_word_character('☃'));
}
#[test]
#[should_panic]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_panic() {
assert!(is_word_character('a'));
}
#[test]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_error() {
assert!(try_is_word_character('a').is_err());
}
}
| is_meta_character | identifier_name |
lib.rs | /*!
This crate provides a robust regular expression parser.
This crate defines two primary types:
* [`Ast`](ast::Ast) is the abstract syntax of a regular expression.
An abstract syntax corresponds to a *structured representation* of the
concrete syntax of a regular expression, where the concrete syntax is the
pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it
can be converted back to the original concrete syntax (modulo some details,
like whitespace). To a first approximation, the abstract syntax is complex
and difficult to analyze.
* [`Hir`](hir::Hir) is the high-level intermediate representation
("HIR" or "high-level IR" for short) of regular expression. It corresponds to
an intermediate state of a regular expression that sits between the abstract
syntax and the low level compiled opcodes that are eventually responsible for
executing a regular expression search. Given some high-level IR, it is not
possible to produce the original concrete syntax (although it is possible to
produce an equivalent concrete syntax, it will likely scarcely resemble
the original pattern). To a first approximation, the high-level IR is simple
and easy to analyze.
These two types come with conversion routines:
* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an
[`Ast`](ast::Ast).
* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a
[`Hir`](hir::Hir).
As a convenience, the above two conversion routines are combined into one via
the top-level [`Parser`] type. This `Parser` will first convert your pattern to
an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level
[`parse`] free function.
# Example
This example shows how to parse a pattern string into its HIR:
```
use regex_syntax::{hir::Hir, parse};
let hir = parse("a|b")?;
assert_eq!(hir, Hir::alternation(vec![
Hir::literal("a".as_bytes()),
Hir::literal("b".as_bytes()),
]));
# Ok::<(), Box<dyn std::error::Error>>(())
```
# Concrete syntax supported
The concrete syntax is documented as part of the public API of the
[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax).
# Input safety
A key feature of this library is that it is safe to use with end user facing
input. This plays a significant role in the internal implementation. In
particular:
1. Parsers provide a `nest_limit` option that permits callers to control how
deeply nested a regular expression is allowed to be. This makes it possible
to do case analysis over an `Ast` or an `Hir` using recursion without
worrying about stack overflow.
2. Since relying on a particular stack size is brittle, this crate goes to
great lengths to ensure that all interactions with both the `Ast` and the
`Hir` do not use recursion. Namely, they use constant stack space and heap
space proportional to the size of the original pattern string (in bytes).
This includes the type's corresponding destructors. (One exception to this
is literal extraction, but this will eventually get fixed.)
# Error reporting
The `Display` implementations on all `Error` types exposed in this library
provide nice human readable errors that are suitable for showing to end users
in a monospace font.
# Literal extraction
This crate provides limited support for [literal extraction from `Hir`
values](hir::literal). Be warned that literal extraction uses recursion, and
therefore, stack size proportional to the size of the `Hir`.
The purpose of literal extraction is to speed up searches. That is, if you
know a regular expression must match a prefix or suffix literal, then it is
often quicker to search for instances of that literal, and then confirm or deny
the match using the full regular expression engine. These optimizations are
done automatically in the `regex` crate.
# Crate features
An important feature provided by this crate is its Unicode support. This
includes things like case folding, boolean properties, general categories,
scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`.
However, a downside of this support is that it requires bundling several
Unicode data tables that are substantial in size.
A fair number of use cases do not require full Unicode support. For this
reason, this crate exposes a number of features to control which Unicode
data is available.
If a regular expression attempts to use a Unicode feature that is not available
because the corresponding crate feature was disabled, then translating that
regular expression to an `Hir` will return an error. (It is still possible to
construct an `Ast` for such a regular expression, since Unicode data is not
used until translation to an `Hir`.) Stated differently, enabling or disabling
any of the features below can only add or subtract from the total set of valid
regular expressions. Enabling or disabling a feature will never modify the
match semantics of a regular expression.
The following features are available:
* **std** -
Enables support for the standard library. This feature is enabled by default.
When disabled, only `core` and `alloc` are used. Otherwise, enabling `std`
generally just enables `std::error::Error` trait impls for the various error
types.
* **unicode** -
Enables all Unicode features. This feature is enabled by default, and will
always cover all Unicode features, even if more are added in the future.
* **unicode-age** -
Provide the data for the
[Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
This makes it possible to use classes like `\p{Age:6.0}` to refer to all
codepoints first introduced in Unicode 6.0.
* **unicode-bool** -
Provide the data for numerous Unicode boolean properties. The full list
is not included here, but contains properties like `Alphabetic`, `Emoji`,
`Lowercase`, `Math`, `Uppercase` and `White_Space`.
* **unicode-case** -
Provide the data for case insensitive matching using
[Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
* **unicode-gencat** -
Provide the data for
[Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
This includes, but is not limited to, `Decimal_Number`, `Letter`,
`Math_Symbol`, `Number` and `Punctuation`.
* **unicode-perl** -
Provide the data for supporting the Unicode-aware Perl character classes,
corresponding to `\w`, `\s` and `\d`. This is also necessary for using
Unicode-aware word boundary assertions. Note that if this feature is
disabled, the `\s` and `\d` character classes are still available if the
`unicode-bool` and `unicode-gencat` features are enabled, respectively.
* **unicode-script** -
Provide the data for
[Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
`Latin` and `Thai`.
* **unicode-segment** -
Provide the data necessary to provide the properties used to implement the
[Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
`\p{sb=ATerm}`.
* **arbitrary** -
Enabling this feature introduces a public dependency on the
[`arbitrary`](https://crates.io/crates/arbitrary)
crate. Namely, it implements the `Arbitrary` trait from that crate for the
[`Ast`](crate::ast::Ast) type. This feature is disabled by default.
*/
#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// MSRV(1.62): Allow unused warnings. Needed for the 'allow' below,
// since the warning is no longer triggered in newer Rust releases.
// Once the 'allow(mutable_borrow_reservation_conflict)' can be
// removed, we can remove the 'allow(renamed_and_removed_lints)' too.
#![allow(renamed_and_removed_lints)]
// MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV
// is Rust 1.60 at the time of writing, a warning is displayed. But
// the lang team decided the code pattern flagged by this warning is
// OK, so the warning is innocuous. We can remove this explicit allow
// once we get to a Rust release where the warning is no longer
// triggered. I believe that's Rust 1.62.
#![allow(mutable_borrow_reservation_conflict)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(test, feature = "std"))]
extern crate std;
extern crate alloc;
pub use crate::{
error::Error,
parser::{parse, Parser, ParserBuilder},
unicode::UnicodeWordError,
};
use alloc::string::String;
pub mod ast;
mod debug;
mod either;
mod error;
pub mod hir;
mod parser;
mod rank;
mod unicode;
mod unicode_tables;
pub mod utf8;
/// Escapes all regular expression meta characters in `text`.
///
/// The string returned may be safely used as a literal in a regular
/// expression.
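///
/// # Example
///
/// A small sketch of the expected behavior:
///
/// ```
/// assert_eq!(regex_syntax::escape("a.b?c"), r"a\.b\?c");
/// ```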
pub fn escape(text: &str) -> String {
let mut quoted = String::new();
escape_into(text, &mut quoted);
quoted
}
/// Escapes all meta characters in `text` and writes the result into `buf`.
///
/// This will append escape characters into the given buffer. The characters
/// that are appended are safe to use as a literal in a regular expression.
pub fn escape_into(text: &str, buf: &mut String) {
buf.reserve(text.len());
for c in text.chars() {
if is_meta_character(c) {
buf.push('\\');
}
buf.push(c);
}
}
/// Returns true if the given character has significance in a regex.
///
/// Generally speaking, these are the only characters which _must_ be escaped
/// in order to match their literal meaning. For example, to match a literal
/// `|`, one could write `\|`. Escaping isn't always necessary, though. For
/// example, `-` is treated as a meta character because of its significance
/// for writing ranges inside of character classes, but the regex `-` will
/// match a literal `-` because `-` has no special meaning outside of character
/// classes.
///
/// In order to determine whether a character may be escaped at all, the
/// [`is_escapeable_character`] routine should be used. The difference between
/// `is_meta_character` and `is_escapeable_character` is that the latter will
/// return true for some characters that are _not_ meta characters. For
/// example, `%` and `\%` both match a literal `%` in all contexts. In other
/// words, `is_escapeable_character` includes "superfluous" escapes.
///
/// Note that the set of characters for which this function returns `true` or
/// `false` is fixed and won't change in a semver compatible release. (In this
/// case, "semver compatible release" actually refers to the `regex` crate
/// itself, since reducing or expanding the set of meta characters would be a
/// breaking change for not just `regex-syntax` but also `regex` itself.)
///
/// # Example
///
/// ```
/// use regex_syntax::is_meta_character;
///
/// assert!(is_meta_character('?'));
/// assert!(is_meta_character('-'));
/// assert!(is_meta_character('&'));
/// assert!(is_meta_character('#'));
///
/// assert!(!is_meta_character('%'));
/// assert!(!is_meta_character('/'));
/// assert!(!is_meta_character('!'));
/// assert!(!is_meta_character('"'));
/// assert!(!is_meta_character('e'));
/// ```
pub fn is_meta_character(c: char) -> bool {
match c {
'\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{'
| '}' | '^' | '$' | '#' | '&' | '-' | '~' => true,
_ => false,
}
}
/// Returns true if the given character can be escaped in a regex.
///
/// This returns true in all cases that `is_meta_character` returns true, but
/// also returns true in some cases where `is_meta_character` returns false.
/// For example, `%` is not a meta character, but it is escapeable. That is,
/// `%` and `\%` both match a literal `%` in all contexts.
///
/// The purpose of this routine is to provide knowledge about what characters
/// may be escaped. Namely, most regex engines permit "superfluous" escapes
/// where characters without any special significance may be escaped even
/// though there is no actual _need_ to do so.
///
/// This will return false for some characters. For example, `e` is not
/// escapeable. Therefore, `\e` will either result in a parse error (which is
/// true today), or it could backwards compatibly evolve into a new construct
/// with its own meaning. Indeed, that is the purpose of banning _some_
/// superfluous escapes: it provides a way to evolve the syntax in a compatible
/// manner.
///
/// # Example
///
/// ```
/// use regex_syntax::is_escapeable_character;
///
/// assert!(is_escapeable_character('?'));
/// assert!(is_escapeable_character('-'));
/// assert!(is_escapeable_character('&'));
/// assert!(is_escapeable_character('#'));
/// assert!(is_escapeable_character('%'));
/// assert!(is_escapeable_character('/'));
/// assert!(is_escapeable_character('!'));
/// assert!(is_escapeable_character('"'));
///
/// assert!(!is_escapeable_character('e'));
/// ```
pub fn is_escapeable_character(c: char) -> bool {
// Certainly escapeable if it's a meta character.
if is_meta_character(c) {
return true;
}
// Any character that isn't ASCII is definitely not escapeable. There's
// no real need to allow things like \☃ right?
if !c.is_ascii() {
return false;
}
// Otherwise, we basically say that everything is escapeable unless it's a
// letter or digit. Things like \3 are either octal (when enabled) or an
// error, and we should keep it that way. Otherwise, letters are reserved
// for adding new syntax in a backwards compatible way.
match c {
'0'..='9' | 'A'..='Z' | 'a'..='z' => false,
// While not currently supported, we keep these as not escapeable to
// give us some flexibility with respect to supporting the \< and
// \> word boundary assertions in the future. By rejecting them as
// escapeable, \< and \> will result in a parse error. Thus, we can
// turn them into something else in the future without it being a
// backwards incompatible change.
'<' | '>' => false,
_ => true,
}
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Panics
///
/// If the `unicode-perl` feature is not enabled, then this function
/// panics. For this reason, it is recommended that callers use
/// [`try_is_word_character`] instead.
pub fn is_word_character(c: char) -> bool {
try_is_word_character(c).expect("unicode-perl feature must be enabled")
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Errors
///
/// If the `unicode-perl` feature is not enabled, then this function always
/// returns an error.
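///
/// # Example
///
/// A sketch, assuming the `unicode-perl` feature is enabled:
///
/// ```
/// assert!(regex_syntax::try_is_word_character('β').unwrap());
/// ```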
pub fn try_is_word_character(
c: char,
) -> core::result::Result<bool, UnicodeWordError> {
unicode::is_word_character(c)
}
/// Returns true if and only if the given character is an ASCII word character.
///
/// An ASCII word character is defined by the following character class:
/// `[_0-9a-zA-Z]`.
pub fn is_word_byte(c: u8) -> bool {
match c {
b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use super::*;
#[test]
fn escape_meta() {
assert_eq!(
escape(r"\.+*?()|[]{}^$#&-~"),
r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string()
);
}
#[test]
fn word_byte() {
assert!(is_word_byte(b'a'));
assert!(!is_word_byte(b'-'));
}
#[test]
#[cfg(feature = "unicode-perl")]
fn word_char() {
assert!(is_word_character('a'), "ASCII");
assert!(is_word_character('à'), "Latin-1");
assert!(is_word_character('β'), "Greek");
assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)");
assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)");
assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)");
assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)");
assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)");
assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)");
assert!(!is_word_character('-'));
assert!(!is_word_character('☃'));
}
#[test]
#[should_panic]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_panic() {
assert!(is_word_character('a'));
}
#[test]
#[cfg(not(feature = "unicode-perl"))]
fn word_char_disabled_error() {
assert!(try_is_word_character('a').is_err());
}
}
main.rs
use std::env;
use std::ffi::OsStr;
use std::io::{self, Write};
use std::path::PathBuf;
use std::process;
use std::result;
use imdb_index::{Index, IndexBuilder, NgramType, Searcher};
use lazy_static::lazy_static;
use tabwriter::TabWriter;
use walkdir::WalkDir;
use crate::rename::{RenamerBuilder, RenameAction};
use crate::util::{choose, read_yesno, write_tsv};
mod download;
mod logger;
mod rename;
mod util;
/// Our type alias for handling errors throughout imdb-rename.
type Result<T> = result::Result<T, failure::Error>;
fn main() {
if let Err(err) = try_main() {
// A pipe error occurs when the consumer of this process's output has
// hung up. This is a normal event, and we should quit gracefully.
if is_pipe_error(&err) {
process::exit(0);
}
// Print the error, including all of its underlying causes.
eprintln!("{}", pretty_error(&err));
// If we get a non-empty backtrace (e.g., RUST_BACKTRACE=1 is set),
// then show it.
let backtrace = err.backtrace().to_string();
if !backtrace.trim().is_empty() {
eprintln!("{}", backtrace);
}
process::exit(1);
}
}
fn try_main() -> Result<()> {
logger::init()?;
log::set_max_level(log::LevelFilter::Info);
let args = Args::from_matches(&app().get_matches())?;
if args.debug {
log::set_max_level(log::LevelFilter::Debug);
}
// Forcefully update the data and re-index if requested.
if args.update_data {
args.download_all_update()?;
args.create_index()?;
return Ok(());
}
// Ensure that the necessary data exists.
if args.download_all()? || args.update_index {
args.create_index()?;
if args.update_index {
return Ok(());
}
}
// Now ensure that the index exists.
if !args.index_dir.exists() {
args.create_index()?;
}
let mut searcher = args.searcher()?;
let results = match args.query {
None => None,
Some(ref query) => Some(searcher.search(&query.parse()?)?),
};
if args.files.is_empty() {
let results = match results {
None => failure::bail!("run with a file to rename or --query"),
Some(ref results) => results,
};
return write_tsv(io::stdout(), &mut searcher, results.as_slice());
}
let mut builder = RenamerBuilder::new();
builder
.min_votes(args.min_votes)
.good_threshold(0.25)
.regex_episode(&args.regex_episode)
.regex_season(&args.regex_season)
.regex_year(&args.regex_year);
if let Some(ref results) = results {
builder.force(choose(&mut searcher, results.as_slice(), 0.25)?);
}
let renamer = builder.build()?;
let proposals = renamer.propose(
&mut searcher,
&args.files,
args.dest_dir,
args.rename_action)?;
if proposals.is_empty() {
failure::bail!("no files to rename");
}
let mut stdout = TabWriter::new(io::stdout());
for p in &proposals {
writeln!(stdout, "{}\t->\t{}", p.src().display(), p.dst().display())?;
}
stdout.flush()?;
if read_yesno(&format!(
"Are you sure you want to {action} the above files? (y/n) ",
action = &args.rename_action
))? {
for p in &proposals {
if let Err(err) = p.rename() {
eprintln!("{}", err);
}
}
}
Ok(())
}
#[derive(Debug)]
struct Args {
data_dir: PathBuf,
dest_dir: Option<PathBuf>,
debug: bool,
files: Vec<PathBuf>,
index_dir: PathBuf,
ngram_size: usize,
ngram_type: NgramType,
query: Option<String>,
regex_episode: String,
regex_season: String,
regex_year: String,
update_data: bool,
update_index: bool,
min_votes: u32,
rename_action: RenameAction,
}
impl Args {
fn from_matches(matches: &clap::ArgMatches) -> Result<Args> {
let files = collect_paths(
matches
.values_of_os("file")
.map(|it| it.collect())
.unwrap_or(vec![]),
matches.is_present("follow"),
);
let query = matches
.value_of_lossy("query")
.map(|q| q.into_owned());
let data_dir = matches
.value_of_os("data-dir")
.map(PathBuf::from)
.unwrap();
let dest_dir = matches
.value_of_os("dest-dir")
.map(PathBuf::from);
let index_dir = matches
.value_of_os("index-dir")
.map(PathBuf::from)
.unwrap_or(data_dir.join("index"));
let regex_episode = matches
.value_of_lossy("re-episode")
.unwrap()
.into_owned();
let regex_season = matches
.value_of_lossy("re-season")
.unwrap()
.into_owned();
let regex_year = matches
.value_of_lossy("re-year")
.unwrap()
.into_owned();
let min_votes = matches
.value_of_lossy("votes")
.unwrap()
.parse()?;
let rename_action = {
if matches.is_present("symlink") {
if !cfg!(unix) {
failure::bail!(
"--symlink currently supported only on Unix \
platforms, try hardlink (-H) instead"
);
}
RenameAction::Symlink
} else if matches.is_present("hardlink") {
RenameAction::Hardlink
} else {
RenameAction::Rename
}
};
Ok(Args {
data_dir: data_dir,
dest_dir: dest_dir,
debug: matches.is_present("debug"),
files: files,
index_dir: index_dir,
ngram_size: matches.value_of_lossy("ngram-size").unwrap().parse()?,
ngram_type: matches.value_of_lossy("ngram-type").unwrap().parse()?,
query: query,
regex_episode: regex_episode,
regex_season: regex_season,
regex_year: regex_year,
update_data: matches.is_present("update-data"),
update_index: matches.is_present("update-index"),
min_votes: min_votes,
rename_action: rename_action,
})
}
fn create_index(&self) -> Result<Index> {
Ok(IndexBuilder::new()
.ngram_size(self.ngram_size)
.ngram_type(self.ngram_type)
.create(&self.data_dir, &self.index_dir)?)
}
fn open_index(&self) -> Result<Index> {
Ok(Index::open(&self.data_dir, &self.index_dir)?)
}
fn searcher(&self) -> Result<Searcher> {
Ok(Searcher::new(self.open_index()?))
}
fn download_all(&self) -> Result<bool> {
download::download_all(&self.data_dir)
}
fn download_all_update(&self) -> Result<()> {
download::update_all(&self.data_dir)
}
}
fn app() -> clap::App<'static,'static> {
use clap::{App, AppSettings, Arg};
lazy_static! {
// clap wants all of its strings tied to a particular lifetime, but
// we'd really like to determine some default values dynamically. Using
// a lazy_static here is one way of safely giving a static lifetime to
// a value that is computed at runtime.
//
// An alternative approach would be to compute all of our default
// values in the caller, and pass them into this function. It's nicer
// to define what we need here, though. Locality of reference and all
// that.
static ref DATA_DIR: PathBuf = env::temp_dir().join("imdb-rename");
}
App::new("imdb-rename")
.author(clap::crate_authors!())
.version(clap::crate_version!())
.max_term_width(100)
.setting(AppSettings::UnifiedHelpMessage)
.arg(Arg::with_name("file")
.multiple(true)
.help("One or more files to rename."))
.arg(Arg::with_name("data-dir")
.long("data-dir")
.env("IMDB_RENAME_DATA_DIR")
.takes_value(true)
.default_value_os(DATA_DIR.as_os_str())
.help("The location to store IMDb data files."))
.arg(Arg::with_name("dest-dir")
.long("dest-dir")
.short("d")
.env("IMDB_RENAME_DEST_DIR")
.takes_value(true)
.help("The output directory of renamed files \
(or symlinks/hardlinks with the -s/-H options). \
By default, files are renamed in place."))
.arg(Arg::with_name("debug")
.long("debug")
.help("Show debug messages. Use this when filing bugs."))
.arg(Arg::with_name("follow")
.long("follow")
.short("f")
.help("Follow directories and attempt to rename all child \
entries."))
.arg(Arg::with_name("index-dir")
.long("index-dir")
.env("IMDB_RENAME_INDEX_DIR")
.takes_value(true)
.help("The location to store IMDb index files. \
When absent, the default is {data-dir}/index."))
.arg(Arg::with_name("ngram-size")
.long("ngram-size")
.default_value("3")
.help("Choose the ngram size for indexing names. This is only \
used at index time and otherwise ignored."))
.arg(Arg::with_name("ngram-type")
.long("ngram-type")
.default_value("window")
.possible_values(NgramType::possible_names())
.help("Choose the type of ngram generation. This is only used \
at index time and otherwise ignored."))
.arg(Arg::with_name("query")
.long("query")
.short("q")
.takes_value(true)
.help("Setting an override query is necessary if the file \
path lacks sufficient information to find a matching \
title. For example, if a year could not be found. It \
is also useful for specifying a TV show when renaming \
multiple episodes at once."))
.arg(Arg::with_name("re-episode")
.long("re-episode")
.takes_value(true)
.default_value(r"[Ee](?P<episode>[0-9]+)")
.help("A regex for matching episode numbers. The episode number \
is extracted by looking for an 'episode' capture group."))
.arg(Arg::with_name("re-season")
.long("re-season")
.takes_value(true)
.default_value(r"[Ss](?P<season>[0-9]+)")
.help("A regex for matching season numbers. The season number \
is extracted by looking for a 'season' capture group."))
.arg(Arg::with_name("re-year")
.long("re-year")
.takes_value(true)
.default_value(r"\b(?P<year>[0-9]{4})\b")
.help("A regex for matching the year. The year is extracted by \
looking for a 'year' capture group."))
.arg(Arg::with_name("update-data")
.long("update-data")
.help("Forcefully refreshes the IMDb data and then exits."))
.arg(Arg::with_name("votes")
.long("votes")
.default_value("1000")
.help("The minimum number of votes required for results matching \
a query derived from existing file names. This is not \
applied to explicit queries via the -q/--query flag."))
.arg(Arg::with_name("update-index")
.long("update-index")
.help("Forcefully re-indexes the IMDb data and then exits."))
.arg(Arg::with_name("symlink")
.long("symlink")
.short("s")
.conflicts_with("hardlink")
.help("Create a symlink instead of renaming. \
(Unix only feature.)"))
.arg(Arg::with_name("hardlink")
.long("hardlink")
.short("H")
.conflicts_with("symlink")
.help("Create a hardlink instead of renaming. \
This doesn't work when renaming directories."))
}
/// Collect all file paths from a sequence of OsStrings from the command line.
/// If `follow` is true, then any paths that are directories are expanded to
/// include all child paths, recursively.
///
/// If there is an error following a path, then it is logged to stderr and
/// otherwise skipped.
fn collect_paths(paths: Vec<&OsStr>, follow: bool) -> Vec<PathBuf> {
let mut results = vec![];
for path in paths {
let path = PathBuf::from(path);
if !follow || !path.is_dir() {
results.push(path);
continue;
}
for result in WalkDir::new(path) {
match result {
Ok(dent) => results.push(dent.path().to_path_buf()),
Err(err) => eprintln!("{}", err),
}
}
}
results
}
/// Return a prettily formatted error, including its entire causal chain.
fn pretty_error(err: &failure::Error) -> String {
let mut pretty = err.to_string();
let mut prev = err.as_fail();
while let Some(next) = prev.cause() {
pretty.push_str(": ");
pretty.push_str(&next.to_string());
prev = next;
}
pretty
}
/// Return true if and only if an I/O broken pipe error exists in the causal
/// chain of the given error.
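///
/// A sketch of the intended behavior:
///
/// ```ignore
/// let io_err = std::io::Error::from(std::io::ErrorKind::BrokenPipe);
/// assert!(is_pipe_error(&failure::Error::from(io_err)));
/// ```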
fn is_pipe_error(err: &failure::Error) -> bool {
for cause in err.iter_chain() {
if let Some(ioerr) = cause.downcast_ref::<io::Error>() {
if ioerr.kind() == io::ErrorKind::BrokenPipe {
return true;
}
}
}
false
}
producer.rs
// Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::super::ConnectorHarness;
use super::redpanda_container;
use crate::connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => {
error!("Error creating topic {}: {}", &topic, err);
}
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
match timeout(Duration::from_secs(30), message_stream.next()) // first message, we might need to wait a little longer for the consumer to boot up and settle things with redpanda
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event
let batched_data = literal!([{
"data": {
"value": {
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
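// The `e2` event above drives per-message settings (key, timestamp,
// partition) through `kafka_producer` metadata. A rough sketch of how such
// metadata could map onto an rdkafka record; the connector's real mapping
// lives outside this test file, so this helper and its parameters are
// illustrative assumptions (headers omitted for brevity):
#[allow(dead_code)]
fn record_from_meta<'a>(
    topic: &'a str,
    payload: &'a [u8],
    key: &'a [u8],
    partition: Option<i32>,
    timestamp_ms: Option<i64>,
) -> rdkafka::producer::FutureRecord<'a, [u8], [u8]> {
    let mut record = rdkafka::producer::FutureRecord::to(topic)
        .payload(payload)
        .key(key);
    if let Some(p) = partition {
        record = record.partition(p);
    }
    if let Some(ts) = timestamp_ms {
        record = record.timestamp(ts);
    }
    record
}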
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127.0.0.1:{port}");
let topic = "unreachable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unresolvable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("i_do_not_resolve:{port}");
let topic = "unresolvable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
| connector_kafka_producer | identifier_name |
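// End of one dataset row: the masked middle (`connector_kafka_producer`) and
// its fim_type (`identifier_name`) follow the prefix and suffix columns. A
// minimal sketch of how such a row reassembles into the original source; the
// `FimRow` type is an assumption for illustration, not part of the dataset:
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl FimRow {
    // The original file is prefix + middle + suffix; the columns are stored
    // in prefix/suffix/middle order, so reassembly must reorder them.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}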
producer.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::super::ConnectorHarness;
use super::redpanda_container;
use crate::connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => {
error!("Error creating topic {}: {}", &topic, err);
}
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
match timeout(Duration::from_secs(30), message_stream.next()) // first message, we might need to wait a little longer for the consumer to boot up and settle things with redpanda
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event | "field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127.0.0.1:{port}");
let topic = "unreachable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unresolvable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("i_do_not_resolve:{port}");
let topic = "unresolvable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
} | let batched_data = literal!([{
"data": {
"value": { | random_line_split |
producer.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::super::ConnectorHarness;
use super::redpanda_container;
use crate::connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()> | num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => {
error!("Error creating topic {}: {}", &topic, err);
}
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
match timeout(Duration::from_secs(30), message_stream.next()) // first message, we might need to wait a little longer for the consumer to boot up and settle things with redpanda
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event
let batched_data = literal!([{
"data": {
"value": {
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127.0.0.1:{port}");
let topic = "unreachable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unresolvable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("i_do_not_resolve:{port}");
let topic = "unresolvable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
| {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic, | identifier_body |
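// `identifier_body` rows mask a whole function body, but note the middle
// above stops mid-body (at `topic,`): the column is length-capped, so this
// row cannot be reassembled into the complete file. A sketch of extracting a
// named function's body with the `syn` crate (an assumed tool, not
// necessarily how the dataset was built; needs syn's "full" feature):
use quote::ToTokens;

fn body_of(source: &str, name: &str) -> Option<String> {
    let file = syn::parse_file(source).ok()?;
    file.items.into_iter().find_map(|item| match item {
        syn::Item::Fn(f) if f.sig.ident == name => Some(f.block.to_token_stream().to_string()),
        _ => None,
    })
}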
producer.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::super::ConnectorHarness;
use super::redpanda_container;
use crate::connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => |
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
match timeout(Duration::from_secs(30), message_stream.next()) // first message, we might need to wait a little longer for the consumer to boot up and settle things with redpanda
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event
let batched_data = literal!([{
"data": {
"value": {
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127.0.0.1:{port}");
let topic = "unreachable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unresolvable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("i_do_not_resolve:{port}");
let topic = "unresolvable";
let connector_config = literal!({
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
"debug": "all"
}
}
});
let harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
assert!(harness.start().await.is_err());
Ok(())
}
| {
error!("Error creating topic {}: {}", &topic, err);
} | conditional_block |
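// Each receive step in the tests above wraps `message_stream.next()` in a
// timeout and sync-commits whatever arrived. The same step condensed into a
// helper; a sketch against the rdkafka types these tests already import, not
// code the tests actually contain:
async fn expect_message(
    consumer: &StreamConsumer,
    wait: Duration,
) -> Result<(Option<Vec<u8>>, Option<Vec<u8>>)> {
    let mut stream = consumer.stream();
    match timeout(wait, stream.next()).await? {
        Some(Ok(msg)) => {
            consumer
                .commit_message(&msg, CommitMode::Sync)
                .expect("Commit failed");
            Ok((msg.key().map(<[u8]>::to_vec), msg.payload().map(<[u8]>::to_vec)))
        }
        Some(Err(e)) => Err(e.into()),
        None => Err("Topic Stream unexpectedly finished.".into()),
    }
}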
exec.rs | use std::{
ops::DerefMut,
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll},
};
use crate::vbus::{
BusSpawnedProcess, VirtualBusError, VirtualBusInvokable, VirtualBusProcess, VirtualBusScope,
};
use futures::Future;
use tokio::sync::mpsc;
use tracing::*;
use wasmer::{FunctionEnvMut, Instance, Memory, Module, Store};
use wasmer_wasi_types::wasi::{Errno, ExitCode};
use super::{BinFactory, BinaryPackage, ModuleCache};
use crate::{
import_object_for_all_wasi_versions, runtime::SpawnType, SpawnedMemory, WasiEnv, WasiError,
WasiFunctionEnv, WasiRuntime,
};
pub fn spawn_exec(
binary: BinaryPackage,
name: &str,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync +'static>,
compiled_modules: &ModuleCache,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Load the module
#[cfg(feature = "sys")]
let compiler = store.engine().name();
#[cfg(not(feature = "sys"))]
let compiler = "generic";
#[cfg(feature = "sys")]
let module = compiled_modules.get_compiled_module(&store, binary.hash().as_str(), compiler);
#[cfg(not(feature = "sys"))]
let module = compiled_modules.get_compiled_module(binary.hash().as_str(), compiler);
let module = match (module, binary.entry.as_ref()) {
(Some(a), _) => a,
(None, Some(entry)) => {
let module = Module::new(&store, &entry[..]).map_err(|err| {
error!(
"failed to compile module [{}, len={}] - {}",
name,
entry.len(),
err
);
VirtualBusError::CompileError
});
if module.is_err() {
env.cleanup(Some(Errno::Noexec as ExitCode));
}
let module = module?;
compiled_modules.set_compiled_module(binary.hash().as_str(), compiler, &module);
module
}
(None, None) => {
error!("package has no entry [{}]", name,);
env.cleanup(Some(Errno::Noexec as ExitCode));
return Err(VirtualBusError::CompileError);
}
};
// If the file system has not already been union'ed then do so
env.state.fs.conditional_union(&binary);
// Now run the module
let mut ret = spawn_exec_module(module, store, env, runtime);
if let Ok(ret) = ret.as_mut() {
ret.module_memory_footprint = binary.module_memory_footprint;
ret.file_system_memory_footprint = binary.file_system_memory_footprint;
}
ret
}
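// `spawn_exec` above keys its compiled-module lookups on the package hash
// plus the engine/compiler name, compiling and back-filling the cache on a
// miss. The same lookup-or-compile flow in miniature, with a plain HashMap
// standing in for the real ModuleCache (an illustrative sketch only):
fn get_or_compile<'c>(
    cache: &'c mut std::collections::HashMap<(String, String), Vec<u8>>,
    hash: &str,
    compiler: &str,
    compile: impl FnOnce() -> Vec<u8>,
) -> &'c Vec<u8> {
    cache
        .entry((hash.to_string(), compiler.to_string()))
        .or_insert_with(compile)
}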
pub fn spawn_exec_module(
module: Module,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync +'static>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Create a new task manager
let tasks = runtime.task_manager();
// Create the signaler
let pid = env.pid();
let signaler = Box::new(env.process.clone());
// Now run the binary
let (exit_code_tx, exit_code_rx) = mpsc::unbounded_channel();
{
// Determine if shared memory needs to be created and imported
let shared_memory = module.imports().memories().next().map(|a| *a.ty());
// Determine if we are going to create memory and import it or just rely on self creation of memory
let memory_spawn = match shared_memory {
Some(ty) => {
#[cfg(feature = "sys")]
let style = store.tunables().memory_style(&ty);
SpawnType::CreateWithType(SpawnedMemory {
ty,
#[cfg(feature = "sys")]
style,
})
}
None => SpawnType::Create,
};
// Create a thread that will run this process
let runtime = runtime.clone();
let tasks_outer = tasks.clone();
let task = {
let spawn_type = memory_spawn;
let mut store = store;
move || {
// Create the WasiFunctionEnv
let mut wasi_env = env;
wasi_env.runtime = runtime;
let memory = match wasi_env.tasks().build_memory(spawn_type) {
Ok(m) => m,
Err(err) => {
error!("wasi[{}]::wasm could not build memory error ({})", pid, err);
wasi_env.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
let mut wasi_env = WasiFunctionEnv::new(&mut store, wasi_env);
// Let's instantiate the module with the imports.
let (mut import_object, init) =
import_object_for_all_wasi_versions(&module, &mut store, &wasi_env.env);
if let Some(memory) = memory {
import_object.define(
"env",
"memory",
Memory::new_from_existing(&mut store, memory),
);
}
let instance = match Instance::new(&mut store, &module, &import_object) {
Ok(a) => a,
Err(err) => {
error!("wasi[{}]::wasm instantiate error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
init(&instance, &store).unwrap();
// Initialize the WASI environment
if let Err(err) = wasi_env.initialize(&mut store, instance.clone()) {
error!("wasi[{}]::wasi initialize error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
// If this module exports an _initialize function, run that first.
if let Ok(initialize) = instance.exports.get_function("_initialize") {
if let Err(e) = initialize.call(&mut store, &[]) {
let code = match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code as ExitCode,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as ExitCode
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
Errno::Noexec as ExitCode
}
};
let _ = exit_code_tx.send(code);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
}
// Let's call the `_start` function, which is our `main` function in Rust.
let start = instance.exports.get_function("_start").ok();
// If there is a start function
debug!("wasi[{}]::called main()", pid);
// TODO: rewrite to use crate::run_wasi_func
let ret = if let Some(start) = start {
match start.call(&mut store, &[]) {
Ok(_) => 0,
Err(e) => match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as u32
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
9999u32
}
},
}
} else {
debug!("wasi[{}]::exec-failed: missing _start function", pid);
Errno::Noexec as u32
};
debug!("wasi[{}]::main() has exited with {}", pid, ret);
// Cleanup the environment
wasi_env.data(&store).cleanup(Some(ret));
// Send the result
let _ = exit_code_tx.send(ret);
drop(exit_code_tx);
}
};
// TODO: handle this better - required because of Module not being Send.
#[cfg(feature = "js")]
let task = {
struct UnsafeWrapper {
inner: Box<dyn FnOnce() +'static>,
}
unsafe impl Send for UnsafeWrapper {}
let inner = UnsafeWrapper {
inner: Box::new(task),
};
move || {
(inner.inner)();
}
};
tasks_outer.task_wasm(Box::new(task)).map_err(|err| {
error!("wasi[{}]::failed to launch module - {}", pid, err);
VirtualBusError::UnknownError
})?
};
let inst = Box::new(SpawnedProcess {
exit_code: Mutex::new(None),
exit_code_rx: Mutex::new(exit_code_rx),
});
Ok(BusSpawnedProcess {
inst,
stdin: None,
stdout: None,
stderr: None,
signaler: Some(signaler),
module_memory_footprint: 0,
file_system_memory_footprint: 0,
})
}
impl BinFactory {
pub fn spawn<'a>(
&'a self,
name: String,
store: Store,
env: WasiEnv,
) -> Pin<Box<dyn Future<Output = Result<BusSpawnedProcess, VirtualBusError>> + 'a>> {
Box::pin(async move {
// Find the binary (or die trying) and make the spawn type
let binary = self
.get_binary(name.as_str(), Some(env.fs_root()))
.await
.ok_or(VirtualBusError::NotFound);
if binary.is_err() {
env.cleanup(Some(Errno::Noent as ExitCode));
}
let binary = binary?;
// Execute
spawn_exec(
binary,
name.as_str(),
store,
env,
&self.runtime,
&self.cache,
)
})
}
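// `spawn` above hands back a `Pin<Box<dyn Future ...>>` so callers can await
// the spawn without naming the concrete future type. The same shape in
// miniature; the lookup body is a stand-in, not the real binary resolution:
fn boxed_lookup<'a>(name: &'a str) -> Pin<Box<dyn Future<Output = Option<&'a str>> + 'a>> {
    Box::pin(async move {
        if name.is_empty() {
            None
        } else {
            Some(name)
        }
    })
}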
pub fn try_built_in(
&self,
name: String,
parent_ctx: Option<&FunctionEnvMut<'_, WasiEnv>>,
store: &mut Option<Store>,
builder: &mut Option<WasiEnv>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// We check for built in commands
if let Some(parent_ctx) = parent_ctx | else if self.commands.exists(name.as_str()) {
tracing::warn!("builtin command without a parent ctx - {}", name);
}
Err(VirtualBusError::NotFound)
}
}
#[derive(Debug)]
pub(crate) struct SpawnedProcess {
pub exit_code: Mutex<Option<ExitCode>>,
pub exit_code_rx: Mutex<mpsc::UnboundedReceiver<ExitCode>>,
}
impl VirtualBusProcess for SpawnedProcess {
fn exit_code(&self) -> Option<ExitCode> {
let mut exit_code = self.exit_code.lock().unwrap();
if let Some(exit_code) = exit_code.as_ref() {
return Some(*exit_code);
}
let mut rx = self.exit_code_rx.lock().unwrap();
match rx.try_recv() {
Ok(code) => {
exit_code.replace(code);
Some(code)
}
Err(mpsc::error::TryRecvError::Disconnected) => {
let code = Errno::Canceled as ExitCode;
exit_code.replace(code);
Some(code)
}
_ => None,
}
}
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
{
let exit_code = self.exit_code.lock().unwrap();
if exit_code.is_some() {
return Poll::Ready(());
}
}
let mut rx = self.exit_code_rx.lock().unwrap();
let mut rx = Pin::new(rx.deref_mut());
match rx.poll_recv(cx) {
Poll::Ready(code) => {
let code = code.unwrap_or(Errno::Canceled as ExitCode);
{
let mut exit_code = self.exit_code.lock().unwrap();
exit_code.replace(code);
}
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
impl VirtualBusScope for SpawnedProcess {
fn poll_finished(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
VirtualBusProcess::poll_ready(self, cx)
}
}
impl VirtualBusInvokable for SpawnedProcess {}
| {
if self.commands.exists(name.as_str()) {
return self
.commands
.exec(parent_ctx, name.as_str(), store, builder);
}
} | conditional_block |
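// `SpawnedProcess::exit_code` above caches the first code that arrives on the
// channel and treats a dropped sender as a cancellation. The pattern in
// isolation, with plain u32 codes standing in for wasmer's ExitCode (the
// `CANCELED` constant is an assumption for illustration):
use std::sync::Mutex as StdMutex;
use tokio::sync::mpsc as tokio_mpsc;

const CANCELED: u32 = 1;

struct ExitStatus {
    cached: StdMutex<Option<u32>>,
    rx: StdMutex<tokio_mpsc::UnboundedReceiver<u32>>,
}

impl ExitStatus {
    fn get(&self) -> Option<u32> {
        let mut cached = self.cached.lock().unwrap();
        if cached.is_some() {
            return *cached;
        }
        match self.rx.lock().unwrap().try_recv() {
            Ok(code) => {
                *cached = Some(code);
                *cached
            }
            // Sender dropped without reporting: the task died uncleanly.
            Err(tokio_mpsc::error::TryRecvError::Disconnected) => {
                *cached = Some(CANCELED);
                *cached
            }
            Err(tokio_mpsc::error::TryRecvError::Empty) => None,
        }
    }
}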
exec.rs | use std::{
ops::DerefMut,
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll},
};
use crate::vbus::{
BusSpawnedProcess, VirtualBusError, VirtualBusInvokable, VirtualBusProcess, VirtualBusScope,
};
use futures::Future;
use tokio::sync::mpsc;
use tracing::*;
use wasmer::{FunctionEnvMut, Instance, Memory, Module, Store};
use wasmer_wasi_types::wasi::{Errno, ExitCode};
use super::{BinFactory, BinaryPackage, ModuleCache};
use crate::{
import_object_for_all_wasi_versions, runtime::SpawnType, SpawnedMemory, WasiEnv, WasiError,
WasiFunctionEnv, WasiRuntime,
};
pub fn spawn_exec(
binary: BinaryPackage,
name: &str,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync +'static>,
compiled_modules: &ModuleCache,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Load the module
#[cfg(feature = "sys")]
let compiler = store.engine().name();
#[cfg(not(feature = "sys"))]
let compiler = "generic";
#[cfg(feature = "sys")]
let module = compiled_modules.get_compiled_module(&store, binary.hash().as_str(), compiler);
#[cfg(not(feature = "sys"))]
let module = compiled_modules.get_compiled_module(binary.hash().as_str(), compiler);
let module = match (module, binary.entry.as_ref()) {
(Some(a), _) => a,
(None, Some(entry)) => {
let module = Module::new(&store, &entry[..]).map_err(|err| {
error!(
"failed to compile module [{}, len={}] - {}",
name,
entry.len(),
err
);
VirtualBusError::CompileError
});
if module.is_err() {
env.cleanup(Some(Errno::Noexec as ExitCode));
}
let module = module?;
compiled_modules.set_compiled_module(binary.hash().as_str(), compiler, &module);
module
}
(None, None) => {
error!("package has no entry [{}]", name,);
env.cleanup(Some(Errno::Noexec as ExitCode));
return Err(VirtualBusError::CompileError);
}
};
// If the file system has not already been union'ed then do so
env.state.fs.conditional_union(&binary);
// Now run the module
let mut ret = spawn_exec_module(module, store, env, runtime);
if let Ok(ret) = ret.as_mut() {
ret.module_memory_footprint = binary.module_memory_footprint;
ret.file_system_memory_footprint = binary.file_system_memory_footprint;
}
ret
}
pub fn | (
module: Module,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync +'static>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Create a new task manager
let tasks = runtime.task_manager();
// Create the signaler
let pid = env.pid();
let signaler = Box::new(env.process.clone());
// Now run the binary
let (exit_code_tx, exit_code_rx) = mpsc::unbounded_channel();
{
// Determine if shared memory needs to be created and imported
let shared_memory = module.imports().memories().next().map(|a| *a.ty());
// Determine if we are going to create memory and import it or just rely on self creation of memory
let memory_spawn = match shared_memory {
Some(ty) => {
#[cfg(feature = "sys")]
let style = store.tunables().memory_style(&ty);
SpawnType::CreateWithType(SpawnedMemory {
ty,
#[cfg(feature = "sys")]
style,
})
}
None => SpawnType::Create,
};
// Create a thread that will run this process
let runtime = runtime.clone();
let tasks_outer = tasks.clone();
let task = {
let spawn_type = memory_spawn;
let mut store = store;
move || {
// Create the WasiFunctionEnv
let mut wasi_env = env;
wasi_env.runtime = runtime;
let memory = match wasi_env.tasks().build_memory(spawn_type) {
Ok(m) => m,
Err(err) => {
error!("wasi[{}]::wasm could not build memory error ({})", pid, err);
wasi_env.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
let mut wasi_env = WasiFunctionEnv::new(&mut store, wasi_env);
// Let's instantiate the module with the imports.
let (mut import_object, init) =
import_object_for_all_wasi_versions(&module, &mut store, &wasi_env.env);
if let Some(memory) = memory {
import_object.define(
"env",
"memory",
Memory::new_from_existing(&mut store, memory),
);
}
let instance = match Instance::new(&mut store, &module, &import_object) {
Ok(a) => a,
Err(err) => {
error!("wasi[{}]::wasm instantiate error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
init(&instance, &store).unwrap();
// Initialize the WASI environment
if let Err(err) = wasi_env.initialize(&mut store, instance.clone()) {
error!("wasi[{}]::wasi initialize error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
// If this module exports an _initialize function, run that first.
if let Ok(initialize) = instance.exports.get_function("_initialize") {
if let Err(e) = initialize.call(&mut store, &[]) {
let code = match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code as ExitCode,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as ExitCode
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
Errno::Noexec as ExitCode
}
};
let _ = exit_code_tx.send(code);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
}
// Let's call the `_start` function, which is our `main` function in Rust.
let start = instance.exports.get_function("_start").ok();
// If there is a start function
debug!("wasi[{}]::called main()", pid);
// TODO: rewrite to use crate::run_wasi_func
let ret = if let Some(start) = start {
match start.call(&mut store, &[]) {
Ok(_) => 0,
Err(e) => match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as u32
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
9999u32
}
},
}
} else {
debug!("wasi[{}]::exec-failed: missing _start function", pid);
Errno::Noexec as u32
};
debug!("wasi[{}]::main() has exited with {}", pid, ret);
// Cleanup the environment
wasi_env.data(&store).cleanup(Some(ret));
// Send the result
let _ = exit_code_tx.send(ret);
drop(exit_code_tx);
}
};
// TODO: handle this better - required because of Module not being Send.
#[cfg(feature = "js")]
let task = {
struct UnsafeWrapper {
inner: Box<dyn FnOnce() +'static>,
}
unsafe impl Send for UnsafeWrapper {}
let inner = UnsafeWrapper {
inner: Box::new(task),
};
move || {
(inner.inner)();
}
};
tasks_outer.task_wasm(Box::new(task)).map_err(|err| {
error!("wasi[{}]::failed to launch module - {}", pid, err);
VirtualBusError::UnknownError
})?
};
let inst = Box::new(SpawnedProcess {
exit_code: Mutex::new(None),
exit_code_rx: Mutex::new(exit_code_rx),
});
Ok(BusSpawnedProcess {
inst,
stdin: None,
stdout: None,
stderr: None,
signaler: Some(signaler),
module_memory_footprint: 0,
file_system_memory_footprint: 0,
})
}
impl BinFactory {
pub fn spawn<'a>(
&'a self,
name: String,
store: Store,
env: WasiEnv,
) -> Pin<Box<dyn Future<Output = Result<BusSpawnedProcess, VirtualBusError>> + 'a>> {
Box::pin(async move {
// Find the binary (or die trying) and make the spawn type
let binary = self
.get_binary(name.as_str(), Some(env.fs_root()))
.await
.ok_or(VirtualBusError::NotFound);
if binary.is_err() {
env.cleanup(Some(Errno::Noent as ExitCode));
}
let binary = binary?;
// Execute
spawn_exec(
binary,
name.as_str(),
store,
env,
&self.runtime,
&self.cache,
)
})
}
pub fn try_built_in(
&self,
name: String,
parent_ctx: Option<&FunctionEnvMut<'_, WasiEnv>>,
store: &mut Option<Store>,
builder: &mut Option<WasiEnv>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// We check for built in commands
if let Some(parent_ctx) = parent_ctx {
if self.commands.exists(name.as_str()) {
return self
.commands
.exec(parent_ctx, name.as_str(), store, builder);
}
} else if self.commands.exists(name.as_str()) {
tracing::warn!("builtin command without a parent ctx - {}", name);
}
Err(VirtualBusError::NotFound)
}
}
#[derive(Debug)]
pub(crate) struct SpawnedProcess {
pub exit_code: Mutex<Option<ExitCode>>,
pub exit_code_rx: Mutex<mpsc::UnboundedReceiver<ExitCode>>,
}
impl VirtualBusProcess for SpawnedProcess {
fn exit_code(&self) -> Option<ExitCode> {
let mut exit_code = self.exit_code.lock().unwrap();
if let Some(exit_code) = exit_code.as_ref() {
return Some(*exit_code);
}
let mut rx = self.exit_code_rx.lock().unwrap();
match rx.try_recv() {
Ok(code) => {
exit_code.replace(code);
Some(code)
}
Err(mpsc::error::TryRecvError::Disconnected) => {
let code = Errno::Canceled as ExitCode;
exit_code.replace(code);
Some(code)
}
_ => None,
}
}
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
{
let exit_code = self.exit_code.lock().unwrap();
if exit_code.is_some() {
return Poll::Ready(());
}
}
let mut rx = self.exit_code_rx.lock().unwrap();
let mut rx = Pin::new(rx.deref_mut());
match rx.poll_recv(cx) {
Poll::Ready(code) => {
let code = code.unwrap_or(Errno::Canceled as ExitCode);
{
let mut exit_code = self.exit_code.lock().unwrap();
exit_code.replace(code);
}
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
impl VirtualBusScope for SpawnedProcess {
fn poll_finished(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
VirtualBusProcess::poll_ready(self, cx)
}
}
impl VirtualBusInvokable for SpawnedProcess {}
| spawn_exec_module | identifier_name |
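// `spawn_exec_module` (the masked identifier above) decides between creating
// memory for the instance and importing it by checking whether the module
// declares a memory import. That check in isolation, using the same wasmer
// calls as the source:
fn imported_memory_type(module: &Module) -> Option<wasmer::MemoryType> {
    // Only modules compiled against shared/imported memory (e.g. for
    // threads) will yield an entry here.
    module.imports().memories().next().map(|import| *import.ty())
}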
exec.rs | use std::{
ops::DerefMut,
pin::Pin,
sync::{Arc, Mutex},
task::{Context, Poll},
};
use crate::vbus::{
BusSpawnedProcess, VirtualBusError, VirtualBusInvokable, VirtualBusProcess, VirtualBusScope,
};
use futures::Future;
use tokio::sync::mpsc;
use tracing::*;
use wasmer::{FunctionEnvMut, Instance, Memory, Module, Store};
use wasmer_wasi_types::wasi::{Errno, ExitCode};
use super::{BinFactory, BinaryPackage, ModuleCache};
use crate::{
import_object_for_all_wasi_versions, runtime::SpawnType, SpawnedMemory, WasiEnv, WasiError,
WasiFunctionEnv, WasiRuntime,
};
pub fn spawn_exec(
binary: BinaryPackage,
name: &str,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync +'static>,
compiled_modules: &ModuleCache,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Load the module
#[cfg(feature = "sys")]
let compiler = store.engine().name();
#[cfg(not(feature = "sys"))]
let compiler = "generic";
#[cfg(feature = "sys")]
let module = compiled_modules.get_compiled_module(&store, binary.hash().as_str(), compiler);
#[cfg(not(feature = "sys"))]
let module = compiled_modules.get_compiled_module(binary.hash().as_str(), compiler);
let module = match (module, binary.entry.as_ref()) {
(Some(a), _) => a,
(None, Some(entry)) => {
let module = Module::new(&store, &entry[..]).map_err(|err| {
error!(
"failed to compile module [{}, len={}] - {}",
name,
entry.len(),
err
);
VirtualBusError::CompileError
});
if module.is_err() {
env.cleanup(Some(Errno::Noexec as ExitCode));
}
let module = module?;
compiled_modules.set_compiled_module(binary.hash().as_str(), compiler, &module);
module
}
(None, None) => {
error!("package has no entry [{}]", name,);
env.cleanup(Some(Errno::Noexec as ExitCode));
return Err(VirtualBusError::CompileError);
}
};
// If the file system has not already been union'ed then do so
env.state.fs.conditional_union(&binary);
// Now run the module
let mut ret = spawn_exec_module(module, store, env, runtime);
if let Ok(ret) = ret.as_mut() {
ret.module_memory_footprint = binary.module_memory_footprint;
ret.file_system_memory_footprint = binary.file_system_memory_footprint;
}
ret
}
pub fn spawn_exec_module(
module: Module,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync +'static>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Create a new task manager
let tasks = runtime.task_manager();
// Create the signaler
let pid = env.pid();
let signaler = Box::new(env.process.clone());
// Now run the binary
let (exit_code_tx, exit_code_rx) = mpsc::unbounded_channel();
{
// Determine if shared memory needs to be created and imported
let shared_memory = module.imports().memories().next().map(|a| *a.ty());
// Determine if we are going to create memory and import it or just rely on self creation of memory
let memory_spawn = match shared_memory {
Some(ty) => {
#[cfg(feature = "sys")]
let style = store.tunables().memory_style(&ty);
SpawnType::CreateWithType(SpawnedMemory {
ty,
#[cfg(feature = "sys")]
style,
})
}
None => SpawnType::Create,
};
// Create a thread that will run this process
let runtime = runtime.clone();
let tasks_outer = tasks.clone();
let task = {
let spawn_type = memory_spawn;
let mut store = store;
move || {
// Create the WasiFunctionEnv
let mut wasi_env = env;
wasi_env.runtime = runtime;
let memory = match wasi_env.tasks().build_memory(spawn_type) {
Ok(m) => m,
Err(err) => {
error!("wasi[{}]::wasm could not build memory error ({})", pid, err);
wasi_env.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
let mut wasi_env = WasiFunctionEnv::new(&mut store, wasi_env);
// Let's instantiate the module with the imports.
let (mut import_object, init) =
import_object_for_all_wasi_versions(&module, &mut store, &wasi_env.env);
if let Some(memory) = memory {
import_object.define(
"env",
"memory",
Memory::new_from_existing(&mut store, memory),
);
}
let instance = match Instance::new(&mut store, &module, &import_object) {
Ok(a) => a,
Err(err) => {
error!("wasi[{}]::wasm instantiate error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
init(&instance, &store).unwrap();
// Initialize the WASI environment
if let Err(err) = wasi_env.initialize(&mut store, instance.clone()) {
error!("wasi[{}]::wasi initialize error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
// If this module exports an _initialize function, run that first.
if let Ok(initialize) = instance.exports.get_function("_initialize") {
if let Err(e) = initialize.call(&mut store, &[]) {
let code = match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code as ExitCode,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as ExitCode
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
Errno::Noexec as ExitCode
}
};
let _ = exit_code_tx.send(code);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
}
// Let's call the `_start` function, which is our `main` function in Rust.
let start = instance.exports.get_function("_start").ok();
// If there is a start function
debug!("wasi[{}]::called main()", pid);
// TODO: rewrite to use crate::run_wasi_func
let ret = if let Some(start) = start {
match start.call(&mut store, &[]) {
Ok(_) => 0,
Err(e) => match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as u32
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
9999u32
}
},
}
} else {
debug!("wasi[{}]::exec-failed: missing _start function", pid);
Errno::Noexec as u32
};
debug!("wasi[{}]::main() has exited with {}", pid, ret);
// Cleanup the environment
wasi_env.data(&store).cleanup(Some(ret));
// Send the result
let _ = exit_code_tx.send(ret);
drop(exit_code_tx);
}
};
// TODO: handle this better - required because of Module not being Send.
#[cfg(feature = "js")]
let task = { |
unsafe impl Send for UnsafeWrapper {}
let inner = UnsafeWrapper {
inner: Box::new(task),
};
move || {
(inner.inner)();
}
};
tasks_outer.task_wasm(Box::new(task)).map_err(|err| {
error!("wasi[{}]::failed to launch module - {}", pid, err);
VirtualBusError::UnknownError
})?
};
let inst = Box::new(SpawnedProcess {
exit_code: Mutex::new(None),
exit_code_rx: Mutex::new(exit_code_rx),
});
Ok(BusSpawnedProcess {
inst,
stdin: None,
stdout: None,
stderr: None,
signaler: Some(signaler),
module_memory_footprint: 0,
file_system_memory_footprint: 0,
})
}
impl BinFactory {
pub fn spawn<'a>(
&'a self,
name: String,
store: Store,
env: WasiEnv,
) -> Pin<Box<dyn Future<Output = Result<BusSpawnedProcess, VirtualBusError>> + 'a>> {
Box::pin(async move {
// Find the binary (or die trying) and make the spawn type
let binary = self
.get_binary(name.as_str(), Some(env.fs_root()))
.await
.ok_or(VirtualBusError::NotFound);
if binary.is_err() {
env.cleanup(Some(Errno::Noent as ExitCode));
}
let binary = binary?;
// Execute
spawn_exec(
binary,
name.as_str(),
store,
env,
&self.runtime,
&self.cache,
)
})
}
pub fn try_built_in(
&self,
name: String,
parent_ctx: Option<&FunctionEnvMut<'_, WasiEnv>>,
store: &mut Option<Store>,
builder: &mut Option<WasiEnv>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// We check for built in commands
if let Some(parent_ctx) = parent_ctx {
if self.commands.exists(name.as_str()) {
return self
.commands
.exec(parent_ctx, name.as_str(), store, builder);
}
} else if self.commands.exists(name.as_str()) {
tracing::warn!("builtin command without a parent ctx - {}", name);
}
Err(VirtualBusError::NotFound)
}
}
#[derive(Debug)]
pub(crate) struct SpawnedProcess {
pub exit_code: Mutex<Option<ExitCode>>,
pub exit_code_rx: Mutex<mpsc::UnboundedReceiver<ExitCode>>,
}
impl VirtualBusProcess for SpawnedProcess {
fn exit_code(&self) -> Option<ExitCode> {
let mut exit_code = self.exit_code.lock().unwrap();
if let Some(exit_code) = exit_code.as_ref() {
return Some(*exit_code);
}
let mut rx = self.exit_code_rx.lock().unwrap();
match rx.try_recv() {
Ok(code) => {
exit_code.replace(code);
Some(code)
}
Err(mpsc::error::TryRecvError::Disconnected) => {
let code = Errno::Canceled as ExitCode;
exit_code.replace(code);
Some(code)
}
_ => None,
}
}
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
{
let exit_code = self.exit_code.lock().unwrap();
if exit_code.is_some() {
return Poll::Ready(());
}
}
let mut rx = self.exit_code_rx.lock().unwrap();
let mut rx = Pin::new(rx.deref_mut());
match rx.poll_recv(cx) {
Poll::Ready(code) => {
let code = code.unwrap_or(Errno::Canceled as ExitCode);
{
let mut exit_code = self.exit_code.lock().unwrap();
exit_code.replace(code);
}
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
impl VirtualBusScope for SpawnedProcess {
fn poll_finished(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
VirtualBusProcess::poll_ready(self, cx)
}
}
impl VirtualBusInvokable for SpawnedProcess {} | struct UnsafeWrapper {
inner: Box<dyn FnOnce() + 'static>,
} | random_line_split |
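// The js-feature branch above smuggles a non-Send closure across the spawn
// boundary by wrapping it and asserting Send. The shape of that escape hatch
// in isolation; it is only sound when the wrapped value ends up used on a
// single thread, which the caller must guarantee:
struct SendWrapper<T>(T);

// SAFETY (caller-assumed): the inner value is moved to exactly one thread
// and never shared.
unsafe impl<T> Send for SendWrapper<T> {}

fn force_send<T>(value: T) -> SendWrapper<T> {
    SendWrapper(value)
}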
lib.rs | // Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! A crate that hosts common definitions relevant to pallet-contracts.
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::traits::{Currency, Time};
#[cfg(feature = "std")] |
#[cfg(feature = "std")]
use std::fmt::Debug;
use sp_std::prelude::*;
pub mod abi;
pub mod contract_metadata;
pub mod gateway_inbound_protocol;
pub mod transfers;
pub use gateway_inbound_protocol::GatewayInboundProtocol;
pub type ChainId = [u8; 4];
#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayType {
ProgrammableInternal(u32),
ProgrammableExternal(u32),
TxOnly(u32),
}
impl GatewayType {
pub fn fetch_nonce(self) -> u32 {
match self {
Self::ProgrammableInternal(nonce) => nonce,
Self::ProgrammableExternal(nonce) => nonce,
Self::TxOnly(nonce) => nonce,
}
}
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayVendor {
Substrate,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
/// Structure used at gateway registration as a starting point for multi-finality-verifier
pub struct GenericPrimitivesHeader {
pub parent_hash: Option<sp_core::hash::H256>,
pub number: u64,
pub state_root: Option<sp_core::hash::H256>,
pub extrinsics_root: Option<sp_core::hash::H256>,
pub digest: Option<sp_runtime::generic::Digest<sp_core::hash::H256>>,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayPointer {
pub id: ChainId,
pub vendor: GatewayVendor,
pub gateway_type: GatewayType,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayGenesisConfig {
/// SCALE-encoded modules following the format of selected frame_metadata::RuntimeMetadataVXX
pub modules_encoded: Option<Vec<u8>>,
/// SCALE-encoded signed extension - see more at frame_metadata::ExtrinsicMetadata
pub signed_extension: Option<Vec<u8>>,
/// Runtime version
pub runtime_version: sp_version::RuntimeVersion,
/// Extrinsics version
pub extrinsics_version: u8,
/// Genesis hash - block id of the genesis block, used to distinguish the network and sign messages
/// Length depending on parameter passed in abi::GatewayABIConfig
pub genesis_hash: Vec<u8>,
}
impl Default for GatewayGenesisConfig {
fn default() -> Self {
Self {
extrinsics_version: 0,
runtime_version: Default::default(),
genesis_hash: vec![],
modules_encoded: None,
signed_extension: None,
}
}
}
/// A struct that encodes RPC parameters required for a call to a smart-contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct Compose<Account, Balance> {
pub name: Vec<u8>,
pub code_txt: Vec<u8>,
pub exec_type: Vec<u8>,
pub dest: Account,
pub value: Balance,
pub bytes: Vec<u8>,
pub input_data: Vec<u8>,
}
/// A result type of a get storage call.
pub type FetchContractsResult = Result<Vec<u8>, ContractAccessError>;
pub type RegistryContractId<T> = <T as frame_system::Config>::Hash;
/// A result of execution of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ComposableExecResult {
/// The contract returned successfully.
///
/// There is a status code and, optionally, some data returned by the contract.
Success {
/// Flags that the contract passed along on returning to alter its exit behaviour.
/// Described in `pallet_contracts::exec::ReturnFlags`.
flags: u32,
/// Output data returned by the contract.
///
/// Can be empty.
data: Vec<u8>,
/// How much gas was consumed by the call.
gas_consumed: u64,
},
/// The contract execution either trapped or returned an error.
Error,
}
/// The possible errors that can happen querying the storage of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ContractAccessError {
/// The given address doesn't point to a contract.
DoesntExist,
/// The specified contract is a tombstone and thus cannot have any storage.
IsTombstone,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecPhase<Account, Balance> {
pub steps: Vec<ExecStep<Account, Balance>>,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecStep<Account, Balance> {
pub compose: Compose<Account, Balance>,
}
pub type GenericAddress = sp_runtime::MultiAddress<sp_runtime::AccountId32, ()>;
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct InterExecSchedule<Account, Balance> {
pub phases: Vec<ExecPhase<Account, Balance>>,
}
pub trait EscrowTrait: frame_system::Config + pallet_sudo::Config {
type Currency: Currency<Self::AccountId>;
type Time: Time;
}
type Bytes = Vec<u8>;
/// Outbound Step that specifies expected transmission medium for relayers connecting with that gateway.
/// Request message format whose derivatives could be compatible with a JSON-RPC API
/// with either signed or unsigned payload or custom transmission medium like XCMP protocol
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitOutboundMessage {
/// Message name/identifier
pub name: Bytes,
/// Module/pallet name
pub module_name: Bytes,
/// Method name
pub method_name: Bytes,
/// Encoded sender's public key
pub sender: Option<Bytes>,
/// Encoded target's public key
pub target: Option<Bytes>,
/// Array of next arguments: encoded bytes of arguments that the JSON-RPC API expects
pub arguments: Vec<Bytes>,
/// Expected results
pub expected_output: Vec<GatewayExpectedOutput>,
/// Extra payload in case the message is signed or uses custom delivery protocols like XCMP
pub extra_payload: Option<ExtraMessagePayload>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadUnsigned<'a> {
pub method_name: &'a str,
pub params: Vec<Bytes>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadSigned<'a> {
pub method_name: &'a str,
pub signed_extrinsic: Bytes,
}
impl CircuitOutboundMessage {
pub fn to_jsonrpc_unsigned(&self) -> Result<RpcPayloadUnsigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "Can't decode method name to &str")?;
Ok(RpcPayloadUnsigned {
method_name,
params: self.arguments.clone(),
})
}
pub fn to_jsonrpc_signed(&self) -> Result<RpcPayloadSigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "Can't decode method name to &str")?;
let signed_ext = self
.extra_payload
.as_ref()
.map(|payload| payload.tx_signed.clone())
.ok_or("no signed extrinsic provided")?;
Ok(RpcPayloadSigned {
method_name,
signed_extrinsic: signed_ext,
})
}
}
/// Inclusion proofs of different tries
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ProofTriePointer {
/// Proof is a merkle path in the state trie
State,
/// Proof is a merkle path in the transaction trie (extrinsics in Substrate)
Transaction,
/// Proof is a merkle path in the receipts trie (in Substrate logs are entries in state trie, this doesn't apply)
Receipts,
}
/// Inbound Steps that specify expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitInboundResult {
pub result_format: Bytes,
pub proof_type: ProofTriePointer,
}
/// Inbound Steps that specify expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayExpectedOutput {
/// Effect would be the modified storage key
Storage {
key: Vec<Vec<u8>>,
// key: Vec<sp_core::storage::StorageKey>,
// value: Vec<Option<sp_core::storage::StorageData>>,
value: Vec<Option<Bytes>>,
},
/// Expect events as a result of that call - will be described with signature
/// and checked against the corresponding types upon receiving
Events { signatures: Vec<Bytes> },
/// Yet another event or Storage output
Extrinsic {
/// Optionally expect dispatch of the extrinsic only at a certain block height
block_height: Option<u64>,
},
/// Yet another event or Storage output. If expecting output you can define its type format.
Output { output: Bytes },
}
/// Outbound Step that specifies expected transmission medium for relayers connecting with that gateway.
/// Extra payload in case the message is signed or has other custom parameters required by the linking protocol.
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExtraMessagePayload {
pub signer: Bytes,
/// Encoded utf-8 string of module name that implements requested entrypoint
pub module_name: Bytes,
/// Encoded utf-8 string of method name that implements requested entrypoint
pub method_name: Bytes,
/// Encoded call bytes
pub call_bytes: Bytes,
/// Encoded tx signature
pub signature: Bytes,
/// Encoded extras for that transaction, like versions and gas price/tips for miners. Check GenericExtra for more info.
pub extra: Bytes,
/// Encoded and signed transaction ready to send
pub tx_signed: Bytes,
/// Custom message bytes, that would have to be decoded by the receiving end.
/// Could be utilized by custom transmission medium (like Substrate's XCMP)
pub custom_payload: Option<Bytes>,
}
/// Retrieves all available gateways for a given ChainId.
/// Currently returns a vector with a single hardcoded result.
/// Eventually this will search all known gateways on pallet-xdns.
pub fn retrieve_gateway_pointers(gateway_id: ChainId) -> Result<Vec<GatewayPointer>, &'static str> {
Ok(vec![GatewayPointer {
id: gateway_id,
gateway_type: GatewayType::ProgrammableExternal(0),
vendor: GatewayVendor::Substrate,
}])
} | use serde::{Deserialize, Serialize};
#[cfg(feature = "no_std")]
use sp_runtime::RuntimeDebug as Debug; | random_line_split |
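Both to_jsonrpc_* conversions in the rows above hinge on one fallible step: turning the byte-encoded method name back into UTF-8. A self-contained sketch of that decode-and-build step, with simplified stand-in types (the real CircuitOutboundMessage carries more fields):

struct OutboundMessage {
    name: Vec<u8>,
    arguments: Vec<Vec<u8>>,
}

struct PayloadUnsigned<'a> {
    method_name: &'a str,
    params: Vec<Vec<u8>>,
}

impl OutboundMessage {
    // Borrows the method name out of the message, so the payload cannot
    // outlive the message it was built from.
    fn to_jsonrpc_unsigned(&self) -> Result<PayloadUnsigned<'_>, &'static str> {
        let method_name = std::str::from_utf8(&self.name)
            .map_err(|_| "Can't decode method name to &str")?;
        Ok(PayloadUnsigned {
            method_name,
            params: self.arguments.clone(),
        })
    }
}

fn main() {
    let msg = OutboundMessage {
        name: b"state_getStorage".to_vec(), // illustrative method name
        arguments: vec![vec![0x01, 0x02]],
    };
    let payload = msg.to_jsonrpc_unsigned().unwrap();
    assert_eq!(payload.method_name, "state_getStorage");
    assert_eq!(payload.params.len(), 1);
}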
lib.rs | // Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! A crate that hosts common definitions that are relevant for pallet-contracts.
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::traits::{Currency, Time};
#[cfg(feature = "std")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "no_std")]
use sp_runtime::RuntimeDebug as Debug;
#[cfg(feature = "std")]
use std::fmt::Debug;
use sp_std::prelude::*;
pub mod abi;
pub mod contract_metadata;
pub mod gateway_inbound_protocol;
pub mod transfers;
pub use gateway_inbound_protocol::GatewayInboundProtocol;
pub type ChainId = [u8; 4];
#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayType {
ProgrammableInternal(u32),
ProgrammableExternal(u32),
TxOnly(u32),
}
impl GatewayType {
pub fn fetch_nonce(self) -> u32 {
match self {
Self::ProgrammableInternal(nonce) => nonce,
Self::ProgrammableExternal(nonce) => nonce,
Self::TxOnly(nonce) => nonce,
}
}
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayVendor {
Substrate,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
/// Structure used at gateway registration as a starting point for multi-finality-verifier
pub struct GenericPrimitivesHeader {
pub parent_hash: Option<sp_core::hash::H256>,
pub number: u64,
pub state_root: Option<sp_core::hash::H256>,
pub extrinsics_root: Option<sp_core::hash::H256>,
pub digest: Option<sp_runtime::generic::Digest<sp_core::hash::H256>>,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayPointer {
pub id: ChainId,
pub vendor: GatewayVendor,
pub gateway_type: GatewayType,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayGenesisConfig {
/// SCALE-encoded modules following the format of selected frame_metadata::RuntimeMetadataVXX
pub modules_encoded: Option<Vec<u8>>,
/// SCALE-encoded signed extension - see more at frame_metadata::ExtrinsicMetadata
pub signed_extension: Option<Vec<u8>>,
/// Runtime version
pub runtime_version: sp_version::RuntimeVersion,
/// Extrinsics version
pub extrinsics_version: u8,
/// Genesis hash - block id of the genesis block, used to distinguish the network and sign messages
/// Length depending on parameter passed in abi::GatewayABIConfig
pub genesis_hash: Vec<u8>,
}
impl Default for GatewayGenesisConfig {
fn default() -> Self {
Self {
extrinsics_version: 0,
runtime_version: Default::default(),
genesis_hash: vec![],
modules_encoded: None,
signed_extension: None,
}
}
}
/// A struct that encodes RPC parameters required for a call to a smart-contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct Compose<Account, Balance> {
pub name: Vec<u8>,
pub code_txt: Vec<u8>,
pub exec_type: Vec<u8>,
pub dest: Account,
pub value: Balance,
pub bytes: Vec<u8>,
pub input_data: Vec<u8>,
}
/// A result type of a get storage call.
pub type FetchContractsResult = Result<Vec<u8>, ContractAccessError>;
pub type RegistryContractId<T> = <T as frame_system::Config>::Hash;
/// A result of execution of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum | {
/// The contract returned successfully.
///
/// There is a status code and, optionally, some data returned by the contract.
Success {
/// Flags that the contract passed along on returning to alter its exit behaviour.
/// Described in `pallet_contracts::exec::ReturnFlags`.
flags: u32,
/// Output data returned by the contract.
///
/// Can be empty.
data: Vec<u8>,
/// How much gas was consumed by the call.
gas_consumed: u64,
},
/// The contract execution either trapped or returned an error.
Error,
}
/// The possible errors that can happen querying the storage of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ContractAccessError {
/// The given address doesn't point to a contract.
DoesntExist,
/// The specified contract is a tombstone and thus cannot have any storage.
IsTombstone,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecPhase<Account, Balance> {
pub steps: Vec<ExecStep<Account, Balance>>,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecStep<Account, Balance> {
pub compose: Compose<Account, Balance>,
}
pub type GenericAddress = sp_runtime::MultiAddress<sp_runtime::AccountId32, ()>;
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct InterExecSchedule<Account, Balance> {
pub phases: Vec<ExecPhase<Account, Balance>>,
}
pub trait EscrowTrait: frame_system::Config + pallet_sudo::Config {
type Currency: Currency<Self::AccountId>;
type Time: Time;
}
type Bytes = Vec<u8>;
/// Outbound Step that specifies expected transmission medium for relayers connecting with that gateway.
/// Request message format whose derivatives could be compatible with a JSON-RPC API
/// with either signed or unsigned payload or custom transmission medium like XCMP protocol
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitOutboundMessage {
/// Message name/identifier
pub name: Bytes,
/// Module/pallet name
pub module_name: Bytes,
/// Method name
pub method_name: Bytes,
/// Encoded sender's public key
pub sender: Option<Bytes>,
/// Encoded target's public key
pub target: Option<Bytes>,
/// Array of next arguments: encoded bytes of arguments that the JSON-RPC API expects
pub arguments: Vec<Bytes>,
/// Expected results
pub expected_output: Vec<GatewayExpectedOutput>,
/// Extra payload in case the message is signed or uses custom delivery protocols like XCMP
pub extra_payload: Option<ExtraMessagePayload>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadUnsigned<'a> {
pub method_name: &'a str,
pub params: Vec<Bytes>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadSigned<'a> {
pub method_name: &'a str,
pub signed_extrinsic: Bytes,
}
impl CircuitOutboundMessage {
pub fn to_jsonrpc_unsigned(&self) -> Result<RpcPayloadUnsigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "Can't decode method name to &str")?;
Ok(RpcPayloadUnsigned {
method_name,
params: self.arguments.clone(),
})
}
pub fn to_jsonrpc_signed(&self) -> Result<RpcPayloadSigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "Can't decode method name to &str")?;
let signed_ext = self
.extra_payload
.as_ref()
.map(|payload| payload.tx_signed.clone())
.ok_or("no signed extrinsic provided")?;
Ok(RpcPayloadSigned {
method_name,
signed_extrinsic: signed_ext,
})
}
}
/// Inclusion proofs of different tries
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ProofTriePointer {
/// Proof is a merkle path in the state trie
State,
/// Proof is a merkle path in the transaction trie (extrinsics in Substrate)
Transaction,
/// Proof is a merkle path in the receipts trie (in Substrate logs are entries in state trie, this doesn't apply)
Receipts,
}
/// Inbound Steps that specify expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitInboundResult {
pub result_format: Bytes,
pub proof_type: ProofTriePointer,
}
/// Inbound Steps that specify expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayExpectedOutput {
/// Effect would be the modified storage key
Storage {
key: Vec<Vec<u8>>,
// key: Vec<sp_core::storage::StorageKey>,
// value: Vec<Option<sp_core::storage::StorageData>>,
value: Vec<Option<Bytes>>,
},
/// Expect events as a result of that call - will be described with signature
/// and checked against the corresponding types upon receiving
Events { signatures: Vec<Bytes> },
/// Yet another event or Storage output
Extrinsic {
/// Optionally expect dispatch of the extrinsic only at a certain block height
block_height: Option<u64>,
},
/// Yet another event or Storage output. If expecting output you can define its type format.
Output { output: Bytes },
}
/// Outbound Step that specifies expected transmission medium for relayers connecting with that gateway.
/// Extra payload in case the message is signed or has other custom parameters required by the linking protocol.
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExtraMessagePayload {
pub signer: Bytes,
/// Encoded utf-8 string of module name that implements requested entrypoint
pub module_name: Bytes,
/// Encoded utf-8 string of method name that implements requested entrypoint
pub method_name: Bytes,
/// Encoded call bytes
pub call_bytes: Bytes,
/// Encoded tx signature
pub signature: Bytes,
/// Encoded extras for that transaction, like versions and gas price/tips for miners. Check GenericExtra for more info.
pub extra: Bytes,
/// Encoded and signed transaction ready to send
pub tx_signed: Bytes,
/// Custom message bytes, that would have to be decoded by the receiving end.
/// Could be utilized by custom transmission medium (like Substrate's XCMP)
pub custom_payload: Option<Bytes>,
}
/// Retrieves all available gateways for a given ChainId.
/// Currently returns a vector with a single hardcoded result.
/// Eventually this will search all known gateways on pallet-xdns.
pub fn retrieve_gateway_pointers(gateway_id: ChainId) -> Result<Vec<GatewayPointer>, &'static str> {
Ok(vec![GatewayPointer {
id: gateway_id,
gateway_type: GatewayType::ProgrammableExternal(0),
vendor: GatewayVendor::Substrate,
}])
}
| ComposableExecResult | identifier_name |
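Nearly every type in the two rows above derives codec's Encode and Decode. A minimal round-trip sketch of what those derives provide, assuming the parity-scale-codec crate behind the `codec` import (the enum here is a stand-in, not the crate's own GatewayType):

use codec::{Decode, Encode};

#[derive(Encode, Decode, PartialEq, Debug)]
enum GatewayKind {
    ProgrammableInternal(u32),
    TxOnly(u32),
}

fn main() {
    let original = GatewayKind::ProgrammableInternal(7);
    // SCALE: one byte for the variant index, then the variant's payload.
    let bytes = original.encode();
    let decoded = GatewayKind::decode(&mut &bytes[..]).expect("round-trip");
    assert_eq!(original, decoded);
}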
lib.rs | use url::*;
pub struct Example<'a> {
dirty: &'a str,
clean: &'a str,
}
impl<'a> Example<'a> {
pub const fn new(dirty: &'a str, clean: &'a str) -> Self {
Self { dirty, clean }
}
}
/// Contains directives on how to extract the link from a click-tracking link forwarder.
pub struct CleanInformation<'a> {
/// The domain which is used to forward
domain: &'a str,
/// The path at the given domain that the tracking url will send tracking information to
path: &'a str,
/// The query parameter that the actual link of interest is sent as
querykey: &'a str,
#[allow(unused)]
example: Option<Example<'a>>,
}
/// When these keys are part of the url query parameters, they will be removed from the link
/// So that if the parameters contain something like "www.example.com/foo?param1=bar&fbclid=1234",
/// the resulting query string will become something similar to "www.example.com/foo?param1=bar"
/// with the click id query parameter "fbclid" removed
const KEYS_TO_CLEAN: [&'static str; 3] = ["fbclid", "custlinkid", "gclid"];
/// Seven commonly used tracking forwarders that are going to be cleaned
const DOMAINS_TO_CLEAN: [CleanInformation<'static>; 7] = {
[
CleanInformation {
domain: "l.facebook.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new("https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ",
"https://www.youtube.com/watch?v=uBKajwUM5v4")),
},
CleanInformation {
domain: "l.messenger.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new(
"https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// remove the click-id and similar queries that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) {
println!("key found: {:?}", key);
modified = true;
} else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// Try to extract the destination url from the link if possible, and also try to remove the click-id
/// query parameters that are present. If the content has been modified, return Some; if
/// the content is untouched, return None
pub fn clean_url(&self, url: &url::Url) -> Option<String> {
if let Some(domain) = url.domain() {
// Check all rules that matches this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Disgusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
| let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%26custlinkid%3D37737%26fbclid%3DIwAR0ZRlKtl4NJgkCGMuiVNuxnL3GUVnw0kCLSmwNFD_xqiUv83U_dVP-6X8A&h=AT1jV6cBYrlCCqMs2RUB2mHXcyuSq4zO_1safL4SYIvxkwWVDs7xViyTB1dYm-84aACs8qfshYEHY0pS8o2H0cdRw51mK9ZQGmKZlodbgvCkZhs3v1LxumxDGCHcIey-8M1sLH1gXAN6";
let url_clean = "https://www.banggood.com/XT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html?p=JQ191716342021201711";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_messenger() {
let url_dirty ="https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U";
let url_clean =
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_google_meeting() {
let url = "https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g";
let url_clean = "https://meet.lync.com/skydrive3m-mmm/random/random?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook_image() {
let url = "https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf";
let url_clean = "https://i.redd.it/4wao306sl9931.jpg?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_youtube_chat_link() {
let url = "https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA";
let url_clean = "https://forms.gle/QDyXJVu6x24UYErEA?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_teams_link() {
let url = "https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0";
let url_clean = "http://www.regitart.se/Default.aspx?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn test_all_examples() {
for cleaner in &DOMAINS_TO_CLEAN {
if let Some(example) = &cleaner.example {
let url_dirty = &example.dirty;
let url_clean = &example.clean;
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(&clean, url_clean);
}
}
}
} | let parsed = Url::parse(&url_dirty).unwrap(); | random_line_split |
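A standalone sketch of the unwrapping the UrlCleaner rows perform: pull the forwarded destination out of a known query parameter, then rebuild it without click-id parameters. It assumes the url crate (2.x) imported by the rows; the domain and parameter names are illustrative.

use url::Url;

fn unwrap_forwarded(link: &str, forward_key: &str, tracker_keys: &[&str]) -> Option<String> {
    let outer = Url::parse(link).ok()?;
    // Find the query parameter that carries the real destination.
    let target = outer
        .query_pairs()
        .find(|(k, _)| k.as_ref() == forward_key)
        .and_then(|(_, v)| Url::parse(&v).ok())?;
    // Rebuild the destination's query without the click-id keys.
    let mut clean = target.clone();
    clean.query_pairs_mut().clear();
    for (k, v) in target.query_pairs() {
        if !tracker_keys.contains(&k.as_ref()) {
            clean.query_pairs_mut().append_pair(&k, &v);
        }
    }
    Some(clean.to_string())
}

fn main() {
    let dirty = "https://l.example.com/l.php?u=https%3A%2F%2Fexample.org%2Fpage%3Fid%3D1%26fbclid%3Dabc";
    let clean = unwrap_forwarded(dirty, "u", &["fbclid"]).unwrap();
    assert_eq!(clean, "https://example.org/page?id=1");
}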
lib.rs | use url::*;
pub struct Example<'a> {
dirty: &'a str,
clean: &'a str,
}
impl<'a> Example<'a> {
pub const fn new(dirty: &'a str, clean: &'a str) -> Self {
Self { dirty, clean }
}
}
/// Contains directives on how to extract the link from a click-tracking link forwarder.
pub struct CleanInformation<'a> {
/// The domain which is used to forward
domain: &'a str,
/// The path at the given domain that the tracking url will send tracking information to
path: &'a str,
/// The query parameter that the actual link of interest is sent as
querykey: &'a str,
#[allow(unused)]
example: Option<Example<'a>>,
}
/// When these keys are part of the url query parameters, they will be removed from the link
/// So that if the parameters contain something like "www.example.com/foo?param1=bar&fbclid=1234",
/// the resulting query string will become something similar to "www.example.com/foo?param1=bar"
/// with the click id query parameter "fbclid" removed
const KEYS_TO_CLEAN: [&'static str; 3] = ["fbclid", "custlinkid", "gclid"];
/// Seven commonly used tracking forwarders that are going to be cleaned
const DOMAINS_TO_CLEAN: [CleanInformation<'static>; 7] = {
[
CleanInformation {
domain: "l.facebook.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new("https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ",
"https://www.youtube.com/watch?v=uBKajwUM5v4")),
},
CleanInformation {
domain: "l.messenger.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new(
"https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// remove the click-id and similar queries that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) {
println!("key found: {:?}", key);
modified = true;
} else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// Try to extract the destination url from the link if possible, and also try to remove the click-id
/// query parameters that are present. If the content has been modified, return Some; if
/// the content is untouched, return None
pub fn | (&self, url: &url::Url) -> Option<String> {
if let Some(domain) = url.domain() {
// Check all rules that matches this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Disgusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%26custlinkid%3D37737%26fbclid%3DIwAR0ZRlKtl4NJgkCGMuiVNuxnL3GUVnw0kCLSmwNFD_xqiUv83U_dVP-6X8A&h=AT1jV6cBYrlCCqMs2RUB2mHXcyuSq4zO_1safL4SYIvxkwWVDs7xViyTB1dYm-84aACs8qfshYEHY0pS8o2H0cdRw51mK9ZQGmKZlodbgvCkZhs3v1LxumxDGCHcIey-8M1sLH1gXAN6";
let url_clean = "https://www.banggood.com/XT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html?p=JQ191716342021201711";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_messenger() {
let url_dirty ="https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U";
let url_clean =
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_google_meeting() {
let url = "https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g";
let url_clean = "https://meet.lync.com/skydrive3m-mmm/random/random?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook_image() {
let url = "https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf";
let url_clean = "https://i.redd.it/4wao306sl9931.jpg?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_youtube_chat_link() {
let url = "https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA";
let url_clean = "https://forms.gle/QDyXJVu6x24UYErEA?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_teams_link() {
let url = "https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0";
let url_clean = "http://www.regitart.se/Default.aspx?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn test_all_examples() {
for cleaner in &DOMAINS_TO_CLEAN {
if let Some(example) = &cleaner.example {
let url_dirty = &example.dirty;
let url_clean = &example.clean;
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(&clean, url_clean);
}
}
}
}
| clean_url | identifier_name |
lib.rs | use url::*;
pub struct Example<'a> {
dirty: &'a str,
clean: &'a str,
}
impl<'a> Example<'a> {
pub const fn new(dirty: &'a str, clean: &'a str) -> Self {
Self { dirty, clean }
}
}
/// Contains directives on how to extract the link from a click-tracking link forwarder.
pub struct CleanInformation<'a> {
/// The domain which is used to forward
domain: &'a str,
/// The path at the given domain that the tracking url will send tracking information to
path: &'a str,
/// The query parameter that the actual link of interest is sent as
querykey: &'a str,
#[allow(unused)]
example: Option<Example<'a>>,
}
/// When these keys are part of the url query parameters, they will be removed from the link
/// So that if the parameters contain something like "www.example.com/foo?param1=bar&fbclid=1234",
/// the resulting query string will become something similar to "www.example.com/foo?param1=bar"
/// with the click id query parameter "fbclid" removed
const KEYS_TO_CLEAN: [&'static str; 3] = ["fbclid", "custlinkid", "gclid"];
/// Seven commonly used tracking forwarders that are going to be cleaned
const DOMAINS_TO_CLEAN: [CleanInformation<'static>; 7] = {
[
CleanInformation {
domain: "l.facebook.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new("https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ",
"https://www.youtube.com/watch?v=uBKajwUM5v4")),
},
CleanInformation {
domain: "l.messenger.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new(
"https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// remove the click-id and similar queries that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) | else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// Try to extract the destination url from the link if possible, and also try to remove the click-id
/// query parameters that are present. If the content has been modified, return Some; if
/// the content is untouched, return None
pub fn clean_url(&self, url: &url::Url) -> Option<String> {
if let Some(domain) = url.domain() {
// Check all rules that matches this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Disgusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%26custlinkid%3D37737%26fbclid%3DIwAR0ZRlKtl4NJgkCGMuiVNuxnL3GUVnw0kCLSmwNFD_xqiUv83U_dVP-6X8A&h=AT1jV6cBYrlCCqMs2RUB2mHXcyuSq4zO_1safL4SYIvxkwWVDs7xViyTB1dYm-84aACs8qfshYEHY0pS8o2H0cdRw51mK9ZQGmKZlodbgvCkZhs3v1LxumxDGCHcIey-8M1sLH1gXAN6";
let url_clean = "https://www.banggood.com/XT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html?p=JQ191716342021201711";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_messenger() {
let url_dirty ="https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U";
let url_clean =
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_google_meeting() {
let url = "https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g";
let url_clean = "https://meet.lync.com/skydrive3m-mmm/random/random?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook_image() {
let url = "https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf";
let url_clean = "https://i.redd.it/4wao306sl9931.jpg?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_youtube_chat_link() {
let url = "https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA";
let url_clean = "https://forms.gle/QDyXJVu6x24UYErEA?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_teams_link() {
let url = "https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0";
let url_clean = "http://www.regitart.se/Default.aspx?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn test_all_examples() {
for cleaner in &DOMAINS_TO_CLEAN {
if let Some(example) = &cleaner.example {
let url_dirty = &example.dirty;
let url_clean = &example.clean;
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(&clean, url_clean);
}
}
}
}
| {
println!("key found: {:?}", key);
modified = true;
} | conditional_block |
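try_clean_string in these rows never fails: anything that does not parse, or that needs no cleaning, is handed back unchanged. A small sketch of that fall-through contract with a stand-in cleaning closure:

fn try_clean(input: String, clean: impl Fn(&str) -> Option<String>) -> String {
    match clean(&input) {
        Some(cleaned) => cleaned,
        None => input, // not a URL, or already clean: pass through untouched
    }
}

fn main() {
    // A toy cleaner that only rewrites links carrying a fbclid marker.
    let clean = |s: &str| s.contains("fbclid").then(|| s.replace("?fbclid=x", ""));
    assert_eq!(
        try_clean("https://example.org/a?fbclid=x".into(), clean),
        "https://example.org/a"
    );
    assert_eq!(try_clean("not a url".into(), clean), "not a url");
}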
lib.rs | use url::*;
pub struct Example<'a> {
dirty: &'a str,
clean: &'a str,
}
impl<'a> Example<'a> {
pub const fn new(dirty: &'a str, clean: &'a str) -> Self {
Self { dirty, clean }
}
}
/// Contains directives on how to extract the link from a click-tracking link forwarder.
pub struct CleanInformation<'a> {
/// The domain which is used to forward
domain: &'a str,
/// The path at the given domain that the tracking url will send tracking information to
path: &'a str,
/// The query parameter that the actual link of interest is sent as
querykey: &'a str,
#[allow(unused)]
example: Option<Example<'a>>,
}
/// When these keys are part of the url query parameters, they will be removed from the link
/// So that if the parameters contain something like "www.example.com/foo?param1=bar&fbclid=1234",
/// the resulting query string will become something similar to "www.example.com/foo?param1=bar"
/// with the click id query parameter "fbclid" removed
const KEYS_TO_CLEAN: [&'static str; 3] = ["fbclid", "custlinkid", "gclid"];
/// Seven commonly used tracking forwarders that are going to be cleaned
const DOMAINS_TO_CLEAN: [CleanInformation<'static>; 7] = {
[
CleanInformation {
domain: "l.facebook.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new("https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ",
"https://www.youtube.com/watch?v=uBKajwUM5v4")),
},
CleanInformation {
domain: "l.messenger.com",
path: "/l.php",
querykey: "u",
example: Some(
Example::new(
"https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
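// To support another forwarder, add a CleanInformation entry above; when an
// Example is provided it doubles as a regression test via test_all_examples
// in the tests module below.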
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// Remove the click-id and similar tracking query parameters that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) {
println!("key found: {:?}", key);
modified = true;
} else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
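// For example (a sketch of the behaviour above): given
// "https://example.com/a?x=1&fbclid=abc", clean_query returns
// ("https://example.com/a?x=1", true); the "fbclid" pair is dropped and the
// remaining pairs are re-appended in their original order.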
/// Try to extract the destination url from the link if possible, and also try to remove any
/// click-id query parameters that are present. If the content has been modified, return Some;
/// if the content is untouched, return None
pub fn clean_url(&self, url: &url::Url) -> Option<String> | // Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
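// Example usage (a minimal sketch; the dirty link is hypothetical but follows
// the www.google.com/url rule above, so the trailing '?' comes from the
// cleared query string):
//
// let cleaner = UrlCleaner::default();
// let cleaned = cleaner.try_clean_string(
//     "https://www.google.com/url?q=https://example.com/page".to_string(),
// );
// assert_eq!(cleaned, "https://example.com/page?");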
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%26custlinkid%3D37737%26fbclid%3DIwAR0ZRlKtl4NJgkCGMuiVNuxnL3GUVnw0kCLSmwNFD_xqiUv83U_dVP-6X8A&h=AT1jV6cBYrlCCqMs2RUB2mHXcyuSq4zO_1safL4SYIvxkwWVDs7xViyTB1dYm-84aACs8qfshYEHY0pS8o2H0cdRw51mK9ZQGmKZlodbgvCkZhs3v1LxumxDGCHcIey-8M1sLH1gXAN6";
let url_clean = "https://www.banggood.com/XT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html?p=JQ191716342021201711";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_messenger() {
let url_dirty ="https://l.messenger.com/l.php?u=https%3A%2F%2Fwww.reddit.com%2Fr%2FDnD%2Fcomments%2Fbzi1oq%2Fart_two_dragons_and_adopted_kobold_son%2F&h=AT3-avlfmolqmJ6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U";
let url_clean =
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_google_meeting() {
let url = "https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g";
let url_clean = "https://meet.lync.com/skydrive3m-mmm/random/random?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook_image() {
let url = "https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf";
let url_clean = "https://i.redd.it/4wao306sl9931.jpg?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_youtube_chat_link() {
let url = "https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA";
let url_clean = "https://forms.gle/QDyXJVu6x24UYErEA?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_teams_link() {
let url = "https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0";
let url_clean = "http://www.regitart.se/Default.aspx?";
let parsed = Url::parse(&url).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn test_all_examples() {
for cleaner in &DOMAINS_TO_CLEAN {
if let Some(example) = &cleaner.example {
let url_dirty = &example.dirty;
let url_clean = &example.clean;
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(&clean, url_clean);
}
}
}
}
| {
if let Some(domain) = url.domain() {
// Check all rules that matches this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Discusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean"); | identifier_body |
gdb.rs | use gdbstub::common::{Signal, Tid};
use gdbstub::conn::Connection;
use gdbstub::stub::state_machine::GdbStubStateMachine;
use gdbstub::stub::{GdbStubBuilder, GdbStubError, MultiThreadStopReason};
use gdbstub::target::Target;
use crate::io::SerialRead;
use crate::platform::precursor::gdbuart::GdbUart;
mod breakpoints;
mod current_active_pid;
mod extended_mode;
mod monitor;
mod multi_thread_base;
mod multi_thread_resume;
mod multi_thread_single_step;
mod single_register_access;
mod target;
#[cfg(target_arch = "riscv32")]
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget,
pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn default() -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
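// Note: head == tail means empty, and one slot is always left unused so a
// full buffer can be told apart from an empty one; a MicroRingBuf::<4> holds
// at most 3 bytes. A minimal sketch of the expected behaviour:
//
// let mut rb = MicroRingBuf::<4>::default();
// assert!(rb.try_push(1).is_ok());
// assert!(rb.try_push(2).is_ok());
// assert!(rb.try_push(3).is_ok());
// assert!(rb.try_push(4).is_err()); // full: only N - 1 slots are usable
// assert_eq!(rb.try_pop(), Some(1)); // FIFO order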
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner) => {
match inner.report_stop(
&mut target,
MultiThreadStopReason::Signal(Signal::EXC_BAD_ACCESS),
) {
Ok(new_gdb) => new_gdb,
Err(e) => {
println!("Unable to report stop: {:?}", e);
return;
}
}
}
GdbStubStateMachine::CtrlCInterrupt(_inner) => {
println!("GDB state was in CtrlCInterrupt, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Disconnected(_inner) => {
println!("GDB state was in Disconnect, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Idle(inner) => {
println!("Please connect a debugger to debug process {}", pid);
GdbStubStateMachine::Idle(inner)
}
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn init() | {
let mut uart = GdbUart::new(receive_irq).unwrap();
uart.enable();
let mut target = XousTarget::new();
let server = GdbStubBuilder::new(uart)
.with_packet_buffer(unsafe { &mut GDB_BUFFER })
.build()
.expect("unable to build gdb server")
.run_state_machine(&mut target)
.expect("unable to start gdb state machine");
unsafe {
GDB_STATE = Some(XousDebugState { target, server });
}
} | identifier_body |
|
gdb.rs | use gdbstub::common::{Signal, Tid};
use gdbstub::conn::Connection;
use gdbstub::stub::state_machine::GdbStubStateMachine;
use gdbstub::stub::{GdbStubBuilder, GdbStubError, MultiThreadStopReason};
use gdbstub::target::Target;
use crate::io::SerialRead;
use crate::platform::precursor::gdbuart::GdbUart;
mod breakpoints;
mod current_active_pid;
mod extended_mode;
mod monitor;
mod multi_thread_base;
mod multi_thread_resume;
mod multi_thread_single_step;
mod single_register_access;
mod target;
#[cfg(target_arch = "riscv32")]
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget,
pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn default() -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
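// The transitions handled above, sketched (an informal summary, not
// additional states):
//
// CtrlCInterrupt -> pause the debugged process -> interrupt_handled -> Idle/Running
// Disconnected  -> resume the debugged process -> return_to_idle -> Idle
//
// Idle and Running can already accept characters and pass through unchanged;
// at most `recurse_count` transitions are followed before giving up.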
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => |
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner) => {
match inner.report_stop(
&mut target,
MultiThreadStopReason::Signal(Signal::EXC_BAD_ACCESS),
) {
Ok(new_gdb) => new_gdb,
Err(e) => {
println!("Unable to report stop: {:?}", e);
return;
}
}
}
GdbStubStateMachine::CtrlCInterrupt(_inner) => {
println!("GDB state was in CtrlCInterrupt, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Disconnected(_inner) => {
println!("GDB state was in Disconnect, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Idle(inner) => {
println!("Please connect a debugger to debug process {}", pid);
GdbStubStateMachine::Idle(inner)
}
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn init() {
let mut uart = GdbUart::new(receive_irq).unwrap();
uart.enable();
let mut target = XousTarget::new();
let server = GdbStubBuilder::new(uart)
.with_packet_buffer(unsafe { &mut GDB_BUFFER })
.build()
.expect("unable to build gdb server")
.run_state_machine(&mut target)
.expect("unable to start gdb state machine");
unsafe {
GDB_STATE = Some(XousDebugState { target, server });
}
}
| {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
} | conditional_block |
gdb.rs | use gdbstub::common::{Signal, Tid};
use gdbstub::conn::Connection;
use gdbstub::stub::state_machine::GdbStubStateMachine;
use gdbstub::stub::{GdbStubBuilder, GdbStubError, MultiThreadStopReason};
use gdbstub::target::Target;
use crate::io::SerialRead;
use crate::platform::precursor::gdbuart::GdbUart;
mod breakpoints;
mod current_active_pid;
mod extended_mode;
mod monitor;
mod multi_thread_base;
mod multi_thread_resume;
mod multi_thread_single_step;
mod single_register_access;
mod target;
#[cfg(target_arch = "riscv32")]
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget, | pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn default() -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
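// Bytes arrive here one at a time from the UART interrupt, so a single GDB
// remote-serial-protocol packet such as "$m80000000,4#5a" (a hypothetical
// example) is fed to incoming_data byte by byte; gdbstub accumulates it in
// GDB_BUFFER and only acts once the packet, including its checksum, is
// complete.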
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner) => {
match inner.report_stop(
&mut target,
MultiThreadStopReason::Signal(Signal::EXC_BAD_ACCESS),
) {
Ok(new_gdb) => new_gdb,
Err(e) => {
println!("Unable to report stop: {:?}", e);
return;
}
}
}
GdbStubStateMachine::CtrlCInterrupt(_inner) => {
println!("GDB state was in CtrlCInterrupt, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Disconnected(_inner) => {
println!("GDB state was in Disconnect, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Idle(inner) => {
println!("Please connect a debugger to debug process {}", pid);
GdbStubStateMachine::Idle(inner)
}
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn init() {
let mut uart = GdbUart::new(receive_irq).unwrap();
uart.enable();
let mut target = XousTarget::new();
let server = GdbStubBuilder::new(uart)
.with_packet_buffer(unsafe { &mut GDB_BUFFER })
.build()
.expect("unable to build gdb server")
.run_state_machine(&mut target)
.expect("unable to start gdb state machine");
unsafe {
GDB_STATE = Some(XousDebugState { target, server });
}
} | random_line_split |
|
gdb.rs | use gdbstub::common::{Signal, Tid};
use gdbstub::conn::Connection;
use gdbstub::stub::state_machine::GdbStubStateMachine;
use gdbstub::stub::{GdbStubBuilder, GdbStubError, MultiThreadStopReason};
use gdbstub::target::Target;
use crate::io::SerialRead;
use crate::platform::precursor::gdbuart::GdbUart;
mod breakpoints;
mod current_active_pid;
mod extended_mode;
mod monitor;
mod multi_thread_base;
mod multi_thread_resume;
mod multi_thread_single_step;
mod single_register_access;
mod target;
#[cfg(target_arch = "riscv32")]
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget,
pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn | () -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
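// For example: when a breakpoint fires in thread 2 of the process being
// debugged, the kernel calls report_stop(pid, 2, pc) and the attached gdb
// client receives a stop reply with signal EXC_BREAKPOINT for tid 2.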
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner) => {
match inner.report_stop(
&mut target,
MultiThreadStopReason::Signal(Signal::EXC_BAD_ACCESS),
) {
Ok(new_gdb) => new_gdb,
Err(e) => {
println!("Unable to report stop: {:?}", e);
return;
}
}
}
GdbStubStateMachine::CtrlCInterrupt(_inner) => {
println!("GDB state was in CtrlCInterrupt, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Disconnected(_inner) => {
println!("GDB state was in Disconnect, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Idle(inner) => {
println!("Please connect a debugger to debug process {}", pid);
GdbStubStateMachine::Idle(inner)
}
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn init() {
let mut uart = GdbUart::new(receive_irq).unwrap();
uart.enable();
let mut target = XousTarget::new();
let server = GdbStubBuilder::new(uart)
.with_packet_buffer(unsafe { &mut GDB_BUFFER })
.build()
.expect("unable to build gdb server")
.run_state_machine(&mut target)
.expect("unable to start gdb state machine");
unsafe {
GDB_STATE = Some(XousDebugState { target, server });
}
}
| default | identifier_name |
theme.rs | use cssparser::{self, BasicParseError, CompactCowStr, DeclarationListParser, Parser, ParseError, ParserInput, Token};
use orbclient::Color;
use std::collections::HashSet;
use std::sync::Arc;
use std::mem;
use std::ops::Add;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::Read;
static DEFAULT_THEME_CSS: &'static str = include_str!("theme.css");
lazy_static! {
static ref DEFAULT_THEME: Arc<Theme> = {
Arc::new(Theme {parent: None, rules: parse(DEFAULT_THEME_CSS)})
};
}
pub struct Theme {
parent: Option<Arc<Theme>>,
rules: Vec<Rule>,
}
impl Theme {
pub fn new() -> Self {
Theme::parse("")
}
pub fn parse(s: &str) -> Self {
Theme {
parent: Some(DEFAULT_THEME.clone()),
rules: parse(s),
}
}
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Theme, String> {
let file = try!(File::open(path).map_err(|err| format!("failed to open css: {}", err)));
let mut reader = BufReader::new(file);
let mut css = String::new();
let res = reader.read_to_string(&mut css).map_err(|err| format!("failed to read css: {}", err));
match res {
Ok(_) => Ok(Theme::parse(&css)),
Err(err) => Err(err),
}
}
fn all_rules(&self) -> Vec<Rule> {
if let Some(ref parent) = self.parent {
self.rules.iter().chain(parent.rules.iter()).cloned().collect()
} else {
self.rules.clone()
}
}
pub fn get(&self, property: &str, query: &Selector) -> Option<Value> {
let mut matches: Vec<(bool, Specificity, Value)> = Vec::new();
for rule in self.all_rules().iter().rev() {
let matching_selectors = rule.selectors.iter().filter(|x| x.matches(query)).collect::<Vec<_>>();
if matching_selectors.len() > 0 {
if let Some(decl) = rule.declarations.iter().find(|decl| decl.property == property) {
let highest_specifity = matching_selectors.iter().map(|sel| sel.specificity()).max().unwrap();
matches.push((decl.important, highest_specifity, decl.value.clone()));
}
}
}
matches.sort_by_key(|x| (x.0, x.1));
matches.last().map(|x| x.2.clone())
}
pub fn color(&self, property: &str, query: &Selector) -> Color {
let default = Color { data: 0 };
self.get(property, query).map(|v| v.color().unwrap_or(default)).unwrap_or(default)
}
pub fn uint(&self, property: &str, query: &Selector) -> u32 {
self.get(property, query).map(|v| v.uint().unwrap_or(0)).unwrap_or(0)
}
}
#[derive(Clone, Debug)]
pub struct Rule {
pub selectors: Vec<Selector>,
pub declarations: Vec<Declaration>,
}
#[derive(Clone, Debug)]
pub enum SelectorRelation {
Ancestor(Selector),
Parent(Selector),
}
impl<T: Into<String>> From<T> for Selector {
fn from(t: T) -> Self {
Selector::new(Some(t.into()))
}
}
/// Describes the specificity of a selector.
///
/// The indexes are as follows:
/// 0 - number of IDs (most important)
/// 1 - number of classes and pseudo-classes
/// 2 - number of elements (least important)
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Specificity([u8; 3]);
impl Add<Self> for Specificity {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Specificity([
self.0[0] + rhs.0[0],
self.0[1] + rhs.0[1],
self.0[2] + rhs.0[2],
])
}
}
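// For example (a sketch, relying on the derived lexicographic Ord):
// "button.primary:hover" has Specificity([0, 2, 1]) (one class plus one
// pseudo-class, and an element), which outranks a bare "button" at
// Specificity([0, 0, 1]).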
#[derive(Clone, Debug, Default)]
pub struct Selector {
pub element: Option<String>,
pub classes: HashSet<String>,
pub pseudo_classes: HashSet<String>,
pub relation: Option<Box<SelectorRelation>>,
}
impl Selector {
pub fn new<S: Into<String>>(element: Option<S>) -> Self {
Selector {
element: element.map(|s| s.into()),
classes: HashSet::new(),
pseudo_classes: HashSet::new(),
relation: None,
}
}
fn specificity(&self) -> Specificity {
let s = Specificity([
0,
(self.classes.len() + self.pseudo_classes.len()) as u8,
if self.element.is_some() { 1 } else { 0 }
]);
if let Some(ref relation) = self.relation {
match **relation {
SelectorRelation::Ancestor(ref x) | SelectorRelation::Parent(ref x) => return x.specificity() + s,
}
}
s
}
pub fn matches(&self, other: &Selector) -> bool {
if self.element.is_some() && self.element != other.element {
return false;
}
if !other.classes.is_superset(&self.classes) {
return false;
}
if !other.pseudo_classes.is_superset(&self.pseudo_classes) {
return false;
}
true
}
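// Matching is deliberately asymmetric: a rule selector ".primary" matches a
// widget query "button.primary:active" because the query's class and
// pseudo-class sets are supersets of the rule's, while "button.primary" does
// not match a plain "button" query.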
pub fn with_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.insert(class.into());
self
}
pub fn without_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.remove(&class.into());
self
}
pub fn with_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.insert(pseudo_class.into());
self
}
pub fn without_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.remove(&pseudo_class.into());
self
}
}
impl Selector {
pub fn is_empty(&self) -> bool {
self.element.is_none() && self.classes.is_empty() && self.pseudo_classes.is_empty()
}
}
#[derive(Clone, Debug)]
pub struct Declaration {
pub property: String,
pub value: Value,
pub important: bool,
}
#[derive(Clone, Debug)]
pub enum Value {
UInt(u32),
Color(Color),
}
impl Value {
pub fn uint(&self) -> Option<u32> {
match *self {
Value::UInt(x) => Some(x),
_ => None,
}
}
pub fn color(&self) -> Option<Color> {
match *self {
Value::Color(x) => Some(x),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub enum CustomParseError {
InvalidColorName(String),
InvalidColorHex(String),
}
impl<'t> From<CustomParseError> for ParseError<'t, CustomParseError> {
fn from(e: CustomParseError) -> Self {
ParseError::Custom(e)
}
}
struct RuleParser;
impl RuleParser {
fn new() -> Self {
RuleParser {}
}
}
impl<'i> cssparser::QualifiedRuleParser<'i> for RuleParser {
type Prelude = Vec<Selector>;
type QualifiedRule = Rule;
type Error = CustomParseError;
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-> Result<Self::Prelude, ParseError<'i, Self::Error>> {
let res = parse_selectors(input)?;
Ok(res)
}
fn parse_block<'t>(&mut self, selectors: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
let decl_parser = DeclarationParser {};
let decls = DeclarationListParser::new(input, decl_parser).collect::<Vec<_>>();
for decl in &decls {
match *decl {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", input.slice(e.span.clone()));
}
}
}
let decls = decls.into_iter().filter_map(|decl| decl.ok()).collect();
Ok(Rule {
selectors: selectors,
declarations: decls,
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for RuleParser {
type Prelude = ();
type AtRule = Rule;
type Error = CustomParseError;
}
fn parse_selectors<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Vec<Selector>, ParseError<'i, CustomParseError>> {
let mut selectors = Vec::new();
let mut selector = Selector::default();
let mut first_token_in_selector = true;
while let Ok(t) = input.next() {
match t {
// Element
Token::Ident(ref element_name) => {
if first_token_in_selector {
selector.element = Some(element_name.to_string())
} else {
let mut old_selector = Selector::new(Some(element_name.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Ancestor(old_selector)));
}
}
Token::Delim('>') => {
let mut old_selector = Selector::new(Some(input.expect_ident()?.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Parent(old_selector)));
}
// Any element
Token::Delim('*') => {}
// Class
Token::Delim('.') => {selector.classes.insert(input.expect_ident()?.into_owned());}
// Pseudo-class
Token::Colon => {selector.pseudo_classes.insert(input.expect_ident()?.into_owned());}
// This selector is done, on to the next one
Token::Comma => {
selectors.push(selector);
selector = Selector::default();
first_token_in_selector = true;
continue; // need to continue to avoid `first_token_in_selector` being set to false
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
}
first_token_in_selector = false;
}
selectors.push(selector);
if selectors.iter().any(|sel| sel.relation.is_some()) {
eprintln!("WARNING: Complex selector relations not implemented");
}
Ok(selectors)
}
struct DeclarationParser;
impl<'i> cssparser::DeclarationParser<'i> for DeclarationParser {
type Declaration = Declaration;
type Error = CustomParseError;
fn parse_value<'t>(&mut self, name: CompactCowStr<'i>, input: &mut Parser<'i, 't>) -> Result<Self::Declaration, ParseError<'i, Self::Error>> {
let value = match &*name {
"color" | "border-color" => Value::Color(parse_basic_color(input)?),
"background" | "foreground" => Value::Color(parse_basic_color(input)?),
"border-radius" | "border-width" => {
match input.next()? {
Token::Number { int_value: Some(x), has_sign, .. } if !has_sign && x >= 0 => Value::UInt(x as u32),
t => return Err(BasicParseError::UnexpectedToken(t).into())
}
}
_ => return Err(BasicParseError::UnexpectedToken(input.next()?).into()),
};
Ok(Declaration {
property: name.into_owned(),
value: value,
important: input.try(cssparser::parse_important).is_ok()
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for DeclarationParser {
type Prelude = ();
type AtRule = Declaration;
type Error = CustomParseError;
}
fn css_color(name: &str) -> Option<Color> {
Some(hex(match name {
"transparent" => return Some(Color { data: 0 }),
"black" => 0x000000,
"silver" => 0xc0c0c0,
"gray" | "grey" => 0x808080,
"white" => 0xffffff,
"maroon" => 0x800000,
"red" => 0xff0000,
"purple" => 0x800080,
"fuchsia" => 0xff00ff,
"green" => 0x008000,
"lime" => 0x00ff00,
"olive" => 0x808000,
"yellow" => 0xffff00,
"navy" => 0x000080,
"blue" => 0x0000ff,
"teal" => 0x008080,
"aqua" => 0x00ffff,
_ => return None,
}))
}
fn parse_basic_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, ParseError<'i, CustomParseError>> {
Ok(match input.next()? {
Token::Ident(s) => match css_color(&s) {
Some(color) => color,
None => return Err(CustomParseError::InvalidColorName(s.into_owned()).into()),
},
Token::IDHash(hash) | Token::Hash(hash) => {
match hash.len() {
6 | 8 => {
let mut x = match u32::from_str_radix(&hash, 16) {
Ok(x) => x,
Err(_) => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
};
if hash.len() == 6 {
x |= 0xFF000000;
}
Color { data: x }
},
_ => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
}
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
})
}
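// For example: "#ff0000" parses to Color { data: 0xFFFF0000 } (opaque red),
// since a 6-digit hash gets 0xFF alpha OR'd in, while an 8-digit hash such
// as "#80ff0000" keeps its own alpha channel.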
fn parse(s: &str) -> Vec<Rule> {
let mut input = ParserInput::new(s);
let mut parser = Parser::new(&mut input);
let rule_parser = RuleParser::new();
let rules = {
let rule_list_parser = cssparser::RuleListParser::new_for_stylesheet(&mut parser, rule_parser);
rule_list_parser.collect::<Vec<_>>()
};
for rule in &rules {
match *rule {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", parser.slice(e.span.clone()));
}
}
}
rules.into_iter().filter_map(|rule| rule.ok()).collect()
}
const fn | (data: u32) -> Color {
Color { data: 0xFF000000 | data }
}
| hex | identifier_name |
theme.rs | use cssparser::{self, BasicParseError, CompactCowStr, DeclarationListParser, Parser, ParseError, ParserInput, Token};
use orbclient::Color;
use std::collections::HashSet;
use std::sync::Arc;
use std::mem;
use std::ops::Add;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::Read;
static DEFAULT_THEME_CSS: &'static str = include_str!("theme.css");
lazy_static! {
static ref DEFAULT_THEME: Arc<Theme> = {
Arc::new(Theme {parent: None, rules: parse(DEFAULT_THEME_CSS)})
};
}
pub struct Theme {
parent: Option<Arc<Theme>>,
rules: Vec<Rule>,
}
impl Theme {
pub fn new() -> Self {
Theme::parse("")
}
pub fn parse(s: &str) -> Self {
Theme {
parent: Some(DEFAULT_THEME.clone()),
rules: parse(s),
}
}
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Theme, String> {
let file = try!(File::open(path).map_err(|err| format!("failed to open css: {}", err)));
let mut reader = BufReader::new(file);
let mut css = String::new();
let res = reader.read_to_string(&mut css).map_err(|err| format!("failed to read css: {}", err));
match res {
Ok(_) => Ok(Theme::parse(&css)),
Err(err) => Err(err),
}
}
fn all_rules(&self) -> Vec<Rule> {
if let Some(ref parent) = self.parent {
self.rules.iter().chain(parent.rules.iter()).cloned().collect()
} else {
self.rules.clone()
}
}
pub fn get(&self, property: &str, query: &Selector) -> Option<Value> {
let mut matches: Vec<(bool, Specificity, Value)> = Vec::new();
for rule in self.all_rules().iter().rev() {
let matching_selectors = rule.selectors.iter().filter(|x| x.matches(query)).collect::<Vec<_>>();
if matching_selectors.len() > 0 {
if let Some(decl) = rule.declarations.iter().find(|decl| decl.property == property) {
let highest_specifity = matching_selectors.iter().map(|sel| sel.specificity()).max().unwrap();
matches.push((decl.important, highest_specifity, decl.value.clone()));
}
}
}
matches.sort_by_key(|x| (x.0, x.1));
matches.last().map(|x| x.2.clone())
}
pub fn color(&self, property: &str, query: &Selector) -> Color {
let default = Color { data: 0 };
self.get(property, query).map(|v| v.color().unwrap_or(default)).unwrap_or(default)
}
pub fn uint(&self, property: &str, query: &Selector) -> u32 {
self.get(property, query).map(|v| v.uint().unwrap_or(0)).unwrap_or(0)
}
}
#[derive(Clone, Debug)]
pub struct Rule {
pub selectors: Vec<Selector>,
pub declarations: Vec<Declaration>,
}
#[derive(Clone, Debug)]
pub enum SelectorRelation {
Ancestor(Selector),
Parent(Selector),
}
impl<T: Into<String>> From<T> for Selector {
fn from(t: T) -> Self {
Selector::new(Some(t.into()))
}
}
/// Describes the specificity of a selector.
///
/// The indexes are as follows:
/// 0 - number of IDs (most important)
/// 1 - number of classes and pseudo-classes
/// 2 - number of elements (least important)
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Specificity([u8; 3]);
impl Add<Self> for Specificity {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Specificity([
self.0[0] + rhs.0[0],
self.0[1] + rhs.0[1],
self.0[2] + rhs.0[2],
])
}
}
#[derive(Clone, Debug, Default)]
pub struct Selector {
pub element: Option<String>,
pub classes: HashSet<String>,
pub pseudo_classes: HashSet<String>,
pub relation: Option<Box<SelectorRelation>>,
}
impl Selector {
pub fn new<S: Into<String>>(element: Option<S>) -> Self {
Selector {
element: element.map(|s| s.into()),
classes: HashSet::new(),
pseudo_classes: HashSet::new(),
relation: None,
}
}
fn specificity(&self) -> Specificity {
let s = Specificity([
0,
(self.classes.len() + self.pseudo_classes.len()) as u8,
if self.element.is_some() { 1 } else { 0 }
]);
if let Some(ref relation) = self.relation {
match **relation {
SelectorRelation::Ancestor(ref x) | SelectorRelation::Parent(ref x) => return x.specificity() + s,
}
}
s
}
pub fn matches(&self, other: &Selector) -> bool {
if self.element.is_some() && self.element != other.element {
return false;
}
if !other.classes.is_superset(&self.classes) {
return false;
}
if !other.pseudo_classes.is_superset(&self.pseudo_classes) {
return false;
}
true
}
pub fn with_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.insert(class.into());
self
}
pub fn without_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.remove(&class.into());
self
}
pub fn with_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.insert(pseudo_class.into());
self
}
pub fn without_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.remove(&pseudo_class.into());
self
}
}
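// Selectors can be built fluently, e.g. (a sketch):
//
// let query = Selector::new(Some("button"))
//     .with_class("primary")
//     .with_pseudo_class("active");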
impl Selector {
pub fn is_empty(&self) -> bool {
self.element.is_none() && self.classes.is_empty() && self.pseudo_classes.is_empty()
}
}
#[derive(Clone, Debug)]
pub struct Declaration {
pub property: String,
pub value: Value,
pub important: bool,
}
#[derive(Clone, Debug)]
pub enum Value {
UInt(u32),
Color(Color),
}
impl Value {
pub fn uint(&self) -> Option<u32> {
match *self {
Value::UInt(x) => Some(x),
_ => None,
}
}
pub fn color(&self) -> Option<Color> {
match *self {
Value::Color(x) => Some(x),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub enum CustomParseError {
InvalidColorName(String),
InvalidColorHex(String),
}
impl<'t> From<CustomParseError> for ParseError<'t, CustomParseError> {
fn from(e: CustomParseError) -> Self {
ParseError::Custom(e)
}
}
struct RuleParser;
impl RuleParser {
fn new() -> Self {
RuleParser {}
}
}
impl<'i> cssparser::QualifiedRuleParser<'i> for RuleParser {
type Prelude = Vec<Selector>;
type QualifiedRule = Rule;
type Error = CustomParseError;
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-> Result<Self::Prelude, ParseError<'i, Self::Error>> {
let res = parse_selectors(input)?;
Ok(res)
}
fn parse_block<'t>(&mut self, selectors: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
let decl_parser = DeclarationParser {};
let decls = DeclarationListParser::new(input, decl_parser).collect::<Vec<_>>();
for decl in &decls {
match *decl {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", input.slice(e.span.clone()));
}
}
}
let decls = decls.into_iter().filter_map(|decl| decl.ok()).collect();
Ok(Rule {
selectors: selectors,
declarations: decls,
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for RuleParser {
type Prelude = ();
type AtRule = Rule;
type Error = CustomParseError;
}
fn parse_selectors<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Vec<Selector>, ParseError<'i, CustomParseError>> {
let mut selectors = Vec::new();
let mut selector = Selector::default();
let mut first_token_in_selector = true;
while let Ok(t) = input.next() {
match t {
// Element
Token::Ident(ref element_name) => {
if first_token_in_selector {
selector.element = Some(element_name.to_string())
} else {
let mut old_selector = Selector::new(Some(element_name.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Ancestor(old_selector)));
}
}
Token::Delim('>') => {
let mut old_selector = Selector::new(Some(input.expect_ident()?.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Parent(old_selector)));
}
// Any element
Token::Delim('*') => {}
// Class
Token::Delim('.') => {selector.classes.insert(input.expect_ident()?.into_owned());}
// Pseudo-class
Token::Colon => {selector.pseudo_classes.insert(input.expect_ident()?.into_owned());}
// This selector is done, on to the next one
Token::Comma => {
selectors.push(selector);
selector = Selector::default();
first_token_in_selector = true;
continue; // need to continue to avoid `first_token_in_selector` being set to false
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
}
first_token_in_selector = false;
}
selectors.push(selector);
if selectors.iter().any(|sel| sel.relation.is_some()) {
eprintln!("WARNING: Complex selector relations not implemented");
}
Ok(selectors)
}
struct DeclarationParser;
impl<'i> cssparser::DeclarationParser<'i> for DeclarationParser {
type Declaration = Declaration;
type Error = CustomParseError;
fn parse_value<'t>(&mut self, name: CompactCowStr<'i>, input: &mut Parser<'i, 't>) -> Result<Self::Declaration, ParseError<'i, Self::Error>> {
let value = match &*name {
"color" | "border-color" => Value::Color(parse_basic_color(input)?),
"background" | "foreground" => Value::Color(parse_basic_color(input)?),
"border-radius" | "border-width" => {
match input.next()? {
Token::Number { int_value: Some(x), has_sign, .. } if !has_sign && x >= 0 => Value::UInt(x as u32),
t => return Err(BasicParseError::UnexpectedToken(t).into())
}
}
_ => return Err(BasicParseError::UnexpectedToken(input.next()?).into()),
};
Ok(Declaration {
property: name.into_owned(),
value: value,
important: input.try(cssparser::parse_important).is_ok()
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for DeclarationParser {
type Prelude = ();
type AtRule = Declaration;
type Error = CustomParseError;
}
fn css_color(name: &str) -> Option<Color> {
Some(hex(match name {
"transparent" => return Some(Color { data: 0 }),
"black" => 0x000000,
"silver" => 0xc0c0c0,
"gray" | "grey" => 0x808080,
"white" => 0xffffff,
"maroon" => 0x800000,
"red" => 0xff0000,
"purple" => 0x800080,
"fuchsia" => 0xff00ff,
"green" => 0x008000,
"lime" => 0x00ff00,
"olive" => 0x808000,
"yellow" => 0xffff00,
"navy" => 0x000080,
"blue" => 0x0000ff,
"teal" => 0x008080,
"aqua" => 0x00ffff,
_ => return None,
}))
}
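// Quick sketch of the mapping above: named colors come back fully opaque
// because `hex` ORs in the 0xFF alpha byte, while "transparent" bypasses it.
#[cfg(test)]
mod css_color_sketch {
    use super::css_color;
    #[test]
    fn named_colors_resolve_as_expected() {
        assert_eq!(css_color("red").unwrap().data, 0xFFFF0000);
        assert_eq!(css_color("transparent").unwrap().data, 0);
        assert!(css_color("hotpink").is_none()); // only the basic CSS names are known
    }
}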
fn parse_basic_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, ParseError<'i, CustomParseError>> {
Ok(match input.next()? {
Token::Ident(s) => match css_color(&s) {
Some(color) => color,
None => return Err(CustomParseError::InvalidColorName(s.into_owned()).into()),
},
Token::IDHash(hash) | Token::Hash(hash) => {
match hash.len() {
6 | 8 => | ,
_ => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
}
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
})
}
fn parse(s: &str) -> Vec<Rule> {
let mut input = ParserInput::new(s);
let mut parser = Parser::new(&mut input);
let rule_parser = RuleParser::new();
let rules = {
let rule_list_parser = cssparser::RuleListParser::new_for_stylesheet(&mut parser, rule_parser);
rule_list_parser.collect::<Vec<_>>()
};
for rule in &rules {
match *rule {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", parser.slice(e.span.clone()));
}
}
}
rules.into_iter().filter_map(|rule| rule.ok()).collect()
}
const fn hex(data: u32) -> Color {
Color { data: 0xFF000000 | data }
}
| {
let mut x = match u32::from_str_radix(&hash, 16) {
Ok(x) => x,
Err(_) => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
};
if hash.len() == 6 {
x |= 0xFF000000;
}
Color { data: x }
} | conditional_block |
theme.rs | use cssparser::{self, BasicParseError, CompactCowStr, DeclarationListParser, Parser, ParseError, ParserInput, Token};
use orbclient::Color;
use std::collections::HashSet;
use std::sync::Arc;
use std::mem;
use std::ops::Add;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::Read;
static DEFAULT_THEME_CSS: &'static str = include_str!("theme.css");
lazy_static! {
static ref DEFAULT_THEME: Arc<Theme> = {
Arc::new(Theme {parent: None, rules: parse(DEFAULT_THEME_CSS)})
};
}
pub struct Theme {
parent: Option<Arc<Theme>>,
rules: Vec<Rule>,
}
impl Theme {
pub fn new() -> Self {
Theme::parse("")
}
pub fn parse(s: &str) -> Self {
Theme {
parent: Some(DEFAULT_THEME.clone()),
rules: parse(s),
}
}
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Theme, String> {
let file = try!(File::open(path).map_err(|err| format!("failed to open css: {}", err)));
let mut reader = BufReader::new(file);
let mut css = String::new();
let res = reader.read_to_string(&mut css).map_err(|err| format!("failed to read css: {}", err));
match res {
Ok(_) => Ok(Theme::parse(&css)),
Err(err) => Err(err),
}
}
fn all_rules(&self) -> Vec<Rule> {
if let Some(ref parent) = self.parent {
self.rules.iter().chain(parent.rules.iter()).cloned().collect()
} else {
self.rules.clone()
}
}
pub fn get(&self, property: &str, query: &Selector) -> Option<Value> {
let mut matches: Vec<(bool, Specificity, Value)> = Vec::new();
for rule in self.all_rules().iter().rev() {
let matching_selectors = rule.selectors.iter().filter(|x| x.matches(query)).collect::<Vec<_>>();
if matching_selectors.len() > 0 {
if let Some(decl) = rule.declarations.iter().find(|decl| decl.property == property) {
let highest_specifity = matching_selectors.iter().map(|sel| sel.specificity()).max().unwrap();
matches.push((decl.important, highest_specifity, decl.value.clone()));
}
}
}
matches.sort_by_key(|x| (x.0, x.1));
matches.last().map(|x| x.2.clone())
}
pub fn color(&self, property: &str, query: &Selector) -> Color {
let default = Color { data: 0 };
self.get(property, query).map(|v| v.color().unwrap_or(default)).unwrap_or(default)
}
pub fn uint(&self, property: &str, query: &Selector) -> u32 {
self.get(property, query).map(|v| v.uint().unwrap_or(0)).unwrap_or(0)
}
}
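// Minimal cascade sketch (the CSS snippet is hypothetical): `get` sorts matches
// by (important, specificity) and takes the last, so an `!important` declaration
// wins even against a more specific selector.
#[cfg(test)]
mod theme_cascade_sketch {
    use super::{Selector, Theme};
    #[test]
    fn important_beats_specificity() {
        let theme = Theme::parse(
            "button { border-width: 1 !important; } button.wide { border-width: 4; }",
        );
        let query = Selector::new(Some("button")).with_class("wide");
        assert_eq!(theme.uint("border-width", &query), 1);
    }
}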
#[derive(Clone, Debug)]
pub struct Rule {
pub selectors: Vec<Selector>,
pub declarations: Vec<Declaration>,
}
#[derive(Clone, Debug)]
pub enum SelectorRelation {
Ancestor(Selector),
Parent(Selector),
}
impl<T: Into<String>> From<T> for Selector {
fn from(t: T) -> Self {
Selector::new(Some(t.into()))
}
}
/// Describes the specificity of a selector.
///
/// The indexes are as follows:
/// 0 - number of IDs (most important)
/// 1 - number of classes and pseudo-classes
/// 2 - number of elements (least important)
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Specificity([u8; 3]);
impl Add<Self> for Specificity {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Specificity([
self.0[0] + rhs.0[0],
self.0[1] + rhs.0[1], | }
#[derive(Clone, Debug, Default)]
pub struct Selector {
pub element: Option<String>,
pub classes: HashSet<String>,
pub pseudo_classes: HashSet<String>,
pub relation: Option<Box<SelectorRelation>>,
}
impl Selector {
pub fn new<S: Into<String>>(element: Option<S>) -> Self {
Selector {
element: element.map(|s| s.into()),
classes: HashSet::new(),
pseudo_classes: HashSet::new(),
relation: None,
}
}
fn specificity(&self) -> Specificity {
let s = Specificity([
0,
(self.classes.len() + self.pseudo_classes.len()) as u8,
if self.element.is_some() { 1 } else { 0 }
]);
if let Some(ref relation) = self.relation {
match **relation {
SelectorRelation::Ancestor(ref x) | SelectorRelation::Parent(ref x) => return x.specificity() + s,
}
}
s
}
pub fn matches(&self, other: &Selector) -> bool {
if self.element.is_some() && self.element != other.element {
return false;
}
if !other.classes.is_superset(&self.classes) {
return false;
}
if !other.pseudo_classes.is_superset(&self.pseudo_classes) {
return false;
}
true
}
pub fn with_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.insert(class.into());
self
}
pub fn without_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.remove(&class.into());
self
}
pub fn with_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.insert(pseudo_class.into());
self
}
pub fn without_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.remove(&pseudo_class.into());
self
}
}
impl Selector {
pub fn is_empty(&self) -> bool {
self.element.is_none() && self.classes.is_empty() && self.pseudo_classes.is_empty()
}
}
#[derive(Clone, Debug)]
pub struct Declaration {
pub property: String,
pub value: Value,
pub important: bool,
}
#[derive(Clone, Debug)]
pub enum Value {
UInt(u32),
Color(Color),
}
impl Value {
pub fn uint(&self) -> Option<u32> {
match *self {
Value::UInt(x) => Some(x),
_ => None,
}
}
pub fn color(&self) -> Option<Color> {
match *self {
Value::Color(x) => Some(x),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub enum CustomParseError {
InvalidColorName(String),
InvalidColorHex(String),
}
impl<'t> From<CustomParseError> for ParseError<'t, CustomParseError> {
fn from(e: CustomParseError) -> Self {
ParseError::Custom(e)
}
}
struct RuleParser;
impl RuleParser {
fn new() -> Self {
RuleParser {}
}
}
impl<'i> cssparser::QualifiedRuleParser<'i> for RuleParser {
type Prelude = Vec<Selector>;
type QualifiedRule = Rule;
type Error = CustomParseError;
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-> Result<Self::Prelude, ParseError<'i, Self::Error>> {
let res = parse_selectors(input)?;
Ok(res)
}
fn parse_block<'t>(&mut self, selectors: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
let decl_parser = DeclarationParser {};
let decls = DeclarationListParser::new(input, decl_parser).collect::<Vec<_>>();
for decl in &decls {
match *decl {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", input.slice(e.span.clone()));
}
}
}
let decls = decls.into_iter().filter_map(|decl| decl.ok()).collect();
Ok(Rule {
selectors: selectors,
declarations: decls,
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for RuleParser {
type Prelude = ();
type AtRule = Rule;
type Error = CustomParseError;
}
fn parse_selectors<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Vec<Selector>, ParseError<'i, CustomParseError>> {
let mut selectors = Vec::new();
let mut selector = Selector::default();
let mut first_token_in_selector = true;
while let Ok(t) = input.next() {
match t {
// Element
Token::Ident(ref element_name) => {
if first_token_in_selector {
selector.element = Some(element_name.to_string())
} else {
let mut old_selector = Selector::new(Some(element_name.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Ancestor(old_selector)));
}
}
Token::Delim('>') => {
let mut old_selector = Selector::new(Some(input.expect_ident()?.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Parent(old_selector)));
}
// Any element
Token::Delim('*') => {}
// Class
Token::Delim('.') => {selector.classes.insert(input.expect_ident()?.into_owned());}
// Pseudo-class
Token::Colon => {selector.pseudo_classes.insert(input.expect_ident()?.into_owned());}
// This selector is done, on to the next one
Token::Comma => {
selectors.push(selector);
selector = Selector::default();
first_token_in_selector = true;
continue; // need to continue to avoid `first_token_in_selector` being set to false
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
}
first_token_in_selector = false;
}
selectors.push(selector);
if selectors.iter().any(|sel| sel.relation.is_some()) {
eprintln!("WARNING: Complex selector relations not implemented");
}
Ok(selectors)
}
struct DeclarationParser;
impl<'i> cssparser::DeclarationParser<'i> for DeclarationParser {
type Declaration = Declaration;
type Error = CustomParseError;
fn parse_value<'t>(&mut self, name: CompactCowStr<'i>, input: &mut Parser<'i, 't>) -> Result<Self::Declaration, ParseError<'i, Self::Error>> {
let value = match &*name {
"color" | "border-color" => Value::Color(parse_basic_color(input)?),
"background" | "foreground" => Value::Color(parse_basic_color(input)?),
"border-radius" | "border-width" => {
match input.next()? {
Token::Number { int_value: Some(x), has_sign, .. } if !has_sign && x >= 0 => Value::UInt(x as u32),
t => return Err(BasicParseError::UnexpectedToken(t).into())
}
}
_ => return Err(BasicParseError::UnexpectedToken(input.next()?).into()),
};
Ok(Declaration {
property: name.into_owned(),
value: value,
important: input.try(cssparser::parse_important).is_ok()
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for DeclarationParser {
type Prelude = ();
type AtRule = Declaration;
type Error = CustomParseError;
}
fn css_color(name: &str) -> Option<Color> {
Some(hex(match name {
"transparent" => return Some(Color { data: 0 }),
"black" => 0x000000,
"silver" => 0xc0c0c0,
"gray" | "grey" => 0x808080,
"white" => 0xffffff,
"maroon" => 0x800000,
"red" => 0xff0000,
"purple" => 0x800080,
"fuchsia" => 0xff00ff,
"green" => 0x008000,
"lime" => 0x00ff00,
"olive" => 0x808000,
"yellow" => 0xffff00,
"navy" => 0x000080,
"blue" => 0x0000ff,
"teal" => 0x008080,
"aqua" => 0x00ffff,
_ => return None,
}))
}
fn parse_basic_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, ParseError<'i, CustomParseError>> {
Ok(match input.next()? {
Token::Ident(s) => match css_color(&s) {
Some(color) => color,
None => return Err(CustomParseError::InvalidColorName(s.into_owned()).into()),
},
Token::IDHash(hash) | Token::Hash(hash) => {
match hash.len() {
6 | 8 => {
let mut x = match u32::from_str_radix(&hash, 16) {
Ok(x) => x,
Err(_) => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
};
if hash.len() == 6 {
x |= 0xFF000000;
}
Color { data: x }
},
_ => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
}
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
})
}
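// Hex sketch: a 6-digit hash gets an implied opaque alpha, while an 8-digit hash
// carries its own. Assumes the same old cssparser API used throughout this file.
#[cfg(test)]
mod basic_color_sketch {
    use super::parse_basic_color;
    use cssparser::{Parser, ParserInput};
    #[test]
    fn six_digit_hex_is_made_opaque() {
        let mut input = ParserInput::new("#ff0000");
        let mut parser = Parser::new(&mut input);
        assert_eq!(parse_basic_color(&mut parser).unwrap().data, 0xFFFF0000);
    }
}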
fn parse(s: &str) -> Vec<Rule> {
let mut input = ParserInput::new(s);
let mut parser = Parser::new(&mut input);
let rule_parser = RuleParser::new();
let rules = {
let rule_list_parser = cssparser::RuleListParser::new_for_stylesheet(&mut parser, rule_parser);
rule_list_parser.collect::<Vec<_>>()
};
for rule in &rules {
match *rule {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", parser.slice(e.span.clone()));
}
}
}
rules.into_iter().filter_map(|rule| rule.ok()).collect()
}
const fn hex(data: u32) -> Color {
Color { data: 0xFF000000 | data }
} | self.0[2] + rhs.0[2],
])
} | random_line_split |
theme.rs | use cssparser::{self, BasicParseError, CompactCowStr, DeclarationListParser, Parser, ParseError, ParserInput, Token};
use orbclient::Color;
use std::collections::HashSet;
use std::sync::Arc;
use std::mem;
use std::ops::Add;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::Read;
static DEFAULT_THEME_CSS: &'static str = include_str!("theme.css");
lazy_static! {
static ref DEFAULT_THEME: Arc<Theme> = {
Arc::new(Theme {parent: None, rules: parse(DEFAULT_THEME_CSS)})
};
}
pub struct Theme {
parent: Option<Arc<Theme>>,
rules: Vec<Rule>,
}
impl Theme {
pub fn new() -> Self {
Theme::parse("")
}
pub fn parse(s: &str) -> Self {
Theme {
parent: Some(DEFAULT_THEME.clone()),
rules: parse(s),
}
}
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Theme, String> {
let file = try!(File::open(path).map_err(|err| format!("failed to open css: {}", err)));
let mut reader = BufReader::new(file);
let mut css = String::new();
let res = reader.read_to_string(&mut css).map_err(|err| format!("failed to read css: {}", err));
match res {
Ok(_) => Ok(Theme::parse(&css)),
Err(err) => Err(err),
}
}
fn all_rules(&self) -> Vec<Rule> {
if let Some(ref parent) = self.parent {
self.rules.iter().chain(parent.rules.iter()).cloned().collect()
} else {
self.rules.clone()
}
}
pub fn get(&self, property: &str, query: &Selector) -> Option<Value> {
let mut matches: Vec<(bool, Specificity, Value)> = Vec::new();
for rule in self.all_rules().iter().rev() {
let matching_selectors = rule.selectors.iter().filter(|x| x.matches(query)).collect::<Vec<_>>();
if matching_selectors.len() > 0 {
if let Some(decl) = rule.declarations.iter().find(|decl| decl.property == property) {
let highest_specifity = matching_selectors.iter().map(|sel| sel.specificity()).max().unwrap();
matches.push((decl.important, highest_specifity, decl.value.clone()));
}
}
}
matches.sort_by_key(|x| (x.0, x.1));
matches.last().map(|x| x.2.clone())
}
pub fn color(&self, property: &str, query: &Selector) -> Color {
let default = Color { data: 0 };
self.get(property, query).map(|v| v.color().unwrap_or(default)).unwrap_or(default)
}
pub fn uint(&self, property: &str, query: &Selector) -> u32 {
self.get(property, query).map(|v| v.uint().unwrap_or(0)).unwrap_or(0)
}
}
#[derive(Clone, Debug)]
pub struct Rule {
pub selectors: Vec<Selector>,
pub declarations: Vec<Declaration>,
}
#[derive(Clone, Debug)]
pub enum SelectorRelation {
Ancestor(Selector),
Parent(Selector),
}
impl<T: Into<String>> From<T> for Selector {
fn from(t: T) -> Self {
Selector::new(Some(t.into()))
}
}
/// Describes the specificity of a selector.
///
/// The indexes are as follows:
/// 0 - number of IDs (most important)
/// 1 - number of classes and pseudo-classes
/// 2 - number of elements (least important)
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Specificity([u8; 3]);
impl Add<Self> for Specificity {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Specificity([
self.0[0] + rhs.0[0],
self.0[1] + rhs.0[1],
self.0[2] + rhs.0[2],
])
}
}
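// Ordering sketch: the derived Ord compares the [u8; 3] arrays lexicographically,
// so one ID outranks any number of classes, and one class outranks any number of
// elements.
#[cfg(test)]
mod specificity_sketch {
    use super::Specificity;
    #[test]
    fn ids_outrank_classes_outrank_elements() {
        assert!(Specificity([1, 0, 0]) > Specificity([0, 9, 9]));
        assert!(Specificity([0, 1, 0]) > Specificity([0, 0, 9]));
        assert_eq!(
            Specificity([0, 1, 1]) + Specificity([0, 0, 1]),
            Specificity([0, 1, 2])
        );
    }
}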
#[derive(Clone, Debug, Default)]
pub struct Selector {
pub element: Option<String>,
pub classes: HashSet<String>,
pub pseudo_classes: HashSet<String>,
pub relation: Option<Box<SelectorRelation>>,
}
impl Selector {
pub fn new<S: Into<String>>(element: Option<S>) -> Self {
Selector {
element: element.map(|s| s.into()),
classes: HashSet::new(),
pseudo_classes: HashSet::new(),
relation: None,
}
}
fn specificity(&self) -> Specificity {
let s = Specificity([
0,
(self.classes.len() + self.pseudo_classes.len()) as u8,
if self.element.is_some() { 1 } else { 0 }
]);
if let Some(ref relation) = self.relation {
match **relation {
SelectorRelation::Ancestor(ref x) | SelectorRelation::Parent(ref x) => return x.specificity() + s,
}
}
s
}
pub fn matches(&self, other: &Selector) -> bool {
if self.element.is_some() && self.element != other.element {
return false;
}
if !other.classes.is_superset(&self.classes) {
return false;
}
if !other.pseudo_classes.is_superset(&self.pseudo_classes) {
return false;
}
true
}
pub fn with_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.insert(class.into());
self
}
pub fn without_class<S: Into<String>>(mut self, class: S) -> Self {
self.classes.remove(&class.into());
self
}
pub fn with_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self |
pub fn without_pseudo_class<S: Into<String>>(mut self, pseudo_class: S) -> Self {
self.pseudo_classes.remove(&pseudo_class.into());
self
}
}
impl Selector {
pub fn is_empty(&self) -> bool {
self.element.is_none() && self.classes.is_empty() && self.pseudo_classes.is_empty()
}
}
#[derive(Clone, Debug)]
pub struct Declaration {
pub property: String,
pub value: Value,
pub important: bool,
}
#[derive(Clone, Debug)]
pub enum Value {
UInt(u32),
Color(Color),
}
impl Value {
pub fn uint(&self) -> Option<u32> {
match *self {
Value::UInt(x) => Some(x),
_ => None,
}
}
pub fn color(&self) -> Option<Color> {
match *self {
Value::Color(x) => Some(x),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub enum CustomParseError {
InvalidColorName(String),
InvalidColorHex(String),
}
impl<'t> From<CustomParseError> for ParseError<'t, CustomParseError> {
fn from(e: CustomParseError) -> Self {
ParseError::Custom(e)
}
}
struct RuleParser;
impl RuleParser {
fn new() -> Self {
RuleParser {}
}
}
impl<'i> cssparser::QualifiedRuleParser<'i> for RuleParser {
type Prelude = Vec<Selector>;
type QualifiedRule = Rule;
type Error = CustomParseError;
fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-> Result<Self::Prelude, ParseError<'i, Self::Error>> {
let res = parse_selectors(input)?;
Ok(res)
}
fn parse_block<'t>(&mut self, selectors: Self::Prelude, input: &mut Parser<'i, 't>)
-> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
let decl_parser = DeclarationParser {};
let decls = DeclarationListParser::new(input, decl_parser).collect::<Vec<_>>();
for decl in &decls {
match *decl {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", input.slice(e.span.clone()));
}
}
}
let decls = decls.into_iter().filter_map(|decl| decl.ok()).collect();
Ok(Rule {
selectors: selectors,
declarations: decls,
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for RuleParser {
type Prelude = ();
type AtRule = Rule;
type Error = CustomParseError;
}
fn parse_selectors<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Vec<Selector>, ParseError<'i, CustomParseError>> {
let mut selectors = Vec::new();
let mut selector = Selector::default();
let mut first_token_in_selector = true;
while let Ok(t) = input.next() {
match t {
// Element
Token::Ident(ref element_name) => {
if first_token_in_selector {
selector.element = Some(element_name.to_string())
} else {
let mut old_selector = Selector::new(Some(element_name.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Ancestor(old_selector)));
}
}
Token::Delim('>') => {
let mut old_selector = Selector::new(Some(input.expect_ident()?.to_string()));
mem::swap(&mut old_selector, &mut selector);
selector.relation = Some(Box::new(SelectorRelation::Parent(old_selector)));
}
// Any element
Token::Delim('*') => {}
// Class
Token::Delim('.') => {selector.classes.insert(input.expect_ident()?.into_owned());}
// Pseudo-class
Token::Colon => {selector.pseudo_classes.insert(input.expect_ident()?.into_owned());}
// This selector is done, on to the next one
Token::Comma => {
selectors.push(selector);
selector = Selector::default();
first_token_in_selector = true;
continue; // need to continue to avoid `first_token_in_selector` being set to false
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
}
first_token_in_selector = false;
}
selectors.push(selector);
if selectors.iter().any(|sel| sel.relation.is_some()) {
eprintln!("WARNING: Complex selector relations not implemented");
}
Ok(selectors)
}
struct DeclarationParser;
impl<'i> cssparser::DeclarationParser<'i> for DeclarationParser {
type Declaration = Declaration;
type Error = CustomParseError;
fn parse_value<'t>(&mut self, name: CompactCowStr<'i>, input: &mut Parser<'i, 't>) -> Result<Self::Declaration, ParseError<'i, Self::Error>> {
let value = match &*name {
"color" | "border-color" => Value::Color(parse_basic_color(input)?),
"background" | "foreground" => Value::Color(parse_basic_color(input)?),
"border-radius" | "border-width" => {
match input.next()? {
Token::Number { int_value: Some(x), has_sign, .. } if !has_sign && x >= 0 => Value::UInt(x as u32),
t => return Err(BasicParseError::UnexpectedToken(t).into())
}
}
_ => return Err(BasicParseError::UnexpectedToken(input.next()?).into()),
};
Ok(Declaration {
property: name.into_owned(),
value: value,
important: input.try(cssparser::parse_important).is_ok()
})
}
}
impl<'i> cssparser::AtRuleParser<'i> for DeclarationParser {
type Prelude = ();
type AtRule = Declaration;
type Error = CustomParseError;
}
fn css_color(name: &str) -> Option<Color> {
Some(hex(match name {
"transparent" => return Some(Color { data: 0 }),
"black" => 0x000000,
"silver" => 0xc0c0c0,
"gray" | "grey" => 0x808080,
"white" => 0xffffff,
"maroon" => 0x800000,
"red" => 0xff0000,
"purple" => 0x800080,
"fuchsia" => 0xff00ff,
"green" => 0x008000,
"lime" => 0x00ff00,
"olive" => 0x808000,
"yellow" => 0xffff00,
"navy" => 0x000080,
"blue" => 0x0000ff,
"teal" => 0x008080,
"aqua" => 0x00ffff,
_ => return None,
}))
}
fn parse_basic_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, ParseError<'i, CustomParseError>> {
Ok(match input.next()? {
Token::Ident(s) => match css_color(&s) {
Some(color) => color,
None => return Err(CustomParseError::InvalidColorName(s.into_owned()).into()),
},
Token::IDHash(hash) | Token::Hash(hash) => {
match hash.len() {
6 | 8 => {
let mut x = match u32::from_str_radix(&hash, 16) {
Ok(x) => x,
Err(_) => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
};
if hash.len() == 6 {
x |= 0xFF000000;
}
Color { data: x }
},
_ => return Err(CustomParseError::InvalidColorHex(hash.into_owned()).into()),
}
}
t => {
let basic_error = BasicParseError::UnexpectedToken(t);
return Err(basic_error.into());
}
})
}
fn parse(s: &str) -> Vec<Rule> {
let mut input = ParserInput::new(s);
let mut parser = Parser::new(&mut input);
let rule_parser = RuleParser::new();
let rules = {
let rule_list_parser = cssparser::RuleListParser::new_for_stylesheet(&mut parser, rule_parser);
rule_list_parser.collect::<Vec<_>>()
};
for rule in &rules {
match *rule {
Ok(_) => {},
Err(ref e) => {
match e.error {
ParseError::Basic(ref e) => eprintln!("{:?}", e),
ParseError::Custom(ref e) => eprintln!("{:?}", e),
}
println!("Error occured in `{}`", parser.slice(e.span.clone()));
}
}
}
rules.into_iter().filter_map(|rule| rule.ok()).collect()
}
const fn hex(data: u32) -> Color {
Color { data: 0xFF000000 | data }
}
| {
self.pseudo_classes.insert(pseudo_class.into());
self
} | identifier_body |
lib.rs | dli_fbase: *mut c_void, /* Load address of that object. */
dli_sname: *mut c_char, /* Name of nearest symbol. */
dli_saddr: *mut c_void, /* Exact value of nearest symbol. */
//dlerror
}
/* This is the type of elements in `Dl_serinfo', below.
The `dls_name' member points to space in the buffer passed to `dlinfo'. */
struct Dl_serpath
{
dls_name: *mut c_char, /* Name of library search path directory. */
dls_flags: u32, /* Indicates where this directory came from. */
}
/* This is the structure that must be passed (by reference) to `dlinfo' for
the RTLD_DI_SERINFO and RTLD_DI_SERINFOSIZE requests. */
struct Dl_serinfo
{
dls_size: usize, /* Size in bytes of the whole buffer. */
dls_cnt: u32, /* Number of elements in `dls_serpath'. */
dls_serpath: [Dl_serpath;1], /* Actually longer, dls_cnt elements. */
}
//TODO
//Think about changing from c_int to i32 or something
extern "C" {
pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void;
pub fn dlsym(lib_handle: *mut c_void, name: *const c_char) -> *mut c_void;
pub fn dlclose(lib_handle: *mut c_void) -> c_int;
pub fn dlinfo(lib_handle: *mut c_void, request: c_int, info: *mut c_void) -> c_int;
pub fn dlerror() -> *mut c_char;
}
pub struct DyLib(*mut c_void);
pub fn open_lib( lib_path: &str, flag: i32 )->Result<DyLib, String>{unsafe{
//TODO
//Get enums dlopen uses
let shared_lib_handle = dlopen(CString::new(lib_path).unwrap().as_ptr(), flag as c_int);
if shared_lib_handle.is_null() |
else{
Ok( DyLib(shared_lib_handle) )
}
}}
//Example
//let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{
let _fn = dlsym(shared_lib_handle.0, CString::new(name).unwrap().as_ptr());
if _fn.is_null() {
Err("Function name could not be found.".to_string())
}
else{
Ok(_fn as *mut () )
}
}}
pub fn get_error()->String{unsafe{
let error = dlerror();
if error.is_null(){
return "No Error".to_string();
}
else{
//dlerror returns a pointer into a static buffer owned by libc, so we must not take ownership of it
std::ffi::CStr::from_ptr(error).to_string_lossy().into_owned()
}
}}
pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
if dlclose(shared_lib_handle.0) != 0 {
println!("Could not properly close shared library.");
}
}}
}
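//Example (sketch): load a plugin, look up one symbol, and cast it to a typed
//function pointer. The lib path and symbol name here are hypothetical, and the
//flag value 1 corresponds to RTLD_LAZY on Linux.
//
//let lib = dynamic_lib_loading::open_lib("./libgame.so", 1).unwrap();
//let update: fn() -> i32 =
//    unsafe { std::mem::transmute(dynamic_lib_loading::get_fn(&lib, "update").unwrap()) };
//let status = update();
//dynamic_lib_loading::close_lib(&lib);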
#[cfg(target_os = "windows")]
pub mod dynamic_lib_loading{
use std::os::raw::{c_int, c_void};
extern "C" {
fn LoadLibraryA( path: *const i8 ) -> *mut c_void;
fn GetProcAddress( lib: *mut c_void, name: *const i8 ) -> *mut c_void;
fn FreeLibrary( lib: *mut c_void ) -> c_int;
fn GetLastError() -> u32;
}
//TODO
//This is temporary should be replaced by windows enums
pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. */
pub struct DyLib(*mut c_void);
pub fn open_lib( lib_path: &str, _flag: i32 )->Result<DyLib, String>{unsafe{
let _path = lib_path.to_string() + "\0";
let lib = LoadLibraryA( _path.as_ptr() as *const i8);
if lib.is_null(){
let s = format!("Could not open lib \n{:?}\n\n For more info => https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes", GetLastError());
return Err(s);
}
Ok(DyLib(lib as *mut c_void))
}}
//Example
//let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{
let fn_name = name.to_string() + "\0";
let function = GetProcAddress(shared_lib_handle.0 as _, fn_name.as_ptr() as *const i8) as *mut ();
if function.is_null(){
let s = format!("Could not get function \n{:?}", GetLastError());
return Err(s);
}
Ok(function)
}}
pub fn get_error()->String{
"Windows version has not been implemented".to_string()
}
pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
if FreeLibrary(shared_lib_handle.0 as _) == 0{
println!("Could not properly close shared library.");
println!("{}", format!("{:?}", GetLastError()));
}
}}
}
pub mod render_tools{
#[derive(PartialEq, Clone, Debug)]
pub enum RenderType{
Image,
Rectangle,
String,
PrintString,
Empty,
}
impl Default for RenderType{
fn default()->Self{ RenderType::Empty }
}
#[derive(Default)]
pub struct RenderStruct{
pub rendertype : RenderType,
pub x: f32,
pub y: f32,
pub width: f32,
pub height: f32,
pub alpha : f32,
//rect related things
pub filled: bool,
pub color: [f32;3],
//image related things
pub color_buffer: Vec<u8>,
pub rgba_type: RGBA,
pub new_width: Option<f32>,// NOTE Testing out using a fractional new width
pub new_height: Option<f32>,// NOTE Testing out using a fractional new height
//Stings
pub char_buffer: String,
pub font_size: u32
}
#[derive(Default)]
pub struct RenderInstructions{
pub buffer: Vec<RenderStruct>,
}
//TODO
//This is a BAD name.... do better
#[derive(Clone, Copy, PartialEq)]
pub enum RGBA{
U8rgba,
U8argb,
U8rgb,
Empty,
//More maybe... maybe not
}
impl RenderInstructions{
pub fn clear(&mut self){
self.buffer.clear();
}
pub fn draw_rect(&mut self, rect: [f32; 4], color: [f32; 4], filled: bool){
let _color = [color[0], color[1], color[2]];
self.buffer.push( RenderStruct{rendertype: RenderType::Rectangle,
x: rect[0], y:rect[1], width: rect[2], height: rect[3],
alpha: color[3], filled: filled, color: _color,.. Default::default()});
}
pub fn draw_string(&mut self, s: &str, color: [f32; 4], size: u32, x: f32, y: f32 ){
//TODO
//should size be optional
//shouldn't a good text size be chosen automatically
//TODO
//should color be optional
//shouldn't a good text color be chosen automatically
let _color = [color[0], color[1], color[2]];
self.buffer.push( RenderStruct{ rendertype: RenderType::String, x: x, y: y,
alpha: color[3], color: _color, char_buffer: s.to_string(), font_size: size,.. Default::default()} );
}
pub fn draw_bmp(&mut self, bmp: &Bitmap, x: f32, y: f32, alpha: f32, w: Option<f32>, h: Option<f32>){
//TODO
//should x and y be options? Often I want to just draw the image wherever and have it
//automagically look good with corresponding text
self.buffer.push( RenderStruct{rendertype: RenderType::Image, alpha: alpha, x: x, y: y, width: bmp.width as f32, height: bmp.height as f32,
new_width: w, new_height: h, rgba_type: bmp.rgba_type, color_buffer: bmp.buffer.clone(),.. Default::default()} );
}
pub fn println(&mut self, string: &str){
let buffer = "> ".to_string() + string;
self.buffer.push( RenderStruct{ rendertype: RenderType::PrintString,
alpha: 1.0, color: [1.0, 1.0, 1.0], char_buffer: buffer, font_size: 19,.. Default::default()} );
}
}
impl Default for RGBA{
fn default()->Self{ RGBA::Empty }
}
#[derive(Clone)]
pub struct Bitmap{
//NOTE BMP should be 400 x 400 to start off.
pub width: i32,
pub height: i32,
pub rgba_type: RGBA,
pub buffer: Vec<u8>,
}
impl Bitmap{
pub fn new(w: i32, h: i32, rgba_type: RGBA)->Bitmap{
let _w = w as usize;
let _h = h as usize;
let v = match rgba_type{
RGBA::U8rgba=>{ vec![0u8; _w*_h*4] },
RGBA::U8argb=>{ vec![0u8; _w*_h*4] },
RGBA::U8rgb=>{ vec![0u8; _w*_h*3] },
_=>{ panic!("Not supported"); } //TODO Clean up
};
Bitmap{
width: w,
height: h,
rgba_type: rgba_type,
buffer: v,
}
}
}
pub struct BitmapContainer{
pub initialized : bool,
pub bmp: Option<Bitmap>,
}
}
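// Usage sketch for the module above: widgets queue draw calls into a
// RenderInstructions buffer and a renderer drains it later.
#[cfg(test)]
mod render_instructions_sketch {
    use super::render_tools::{RenderInstructions, RenderType};
    #[test]
    fn queued_instructions_keep_their_order() {
        let mut ri = RenderInstructions::default();
        ri.draw_rect([0.0, 0.0, 10.0, 10.0], [1.0, 0.0, 0.0, 1.0], true);
        ri.println("hello");
        assert_eq!(ri.buffer[0].rendertype, RenderType::Rectangle);
        assert_eq!(ri.buffer[1].rendertype, RenderType::PrintString);
        ri.clear();
        assert!(ri.buffer.is_empty());
    }
}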
pub mod memory_tools{
//TODO play around with this maybe
//use std::alloc;
use std::any::TypeId;
const _FIXED_CHAR_BUFFER_SIZE : usize = 128;
#[derive(Copy, Clone)]
pub struct TinyString{
//NOTE
//currently struct vars are public for debugging purposes. They should not be public.
//NOTE
//This should prob be a general tool
pub buffer: [char; _FIXED_CHAR_BUFFER_SIZE],
pub cursor: usize,
}
impl TinyString{
pub fn new()->TinyString{
TinyString{
buffer: ['\0'; _FIXED_CHAR_BUFFER_SIZE],
cursor: 0,
}
}
pub fn get(&self, index: usize)->&char{
&self.buffer[index]
}
pub fn get_mut(&mut self, index: usize)->&mut char{
&mut self.buffer[index]
}
pub fn len(&self)->usize{
self.cursor
}
pub fn push(&mut self, c: char)->Result< (), String>{
if self.len() >= _FIXED_CHAR_BUFFER_SIZE { return Err("TinyString capacity exceeded".to_string()); }
let _i = self.cursor;
self[_i] = c;
self.cursor += 1;
return Ok( () );
}
//TODO
//check meaning of clone and copy in rust
pub fn copystr(&mut self, s: &str){
let mut chars = s.chars();
for _ in 0.. _FIXED_CHAR_BUFFER_SIZE {
match chars.next(){
Some(c)=> {
let _i = self.cursor;
self[_i] = c;
self.cursor += 1;
}
_=> break
}
}
}
//TODO
//check meaning of clone and copy in rust
pub fn copy(&mut self, s: &TinyString){
for i in 0..s.len(){
self[i] = s[i];
}
}
pub fn is_samestr(&self, s: &str)->bool{
let mut chars = s.chars();
if self.len() != s.len(){ return false; }
for i in 0..self.len(){
if self[i] != chars.next().unwrap(){ return false; }
}
return true;
}
pub fn is_same(&self, s: &TinyString)->bool{
if self.len() != s.len(){ return false; }
for i in 0..self.len(){
if self[i] != s[i]{ return false; }
}
return true;
}
}
impl std::ops::Index<usize> for TinyString{
type Output = char;
fn index(&self, index: usize)->&Self::Output{
self.get(index)
}
}
impl std::ops::IndexMut<usize> for TinyString{
fn index_mut(&mut self, index: usize)->&mut Self::Output{
self.get_mut(index)
}
}
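// Usage sketch: TinyString is a fixed-capacity, copyable string used for
// storage keys, so it can live inside plain byte storage without heap pointers.
#[cfg(test)]
mod tiny_string_sketch {
    use super::TinyString;
    #[test]
    fn copystr_then_compare() {
        let mut key = TinyString::new();
        key.copystr("player_hp");
        assert_eq!(key.len(), "player_hp".len());
        assert!(key.is_samestr("player_hp"));
        assert!(!key.is_samestr("player_mp"));
    }
}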
pub trait Storage{
fn get_storage(&mut self)->&mut [u8];
}
impl Storage for GlobalStorage{
fn get_storage(&mut self)->&mut [u8]{
&mut self.storage
}
}
pub struct Ptr<S: Storage>{
//TODO make backend_storage a Generic function that requires trait
ptr: usize,
type_hash: TypeId,
backend_storage: *mut S
}
impl<S: Storage> Ptr<S>{
pub fn deref_mut<T:'static>(&self)->&mut T{unsafe{
if self.type_hash != TypeId::of::<T>() { panic!("Could not dereference custom pointer due to failed type hash comparison."); }
let gs = self.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[self.ptr] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
}
impl<S: Storage> std::clone::Clone for Ptr<S>{
fn clone(&self)->Self{
Ptr{
ptr: self.ptr,
type_hash: self.type_hash,
backend_storage: self.backend_storage,
}
}
}
pub struct DyArray<T>{
ptr: Ptr<GlobalStorage>,
length: usize,
capacity: usize,
phantom: std::marker::PhantomData<T>,
}
impl<T:'static> DyArray<T>{
pub fn push(&mut self, v: T) {unsafe{
let gs = self.ptr.backend_storage.as_mut().unwrap();
if self.length >= self.capacity{
//Grow the backing allocation, then fall through and write `v`.
//Returning early here would silently drop the pushed value.
let byte_len = self.length * std::mem::size_of::<T>();
let index_back = self.ptr.ptr + byte_len;
let extra_bytes = self.capacity * std::mem::size_of::<T>();
let old_ptr = self.ptr.clone();
self.ptr = gs.realloc::<T>(old_ptr, index_back, extra_bytes);
self.capacity *= 2;
}
{
let cursor = self.ptr.ptr + self.length * std::mem::size_of::<T>();
gs.write_to(v, cursor).expect("Global storage could not write to index.");
self.length += 1;
}
}}
pub fn new(gs: &mut GlobalStorage)->DyArray<T>{
DyArray::<T>::with_capacity(gs, 5)
}
pub fn with_capacity(gs: &mut GlobalStorage, size: usize)->DyArray<T>{
let ptr = gs.alloc_multi_empty::<T>( size );
DyArray{
ptr: ptr,
length: 0,
capacity: size,
phantom: std::marker::PhantomData
}
}
pub fn get(&self, index: usize)->&T{unsafe{
if index >= self.length {
panic!("Index bounds error.");
}
let base = self.ptr.ptr;
let address = base + index * std::mem::size_of::<T>();
let gs = self.ptr.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
pub fn get_mut(&mut self, index: usize)->&mut T{unsafe{
if index >= self.length {
panic!("Index bounds error.");
}
let base = self.ptr.ptr;
let address = base + index * std::mem::size_of::<T>();
let gs = self.ptr.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
}
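// Usage sketch: DyArray is a Vec-like view whose elements live inside a
// GlobalStorage byte buffer rather than in their own heap allocation.
#[cfg(test)]
mod dy_array_sketch {
    use super::{DyArray, GlobalStorage};
    #[test]
    fn push_and_read_back() {
        let mut gs = GlobalStorage::new();
        let mut xs = DyArray::<u32>::with_capacity(&mut gs, 4);
        xs.push(7);
        xs.push(11);
        assert_eq!(*xs.get(0), 7);
        assert_eq!(*xs.get(1), 11);
    }
}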
//TODO
//Convert Global and Local storage into a general storage abstraction where the backing store can be
//either a fixed or dynamic array.
pub struct GlobalStorage{
pub storage: Vec<u8>,
pub storage_filled: Vec<bool>, //TODO space footprint improvement: use bits in a u8
reference: [TinyString;100], //This is fixed size because I really want to stop myself from over populating the global space
stored_ptr: Vec<Ptr<GlobalStorage>>,
}
impl GlobalStorage{
pub fn new()->GlobalStorage{
GlobalStorage{
storage: Vec::with_capacity(1028*1028*4), //This is still prob too small
storage_filled: Vec::with_capacity(1028*1028*4),
//TODO
//reference needs to store the ptr index TODO
reference: [TinyString::new(); 100],
stored_ptr: Vec::new(),
}
}
pub fn alloc<T:'static>(&mut self, v: T)->Ptr<GlobalStorage>{unsafe{
let size = std::mem::size_of::<T>();
let src = (&v as *const T) as *const u8;
let cursor = self.storage.len();
for i in 0..size{
//TODO
//SLOW SPEED ME UP
//I don't think I want to be pushing every item like this
//TODO
//byte alignments
self.storage.push(*src.offset(i as isize));
self.storage_filled.push(true);
}
return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _};
}}
pub fn alloc_multi_empty<T:'static>(&mut self, multiples: usize)->Ptr<GlobalStorage>{
let size = std::mem::size_of::<T>() * multiples;
let cursor = self.storage.len();
for _i in 0..size{
| {
println!("{:?}", get_error());
Err(format!("Shared lib is null! {} Check file path/name.", lib_path))
} | conditional_block |
lib.rs | 32; 4], color: [f32; 4], filled: bool){
let _color = [color[0], color[1], color[2]];
self.buffer.push( RenderStruct{rendertype: RenderType::Rectangle,
x: rect[0], y:rect[1], width: rect[2], height: rect[3],
alpha: color[3], filled: filled, color: _color,.. Default::default()});
}
pub fn draw_string(&mut self, s: &str, color: [f32; 4], size: u32, x: f32, y: f32 ){
//TODO
//should size be optional
//shouldn't a good text size be chosen automatically
//TODO
//should color be optional
//shouldn't a good text color be chosen automatically
let _color = [color[0], color[1], color[2]];
self.buffer.push( RenderStruct{ rendertype: RenderType::String, x: x, y: y,
alpha: color[3], color: _color, char_buffer: s.to_string(), font_size: size,.. Default::default()} );
}
pub fn draw_bmp(&mut self, bmp: &Bitmap, x: f32, y: f32, alpha: f32, w: Option<f32>, h: Option<f32>){
//TODO
//should x and y be options? Often I want to just draw the image wherever and have it
//automagically look good with corresponding text
self.buffer.push( RenderStruct{rendertype: RenderType::Image, alpha: alpha, x: x, y: y, width: bmp.width as f32, height: bmp.height as f32,
new_width: w, new_height: h, rgba_type: bmp.rgba_type, color_buffer: bmp.buffer.clone(),.. Default::default()} );
}
pub fn println(&mut self, string: &str){
let buffer = "> ".to_string() + string;
self.buffer.push( RenderStruct{ rendertype: RenderType::PrintString,
alpha: 1.0, color: [1.0, 1.0, 1.0], char_buffer: buffer, font_size: 19,.. Default::default()} );
}
}
impl Default for RGBA{
fn default()->Self{ RGBA::Empty }
}
#[derive(Clone)]
pub struct Bitmap{
//NOTE BMP should be 400 x 400 to start off.
pub width: i32,
pub height: i32,
pub rgba_type: RGBA,
pub buffer: Vec<u8>,
}
impl Bitmap{
pub fn new(w: i32, h: i32, rgba_type: RGBA)->Bitmap{
let _w = w as usize;
let _h = h as usize;
let v = match rgba_type{
RGBA::U8rgba=>{ vec![0u8; _w*_h*4] },
RGBA::U8argb=>{ vec![0u8; _w*_h*4] },
RGBA::U8rgb=>{ vec![0u8; _w*_h*3] },
_=>{ panic!("Not supported"); } //TODO Clean up
};
Bitmap{
width: w,
height: h,
rgba_type: rgba_type,
buffer: v,
}
}
}
pub struct BitmapContainer{
pub initialized : bool,
pub bmp: Option<Bitmap>,
}
}
pub mod memory_tools{
//TODO play around with this maybe
//use std::alloc;
use std::any::TypeId;
const _FIXED_CHAR_BUFFER_SIZE : usize = 128;
#[derive(Copy, Clone)]
pub struct TinyString{
//NOTE
//currently struct vars are public for debugging purposes. They should not be public.
//NOTE
//This should prob be a general tool
pub buffer: [char; _FIXED_CHAR_BUFFER_SIZE],
pub cursor: usize,
}
impl TinyString{
pub fn new()->TinyString{
TinyString{
buffer: ['\0'; _FIXED_CHAR_BUFFER_SIZE],
cursor: 0,
}
}
pub fn get(&self, index: usize)->&char{
&self.buffer[index]
}
pub fn get_mut(&mut self, index: usize)->&mut char{
&mut self.buffer[index]
}
pub fn len(&self)->usize{
self.cursor
}
pub fn push(&mut self, c: char)->Result< (), String>{
if self.len() >= _FIXED_CHAR_BUFFER_SIZE { return Err("TinyString capacity exceeded".to_string()); }
let _i = self.cursor;
self[_i] = c;
self.cursor += 1;
return Ok( () );
}
//TODO
//check meaning of clone and copy in rust
pub fn copystr(&mut self, s: &str){
let mut chars = s.chars();
for _ in 0.. _FIXED_CHAR_BUFFER_SIZE {
match chars.next(){
Some(c)=> {
let _i = self.cursor;
self[_i] = c;
self.cursor += 1;
}
_=> break
}
}
}
//TODO
//check meaning of clone and copy in rust
pub fn copy(&mut self, s: &TinyString){
for i in 0..s.len(){
self[i] = s[i];
}
}
pub fn is_samestr(&self, s: &str)->bool{
let mut chars = s.chars();
if self.len() != s.len(){ return false; }
for i in 0..self.len(){
if self[i] != chars.next().unwrap(){ return false; }
}
return true;
}
pub fn is_same(&self, s: &TinyString)->bool{
if self.len() != s.len(){ return false; }
for i in 0..self.len(){
if self[i] != s[i]{ return false; }
}
return true;
}
}
impl std::ops::Index<usize> for TinyString{
type Output = char;
fn index(&self, index: usize)->&Self::Output{
self.get(index)
}
}
impl std::ops::IndexMut<usize> for TinyString{
fn index_mut(&mut self, index: usize)->&mut Self::Output{
self.get_mut(index)
}
}
pub trait Storage{
fn get_storage(&mut self)->&mut [u8];
}
impl Storage for GlobalStorage{
fn get_storage(&mut self)->&mut [u8]{
&mut self.storage
}
}
pub struct Ptr<S: Storage>{
//TODO make backend_storage a Generic function that requires trait
ptr: usize,
type_hash: TypeId,
backend_storage: *mut S
}
impl<S: Storage> Ptr<S>{
pub fn deref_mut<T:'static>(&self)->&mut T{unsafe{
if self.type_hash != TypeId::of::<T>() { panic!("Could not dereference custom pointer due to failed type hash comparison."); }
let gs = self.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[self.ptr] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
}
impl<S: Storage> std::clone::Clone for Ptr<S>{
fn clone(&self)->Self{
Ptr{
ptr: self.ptr,
type_hash: self.type_hash,
backend_storage: self.backend_storage,
}
}
}
pub struct DyArray<T>{
ptr: Ptr<GlobalStorage>,
length: usize,
capacity: usize,
phantom: std::marker::PhantomData<T>,
}
impl<T:'static> DyArray<T>{
pub fn push(&mut self, v: T) {unsafe{
let gs = self.ptr.backend_storage.as_mut().unwrap();
if self.length >= self.capacity{
//Grow the backing allocation, then fall through and write `v`.
//Returning early here would silently drop the pushed value.
let byte_len = self.length * std::mem::size_of::<T>();
let index_back = self.ptr.ptr + byte_len;
let extra_bytes = self.capacity * std::mem::size_of::<T>();
let old_ptr = self.ptr.clone();
self.ptr = gs.realloc::<T>(old_ptr, index_back, extra_bytes);
self.capacity *= 2;
}
{
let cursor = self.ptr.ptr + self.length * std::mem::size_of::<T>();
gs.write_to(v, cursor).expect("Global storage could not write to index.");
self.length += 1;
}
}}
pub fn new(gs: &mut GlobalStorage)->DyArray<T>{
DyArray::<T>::with_capacity(gs, 5)
}
pub fn with_capacity(gs: &mut GlobalStorage, size: usize)->DyArray<T>{
let ptr = gs.alloc_multi_empty::<T>( size );
DyArray{
ptr: ptr,
length: 0,
capacity: size,
phantom: std::marker::PhantomData
}
}
pub fn get(&self, index: usize)->&T{unsafe{
if index >= self.length {
panic!("Index bounds error.");
}
let base = self.ptr.ptr;
let address = base + index * std::mem::size_of::<T>();
let gs = self.ptr.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
pub fn get_mut(&mut self, index: usize)->&mut T{unsafe{
if index >= self.length {
panic!("Index bounds error.");
}
let base = self.ptr.ptr;
let address = base + index * std::mem::size_of::<T>();
let gs = self.ptr.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
}
//TODO
//Convert Global and Local storage into a general storage abstraction where the backing store can be
//either a fixed or dynamic array.
pub struct GlobalStorage{
pub storage: Vec<u8>,
pub storage_filled: Vec<bool>, //TODO space footprint improvement: use bits in a u8
reference: [TinyString;100], //This is fixed size because I really want to stop myself from over populating the global space
stored_ptr: Vec<Ptr<GlobalStorage>>,
}
impl GlobalStorage{
pub fn new()->GlobalStorage{
GlobalStorage{
storage: Vec::with_capacity(1028*1028*4), //This is still prob too small
storage_filled: Vec::with_capacity(1028*1028*4),
//TODO
//reference needs to store the ptr index TODO
reference: [TinyString::new(); 100],
stored_ptr: Vec::new(),
}
}
pub fn alloc<T:'static>(&mut self, v: T)->Ptr<GlobalStorage>{unsafe{
let size = std::mem::size_of::<T>();
let src = (&v as *const T) as *const u8;
let cursor = self.storage.len();
for i in 0..size{
//TODO
//SLOW SPEED ME UP
//I don't think I want to be pushing every item like this
//TODO
//byte alignments
self.storage.push(*src.offset(i as isize));
self.storage_filled.push(true);
}
return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _};
}}
pub fn alloc_multi_empty<T:'static>(&mut self, multiples: usize)->Ptr<GlobalStorage>{
let size = std::mem::size_of::<T>() * multiples;
let cursor = self.storage.len();
for _i in 0..size{
//TODO
//SLOW SPEED ME UP
//I don't think I want to be pushing every item like this
//TODO
//byte alignments
self.storage.push(0);
self.storage_filled.push(true);
}
return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _};
}
pub fn realloc<T>(&mut self, ptr: Ptr<GlobalStorage>, index_back: usize, additional_space: usize)->Ptr<GlobalStorage>{
//TODO
//SLOW SPEED UP
let cursor = self.storage.len();
let index_front = ptr.ptr;
for i in index_front..index_back{
let temp = self.storage[i];
self.storage_filled[i] = false;
self.storage.push(temp);
self.storage_filled.push(true);
}
for _i in 0..additional_space{
self.storage.push(0);
self.storage_filled.push(true);
}
return Ptr{ ptr: cursor, type_hash: ptr.type_hash, backend_storage: self as *mut _};
}
pub unsafe fn write_to<T:'static>(&mut self, v: T, at_index: usize)->Result<(),String>{
let size = std::mem::size_of::<T>();
let src = (&v as *const T) as *const u8;
if at_index >= self.storage.len() {
return Err("Writing outside the bounds of memory allocated to global storage".to_string());
}
let cursor = at_index;
for i in 0..size{
//TODO
//SLOW SPEED ME UP
//I don't think I want to be pushing every item like this
//TODO
//byte alignments
if !self.storage_filled[cursor+i] { panic!("Storage has not allocated this memory.") }
self.storage[cursor+i] = *src.offset(i as isize);
}
return Ok(());
}
pub fn store<T:'static>(&mut self, v: T, name: &str)->Result<(), String>{
if name.len() > _FIXED_CHAR_BUFFER_SIZE {
return Err(format!("storage name is too damn long. Name should be {} chars long.", _FIXED_CHAR_BUFFER_SIZE));
}
let cursor = self.stored_ptr.len();
for it in self.reference.iter() {
if it.is_samestr(name){
return Err(format!("Global Storage name collision: {}", name));
}
}
self.reference[cursor].copystr( name );
let ptr = self.alloc(v);
self.stored_ptr.push(ptr);
return Ok(());
}
pub fn get<T:'static>(&mut self, name: &str)->Result<&mut T, String>{
let mut isgood = false;
let mut ptr_index = 0;
for (i, it) in self.reference.iter().enumerate() {
if it.is_samestr(name){
ptr_index = i;
isgood = true;
}
}
if isgood == false { return Err(format!("Name not found in Global Storage: {}", name)); }
let ptr = &self.stored_ptr[ptr_index];
return Ok(ptr.deref_mut::<T>());
}
//
}
pub struct LocalStorage{
//NOTE
//This seems to be a good idea when it comes to interactive panels
//However I'm not sure about the usefulness else where....
//
//
//Why should the local buffer be fixed sized. This doesn't really make sense.
pub interactive: bool,
pub storage: GlobalStorage,
}
impl LocalStorage{
pub fn new()->LocalStorage{
LocalStorage{
interactive: false,
storage: GlobalStorage::new(),
}
}
}
}
pub mod interaction_tools{
pub enum KeyboardEnum{
Rightarrow,
Leftarrow,
Uparrow,
Downarrow,
Enter,
Default
}
impl Default for KeyboardEnum{
fn default()->Self{ KeyboardEnum::Default }
}
pub enum ButtonStatus{
Up,
Down,
Default
}
impl Default for ButtonStatus{
fn default()->Self{ ButtonStatus::Default }
}
#[derive(Default)]
pub struct InteractiveInfo{
//TODO add some frame info
pub infocus: bool,
pub mouse_x: f32,
pub mouse_y: f32,
pub text_key_pressed: char,
pub keyboard_key: Vec<KeyboardEnum>,
pub keyboard_key_status: Vec<ButtonStatus>,
pub frames: u64,
}
}
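// Usage sketch for the module above: the host fills an InteractiveInfo each
// frame and a panel polls it (the key and status vectors are parallel by
// convention).
#[cfg(test)]
mod interaction_sketch {
    use super::interaction_tools::{ButtonStatus, InteractiveInfo, KeyboardEnum};
    #[test]
    fn enter_key_press_is_observable() {
        let mut info = InteractiveInfo::default();
        info.infocus = true;
        info.keyboard_key.push(KeyboardEnum::Enter);
        info.keyboard_key_status.push(ButtonStatus::Down);
        assert!(info.infocus);
        assert_eq!(info.keyboard_key.len(), info.keyboard_key_status.len());
    }
}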
#[test]
fn globalstorage_alloc_and_store(){
//TODO rename
use memory_tools::GlobalStorage;
let mut gls = GlobalStorage::new();
{
let a = [10u8; 4];
gls.store(a, "a").unwrap();
}
let b = [10u8; 4];
assert_eq!(b, *gls.get::<[u8;4]>("a").unwrap());
}
#[test]
fn globalstorage_vec(){
//TODO rename
use memory_tools::{GlobalStorage, DyArray};
let mut gls = GlobalStorage::new();
let mut dy = DyArray::<u32>::new(&mut gls);
dy.push(12);
dy.push(123);
dy.push(1231);
//let a = dy.get(0);
//assert_eq!(12, *a);
println!("print test");
let a = dy.get(1);
assert_eq!(123, *a);
let a = dy.get(2);
assert_eq!(1231, *a);
/* This block failed to compile because of a typo, not a compiler bug:
* `dy,get(1)` used a comma instead of a dot. With that fixed it reads:
assert_eq!(12, *(dy.get(0)));
assert_eq!(123, *(dy.get(1)));
assert_eq!(1231, *(dy.get(2)));
*/
}
#[test]
fn | global_storage_vec2 | identifier_name |
|
lib.rs | dli_fbase: *mut c_void, /* Load address of that object. */
dli_sname: *mut c_char, /* Name of nearest symbol. */
dli_saddr: *mut c_void, /* Exact value of nearest symbol. */
//dlerror
}
/* This is the type of elements in `Dl_serinfo', below.
The `dls_name' member points to space in the buffer passed to `dlinfo'. */
struct Dl_serpath
{
dls_name: *mut c_char, /* Name of library search path directory. */
dls_flags: u32, /* Indicates where this directory came from. */
}
/* This is the structure that must be passed (by reference) to `dlinfo' for
the RTLD_DI_SERINFO and RTLD_DI_SERINFOSIZE requests. */
struct Dl_serinfo
{
dls_size: usize, /* Size in bytes of the whole buffer. */
dls_cnt: u32, /* Number of elements in `dls_serpath'. */
dls_serpath: [Dl_serpath;1], /* Actually longer, dls_cnt elements. */
}
//TODO
//Think about changing from c_int to i32 or something
extern "C" {
pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void;
pub fn dlsym(lib_handle: *mut c_void, name: *const c_char) -> *mut c_void;
pub fn dlclose(lib_handle: *mut c_void) -> c_int;
pub fn dlinfo(lib_handle: *mut c_void, request: c_int, info: *mut c_void) -> c_int;
pub fn dlerror() -> *mut c_char;
}
pub struct DyLib(*mut c_void);
pub fn open_lib( lib_path: &str, flag: i32 )->Result<DyLib, String>{unsafe{
//TODO
//Get enums dlopen uses
let shared_lib_handle = dlopen(CString::new(lib_path).unwrap().as_ptr(), flag as c_int);
if shared_lib_handle.is_null(){
println!("{:?}", get_error());
Err(format!("Shared lib is null! {} Check file path/name.", lib_path))
}
else{
Ok( DyLib(shared_lib_handle) )
}
}}
//Example
//let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{
let _fn = dlsym(shared_lib_handle.0, CString::new(name).unwrap().as_ptr());
if _fn.is_null() {
Err("Function name could not be found.".to_string())
}
else{
Ok(_fn as *mut () )
}
}}
pub fn get_error()->String{unsafe{
let error = dlerror();
if error.is_null(){
return "No Error".to_string();
}
else{
//dlerror returns a pointer into a static buffer owned by libc, so we must not take ownership of it
std::ffi::CStr::from_ptr(error).to_string_lossy().into_owned()
}
}}
pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
if dlclose(shared_lib_handle.0) != 0 {
println!("Could not properly close shared library.");
}
}}
}
#[cfg(target_os = "windows")]
pub mod dynamic_lib_loading{
use std::os::raw::{c_int, c_void};
extern "C" {
fn LoadLibraryA( path: *const i8 ) -> *mut c_void;
fn GetProcAddress( lib: *mut c_void, name: *const i8 ) -> *mut c_void;
fn FreeLibrary( lib: *mut c_void ) -> c_int;
fn GetLastError() -> u32;
}
//TODO
//This is temporary should be replaced by windows enums
pub const RTLD_LAZY : i32 = 0x00001; /* Lazy function call binding. */
pub struct DyLib(*mut c_void);
pub fn open_lib( lib_path: &str, _flag: i32 )->Result<DyLib, String>{unsafe{
let _path = lib_path.to_string() + "\0";
let lib = LoadLibraryA( _path.as_ptr() as *const i8);
if lib.is_null(){
let s = format!("Could not open lib \n{:?}\n\n For more info => https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes", GetLastError());
return Err(s);
}
Ok(DyLib(lib as *mut c_void))
}}
//Example
//let function : fn()->i32= transmute_copy((dlsym(shared_lib_handle, CString::new(name).unwrap().as_ptr()) as *mut ()).as_mut());
pub fn get_fn( shared_lib_handle: &DyLib, name: &str)-> Result<*mut (), String>{ unsafe{
let fn_name = name.to_string() + "\0";
let function = GetProcAddress(shared_lib_handle.0 as _, fn_name.as_ptr() as *const i8) as *mut ();
if function.is_null(){
let s = format!("Could not get function \n{:?}", GetLastError());
return Err(s);
}
Ok(function)
}}
pub fn get_error()->String{
"Windows version has not been implemented".to_string()
}
pub fn close_lib(shared_lib_handle: &DyLib){unsafe{
if FreeLibrary(shared_lib_handle.0 as _) == 0{
println!("Could not properly close shared library.");
println!("{}", format!("{:?}", GetLastError()));
}
}}
}
pub mod render_tools{
#[derive(PartialEq, Clone, Debug)]
pub enum RenderType{
Image,
Rectangle,
String,
PrintString,
Empty,
}
impl Default for RenderType{
fn default()->Self{ RenderType::Empty }
}
#[derive(Default)]
pub struct RenderStruct{
pub rendertype : RenderType,
pub x: f32,
pub y: f32,
pub width: f32,
pub height: f32,
pub alpha : f32,
//rect related things
pub filled: bool,
pub color: [f32;3], | pub new_width: Option<f32>,// NOTE Testing out using a factional new width
pub new_height: Option<f32>,// NOTE Testing out using a factional new height
//Stings
pub char_buffer: String,
pub font_size: u32
}
#[derive(Default)]
pub struct RenderInstructions{
pub buffer: Vec<RenderStruct>,
}
//TODO
//This is a BAD name.... do better
#[derive(Clone, Copy, PartialEq)]
pub enum RGBA{
U8rgba,
U8argb,
U8rgb,
Empty,
//More maybe... maybe not
}
impl RenderInstructions{
pub fn clear(&mut self){
self.buffer.clear();
}
pub fn draw_rect(&mut self, rect: [f32; 4], color: [f32; 4], filled: bool){
let _color = [color[0], color[1], color[2]];
self.buffer.push( RenderStruct{rendertype: RenderType::Rectangle,
x: rect[0], y:rect[1], width: rect[2], height: rect[3],
                alpha: color[3], filled, color: _color, ..Default::default()});
}
pub fn draw_string(&mut self, s: &str, color: [f32; 4], size: u32, x: f32, y: f32 ){
            //TODO
            //should size be optional
            //shouldn't a good text size be chosen automatically
            //TODO
            //should color be optional
            //shouldn't a good text color be chosen automatically
let _color = [color[0], color[1], color[2]];
            self.buffer.push( RenderStruct{ rendertype: RenderType::String, x, y,
                alpha: color[3], color: _color, char_buffer: s.to_string(), font_size: size, ..Default::default()} );
}
pub fn draw_bmp(&mut self, bmp: &Bitmap, x: f32, y: f32, alpha: f32, w: Option<f32>, h: Option<f32>){
//TODO
            //should x and y be options? Often I want to just draw the image wherever and have it
            //automagically look good with corresponding text
            self.buffer.push( RenderStruct{rendertype: RenderType::Image, alpha, x, y, width: bmp.width as f32, height: bmp.height as f32,
                new_width: w, new_height: h, rgba_type: bmp.rgba_type, color_buffer: bmp.buffer.clone(), ..Default::default()} );
}
pub fn println(&mut self, string: &str){
let buffer = "> ".to_string() + string;
            self.buffer.push( RenderStruct{ rendertype: RenderType::PrintString,
                alpha: 1.0, color: [1.0, 1.0, 1.0], char_buffer: buffer, font_size: 19, ..Default::default()} );
}
}
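    // Usage sketch: queue draw calls during the update pass, then let a
    // renderer (a hypothetical consumer, not defined here) drain the buffer.
    //
    //     let mut instructions = RenderInstructions::default();
    //     instructions.draw_rect([10.0, 10.0, 120.0, 40.0], [0.2, 0.4, 0.8, 1.0], true);
    //     instructions.draw_string("score: 42", [1.0, 1.0, 1.0, 1.0], 19, 14.0, 18.0);
    //     // renderer.submit(&instructions.buffer); // hypothetical hand-off
    //     instructions.clear();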
impl Default for RGBA{
fn default()->Self{ RGBA::Empty }
}
#[derive(Clone)]
pub struct Bitmap{
//NOTE BMP should be 400 x 400 to start off.
pub width: i32,
pub height: i32,
pub rgba_type: RGBA,
pub buffer: Vec<u8>,
}
impl Bitmap{
pub fn new(w: i32, h: i32, rgba_type: RGBA)->Bitmap{
let _w = w as usize;
let _h = h as usize;
let v = match rgba_type{
RGBA::U8rgba=>{ vec![0u8; _w*_h*4] },
RGBA::U8argb=>{ vec![0u8; _w*_h*4] },
RGBA::U8rgb=>{ vec![0u8; _w*_h*3] },
_=>{ panic!("Not supported"); } //TODO Clean up
};
Bitmap{
width: w,
height: h,
                rgba_type,
buffer: v,
}
}
}
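    // Sketch: the zeroed backing buffer is sized by the pixel format, e.g.
    //
    //     let bmp = Bitmap::new(400, 400, RGBA::U8rgba);
    //     assert_eq!(bmp.buffer.len(), 400 * 400 * 4);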
pub struct BitmapContainer{
pub initialized : bool,
pub bmp: Option<Bitmap>,
}
}
pub mod memory_tools{
//TODO play around with this maybe
//use std::alloc;
use std::any::TypeId;
const _FIXED_CHAR_BUFFER_SIZE : usize = 128;
#[derive(Copy, Clone)]
pub struct TinyString{
//NOTE
        //currently struct vars are public for debugging purposes. They should not be public.
//NOTE
//This should prob be a general tool
pub buffer: [char; _FIXED_CHAR_BUFFER_SIZE],
pub cursor: usize,
}
impl TinyString{
pub fn new()->TinyString{
TinyString{
buffer: ['\0'; _FIXED_CHAR_BUFFER_SIZE],
cursor: 0,
}
}
pub fn get(&self, index: usize)->&char{
&self.buffer[index]
}
pub fn get_mut(&mut self, index: usize)->&mut char{
&mut self.buffer[index]
}
pub fn len(&self)->usize{
self.cursor
}
pub fn push(&mut self, c: char)->Result< (), String>{
            if self.len() >= _FIXED_CHAR_BUFFER_SIZE { return Err("TinyString capacity exceeded.".to_string()); }
let _i = self.cursor;
self[_i] = c;
self.cursor += 1;
return Ok( () );
}
//TODO
//check meaning of clone and copy in rust
        pub fn copystr(&mut self, s: &str){
            let mut chars = s.chars();
            // Stop at capacity even when the cursor starts past zero, so a
            // partially filled buffer cannot index out of bounds.
            while self.cursor < _FIXED_CHAR_BUFFER_SIZE {
                match chars.next(){
                    Some(c)=> {
                        let _i = self.cursor;
                        self[_i] = c;
                        self.cursor += 1;
                    }
                    _=> break
                }
            }
        }
//TODO
//check meaning of clone and copy in rust
pub fn copy(&mut self, s: &TinyString){
for i in 0..s.len(){
self[i] = s[i];
}
}
        pub fn is_samestr(&self, s: &str)->bool{
            let mut chars = s.chars();
            // Compare char counts rather than byte length so that multi-byte
            // UTF-8 strings compare correctly.
            if self.len() != s.chars().count(){ return false; }
            for i in 0..self.len(){
                if self[i] != chars.next().unwrap(){ return false; }
            }
            return true;
        }
pub fn is_same(&self, s: &TinyString)->bool{
            if self.len() != s.len(){ return false; }
            for i in 0..self.len(){
                if self[i] != s[i]{ return false; }
}
return true;
}
}
impl std::ops::Index<usize> for TinyString{
type Output = char;
fn index(&self, index: usize)->&Self::Output{
self.get(index)
}
}
impl std::ops::IndexMut<usize> for TinyString{
fn index_mut(&mut self, index: usize)->&mut Self::Output{
self.get_mut(index)
}
}
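    // Sketch: TinyString is a fixed-capacity, copyable string; pushing past
    // _FIXED_CHAR_BUFFER_SIZE chars returns an Err instead of growing.
    //
    //     let mut s = TinyString::new();
    //     s.copystr("hello");
    //     assert_eq!(s.len(), 5);
    //     assert!(s.is_samestr("hello"));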
pub trait Storage{
fn get_storage(&mut self)->&mut [u8];
}
impl Storage for GlobalStorage{
fn get_storage(&mut self)->&mut [u8]{
&mut self.storage
}
}
pub struct Ptr<S: Storage>{
//TODO make backend_storage a Generic function that requires trait
ptr: usize,
type_hash: TypeId,
backend_storage: *mut S
}
impl<S: Storage> Ptr<S>{
pub fn deref_mut<T:'static>(&self)->&mut T{unsafe{
            if self.type_hash != TypeId::of::<T>() { panic!("Could not dereference custom pointer due to failed type hash comparison."); }
let gs = self.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[self.ptr] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
}
impl<S: Storage> std::clone::Clone for Ptr<S>{
fn clone(&self)->Self{
Ptr{
ptr: self.ptr,
type_hash: self.type_hash,
backend_storage: self.backend_storage,
}
}
}
pub struct DyArray<T>{
ptr: Ptr<GlobalStorage>,
length: usize,
capacity: usize,
phantom: std::marker::PhantomData<T>,
}
impl<T:'static> DyArray<T>{
pub fn push(&mut self, v: T) {unsafe{
let gs = self.ptr.backend_storage.as_mut().unwrap();
            if self.length >= self.capacity{
                let length = self.length * std::mem::size_of::<T>();
                let old_ptr = self.ptr.clone();
                let new_ptr = gs.realloc::<T>(old_ptr, length, self.capacity);
                self.ptr = new_ptr;
                //NOTE fall through so `v` is still written below; the old early
                //return here silently dropped the pushed value.
                //TODO update self.capacity to whatever realloc grew it to.
            }
{
let cursor = self.ptr.ptr + self.length * std::mem::size_of::<T>();
gs.write_to(v, cursor).expect("Global storage could not write to index.");
self.length +=1;
}
}}
pub fn new(gs: &mut GlobalStorage)->DyArray<T>{
DyArray::<T>::with_capacity(gs, 5)
}
pub fn with_capacity(gs: &mut GlobalStorage, size: usize)->DyArray<T>{
let ptr = gs.alloc_multi_empty::<T>( size );
DyArray{
ptr: ptr,
length: 0,
capacity: size,
phantom: std::marker::PhantomData
}
}
pub fn get(&self, index: usize)->&T{unsafe{
            if index >= self.length {
panic!("Index bounds error.");
}
let base = self.ptr.ptr;
let address = base + index * std::mem::size_of::<T>();
let gs = self.ptr.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
pub fn get_mut(&mut self, index: usize)->&mut T{unsafe{
            if index >= self.length {
panic!("Index bounds error.");
}
let base = self.ptr.ptr;
let address = base + index * std::mem::size_of::<T>();
let gs = self.ptr.backend_storage.as_mut().unwrap();
((&mut gs.get_storage()[address] as *mut u8 ) as *mut T).as_mut().unwrap()
}}
}
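    // Sketch: a DyArray lives inside a GlobalStorage arena rather than on the
    // heap, so the arena must outlive the array handle.
    //
    //     let mut gs = GlobalStorage::new();
    //     let mut xs = DyArray::<u32>::with_capacity(&mut gs, 8);
    //     xs.push(7);
    //     assert_eq!(*xs.get(0), 7);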
//TODO
//Convert Global and Local storage into a general storage storage thing where storage can be
//either a fixed or dynamic array.
pub struct GlobalStorage{
pub storage: Vec<u8>,
        pub storage_filled: Vec<bool>, //TODO space footprint improvement: use bits in u8
reference: [TinyString;100], //This is fixed size because I really want to stop myself from over populating the global space
stored_ptr: Vec<Ptr<GlobalStorage>>,
}
impl GlobalStorage{
pub fn new()->GlobalStorage{
GlobalStorage{
storage: Vec::with_capacity(1028*1028*4), //This is still prob too small
storage_filled: Vec::with_capacity(1028*1028*4),
//TODO
//reference needs to store the ptr index TODO
reference: [TinyString::new(); 100],
stored_ptr: Vec::new(),
}
}
pub fn alloc<T:'static>(&mut self, v: T)->Ptr<GlobalStorage>{unsafe{
let size = std::mem::size_of::<T>();
let src = (&v as *const T) as *const u8;
let cursor = self.storage.len();
for i in 0..size{
//TODO
//SLOW SPEED ME UP
                //I don't think I want to be pushing every item like this
//TODO
//byte alignments
self.storage.push(*src.offset(i as isize));
self.storage_filled.push(true);
}
return Ptr{ ptr: cursor, type_hash: TypeId::of::<T>(), backend_storage: self as *mut _};
}}
pub fn alloc_multi_empty<T:'static>(&mut self, multiples: usize)->Ptr<GlobalStorage>{
let size = std::mem::size_of::<T>() * multiples;
let cursor = self.storage.len();
for _i in 0..size{
|
//image related things
pub color_buffer: Vec<u8>,
pub rgba_type: RGBA, | random_line_split |
map_loader.rs | use serde_json::Value;
use specs::prelude::*;
use std::collections::HashMap;
use std::path::Path;
use super::super::super::prelude::{
hex_color, Attribute, Attributes, BackgroundColor, CanBeEmpty, Color,
GlobalTileIndex, InventoryLayer, ItemRecord, Layer, LayerData, Object,
Position, Screen, Sprite, Tiledmap, ToonRecord, ZLevel, V2,
};
/// The result of loading one or more Tiled layers into the ECS.
pub struct LoadedLayers {
/// All the top-level entities loaded
pub top_level_entities: Vec<Entity>,
/// A HashMap of all entities loaded within a layer group, keyed by the group
/// layer's name.
pub groups: HashMap<String, Vec<Entity>>,
}
impl LoadedLayers {
pub fn new() -> LoadedLayers {
LoadedLayers {
top_level_entities: vec![],
groups: HashMap::new(),
}
}
pub fn all_entities(&self) -> Vec<Entity> {
let mut tops = self.top_level_entities.clone();
let mut groups: Vec<Entity> =
self.groups.values().flat_map(|es| es.clone()).collect();
tops.append(&mut groups);
tops
}
pub fn append_entities(&mut self, other: LoadedLayers) {
let other_tops = other.top_level_entities.into_iter();
let other_groups = other.groups.into_iter();
self.top_level_entities.extend(other_tops);
self.groups.extend(other_groups);
}
}
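// Sketch: merging the results of two loads; `base` and `variant` here are
// assumed to come from separate `MapLoader::load` calls.
//
//     let mut layers = LoadedLayers::new();
//     layers.append_entities(base);
//     layers.append_entities(variant);
//     let everything: Vec<Entity> = layers.all_entities();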
pub struct MapLoader<'a> {
loaded_maps: HashMap<String, Tiledmap>,
pub z_level: ZLevel,
pub world: &'a mut World,
pub origin: V2,
pub layer_group: Option<String>,
pub sprite: Option<Entity>,
}
impl<'a> MapLoader<'a> {
pub fn load_it(file: String, lazy: &LazyUpdate) {
lazy.exec_mut(|world| {
let mut loader = MapLoader::new(world);
let file = file;
let map: &Tiledmap = loader.load_map(&file);
// Get the background color based on the loaded map
let bg: Color = map
.backgroundcolor
.as_ref()
.map(|s: &String| {
hex_color(s.as_str())
.map_err(|e| format!("{:?}", e))
.map(|(_, c)| c)
})
.unwrap_or(Ok(Color::rgb(0, 0, 0)))
.unwrap()
.clone();
let width: u32 = map
.get_property_by_name("viewport_width")
.map(|value: &Value| {
value
.as_i64()
.expect("map's 'viewport_width' property type must be unsigned int")
as u32
})
.unwrap_or(map.width as u32 * map.tilewidth as u32);
// Get the screen size based on the loaded map
let height: u32 = map
.get_property_by_name("viewport_height")
.map(|value: &Value| {
value.as_i64().expect(
"map's 'viewport_height' property type must be unsigned int",
) as u32
})
.unwrap_or(map.height as u32 * map.tileheight as u32);
let res = loader.load(&file, None, None);
match res {
Ok(_) => {}
        Err(msg) => panic!("{}", msg),
}
let mut screen = world.write_resource::<Screen>();
screen.set_size((width, height));
let mut background_color = world.write_resource::<BackgroundColor>();
background_color.0 = bg;
});
}
/// Create a new MapLoader
pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> {
MapLoader {
loaded_maps: HashMap::new(),
z_level: ZLevel(0.0),
world,
origin: V2::new(0.0, 0.0),
layer_group: None,
sprite: None,
}
}
fn load_map(&mut self, file: &String) -> &Tiledmap {
    if !self.loaded_maps.contains_key(file) {
let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone()));
self.loaded_maps.insert(file.clone(), map.clone());
}
self.loaded_maps.get(file).expect("Impossible!")
}
/// Sort the layers of a Tiledmap (in place) so that the layers
/// process correctly. Really we just want the inventories
/// layer to be loaded first.
pub fn sort_layers(&self, layers: &mut Vec<Layer>) {
let mut mndx = None;
'find_ndx: for (layer, i) in layers.iter().zip(0..) { | }
}
if let Some(ndx) = mndx {
let inv_layer = layers.remove(ndx);
layers.insert(0, inv_layer);
}
}
pub fn insert_map(
&mut self,
map: &mut Tiledmap,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.sort_layers(&mut map.layers);
let prev_group = self.layer_group.take();
self.layer_group = layer_group;
self.sprite = sprite;
let res = self.load_layers(&map.layers, &map)?;
self.layer_group = prev_group;
Ok(res)
}
/// Load an entire top-level map into the ECS.
/// Takes the file to load and optionally a layer group to load. If a layer
/// group is provided only layers within the group will be loaded. If no layer
/// group is provided all layers will be loaded.
  /// Returns an error or the `LoadedLayers` containing the loaded entities.
pub fn load(
&mut self,
file: &String,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.load_map(&file);
let mut map = self
.loaded_maps
.get(file)
.expect("Could not retreive map.")
.clone();
self.insert_map(&mut map, layer_group, sprite)
}
  /// Possibly increments the ZLevel based on layer properties.
  fn increment_z_by_layer(&mut self, layer: &Layer) {
    let z_inc = layer.get_z_inc().unwrap_or(0);
    if z_inc != 0 {
self.z_level.0 += z_inc as f32;
println!(
"incrementing ZLevel to {:?} - layer {:?}",
self.z_level, layer.name
);
}
}
/// Load one layer of LayerData.
fn load_layer_data(
&mut self,
layer_name: &String,
data: &LayerData,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level);
match data {
LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?),
LayerData::Objects(objects) => {
if layer_name == "inventories" {
let inv_layer: InventoryLayer =
InventoryLayer::read(map, &objects.objects)?;
let top_level_entities =
inv_layer.into_ecs(self.world, self.z_level)?;
Ok(top_level_entities)
} else {
let top_level_entities = objects.objects.iter().fold(
Ok(vec![]),
|result: Result<Vec<Entity>, String>, obj: &Object| {
let ent = self.load_top_level_object(obj, map)?;
let mut ents = result?;
ents.push(ent);
Ok(ents)
},
)?;
Ok(top_level_entities)
}
}
LayerData::Layers(layers) => {
layers.layers.iter().fold(Ok(vec![]), |res, layer| {
let mut res = res?;
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
res.append(&mut ents);
Ok(res)
})
}
}
}
/// Load a vec of layers into the ECS
fn load_layers(
&mut self,
layers: &Vec<Layer>,
map: &Tiledmap,
) -> Result<LoadedLayers, String> {
let variant = self.layer_group.take();
// First figure out which layers we need to load
let layers_to_load: Vec<&Layer> = if variant.is_some() {
let variant_name = variant.as_ref().unwrap();
// Only get the variant layers
layers
.iter()
.filter_map(|layer| {
if layer.name == *variant_name {
match &layer.layer_data {
LayerData::Layers(variant_layers) => {
let variant_layers: Vec<&Layer> =
variant_layers.layers.iter().collect();
Some(variant_layers)
}
_ => None,
}
} else {
None
}
})
.flatten()
.collect()
} else {
// Return the layers as normal
layers.iter().collect()
};
let mut layers = LoadedLayers::new();
for layer in layers_to_load.iter() {
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
// If this layer is part of a group, add it as a keyframe
if layer.is_group() {
layers.groups.insert(layer.name.clone(), ents);
} else {
layers.top_level_entities.append(&mut ents);
}
}
Ok(layers)
}
/// ## Loading tiles
/// Load a vector of tiles keyed by their GlobalId.
fn load_tile_layer(
&mut self,
tiles: &Vec<GlobalTileIndex>,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
let (width, height) = (map.width as u32, map.height as u32);
let tw = map.tilewidth as u32;
let th = map.tileheight as u32;
println!(" layer width {:?} and height {:?}", width, height);
tiles
.iter()
.zip(0..)
.fold(Ok(vec![]), |result, (gid, ndx)| {
let mut ents = result?;
let yndx = ndx / width;
let xndx = ndx % width;
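        // e.g. on a map 10 tiles wide, flat index 23 lands at
        // (xndx, yndx) = (23 % 10, 23 / 10) = (3, 2).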
println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx);
let tile_origin =
self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32);
let mut attribs = Attributes::read_gid(map, gid, None)?;
attribs.push(Attribute::Position(Position(tile_origin)));
let attributes = Attributes { attribs };
let ent = attributes.into_ecs(self.world, self.z_level);
ents.push(ent);
Ok(ents)
})
}
/// ## Loading objects
/// Load a top level object.
fn load_top_level_object(
&mut self,
object: &Object,
map: &Tiledmap,
) -> Result<Entity, String> {
let msg_name = object
.name
.non_empty()
.map(|s| s.clone())
.unwrap_or("unnamed".to_string());
println!("Encountered top object '{}'", msg_name);
match object.get_deep_type(map).as_str() {
"character" => {
// Load a character into the game world
let mut rec = ToonRecord::read(map, object)?;
rec.attributes.attribs = rec
.attributes
.attribs
.into_iter()
.map(|a| match a {
Attribute::Position(p) => {
Attribute::Position(Position(p.0 + self.origin))
}
a => a,
})
.collect();
Ok(rec.into_ecs(self.world, self.z_level))
}
"item" => {
// Load an item into the game world (not an inventory)
let mut rec = ItemRecord::read(map, object)?;
// Update the position to be offset by the origin passed in.
rec
.attributes
.position_mut()
.map(|pos| pos.0 += self.origin);
Ok(rec.into_ecs(self.world, self.z_level))
}
"action" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|pos| pos.0 += self.origin);
let _action = attributes.action().ok_or(format!(
"Could not read action {:?}\nDid read:\n{:?}",
object, attributes
))?;
println!("Creating action:\n{:?}", attributes);
Ok(attributes.into_ecs(self.world, self.z_level))
}
"sprite" => Sprite::read(self, map, object),
"zone" | "fence" | "step_fence" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|p| {
p.0 += self.origin + V2::new(0.0, object.height);
});
Ok(attributes.into_ecs(self.world, self.z_level))
}
"point" | "sound" | "music" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|p| {
p.0 += self.origin;
});
Ok(attributes.into_ecs(self.world, self.z_level))
}
"barrier" => {
let mut attributes = Attributes::read(map, object)?;
let position = attributes
.position_mut()
.expect("Barrier object has no position");
position.0 = self.origin;
Ok(attributes.into_ecs(self.world, self.z_level))
}
ty => {
let gid = object.gid.clone();
if let Some(gid) = gid {
// Tiled tiles' origin is at the bottom of the tile, not the top
let y = object.y - object.height;
let p = self.origin + V2::new(object.x, y);
let size = (object.width as u32, object.height as u32);
let mut attribs = Attributes::read_gid(map, &gid, Some(size))?;
attribs.push(Attribute::Position(Position(p)));
let props = object
.properties
.iter()
.map(|p| (&p.name, p))
.collect::<HashMap<_, _>>();
let mut prop_attribs = Attributes::read_properties(&props)?;
attribs.append(&mut prop_attribs);
let attributes = Attributes { attribs };
println!(" {:?} with attributes:{:?}", ty, attributes);
Ok(attributes.into_ecs(self.world, self.z_level))
} else {
if object.text.len() > 0 {
// This is a text object
let mut attribs = Attributes::read(map, object)?;
let p = attribs.position_mut().expect("Text must have a Position");
p.0 += self.origin;
println!(
" {:?} with attributes:{:?} and z_level:{:?}",
ty, attribs, self.z_level
);
Ok(attribs.into_ecs(self.world, self.z_level))
} else {
Err(format!("Unsupported object\n{:?}", object))
}
}
}
}
}
} | if let LayerData::Objects(_) = layer.layer_data {
if layer.name == "inventories" {
mndx = Some(i);
break 'find_ndx;
} | random_line_split |
map_loader.rs | use serde_json::Value;
use specs::prelude::*;
use std::collections::HashMap;
use std::path::Path;
use super::super::super::prelude::{
hex_color, Attribute, Attributes, BackgroundColor, CanBeEmpty, Color,
GlobalTileIndex, InventoryLayer, ItemRecord, Layer, LayerData, Object,
Position, Screen, Sprite, Tiledmap, ToonRecord, ZLevel, V2,
};
/// The result of loading one or more Tiled layers into the ECS.
pub struct LoadedLayers {
/// All the top-level entities loaded
pub top_level_entities: Vec<Entity>,
/// A HashMap of all entities loaded within a layer group, keyed by the group
/// layer's name.
pub groups: HashMap<String, Vec<Entity>>,
}
impl LoadedLayers {
pub fn new() -> LoadedLayers {
LoadedLayers {
top_level_entities: vec![],
groups: HashMap::new(),
}
}
pub fn all_entities(&self) -> Vec<Entity> {
let mut tops = self.top_level_entities.clone();
let mut groups: Vec<Entity> =
self.groups.values().flat_map(|es| es.clone()).collect();
tops.append(&mut groups);
tops
}
pub fn append_entities(&mut self, other: LoadedLayers) {
let other_tops = other.top_level_entities.into_iter();
let other_groups = other.groups.into_iter();
self.top_level_entities.extend(other_tops);
self.groups.extend(other_groups);
}
}
pub struct MapLoader<'a> {
loaded_maps: HashMap<String, Tiledmap>,
pub z_level: ZLevel,
pub world: &'a mut World,
pub origin: V2,
pub layer_group: Option<String>,
pub sprite: Option<Entity>,
}
impl<'a> MapLoader<'a> {
pub fn | (file: String, lazy: &LazyUpdate) {
lazy.exec_mut(|world| {
let mut loader = MapLoader::new(world);
let file = file;
let map: &Tiledmap = loader.load_map(&file);
// Get the background color based on the loaded map
let bg: Color = map
.backgroundcolor
.as_ref()
.map(|s: &String| {
hex_color(s.as_str())
.map_err(|e| format!("{:?}", e))
.map(|(_, c)| c)
})
.unwrap_or(Ok(Color::rgb(0, 0, 0)))
.unwrap()
.clone();
let width: u32 = map
.get_property_by_name("viewport_width")
.map(|value: &Value| {
value
.as_i64()
.expect("map's 'viewport_width' property type must be unsigned int")
as u32
})
.unwrap_or(map.width as u32 * map.tilewidth as u32);
// Get the screen size based on the loaded map
let height: u32 = map
.get_property_by_name("viewport_height")
.map(|value: &Value| {
value.as_i64().expect(
"map's 'viewport_height' property type must be unsigned int",
) as u32
})
.unwrap_or(map.height as u32 * map.tileheight as u32);
let res = loader.load(&file, None, None);
match res {
Ok(_) => {}
        Err(msg) => panic!("{}", msg),
}
let mut screen = world.write_resource::<Screen>();
screen.set_size((width, height));
let mut background_color = world.write_resource::<BackgroundColor>();
background_color.0 = bg;
});
}
/// Create a new MapLoader
pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> {
MapLoader {
loaded_maps: HashMap::new(),
z_level: ZLevel(0.0),
world,
origin: V2::new(0.0, 0.0),
layer_group: None,
sprite: None,
}
}
fn load_map(&mut self, file: &String) -> &Tiledmap {
    if !self.loaded_maps.contains_key(file) {
let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone()));
self.loaded_maps.insert(file.clone(), map.clone());
}
self.loaded_maps.get(file).expect("Impossible!")
}
/// Sort the layers of a Tiledmap (in place) so that the layers
/// process correctly. Really we just want the inventories
/// layer to be loaded first.
pub fn sort_layers(&self, layers: &mut Vec<Layer>) {
let mut mndx = None;
'find_ndx: for (layer, i) in layers.iter().zip(0..) {
if let LayerData::Objects(_) = layer.layer_data {
if layer.name == "inventories" {
mndx = Some(i);
break 'find_ndx;
}
}
}
if let Some(ndx) = mndx {
let inv_layer = layers.remove(ndx);
layers.insert(0, inv_layer);
}
}
pub fn insert_map(
&mut self,
map: &mut Tiledmap,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.sort_layers(&mut map.layers);
let prev_group = self.layer_group.take();
self.layer_group = layer_group;
self.sprite = sprite;
let res = self.load_layers(&map.layers, &map)?;
self.layer_group = prev_group;
Ok(res)
}
/// Load an entire top-level map into the ECS.
/// Takes the file to load and optionally a layer group to load. If a layer
/// group is provided only layers within the group will be loaded. If no layer
/// group is provided all layers will be loaded.
  /// Returns an error or the `LoadedLayers` containing the loaded entities.
pub fn load(
&mut self,
file: &String,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.load_map(&file);
let mut map = self
.loaded_maps
.get(file)
.expect("Could not retreive map.")
.clone();
self.insert_map(&mut map, layer_group, sprite)
}
  /// Possibly increments the ZLevel based on layer properties.
  fn increment_z_by_layer(&mut self, layer: &Layer) {
    let z_inc = layer.get_z_inc().unwrap_or(0);
    if z_inc != 0 {
self.z_level.0 += z_inc as f32;
println!(
"incrementing ZLevel to {:?} - layer {:?}",
self.z_level, layer.name
);
}
}
/// Load one layer of LayerData.
fn load_layer_data(
&mut self,
layer_name: &String,
data: &LayerData,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level);
match data {
LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?),
LayerData::Objects(objects) => {
if layer_name == "inventories" {
let inv_layer: InventoryLayer =
InventoryLayer::read(map, &objects.objects)?;
let top_level_entities =
inv_layer.into_ecs(self.world, self.z_level)?;
Ok(top_level_entities)
} else {
let top_level_entities = objects.objects.iter().fold(
Ok(vec![]),
|result: Result<Vec<Entity>, String>, obj: &Object| {
let ent = self.load_top_level_object(obj, map)?;
let mut ents = result?;
ents.push(ent);
Ok(ents)
},
)?;
Ok(top_level_entities)
}
}
LayerData::Layers(layers) => {
layers.layers.iter().fold(Ok(vec![]), |res, layer| {
let mut res = res?;
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
res.append(&mut ents);
Ok(res)
})
}
}
}
/// Load a vec of layers into the ECS
fn load_layers(
&mut self,
layers: &Vec<Layer>,
map: &Tiledmap,
) -> Result<LoadedLayers, String> {
let variant = self.layer_group.take();
// First figure out which layers we need to load
let layers_to_load: Vec<&Layer> = if variant.is_some() {
let variant_name = variant.as_ref().unwrap();
// Only get the variant layers
layers
.iter()
.filter_map(|layer| {
if layer.name == *variant_name {
match &layer.layer_data {
LayerData::Layers(variant_layers) => {
let variant_layers: Vec<&Layer> =
variant_layers.layers.iter().collect();
Some(variant_layers)
}
_ => None,
}
} else {
None
}
})
.flatten()
.collect()
} else {
// Return the layers as normal
layers.iter().collect()
};
let mut layers = LoadedLayers::new();
for layer in layers_to_load.iter() {
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
// If this layer is part of a group, add it as a keyframe
if layer.is_group() {
layers.groups.insert(layer.name.clone(), ents);
} else {
layers.top_level_entities.append(&mut ents);
}
}
Ok(layers)
}
/// ## Loading tiles
/// Load a vector of tiles keyed by their GlobalId.
fn load_tile_layer(
&mut self,
tiles: &Vec<GlobalTileIndex>,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
let (width, height) = (map.width as u32, map.height as u32);
let tw = map.tilewidth as u32;
let th = map.tileheight as u32;
println!(" layer width {:?} and height {:?}", width, height);
tiles
.iter()
.zip(0..)
.fold(Ok(vec![]), |result, (gid, ndx)| {
let mut ents = result?;
let yndx = ndx / width;
let xndx = ndx % width;
println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx);
let tile_origin =
self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32);
let mut attribs = Attributes::read_gid(map, gid, None)?;
attribs.push(Attribute::Position(Position(tile_origin)));
let attributes = Attributes { attribs };
let ent = attributes.into_ecs(self.world, self.z_level);
ents.push(ent);
Ok(ents)
})
}
/// ## Loading objects
/// Load a top level object.
fn load_top_level_object(
&mut self,
object: &Object,
map: &Tiledmap,
) -> Result<Entity, String> {
let msg_name = object
.name
.non_empty()
.map(|s| s.clone())
.unwrap_or("unnamed".to_string());
println!("Encountered top object '{}'", msg_name);
match object.get_deep_type(map).as_str() {
"character" => {
// Load a character into the game world
let mut rec = ToonRecord::read(map, object)?;
rec.attributes.attribs = rec
.attributes
.attribs
.into_iter()
.map(|a| match a {
Attribute::Position(p) => {
Attribute::Position(Position(p.0 + self.origin))
}
a => a,
})
.collect();
Ok(rec.into_ecs(self.world, self.z_level))
}
"item" => {
// Load an item into the game world (not an inventory)
let mut rec = ItemRecord::read(map, object)?;
// Update the position to be offset by the origin passed in.
rec
.attributes
.position_mut()
.map(|pos| pos.0 += self.origin);
Ok(rec.into_ecs(self.world, self.z_level))
}
"action" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|pos| pos.0 += self.origin);
let _action = attributes.action().ok_or(format!(
"Could not read action {:?}\nDid read:\n{:?}",
object, attributes
))?;
println!("Creating action:\n{:?}", attributes);
Ok(attributes.into_ecs(self.world, self.z_level))
}
"sprite" => Sprite::read(self, map, object),
"zone" | "fence" | "step_fence" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|p| {
p.0 += self.origin + V2::new(0.0, object.height);
});
Ok(attributes.into_ecs(self.world, self.z_level))
}
"point" | "sound" | "music" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|p| {
p.0 += self.origin;
});
Ok(attributes.into_ecs(self.world, self.z_level))
}
"barrier" => {
let mut attributes = Attributes::read(map, object)?;
let position = attributes
.position_mut()
.expect("Barrier object has no position");
position.0 = self.origin;
Ok(attributes.into_ecs(self.world, self.z_level))
}
ty => {
let gid = object.gid.clone();
if let Some(gid) = gid {
// Tiled tiles' origin is at the bottom of the tile, not the top
let y = object.y - object.height;
let p = self.origin + V2::new(object.x, y);
let size = (object.width as u32, object.height as u32);
let mut attribs = Attributes::read_gid(map, &gid, Some(size))?;
attribs.push(Attribute::Position(Position(p)));
let props = object
.properties
.iter()
.map(|p| (&p.name, p))
.collect::<HashMap<_, _>>();
let mut prop_attribs = Attributes::read_properties(&props)?;
attribs.append(&mut prop_attribs);
let attributes = Attributes { attribs };
println!(" {:?} with attributes:{:?}", ty, attributes);
Ok(attributes.into_ecs(self.world, self.z_level))
} else {
if object.text.len() > 0 {
// This is a text object
let mut attribs = Attributes::read(map, object)?;
let p = attribs.position_mut().expect("Text must have a Position");
p.0 += self.origin;
println!(
" {:?} with attributes:{:?} and z_level:{:?}",
ty, attribs, self.z_level
);
Ok(attribs.into_ecs(self.world, self.z_level))
} else {
Err(format!("Unsupported object\n{:?}", object))
}
}
}
}
}
}
| load_it | identifier_name |
map_loader.rs | use serde_json::Value;
use specs::prelude::*;
use std::collections::HashMap;
use std::path::Path;
use super::super::super::prelude::{
hex_color, Attribute, Attributes, BackgroundColor, CanBeEmpty, Color,
GlobalTileIndex, InventoryLayer, ItemRecord, Layer, LayerData, Object,
Position, Screen, Sprite, Tiledmap, ToonRecord, ZLevel, V2,
};
/// The result of loading one or more Tiled layers into the ECS.
pub struct LoadedLayers {
/// All the top-level entities loaded
pub top_level_entities: Vec<Entity>,
/// A HashMap of all entities loaded within a layer group, keyed by the group
/// layer's name.
pub groups: HashMap<String, Vec<Entity>>,
}
impl LoadedLayers {
pub fn new() -> LoadedLayers {
LoadedLayers {
top_level_entities: vec![],
groups: HashMap::new(),
}
}
pub fn all_entities(&self) -> Vec<Entity> {
let mut tops = self.top_level_entities.clone();
let mut groups: Vec<Entity> =
self.groups.values().flat_map(|es| es.clone()).collect();
tops.append(&mut groups);
tops
}
pub fn append_entities(&mut self, other: LoadedLayers) {
let other_tops = other.top_level_entities.into_iter();
let other_groups = other.groups.into_iter();
self.top_level_entities.extend(other_tops);
self.groups.extend(other_groups);
}
}
pub struct MapLoader<'a> {
loaded_maps: HashMap<String, Tiledmap>,
pub z_level: ZLevel,
pub world: &'a mut World,
pub origin: V2,
pub layer_group: Option<String>,
pub sprite: Option<Entity>,
}
impl<'a> MapLoader<'a> {
pub fn load_it(file: String, lazy: &LazyUpdate) {
lazy.exec_mut(|world| {
let mut loader = MapLoader::new(world);
let file = file;
let map: &Tiledmap = loader.load_map(&file);
// Get the background color based on the loaded map
let bg: Color = map
.backgroundcolor
.as_ref()
.map(|s: &String| {
hex_color(s.as_str())
.map_err(|e| format!("{:?}", e))
.map(|(_, c)| c)
})
.unwrap_or(Ok(Color::rgb(0, 0, 0)))
.unwrap()
.clone();
let width: u32 = map
.get_property_by_name("viewport_width")
.map(|value: &Value| {
value
.as_i64()
.expect("map's 'viewport_width' property type must be unsigned int")
as u32
})
.unwrap_or(map.width as u32 * map.tilewidth as u32);
// Get the screen size based on the loaded map
let height: u32 = map
.get_property_by_name("viewport_height")
.map(|value: &Value| {
value.as_i64().expect(
"map's 'viewport_height' property type must be unsigned int",
) as u32
})
.unwrap_or(map.height as u32 * map.tileheight as u32);
let res = loader.load(&file, None, None);
match res {
Ok(_) => {}
        Err(msg) => panic!("{}", msg),
}
let mut screen = world.write_resource::<Screen>();
screen.set_size((width, height));
let mut background_color = world.write_resource::<BackgroundColor>();
background_color.0 = bg;
});
}
/// Create a new MapLoader
pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> {
MapLoader {
loaded_maps: HashMap::new(),
z_level: ZLevel(0.0),
world,
origin: V2::new(0.0, 0.0),
layer_group: None,
sprite: None,
}
}
fn load_map(&mut self, file: &String) -> &Tiledmap {
    if !self.loaded_maps.contains_key(file) {
let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone()));
self.loaded_maps.insert(file.clone(), map.clone());
}
self.loaded_maps.get(file).expect("Impossible!")
}
/// Sort the layers of a Tiledmap (in place) so that the layers
/// process correctly. Really we just want the inventories
/// layer to be loaded first.
pub fn sort_layers(&self, layers: &mut Vec<Layer>) {
let mut mndx = None;
'find_ndx: for (layer, i) in layers.iter().zip(0..) {
if let LayerData::Objects(_) = layer.layer_data {
if layer.name == "inventories" {
mndx = Some(i);
break 'find_ndx;
}
}
}
if let Some(ndx) = mndx {
let inv_layer = layers.remove(ndx);
layers.insert(0, inv_layer);
}
}
pub fn insert_map(
&mut self,
map: &mut Tiledmap,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.sort_layers(&mut map.layers);
let prev_group = self.layer_group.take();
self.layer_group = layer_group;
self.sprite = sprite;
let res = self.load_layers(&map.layers, &map)?;
self.layer_group = prev_group;
Ok(res)
}
/// Load an entire top-level map into the ECS.
/// Takes the file to load and optionally a layer group to load. If a layer
/// group is provided only layers within the group will be loaded. If no layer
/// group is provided all layers will be loaded.
  /// Returns an error or the `LoadedLayers` containing the loaded entities.
pub fn load(
&mut self,
file: &String,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.load_map(&file);
let mut map = self
.loaded_maps
.get(file)
.expect("Could not retreive map.")
.clone();
self.insert_map(&mut map, layer_group, sprite)
}
  /// Possibly increments the ZLevel based on layer properties.
  fn increment_z_by_layer(&mut self, layer: &Layer) {
    let z_inc = layer.get_z_inc().unwrap_or(0);
    if z_inc != 0 {
self.z_level.0 += z_inc as f32;
println!(
"incrementing ZLevel to {:?} - layer {:?}",
self.z_level, layer.name
);
}
}
/// Load one layer of LayerData.
fn load_layer_data(
&mut self,
layer_name: &String,
data: &LayerData,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level);
match data {
LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?),
LayerData::Objects(objects) => {
if layer_name == "inventories" {
let inv_layer: InventoryLayer =
InventoryLayer::read(map, &objects.objects)?;
let top_level_entities =
inv_layer.into_ecs(self.world, self.z_level)?;
Ok(top_level_entities)
} else {
let top_level_entities = objects.objects.iter().fold(
Ok(vec![]),
|result: Result<Vec<Entity>, String>, obj: &Object| {
let ent = self.load_top_level_object(obj, map)?;
let mut ents = result?;
ents.push(ent);
Ok(ents)
},
)?;
Ok(top_level_entities)
}
}
LayerData::Layers(layers) => {
layers.layers.iter().fold(Ok(vec![]), |res, layer| {
let mut res = res?;
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
res.append(&mut ents);
Ok(res)
})
}
}
}
/// Load a vec of layers into the ECS
fn load_layers(
&mut self,
layers: &Vec<Layer>,
map: &Tiledmap,
) -> Result<LoadedLayers, String> {
let variant = self.layer_group.take();
// First figure out which layers we need to load
let layers_to_load: Vec<&Layer> = if variant.is_some() {
let variant_name = variant.as_ref().unwrap();
// Only get the variant layers
layers
.iter()
.filter_map(|layer| {
if layer.name == *variant_name {
match &layer.layer_data {
LayerData::Layers(variant_layers) => |
_ => None,
}
} else {
None
}
})
.flatten()
.collect()
} else {
// Return the layers as normal
layers.iter().collect()
};
let mut layers = LoadedLayers::new();
for layer in layers_to_load.iter() {
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
// If this layer is part of a group, add it as a keyframe
if layer.is_group() {
layers.groups.insert(layer.name.clone(), ents);
} else {
layers.top_level_entities.append(&mut ents);
}
}
Ok(layers)
}
/// ## Loading tiles
/// Load a vector of tiles keyed by their GlobalId.
fn load_tile_layer(
&mut self,
tiles: &Vec<GlobalTileIndex>,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
let (width, height) = (map.width as u32, map.height as u32);
let tw = map.tilewidth as u32;
let th = map.tileheight as u32;
println!(" layer width {:?} and height {:?}", width, height);
tiles
.iter()
.zip(0..)
.fold(Ok(vec![]), |result, (gid, ndx)| {
let mut ents = result?;
let yndx = ndx / width;
let xndx = ndx % width;
println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx);
let tile_origin =
self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32);
let mut attribs = Attributes::read_gid(map, gid, None)?;
attribs.push(Attribute::Position(Position(tile_origin)));
let attributes = Attributes { attribs };
let ent = attributes.into_ecs(self.world, self.z_level);
ents.push(ent);
Ok(ents)
})
}
/// ## Loading objects
/// Load a top level object.
fn load_top_level_object(
&mut self,
object: &Object,
map: &Tiledmap,
) -> Result<Entity, String> {
let msg_name = object
.name
.non_empty()
.map(|s| s.clone())
.unwrap_or("unnamed".to_string());
println!("Encountered top object '{}'", msg_name);
match object.get_deep_type(map).as_str() {
"character" => {
// Load a character into the game world
let mut rec = ToonRecord::read(map, object)?;
rec.attributes.attribs = rec
.attributes
.attribs
.into_iter()
.map(|a| match a {
Attribute::Position(p) => {
Attribute::Position(Position(p.0 + self.origin))
}
a => a,
})
.collect();
Ok(rec.into_ecs(self.world, self.z_level))
}
"item" => {
// Load an item into the game world (not an inventory)
let mut rec = ItemRecord::read(map, object)?;
// Update the position to be offset by the origin passed in.
rec
.attributes
.position_mut()
.map(|pos| pos.0 += self.origin);
Ok(rec.into_ecs(self.world, self.z_level))
}
"action" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|pos| pos.0 += self.origin);
let _action = attributes.action().ok_or(format!(
"Could not read action {:?}\nDid read:\n{:?}",
object, attributes
))?;
println!("Creating action:\n{:?}", attributes);
Ok(attributes.into_ecs(self.world, self.z_level))
}
"sprite" => Sprite::read(self, map, object),
"zone" | "fence" | "step_fence" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|p| {
p.0 += self.origin + V2::new(0.0, object.height);
});
Ok(attributes.into_ecs(self.world, self.z_level))
}
"point" | "sound" | "music" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|p| {
p.0 += self.origin;
});
Ok(attributes.into_ecs(self.world, self.z_level))
}
"barrier" => {
let mut attributes = Attributes::read(map, object)?;
let position = attributes
.position_mut()
.expect("Barrier object has no position");
position.0 = self.origin;
Ok(attributes.into_ecs(self.world, self.z_level))
}
ty => {
let gid = object.gid.clone();
if let Some(gid) = gid {
// Tiled tiles' origin is at the bottom of the tile, not the top
let y = object.y - object.height;
let p = self.origin + V2::new(object.x, y);
let size = (object.width as u32, object.height as u32);
let mut attribs = Attributes::read_gid(map, &gid, Some(size))?;
attribs.push(Attribute::Position(Position(p)));
let props = object
.properties
.iter()
.map(|p| (&p.name, p))
.collect::<HashMap<_, _>>();
let mut prop_attribs = Attributes::read_properties(&props)?;
attribs.append(&mut prop_attribs);
let attributes = Attributes { attribs };
println!(" {:?} with attributes:{:?}", ty, attributes);
Ok(attributes.into_ecs(self.world, self.z_level))
} else {
if object.text.len() > 0 {
// This is a text object
let mut attribs = Attributes::read(map, object)?;
let p = attribs.position_mut().expect("Text must have a Position");
p.0 += self.origin;
println!(
" {:?} with attributes:{:?} and z_level:{:?}",
ty, attribs, self.z_level
);
Ok(attribs.into_ecs(self.world, self.z_level))
} else {
Err(format!("Unsupported object\n{:?}", object))
}
}
}
}
}
}
| {
let variant_layers: Vec<&Layer> =
variant_layers.layers.iter().collect();
Some(variant_layers)
} | conditional_block |
map_loader.rs | use serde_json::Value;
use specs::prelude::*;
use std::collections::HashMap;
use std::path::Path;
use super::super::super::prelude::{
hex_color, Attribute, Attributes, BackgroundColor, CanBeEmpty, Color,
GlobalTileIndex, InventoryLayer, ItemRecord, Layer, LayerData, Object,
Position, Screen, Sprite, Tiledmap, ToonRecord, ZLevel, V2,
};
/// The result of loading one or more Tiled layers into the ECS.
pub struct LoadedLayers {
/// All the top-level entities loaded
pub top_level_entities: Vec<Entity>,
/// A HashMap of all entities loaded within a layer group, keyed by the group
/// layer's name.
pub groups: HashMap<String, Vec<Entity>>,
}
impl LoadedLayers {
pub fn new() -> LoadedLayers {
LoadedLayers {
top_level_entities: vec![],
groups: HashMap::new(),
}
}
pub fn all_entities(&self) -> Vec<Entity> {
let mut tops = self.top_level_entities.clone();
let mut groups: Vec<Entity> =
self.groups.values().flat_map(|es| es.clone()).collect();
tops.append(&mut groups);
tops
}
pub fn append_entities(&mut self, other: LoadedLayers) |
}
pub struct MapLoader<'a> {
loaded_maps: HashMap<String, Tiledmap>,
pub z_level: ZLevel,
pub world: &'a mut World,
pub origin: V2,
pub layer_group: Option<String>,
pub sprite: Option<Entity>,
}
impl<'a> MapLoader<'a> {
pub fn load_it(file: String, lazy: &LazyUpdate) {
lazy.exec_mut(|world| {
let mut loader = MapLoader::new(world);
let file = file;
let map: &Tiledmap = loader.load_map(&file);
// Get the background color based on the loaded map
let bg: Color = map
.backgroundcolor
.as_ref()
.map(|s: &String| {
hex_color(s.as_str())
.map_err(|e| format!("{:?}", e))
.map(|(_, c)| c)
})
.unwrap_or(Ok(Color::rgb(0, 0, 0)))
.unwrap()
.clone();
let width: u32 = map
.get_property_by_name("viewport_width")
.map(|value: &Value| {
value
.as_i64()
.expect("map's 'viewport_width' property type must be unsigned int")
as u32
})
.unwrap_or(map.width as u32 * map.tilewidth as u32);
// Get the screen size based on the loaded map
let height: u32 = map
.get_property_by_name("viewport_height")
.map(|value: &Value| {
value.as_i64().expect(
"map's 'viewport_height' property type must be unsigned int",
) as u32
})
.unwrap_or(map.height as u32 * map.tileheight as u32);
let res = loader.load(&file, None, None);
match res {
Ok(_) => {}
        Err(msg) => panic!("{}", msg),
}
let mut screen = world.write_resource::<Screen>();
screen.set_size((width, height));
let mut background_color = world.write_resource::<BackgroundColor>();
background_color.0 = bg;
});
}
/// Create a new MapLoader
pub fn new<'c>(world: &'c mut World) -> MapLoader<'c> {
MapLoader {
loaded_maps: HashMap::new(),
z_level: ZLevel(0.0),
world,
origin: V2::new(0.0, 0.0),
layer_group: None,
sprite: None,
}
}
fn load_map(&mut self, file: &String) -> &Tiledmap {
    if !self.loaded_maps.contains_key(file) {
let map: Tiledmap = Tiledmap::new(&Path::new(&file.clone()));
self.loaded_maps.insert(file.clone(), map.clone());
}
self.loaded_maps.get(file).expect("Impossible!")
}
/// Sort the layers of a Tiledmap (in place) so that the layers
/// process correctly. Really we just want the inventories
/// layer to be loaded first.
pub fn sort_layers(&self, layers: &mut Vec<Layer>) {
let mut mndx = None;
'find_ndx: for (layer, i) in layers.iter().zip(0..) {
if let LayerData::Objects(_) = layer.layer_data {
if layer.name == "inventories" {
mndx = Some(i);
break 'find_ndx;
}
}
}
if let Some(ndx) = mndx {
let inv_layer = layers.remove(ndx);
layers.insert(0, inv_layer);
}
}
pub fn insert_map(
&mut self,
map: &mut Tiledmap,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.sort_layers(&mut map.layers);
let prev_group = self.layer_group.take();
self.layer_group = layer_group;
self.sprite = sprite;
let res = self.load_layers(&map.layers, &map)?;
self.layer_group = prev_group;
Ok(res)
}
/// Load an entire top-level map into the ECS.
/// Takes the file to load and optionally a layer group to load. If a layer
/// group is provided only layers within the group will be loaded. If no layer
/// group is provided all layers will be loaded.
  /// Returns an error or the `LoadedLayers` containing the loaded entities.
pub fn load(
&mut self,
file: &String,
layer_group: Option<String>,
sprite: Option<Entity>,
) -> Result<LoadedLayers, String> {
self.load_map(&file);
let mut map = self
.loaded_maps
.get(file)
.expect("Could not retreive map.")
.clone();
self.insert_map(&mut map, layer_group, sprite)
}
  /// Possibly increments the ZLevel based on layer properties.
  fn increment_z_by_layer(&mut self, layer: &Layer) {
    let z_inc = layer.get_z_inc().unwrap_or(0);
    if z_inc != 0 {
self.z_level.0 += z_inc as f32;
println!(
"incrementing ZLevel to {:?} - layer {:?}",
self.z_level, layer.name
);
}
}
/// Load one layer of LayerData.
fn load_layer_data(
&mut self,
layer_name: &String,
data: &LayerData,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
println!("load_layer_data: {} at z:{:?}", layer_name, self.z_level);
match data {
LayerData::Tiles(tiles) => Ok(self.load_tile_layer(&tiles.data, map)?),
LayerData::Objects(objects) => {
if layer_name == "inventories" {
let inv_layer: InventoryLayer =
InventoryLayer::read(map, &objects.objects)?;
let top_level_entities =
inv_layer.into_ecs(self.world, self.z_level)?;
Ok(top_level_entities)
} else {
let top_level_entities = objects.objects.iter().fold(
Ok(vec![]),
|result: Result<Vec<Entity>, String>, obj: &Object| {
let ent = self.load_top_level_object(obj, map)?;
let mut ents = result?;
ents.push(ent);
Ok(ents)
},
)?;
Ok(top_level_entities)
}
}
LayerData::Layers(layers) => {
layers.layers.iter().fold(Ok(vec![]), |res, layer| {
let mut res = res?;
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
res.append(&mut ents);
Ok(res)
})
}
}
}
/// Load a vec of layers into the ECS
fn load_layers(
&mut self,
layers: &Vec<Layer>,
map: &Tiledmap,
) -> Result<LoadedLayers, String> {
let variant = self.layer_group.take();
// First figure out which layers we need to load
let layers_to_load: Vec<&Layer> = if variant.is_some() {
let variant_name = variant.as_ref().unwrap();
// Only get the variant layers
layers
.iter()
.filter_map(|layer| {
if layer.name == *variant_name {
match &layer.layer_data {
LayerData::Layers(variant_layers) => {
let variant_layers: Vec<&Layer> =
variant_layers.layers.iter().collect();
Some(variant_layers)
}
_ => None,
}
} else {
None
}
})
.flatten()
.collect()
} else {
// Return the layers as normal
layers.iter().collect()
};
let mut layers = LoadedLayers::new();
for layer in layers_to_load.iter() {
self.increment_z_by_layer(&layer);
let mut ents =
self.load_layer_data(&layer.name, &layer.layer_data, map)?;
// If this layer is part of a group, add it as a keyframe
if layer.is_group() {
layers.groups.insert(layer.name.clone(), ents);
} else {
layers.top_level_entities.append(&mut ents);
}
}
Ok(layers)
}
/// ## Loading tiles
/// Load a vector of tiles keyed by their GlobalId.
fn load_tile_layer(
&mut self,
tiles: &Vec<GlobalTileIndex>,
map: &Tiledmap,
) -> Result<Vec<Entity>, String> {
let (width, height) = (map.width as u32, map.height as u32);
let tw = map.tilewidth as u32;
let th = map.tileheight as u32;
println!(" layer width {:?} and height {:?}", width, height);
tiles
.iter()
.zip(0..)
.fold(Ok(vec![]), |result, (gid, ndx)| {
let mut ents = result?;
let yndx = ndx / width;
let xndx = ndx % width;
println!(" tile {:?} ({:?}, {:?})", ndx, xndx, yndx);
let tile_origin =
self.origin + V2::new((tw * xndx) as f32, (th * yndx) as f32);
let mut attribs = Attributes::read_gid(map, gid, None)?;
attribs.push(Attribute::Position(Position(tile_origin)));
let attributes = Attributes { attribs };
let ent = attributes.into_ecs(self.world, self.z_level);
ents.push(ent);
Ok(ents)
})
}
/// ## Loading objects
/// Load a top level object.
fn load_top_level_object(
&mut self,
object: &Object,
map: &Tiledmap,
) -> Result<Entity, String> {
let msg_name = object
.name
.non_empty()
.map(|s| s.clone())
.unwrap_or("unnamed".to_string());
println!("Encountered top object '{}'", msg_name);
match object.get_deep_type(map).as_str() {
"character" => {
// Load a character into the game world
let mut rec = ToonRecord::read(map, object)?;
rec.attributes.attribs = rec
.attributes
.attribs
.into_iter()
.map(|a| match a {
Attribute::Position(p) => {
Attribute::Position(Position(p.0 + self.origin))
}
a => a,
})
.collect();
Ok(rec.into_ecs(self.world, self.z_level))
}
"item" => {
// Load an item into the game world (not an inventory)
let mut rec = ItemRecord::read(map, object)?;
// Update the position to be offset by the origin passed in.
rec
.attributes
.position_mut()
.map(|pos| pos.0 += self.origin);
Ok(rec.into_ecs(self.world, self.z_level))
}
"action" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|pos| pos.0 += self.origin);
let _action = attributes.action().ok_or(format!(
"Could not read action {:?}\nDid read:\n{:?}",
object, attributes
))?;
println!("Creating action:\n{:?}", attributes);
Ok(attributes.into_ecs(self.world, self.z_level))
}
"sprite" => Sprite::read(self, map, object),
"zone" | "fence" | "step_fence" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|p| {
p.0 += self.origin + V2::new(0.0, object.height);
});
Ok(attributes.into_ecs(self.world, self.z_level))
}
"point" | "sound" | "music" => {
let mut attributes = Attributes::read(map, object)?;
attributes.position_mut().map(|p| {
p.0 += self.origin;
});
Ok(attributes.into_ecs(self.world, self.z_level))
}
"barrier" => {
let mut attributes = Attributes::read(map, object)?;
let position = attributes
.position_mut()
.expect("Barrier object has no position");
position.0 = self.origin;
Ok(attributes.into_ecs(self.world, self.z_level))
}
ty => {
let gid = object.gid.clone();
if let Some(gid) = gid {
// Tiled tiles' origin is at the bottom of the tile, not the top
let y = object.y - object.height;
let p = self.origin + V2::new(object.x, y);
let size = (object.width as u32, object.height as u32);
let mut attribs = Attributes::read_gid(map, &gid, Some(size))?;
attribs.push(Attribute::Position(Position(p)));
let props = object
.properties
.iter()
.map(|p| (&p.name, p))
.collect::<HashMap<_, _>>();
let mut prop_attribs = Attributes::read_properties(&props)?;
attribs.append(&mut prop_attribs);
let attributes = Attributes { attribs };
println!(" {:?} with attributes:{:?}", ty, attributes);
Ok(attributes.into_ecs(self.world, self.z_level))
} else {
if object.text.len() > 0 {
// This is a text object
let mut attribs = Attributes::read(map, object)?;
let p = attribs.position_mut().expect("Text must have a Position");
p.0 += self.origin;
println!(
" {:?} with attributes:{:?} and z_level:{:?}",
ty, attribs, self.z_level
);
Ok(attribs.into_ecs(self.world, self.z_level))
} else {
Err(format!("Unsupported object\n{:?}", object))
}
}
}
}
}
}
| {
let other_tops = other.top_level_entities.into_iter();
let other_groups = other.groups.into_iter();
self.top_level_entities.extend(other_tops);
self.groups.extend(other_groups);
} | identifier_body |
media_sessions.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::{format_err, Error},
fidl::encoding::Decodable as FidlDecodable,
fidl::endpoints::{create_proxy, create_request_stream},
fidl_fuchsia_bluetooth_avrcp::{self as fidl_avrcp},
fidl_fuchsia_media_sessions2::{
DiscoveryMarker, DiscoveryProxy, SessionControlProxy, SessionInfoDelta,
SessionsWatcherRequest, SessionsWatcherRequestStream, WatchOptions,
},
fuchsia_component::client::connect_to_service,
fuchsia_syslog::{fx_log_warn, fx_vlog},
futures::{Future, TryStreamExt},
parking_lot::RwLock,
std::collections::HashMap,
std::sync::Arc,
};
use crate::media::media_state::MediaState;
use crate::media::media_types::Notification;
use crate::types::{
bounded_queue::BoundedQueue, NotificationData, MAX_NOTIFICATION_EVENT_QUEUE_SIZE,
};
#[derive(Debug, Clone)]
pub(crate) struct MediaSessions {
inner: Arc<RwLock<MediaSessionsInner>>,
}
impl MediaSessions {
pub fn create() -> Self {
Self { inner: Arc::new(RwLock::new(MediaSessionsInner::new())) }
}
// Returns a future that watches MediaPlayer for updates.
pub fn watch(&self) -> impl Future<Output = Result<(), anyhow::Error>> {
// MediaSession Service Setup
// Set up the MediaSession Discovery service. Connect to the session watcher.
let discovery = connect_to_service::<DiscoveryMarker>()
.expect("Couldn't connect to discovery service.");
let (watcher_client, watcher_requests) =
create_request_stream().expect("Error creating watcher request stream");
// Only subscribe to updates from players that are active.
let watch_active_options =
      WatchOptions { only_active: Some(true), ..WatchOptions::new_empty() };
discovery
.watch_sessions(watch_active_options, watcher_client)
.expect("Should watch media sessions");
// End MediaSession Service Setup
let inner = self.inner.clone();
Self::watch_media_sessions(discovery, watcher_requests, inner)
}
pub fn get_active_session(&self) -> Result<MediaState, Error> {
let r_inner = self.inner.read().get_active_session();
r_inner.ok_or(format_err!("No active player"))
}
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
self.inner.read().get_supported_notification_events()
}
pub fn | (
&self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
let mut write = self.inner.write();
write.register_notification(event_id, current, pos_change_interval, responder)
}
async fn watch_media_sessions(
discovery: DiscoveryProxy,
mut watcher_requests: SessionsWatcherRequestStream,
sessions_inner: Arc<RwLock<MediaSessionsInner>>,
) -> Result<(), anyhow::Error> {
while let Some(req) =
watcher_requests.try_next().await.expect("Failed to serve Watcher service")
{
match req {
SessionsWatcherRequest::SessionUpdated {
session_id: id,
session_info_delta: delta,
responder,
} => {
responder.send()?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta);
// Since we are only listening to active sessions, update the currently
// active media session id every time a watcher event is triggered.
// This means AVRCP commands will be queried/set to the player that has most
// recently changed in status.
sessions_inner.write().update_active_session_id(Some(id.clone()));
// If this is our first time receiving updates from this MediaPlayer, create
// a session control proxy and connect to the session.
sessions_inner.write().create_or_update_session(
discovery.clone(),
id.clone(),
delta,
&create_session_control_proxy,
)?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner);
}
SessionsWatcherRequest::SessionRemoved { session_id, responder } => {
// A media session with id `session_id` has been removed.
responder.send()?;
// Clear any outstanding notifications with a player changed response.
// Clear the currently active session, if it equals `session_id`.
// Clear entry in state map.
sessions_inner.write().clear_session(&session_id);
fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner);
}
}
}
Ok(())
}
}
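// Sketch of the expected wiring; the executor details are assumptions and not
// part of this module:
//
//     let sessions = MediaSessions::create();
//     // e.g. fasync::Task::spawn(sessions.watch().map(|_| ())).detach();
//     // ...later, from the AVRCP target handler:
//     // let state = sessions.get_active_session()?;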
#[derive(Debug)]
pub(crate) struct MediaSessionsInner {
// The currently active MediaSession id.
// If present, the `active_session_id` should be present in `map`.
active_session_id: Option<u64>,
// The map of ids to the respective media session.
map: HashMap<u64, MediaState>,
// The map of outstanding notifications.
notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>,
}
impl MediaSessionsInner {
pub fn new() -> Self {
Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() }
}
pub fn get_active_session(&self) -> Option<MediaState> {
self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned())
}
/// TODO(41703): Add TRACK_POS_CHANGED when implemented.
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
vec![
fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged,
fidl_avrcp::NotificationEvent::PlaybackStatusChanged,
fidl_avrcp::NotificationEvent::TrackChanged,
]
}
/// Removes the MediaState specified by `id` from the map, should it exist.
/// If the session was currently active, clears `self.active_session_id`.
/// Returns the removed MediaState.
pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> {
if Some(id) == self.active_session_id.as_ref() {
self.update_active_session_id(None);
}
self.map.remove(id)
}
/// Clears all outstanding notifications with an AddressedPlayerChanged error.
/// See `crate::types::update_responder` for more details.
pub fn clear_notification_responders(&mut self) {
for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() {
if let Err(e) = notif_data.update_responder(
&fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID.
Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged),
) {
fx_log_warn!("There was an error clearing the responder: {:?}", e);
}
}
fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications);
}
/// Updates the active session to the session specified by `id`.
/// Clears all outstanding notifications if the active session has changed,
/// and returns the previous active id in that case.
pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> {
if self.active_session_id != id {
self.clear_notification_responders();
let previous_active_session_id = self.active_session_id.take();
self.active_session_id = id;
previous_active_session_id
} else {
None
}
}
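// Illustrative semantics (assumed, commented sketch): the return value reports the
// previous id only when the active session actually changes.
//
//     let mut inner = MediaSessionsInner::new();
//     assert_eq!(inner.update_active_session_id(Some(1)), None);    // previous id was None
//     assert_eq!(inner.update_active_session_id(Some(1)), None);    // unchanged: no eviction
//     assert_eq!(inner.update_active_session_id(Some(2)), Some(1)); // changed: notifications cleared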
/// If an active session is present, update any outstanding notifications by
/// checking if notification values have changed.
/// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED.
pub fn update_notification_responders(&mut self) {
let state = if let Some(state) = self.get_active_session() {
state.clone()
} else {
return;
};
self.notifications = self
.notifications
.drain()
.map(|(event_id, queue)| {
let curr_value = state.session_info().get_notification_value(&event_id);
(
event_id,
queue
.into_iter()
.filter_map(|notif_data| {
notif_data
.update_responder(&event_id, curr_value.clone())
.unwrap_or(None)
})
.collect(),
)
})
.collect();
fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications);
}
/// If the entry `id` doesn't exist in the map, create a `MediaState` entry
/// with the control proxy returned by `create_fn`.
/// Update the state with the delta.
/// Update any outstanding notification responders with the change in state.
pub fn create_or_update_session<F>(
&mut self,
discovery: DiscoveryProxy,
id: u64,
delta: SessionInfoDelta,
create_fn: F,
) -> Result<(), Error>
where
F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>,
{
self.map
.entry(id)
.or_insert({
let session_proxy = create_fn(discovery, id)?;
MediaState::new(session_proxy)
})
.update_session_info(delta);
self.update_notification_responders();
Ok(())
}
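// Hedged test-seam sketch: `create_fn` lets callers inject a stub instead of dialing
// the Discovery service. The closure shape mirrors `&create_session_control_proxy`;
// the stub body below is an assumption for illustration only.
//
//     inner.create_or_update_session(discovery, 1, SessionInfoDelta::new_empty(),
//         &|_discovery, _id| {
//             let (proxy, _stream) = create_proxy::<SessionControlMarker>()?;
//             Ok(proxy)
//         })?;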
/// Given a notification `event_id`:
/// 1) insert it into the notifications map.
/// 2) If the queue for `event_id` is full, evict the oldest responder and respond
/// with the current value.
/// 3) Update any outstanding notification responders with any changes in state.
pub fn register_notification(
&mut self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
// If the `event_id` is not supported, reject the registration.
if !self.get_supported_notification_events().contains(&event_id) {
return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter));
}
let data = NotificationData::new(current, pos_change_interval, responder);
let _evicted = self
.notifications
.entry(event_id)
.or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE))
.insert(data);
// Notify the evicted responder that the TG has removed it from the active list of responders.
// Reply with the current value of the notification.
// This will happen automatically, when `_evicted` is dropped.
// Update outstanding responders with potentially new session data.
self.update_notification_responders();
Ok(())
}
}
/// Creates a session control proxy from the Discovery protocol and connects to
/// the session specified by `id`.
fn create_session_control_proxy(
discovery: DiscoveryProxy,
id: u64,
) -> Result<SessionControlProxy, Error> {
let (session_proxy, session_request_stream) = create_proxy()?;
discovery.connect_to_session(id, session_request_stream)?;
Ok(session_proxy)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::media::media_types::ValidPlayerApplicationSettings;
use fidl::encoding::Decodable as FidlDecodable;
use fidl::endpoints::create_proxy;
use fidl_fuchsia_media::{self as fidl_media_types};
use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker};
use fuchsia_async as fasync;
fn create_metadata() -> fidl_media_types::Metadata {
let mut metadata = fidl_media_types::Metadata::new_empty();
let mut property1 = fidl_media_types::Property::new_empty();
property1.label = fidl_media_types::METADATA_LABEL_TITLE.to_string();
let sample_title = "This is a sample title".to_string();
property1.value = sample_title.clone();
metadata.properties = vec![property1];
metadata
}
fn create_player_status() -> fidl_media::PlayerStatus {
let mut player_status = fidl_media::PlayerStatus::new_empty();
let mut timeline_fn = fidl_media_types::TimelineFunction::new_empty();
// Playback started at beginning of media.
timeline_fn.subject_time = 0;
// Monotonic clock time at beginning of media (nanos).
timeline_fn.reference_time = 500000000;
// Playback rate = 1, normal playback.
timeline_fn.subject_delta = 1;
timeline_fn.reference_delta = 1;
player_status.player_state = Some(fidl_media::PlayerState::Playing);
player_status.duration = Some(123456789);
player_status.shuffle_on = Some(true);
player_status.timeline_function = Some(timeline_fn);
player_status
}
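// Worked reading of the TimelineFunction above (assumed fuchsia.media semantics):
// position(t) = subject_time + (t - reference_time) * subject_delta / reference_delta.
// With the test's fake clock at 555555555 ns: 0 + (555555555 - 500000000) * 1/1
// = 55555555 ns, roughly 55 ms, which is where the Some(55) track-position
// expectation in the test below comes from (assuming truncation to milliseconds).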
#[test]
/// Test that retrieving a notification value correctly gets the current state.
/// 1) Query with an unsupported `event_id`.
/// 2) Query with a supported Event ID, with default state.
/// 3) Query with all supported Event IDs.
fn test_get_notification_value() {
let exec = fasync::Executor::new_with_fake_time().expect("executor should build");
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let media_sessions = MediaSessionsInner::new();
let (session_proxy, _) =
create_proxy::<SessionControlMarker>().expect("Couldn't create fidl proxy.");
let mut media_state = MediaState::new(session_proxy);
// 1. Unsupported ID.
let unsupported_id = fidl_avrcp::NotificationEvent::BattStatusChanged;
let res = media_state.session_info().get_notification_value(&unsupported_id);
assert!(res.is_err());
// 2. Supported ID, `media_state` contains default values.
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::PlaybackStatusChanged);
assert_eq!(res.expect("Should be ok").status, Some(fidl_avrcp::PlaybackStatus::Stopped));
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::TrackChanged);
assert_eq!(res.expect("Should be ok").track_id, Some(std::u64::MAX));
// 3.
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let mut info = fidl_media::SessionInfoDelta::new_empty();
info.metadata = Some(create_metadata());
info.player_status = Some(create_player_status());
media_state.update_session_info(info);
let expected_play_status = fidl_avrcp::PlaybackStatus::Playing;
let expected_pas = ValidPlayerApplicationSettings::new(
None,
Some(fidl_avrcp::RepeatStatusMode::Off),
Some(fidl_avrcp::ShuffleMode::AllTrackShuffle),
None,
);
// Supported = PAS, Playback, Track, TrackPos
let valid_events = media_sessions.get_supported_notification_events();
let expected_values: Vec<Notification> = vec![
Notification::new(None, None, None, Some(expected_pas), None, None, None),
Notification::new(Some(expected_play_status), None, None, None, None, None, None),
Notification::new(None, Some(0), None, None, None, None, None),
Notification::new(None, None, Some(55), None, None, None, None),
];
for (event_id, expected_v) in valid_events.iter().zip(expected_values.iter()) {
assert_eq!(
media_state.session_info().get_notification_value(&event_id).expect("Should work"),
expected_v.clone()
);
}
}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests registering a notification works as expected.
/// 1. Normal case, insertion of a supported notification.
/// 2. Normal case, insertion of a supported notification, with eviction.
/// 3. Normal case, insertion of a supported notification, with change in state,
/// so that `update_notification_responders()` correctly updates inserted notif.
/// 4. Error case, insertion of an unsupported notification.
fn test_register_notification() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests insertion/updating of a new MediaSession into the state map.
/// 1. Test branch where MediaSession already exists, so this is just an update.
/// 2. Test branch where MediaSession doesn't exist, creates a new session and updates it.
fn test_create_or_update_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests that updating any outstanding responders behaves as expected.
fn test_update_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests updating the active session_id correctly changes the currently
/// playing active media session, as well as clears any outstanding notifications
/// if a new MediaSession becomes the active session.
fn test_update_active_session_id() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests sending PlayerChanged response to all outstanding responders behaves
/// as expected, and removes all entries in the Notifications map.
fn test_clear_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests removing a session from the map.
/// Tests clear_session clears all notifications if the MediaSession is the currently
/// active session.
fn test_clear_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests clearing the active session_id.
fn test_clear_active_session_id() {}
}
| register_notification | identifier_name |
media_sessions.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::{format_err, Error},
fidl::encoding::Decodable as FidlDecodable,
fidl::endpoints::{create_proxy, create_request_stream},
fidl_fuchsia_bluetooth_avrcp::{self as fidl_avrcp},
fidl_fuchsia_media_sessions2::{
DiscoveryMarker, DiscoveryProxy, SessionControlProxy, SessionInfoDelta,
SessionsWatcherRequest, SessionsWatcherRequestStream, WatchOptions,
},
fuchsia_component::client::connect_to_service,
fuchsia_syslog::{fx_log_warn, fx_vlog},
futures::{Future, TryStreamExt},
parking_lot::RwLock,
std::collections::HashMap,
std::sync::Arc,
};
use crate::media::media_state::MediaState;
use crate::media::media_types::Notification;
use crate::types::{
bounded_queue::BoundedQueue, NotificationData, MAX_NOTIFICATION_EVENT_QUEUE_SIZE,
};
#[derive(Debug, Clone)]
pub(crate) struct MediaSessions {
inner: Arc<RwLock<MediaSessionsInner>>,
}
impl MediaSessions {
pub fn create() -> Self {
Self { inner: Arc::new(RwLock::new(MediaSessionsInner::new())) }
}
// Returns a future that watches MediaPlayer for updates.
pub fn watch(&self) -> impl Future<Output = Result<(), anyhow::Error>> {
// MediaSession Service Setup
// Set up the MediaSession Discovery service. Connect to the session watcher.
let discovery = connect_to_service::<DiscoveryMarker>()
.expect("Couldn't connect to discovery service.");
let (watcher_client, watcher_requests) =
create_request_stream().expect("Error creating watcher request stream");
// Only subscribe to updates from players that are active.
let watch_active_options =
WatchOptions { only_active: Some(true), ..WatchOptions::new_empty() };
discovery
.watch_sessions(watch_active_options, watcher_client)
.expect("Should watch media sessions");
// End MediaSession Service Setup
let inner = self.inner.clone();
Self::watch_media_sessions(discovery, watcher_requests, inner)
}
pub fn get_active_session(&self) -> Result<MediaState, Error> {
let r_inner = self.inner.read().get_active_session();
r_inner.ok_or(format_err!("No active player"))
}
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
self.inner.read().get_supported_notification_events()
}
pub fn register_notification(
&self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
let mut write = self.inner.write();
write.register_notification(event_id, current, pos_change_interval, responder) | }
async fn watch_media_sessions(
discovery: DiscoveryProxy,
mut watcher_requests: SessionsWatcherRequestStream,
sessions_inner: Arc<RwLock<MediaSessionsInner>>,
) -> Result<(), anyhow::Error> {
while let Some(req) =
watcher_requests.try_next().await.expect("Failed to serve Watcher service")
{
match req {
SessionsWatcherRequest::SessionUpdated {
session_id: id,
session_info_delta: delta,
responder,
} => {
responder.send()?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta);
// Since we are only listening to active sessions, update the currently
// active media session id every time a watcher event is triggered.
// This means AVRCP commands will be queried/set to the player that has most
// recently changed in status.
sessions_inner.write().update_active_session_id(Some(id.clone()));
// If this is our first time receiving updates from this MediaPlayer, create
// a session control proxy and connect to the session.
sessions_inner.write().create_or_update_session(
discovery.clone(),
id.clone(),
delta,
&create_session_control_proxy,
)?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner);
}
SessionsWatcherRequest::SessionRemoved { session_id, responder } => {
// A media session with id `session_id` has been removed.
responder.send()?;
// Clear any outstanding notifications with a player changed response.
// Clear the currently active session, if it equals `session_id`.
// Clear entry in state map.
sessions_inner.write().clear_session(&session_id);
fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner);
}
}
}
Ok(())
}
}
#[derive(Debug)]
pub(crate) struct MediaSessionsInner {
// The currently active MediaSession id.
// If present, the `active_session_id` should be present in `map`.
active_session_id: Option<u64>,
// The map of ids to the respective media session.
map: HashMap<u64, MediaState>,
// The map of outstanding notifications.
notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>,
}
impl MediaSessionsInner {
pub fn new() -> Self {
Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() }
}
pub fn get_active_session(&self) -> Option<MediaState> {
self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned())
}
/// TODO(41703): Add TRACK_POS_CHANGED when implemented.
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
vec![
fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged,
fidl_avrcp::NotificationEvent::PlaybackStatusChanged,
fidl_avrcp::NotificationEvent::TrackChanged,
]
}
/// Removes the MediaState specified by `id` from the map, should it exist.
/// If the session was currently active, clears `self.active_session_id`.
/// Returns the removed MediaState.
pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> {
if Some(id) == self.active_session_id.as_ref() {
self.update_active_session_id(None);
}
self.map.remove(id)
}
/// Clears all outstanding notifications with an AddressedPlayerChanged error.
/// See `crate::types::update_responder` for more details.
pub fn clear_notification_responders(&mut self) {
for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() {
if let Err(e) = notif_data.update_responder(
&fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID.
Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged),
) {
fx_log_warn!("There was an error clearing the responder: {:?}", e);
}
}
fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications);
}
/// Updates the active session to the session specified by `id`.
/// Clears all outstanding notifications if the active session has changed,
/// and returns the previous active id in that case.
pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> {
if self.active_session_id != id {
self.clear_notification_responders();
let previous_active_session_id = self.active_session_id.take();
self.active_session_id = id;
previous_active_session_id
} else {
None
}
}
/// If an active session is present, update any outstanding notifications by
/// checking if notification values have changed.
/// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED.
pub fn update_notification_responders(&mut self) {
let state = if let Some(state) = self.get_active_session() {
state.clone()
} else {
return;
};
self.notifications = self
.notifications
.drain()
.map(|(event_id, queue)| {
let curr_value = state.session_info().get_notification_value(&event_id);
(
event_id,
queue
.into_iter()
.filter_map(|notif_data| {
notif_data
.update_responder(&event_id, curr_value.clone())
.unwrap_or(None)
})
.collect(),
)
})
.collect();
fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications);
}
/// If the entry `id` doesn't exist in the map, create a `MediaState` entry
/// with the control proxy returned by `create_fn`.
/// Update the state with the delta.
/// Update any outstanding notification responders with the change in state.
pub fn create_or_update_session<F>(
&mut self,
discovery: DiscoveryProxy,
id: u64,
delta: SessionInfoDelta,
create_fn: F,
) -> Result<(), Error>
where
F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>,
{
self.map
.entry(id)
.or_insert({
let session_proxy = create_fn(discovery, id)?;
MediaState::new(session_proxy)
})
.update_session_info(delta);
self.update_notification_responders();
Ok(())
}
/// Given a notification `event_id`:
/// 1) insert it into the notifications map.
/// 2) If the queue for `event_id` is full, evict the oldest responder and respond
/// with the current value.
/// 3) Update any outstanding notification responders with any changes in state.
pub fn register_notification(
&mut self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
// If the `event_id` is not supported, reject the registration.
if !self.get_supported_notification_events().contains(&event_id) {
return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter));
}
let data = NotificationData::new(current, pos_change_interval, responder);
let _evicted = self
.notifications
.entry(event_id)
.or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE))
.insert(data);
// Notify the evicted responder that the TG has removed it from the active list of responders.
// Reply with the current value of the notification.
// This will happen automatically, when `_evicted` is dropped.
// Update outstanding responders with potentially new session data.
self.update_notification_responders();
Ok(())
}
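// Eviction note (sketch of the assumed BoundedQueue contract): once
// MAX_NOTIFICATION_EVENT_QUEUE_SIZE responders are queued for an event, `insert`
// returns the oldest entry; dropping that `_evicted` value is what answers the
// displaced responder with the interim value, as the comments above describe.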
}
/// Creates a session control proxy from the Discovery protocol and connects to
/// the session specified by `id`.
fn create_session_control_proxy(
discovery: DiscoveryProxy,
id: u64,
) -> Result<SessionControlProxy, Error> {
let (session_proxy, session_request_stream) = create_proxy()?;
discovery.connect_to_session(id, session_request_stream)?;
Ok(session_proxy)
}
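// Note: `create_proxy()` here infers the marker type from the return type. The
// explicit spelling used in this file's tests is equivalent:
//
//     let (session_proxy, session_request_stream) =
//         create_proxy::<fidl_fuchsia_media_sessions2::SessionControlMarker>()?;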
#[cfg(test)]
mod tests {
use super::*;
use crate::media::media_types::ValidPlayerApplicationSettings;
use fidl::encoding::Decodable as FidlDecodable;
use fidl::endpoints::create_proxy;
use fidl_fuchsia_media::{self as fidl_media_types};
use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker};
use fuchsia_async as fasync;
fn create_metadata() -> fidl_media_types::Metadata {
let mut metadata = fidl_media_types::Metadata::new_empty();
let mut property1 = fidl_media_types::Property::new_empty();
property1.label = fidl_media_types::METADATA_LABEL_TITLE.to_string();
let sample_title = "This is a sample title".to_string();
property1.value = sample_title.clone();
metadata.properties = vec![property1];
metadata
}
fn create_player_status() -> fidl_media::PlayerStatus {
let mut player_status = fidl_media::PlayerStatus::new_empty();
let mut timeline_fn = fidl_media_types::TimelineFunction::new_empty();
// Playback started at beginning of media.
timeline_fn.subject_time = 0;
// Monotonic clock time at beginning of media (nanos).
timeline_fn.reference_time = 500000000;
// Playback rate = 1, normal playback.
timeline_fn.subject_delta = 1;
timeline_fn.reference_delta = 1;
player_status.player_state = Some(fidl_media::PlayerState::Playing);
player_status.duration = Some(123456789);
player_status.shuffle_on = Some(true);
player_status.timeline_function = Some(timeline_fn);
player_status
}
#[test]
/// Test that retrieving a notification value correctly gets the current state.
/// 1) Query with an unsupported `event_id`.
/// 2) Query with a supported Event ID, with default state.
/// 3) Query with all supported Event IDs.
fn test_get_notification_value() {
let exec = fasync::Executor::new_with_fake_time().expect("executor should build");
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let media_sessions = MediaSessionsInner::new();
let (session_proxy, _) =
create_proxy::<SessionControlMarker>().expect("Couldn't create fidl proxy.");
let mut media_state = MediaState::new(session_proxy);
// 1. Unsupported ID.
let unsupported_id = fidl_avrcp::NotificationEvent::BattStatusChanged;
let res = media_state.session_info().get_notification_value(&unsupported_id);
assert!(res.is_err());
// 2. Supported ID, `media_state` contains default values.
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::PlaybackStatusChanged);
assert_eq!(res.expect("Should be ok").status, Some(fidl_avrcp::PlaybackStatus::Stopped));
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::TrackChanged);
assert_eq!(res.expect("Should be ok").track_id, Some(std::u64::MAX));
// 3.
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let mut info = fidl_media::SessionInfoDelta::new_empty();
info.metadata = Some(create_metadata());
info.player_status = Some(create_player_status());
media_state.update_session_info(info);
let expected_play_status = fidl_avrcp::PlaybackStatus::Playing;
let expected_pas = ValidPlayerApplicationSettings::new(
None,
Some(fidl_avrcp::RepeatStatusMode::Off),
Some(fidl_avrcp::ShuffleMode::AllTrackShuffle),
None,
);
// Supported = PAS, Playback, Track, TrackPos
let valid_events = media_sessions.get_supported_notification_events();
let expected_values: Vec<Notification> = vec![
Notification::new(None, None, None, Some(expected_pas), None, None, None),
Notification::new(Some(expected_play_status), None, None, None, None, None, None),
Notification::new(None, Some(0), None, None, None, None, None),
Notification::new(None, None, Some(55), None, None, None, None),
];
for (event_id, expected_v) in valid_events.iter().zip(expected_values.iter()) {
assert_eq!(
media_state.session_info().get_notification_value(&event_id).expect("Should work"),
expected_v.clone()
);
}
}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests registering a notification works as expected.
/// 1. Normal case, insertion of a supported notification.
/// 2. Normal case, insertion of a supported notification, with eviction.
/// 3. Normal case, insertion of a supported notification, with change in state,
/// so that `update_notification_responders()` correctly updates inserted notif.
/// 4. Error case, insertion of an unsupported notification.
fn test_register_notification() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests insertion/updating of a new MediaSession into the state map.
/// 1. Test branch where MediaSession already exists, so this is just an update.
/// 2. Test branch where MediaSession doesn't exist, creates a new session and updates it.
fn test_create_or_update_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests that updating any outstanding responders behaves as expected.
fn test_update_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests updating the active session_id correctly changes the currently
/// playing active media session, as well as clears any outstanding notifications
/// if a new MediaSession becomes the active session.
fn test_update_active_session_id() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests sending PlayerChanged response to all outstanding responders behaves
/// as expected, and removes all entries in the Notifications map.
fn test_clear_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests removing a session from the map.
/// Tests clear_session clears all notifications if the MediaSession is the currently
/// active session.
fn test_clear_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests clearing the active session_id.
fn test_clear_active_session_id() {}
} | random_line_split |
|
media_sessions.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::{format_err, Error},
fidl::encoding::Decodable as FidlDecodable,
fidl::endpoints::{create_proxy, create_request_stream},
fidl_fuchsia_bluetooth_avrcp::{self as fidl_avrcp},
fidl_fuchsia_media_sessions2::{
DiscoveryMarker, DiscoveryProxy, SessionControlProxy, SessionInfoDelta,
SessionsWatcherRequest, SessionsWatcherRequestStream, WatchOptions,
},
fuchsia_component::client::connect_to_service,
fuchsia_syslog::{fx_log_warn, fx_vlog},
futures::{Future, TryStreamExt},
parking_lot::RwLock,
std::collections::HashMap,
std::sync::Arc,
};
use crate::media::media_state::MediaState;
use crate::media::media_types::Notification;
use crate::types::{
bounded_queue::BoundedQueue, NotificationData, MAX_NOTIFICATION_EVENT_QUEUE_SIZE,
};
#[derive(Debug, Clone)]
pub(crate) struct MediaSessions {
inner: Arc<RwLock<MediaSessionsInner>>,
}
impl MediaSessions {
pub fn create() -> Self {
Self { inner: Arc::new(RwLock::new(MediaSessionsInner::new())) }
}
// Returns a future that watches MediaPlayer for updates.
pub fn watch(&self) -> impl Future<Output = Result<(), anyhow::Error>> {
// MediaSession Service Setup
// Set up the MediaSession Discovery service. Connect to the session watcher.
let discovery = connect_to_service::<DiscoveryMarker>()
.expect("Couldn't connect to discovery service.");
let (watcher_client, watcher_requests) =
create_request_stream().expect("Error creating watcher request stream");
// Only subscribe to updates from players that are active.
let watch_active_options =
WatchOptions { only_active: Some(true), ..WatchOptions::new_empty() };
discovery
.watch_sessions(watch_active_options, watcher_client)
.expect("Should watch media sessions");
// End MediaSession Service Setup
let inner = self.inner.clone();
Self::watch_media_sessions(discovery, watcher_requests, inner)
}
pub fn get_active_session(&self) -> Result<MediaState, Error> {
let r_inner = self.inner.read().get_active_session();
r_inner.ok_or(format_err!("No active player"))
}
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
self.inner.read().get_supported_notification_events()
}
pub fn register_notification(
&self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
let mut write = self.inner.write();
write.register_notification(event_id, current, pos_change_interval, responder)
}
async fn watch_media_sessions(
discovery: DiscoveryProxy,
mut watcher_requests: SessionsWatcherRequestStream,
sessions_inner: Arc<RwLock<MediaSessionsInner>>,
) -> Result<(), anyhow::Error> {
while let Some(req) =
watcher_requests.try_next().await.expect("Failed to serve Watcher service")
{
match req {
SessionsWatcherRequest::SessionUpdated {
session_id: id,
session_info_delta: delta,
responder,
} => {
responder.send()?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta);
// Since we are only listening to active sessions, update the currently
// active media session id every time a watcher event is triggered.
// This means AVRCP commands will be queried/set to the player that has most
// recently changed in status.
sessions_inner.write().update_active_session_id(Some(id.clone()));
// If this is our first time receiving updates from this MediaPlayer, create
// a session control proxy and connect to the session.
sessions_inner.write().create_or_update_session(
discovery.clone(),
id.clone(),
delta,
&create_session_control_proxy,
)?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner);
}
SessionsWatcherRequest::SessionRemoved { session_id, responder } => {
// A media session with id `session_id` has been removed.
responder.send()?;
// Clear any outstanding notifications with a player changed response.
// Clear the currently active session, if it equals `session_id`.
// Clear entry in state map.
sessions_inner.write().clear_session(&session_id);
fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner);
}
}
}
Ok(())
}
}
#[derive(Debug)]
pub(crate) struct MediaSessionsInner {
// The currently active MediaSession id.
// If present, the `active_session_id` should be present in `map`.
active_session_id: Option<u64>,
// The map of ids to the respective media session.
map: HashMap<u64, MediaState>,
// The map of outstanding notifications.
notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>,
}
impl MediaSessionsInner {
pub fn new() -> Self {
Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() }
}
pub fn get_active_session(&self) -> Option<MediaState> {
self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned())
}
/// TODO(41703): Add TRACK_POS_CHANGED when implemented.
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
vec![
fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged,
fidl_avrcp::NotificationEvent::PlaybackStatusChanged,
fidl_avrcp::NotificationEvent::TrackChanged,
]
}
/// Removes the MediaState specified by `id` from the map, should it exist.
/// If the session was currently active, clears `self.active_session_id`.
/// Returns the removed MediaState.
pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> {
if Some(id) == self.active_session_id.as_ref() {
self.update_active_session_id(None);
}
self.map.remove(id)
}
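// Commented usage sketch (assumed semantics): removing the active session also
// clears the active id, so a follow-up lookup observes no active session.
//
//     inner.update_active_session_id(Some(7));
//     let _removed = inner.clear_session(&7);
//     assert!(inner.get_active_session().is_none());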
/// Clears all outstanding notifications with an AddressedPlayerChanged error.
/// See `crate::types::update_responder` for more details.
pub fn clear_notification_responders(&mut self) {
for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() {
if let Err(e) = notif_data.update_responder(
&fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID.
Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged),
) {
fx_log_warn!("There was an error clearing the responder: {:?}", e);
}
}
fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications);
}
/// Updates the active session to the session specified by `id`.
/// Clears all outstanding notifications if the active session has changed,
/// and returns the previous active id in that case.
pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> {
if self.active_session_id != id {
self.clear_notification_responders();
let previous_active_session_id = self.active_session_id.take();
self.active_session_id = id;
previous_active_session_id
} else {
None
}
}
/// If an active session is present, update any outstanding notifications by
/// checking if notification values have changed.
/// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED.
pub fn update_notification_responders(&mut self) {
let state = if let Some(state) = self.get_active_session() | else {
return;
};
self.notifications = self
.notifications
.drain()
.map(|(event_id, queue)| {
let curr_value = state.session_info().get_notification_value(&event_id);
(
event_id,
queue
.into_iter()
.filter_map(|notif_data| {
notif_data
.update_responder(&event_id, curr_value.clone())
.unwrap_or(None)
})
.collect(),
)
})
.collect();
fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications);
}
/// If the entry `id` doesn't exist in the map, create a `MediaState` entry
/// with the control proxy returned by `create_fn`.
/// Update the state with the delta.
/// Update any outstanding notification responders with the change in state.
pub fn create_or_update_session<F>(
&mut self,
discovery: DiscoveryProxy,
id: u64,
delta: SessionInfoDelta,
create_fn: F,
) -> Result<(), Error>
where
F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>,
{
self.map
.entry(id)
.or_insert({
let session_proxy = create_fn(discovery, id)?;
MediaState::new(session_proxy)
})
.update_session_info(delta);
self.update_notification_responders();
Ok(())
}
/// Given a notification `event_id`:
/// 1) insert it into the notifications map.
/// 2) If the queue for `event_id` is full, evict the oldest responder and respond
/// with the current value.
/// 3) Update any outstanding notification responders with any changes in state.
pub fn register_notification(
&mut self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
// If the `event_id` is not supported, reject the registration.
if !self.get_supported_notification_events().contains(&event_id) {
return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter));
}
let data = NotificationData::new(current, pos_change_interval, responder);
let _evicted = self
.notifications
.entry(event_id)
.or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE))
.insert(data);
// Notify the evicted responder that the TG has removed it from the active list of responders.
// Reply with the current value of the notification.
// This will happen automatically, when `_evicted` is dropped.
// Update outstanding responders with potentially new session data.
self.update_notification_responders();
Ok(())
}
}
/// Creates a session control proxy from the Discovery protocol and connects to
/// the session specified by `id`.
fn create_session_control_proxy(
discovery: DiscoveryProxy,
id: u64,
) -> Result<SessionControlProxy, Error> {
let (session_proxy, session_request_stream) = create_proxy()?;
discovery.connect_to_session(id, session_request_stream)?;
Ok(session_proxy)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::media::media_types::ValidPlayerApplicationSettings;
use fidl::encoding::Decodable as FidlDecodable;
use fidl::endpoints::create_proxy;
use fidl_fuchsia_media::{self as fidl_media_types};
use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker};
use fuchsia_async as fasync;
fn create_metadata() -> fidl_media_types::Metadata {
let mut metadata = fidl_media_types::Metadata::new_empty();
let mut property1 = fidl_media_types::Property::new_empty();
property1.label = fidl_media_types::METADATA_LABEL_TITLE.to_string();
let sample_title = "This is a sample title".to_string();
property1.value = sample_title.clone();
metadata.properties = vec![property1];
metadata
}
fn create_player_status() -> fidl_media::PlayerStatus {
let mut player_status = fidl_media::PlayerStatus::new_empty();
let mut timeline_fn = fidl_media_types::TimelineFunction::new_empty();
// Playback started at beginning of media.
timeline_fn.subject_time = 0;
// Monotonic clock time at beginning of media (nanos).
timeline_fn.reference_time = 500000000;
// Playback rate = 1, normal playback.
timeline_fn.subject_delta = 1;
timeline_fn.reference_delta = 1;
player_status.player_state = Some(fidl_media::PlayerState::Playing);
player_status.duration = Some(123456789);
player_status.shuffle_on = Some(true);
player_status.timeline_function = Some(timeline_fn);
player_status
}
#[test]
/// Test that retrieving a notification value correctly gets the current state.
/// 1) Query with an unsupported `event_id`.
/// 2) Query with a supported Event ID, with default state.
/// 3) Query with all supported Event IDs.
fn test_get_notification_value() {
let exec = fasync::Executor::new_with_fake_time().expect("executor should build");
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let media_sessions = MediaSessionsInner::new();
let (session_proxy, _) =
create_proxy::<SessionControlMarker>().expect("Couldn't create fidl proxy.");
let mut media_state = MediaState::new(session_proxy);
// 1. Unsupported ID.
let unsupported_id = fidl_avrcp::NotificationEvent::BattStatusChanged;
let res = media_state.session_info().get_notification_value(&unsupported_id);
assert!(res.is_err());
// 2. Supported ID, `media_state` contains default values.
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::PlaybackStatusChanged);
assert_eq!(res.expect("Should be ok").status, Some(fidl_avrcp::PlaybackStatus::Stopped));
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::TrackChanged);
assert_eq!(res.expect("Should be ok").track_id, Some(std::u64::MAX));
// 3.
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let mut info = fidl_media::SessionInfoDelta::new_empty();
info.metadata = Some(create_metadata());
info.player_status = Some(create_player_status());
media_state.update_session_info(info);
let expected_play_status = fidl_avrcp::PlaybackStatus::Playing;
let expected_pas = ValidPlayerApplicationSettings::new(
None,
Some(fidl_avrcp::RepeatStatusMode::Off),
Some(fidl_avrcp::ShuffleMode::AllTrackShuffle),
None,
);
// Supported = PAS, Playback, Track, TrackPos
let valid_events = media_sessions.get_supported_notification_events();
let expected_values: Vec<Notification> = vec![
Notification::new(None, None, None, Some(expected_pas), None, None, None),
Notification::new(Some(expected_play_status), None, None, None, None, None, None),
Notification::new(None, Some(0), None, None, None, None, None),
Notification::new(None, None, Some(55), None, None, None, None),
];
for (event_id, expected_v) in valid_events.iter().zip(expected_values.iter()) {
assert_eq!(
media_state.session_info().get_notification_value(&event_id).expect("Should work"),
expected_v.clone()
);
}
}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests registering a notification works as expected.
/// 1. Normal case, insertion of a supported notification.
/// 2. Normal case, insertion of a supported notification, with eviction.
/// 3. Normal case, insertion of a supported notification, with change in state,
/// so that `update_notification_responders()` correctly updates inserted notif.
/// 4. Error case, insertion of an unsupported notification.
fn test_register_notification() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests insertion/updating of a new MediaSession into the state map.
/// 1. Test branch where MediaSession already exists, so this is just an update.
/// 2. Test branch where MediaSession doesn't exist, creates a new session and updates it.
fn test_create_or_update_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests that updating any outstanding responders behaves as expected.
fn test_update_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests updating the active session_id correctly changes the currently
/// playing active media session, as well as clears any outstanding notifications
/// if a new MediaSession becomes the active session.
fn test_update_active_session_id() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests sending PlayerChanged response to all outstanding responders behaves
/// as expected, and removes all entries in the Notifications map.
fn test_clear_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests removing a session from the map.
/// Tests clear_session clears all notifications if the MediaSession is the currently
/// active session.
fn test_clear_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests clearing the active session_id.
fn test_clear_active_session_id() {}
}
| {
state.clone()
} | conditional_block |
media_sessions.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::{format_err, Error},
fidl::encoding::Decodable as FidlDecodable,
fidl::endpoints::{create_proxy, create_request_stream},
fidl_fuchsia_bluetooth_avrcp::{self as fidl_avrcp},
fidl_fuchsia_media_sessions2::{
DiscoveryMarker, DiscoveryProxy, SessionControlProxy, SessionInfoDelta,
SessionsWatcherRequest, SessionsWatcherRequestStream, WatchOptions,
},
fuchsia_component::client::connect_to_service,
fuchsia_syslog::{fx_log_warn, fx_vlog},
futures::{Future, TryStreamExt},
parking_lot::RwLock,
std::collections::HashMap,
std::sync::Arc,
};
use crate::media::media_state::MediaState;
use crate::media::media_types::Notification;
use crate::types::{
bounded_queue::BoundedQueue, NotificationData, MAX_NOTIFICATION_EVENT_QUEUE_SIZE,
};
#[derive(Debug, Clone)]
pub(crate) struct MediaSessions {
inner: Arc<RwLock<MediaSessionsInner>>,
}
impl MediaSessions {
pub fn create() -> Self {
Self { inner: Arc::new(RwLock::new(MediaSessionsInner::new())) }
}
// Returns a future that watches MediaPlayer for updates.
pub fn watch(&self) -> impl Future<Output = Result<(), anyhow::Error>> {
// MediaSession Service Setup
// Set up the MediaSession Discovery service. Connect to the session watcher.
let discovery = connect_to_service::<DiscoveryMarker>()
.expect("Couldn't connect to discovery service.");
let (watcher_client, watcher_requests) =
create_request_stream().expect("Error creating watcher request stream");
// Only subscribe to updates from players that are active.
let watch_active_options =
WatchOptions { only_active: Some(true), ..WatchOptions::new_empty() };
discovery
.watch_sessions(watch_active_options, watcher_client)
.expect("Should watch media sessions");
// End MediaSession Service Setup
let inner = self.inner.clone();
Self::watch_media_sessions(discovery, watcher_requests, inner)
}
pub fn get_active_session(&self) -> Result<MediaState, Error> {
let r_inner = self.inner.read().get_active_session();
r_inner.ok_or(format_err!("No active player"))
}
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
self.inner.read().get_supported_notification_events()
}
pub fn register_notification(
&self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
let mut write = self.inner.write();
write.register_notification(event_id, current, pos_change_interval, responder)
}
async fn watch_media_sessions(
discovery: DiscoveryProxy,
mut watcher_requests: SessionsWatcherRequestStream,
sessions_inner: Arc<RwLock<MediaSessionsInner>>,
) -> Result<(), anyhow::Error> {
while let Some(req) =
watcher_requests.try_next().await.expect("Failed to serve Watcher service")
{
match req {
SessionsWatcherRequest::SessionUpdated {
session_id: id,
session_info_delta: delta,
responder,
} => {
responder.send()?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession update: id[{}], delta[{:?}]", id, delta);
// Since we are only listening to active sessions, update the currently
// active media session id every time a watcher event is triggered.
// This means AVRCP commands will be queried/set to the player that has most
// recently changed in status.
sessions_inner.write().update_active_session_id(Some(id.clone()));
// If this is our first time receiving updates from this MediaPlayer, create
// a session control proxy and connect to the session.
sessions_inner.write().create_or_update_session(
discovery.clone(),
id.clone(),
delta,
&create_session_control_proxy,
)?;
fx_vlog!(tag: "avrcp-tg", 1, "MediaSession state after update: state[{:?}]", sessions_inner);
}
SessionsWatcherRequest::SessionRemoved { session_id, responder } => {
// A media session with id `session_id` has been removed.
responder.send()?;
// Clear any outstanding notifications with a player changed response.
// Clear the currently active session, if it equals `session_id`.
// Clear entry in state map.
sessions_inner.write().clear_session(&session_id);
fx_vlog!(tag: "avrcp-tg", 1, "Removed session [{:?}] from state map: {:?}", session_id, sessions_inner);
}
}
}
Ok(())
}
}
#[derive(Debug)]
pub(crate) struct MediaSessionsInner {
// The currently active MediaSession id.
// If present, the `active_session_id` should be present in `map`.
active_session_id: Option<u64>,
// The map of ids to the respective media session.
map: HashMap<u64, MediaState>,
// The map of outstanding notifications.
notifications: HashMap<fidl_avrcp::NotificationEvent, BoundedQueue<NotificationData>>,
}
impl MediaSessionsInner {
pub fn new() -> Self {
Self { active_session_id: None, map: HashMap::new(), notifications: HashMap::new() }
}
pub fn get_active_session(&self) -> Option<MediaState> {
self.active_session_id.as_ref().and_then(|id| self.map.get(id).cloned())
}
/// TODO(41703): Add TRACK_POS_CHANGED when implemented.
pub fn get_supported_notification_events(&self) -> Vec<fidl_avrcp::NotificationEvent> {
vec![
fidl_avrcp::NotificationEvent::PlayerApplicationSettingChanged,
fidl_avrcp::NotificationEvent::PlaybackStatusChanged,
fidl_avrcp::NotificationEvent::TrackChanged,
]
}
/// Removes the MediaState specified by `id` from the map, should it exist.
/// If the session was currently active, clears `self.active_session_id`.
/// Returns the removed MediaState.
pub fn clear_session(&mut self, id: &u64) -> Option<MediaState> {
if Some(id) == self.active_session_id.as_ref() {
self.update_active_session_id(None);
}
self.map.remove(id)
}
/// Clears all outstanding notifications with an AddressedPlayerChanged error.
/// See `crate::types::update_responder` for more details.
pub fn clear_notification_responders(&mut self) {
for notif_data in self.notifications.drain().map(|(_, q)| q.into_iter()).flatten() {
if let Err(e) = notif_data.update_responder(
&fidl_avrcp::NotificationEvent::TrackChanged, // Irrelevant Event ID.
Err(fidl_avrcp::TargetAvcError::RejectedAddressedPlayerChanged),
) {
fx_log_warn!("There was an error clearing the responder: {:?}", e);
}
}
fx_vlog!(tag: "avrcp-tg", 1, "After evicting cleared responders: {:?}", self.notifications);
}
/// Updates the active session to the session specified by `id`.
/// Clears all outstanding notifications if the active session has changed,
/// and returns the previous active id in that case.
pub fn update_active_session_id(&mut self, id: Option<u64>) -> Option<u64> {
if self.active_session_id != id {
self.clear_notification_responders();
let previous_active_session_id = self.active_session_id.take();
self.active_session_id = id;
previous_active_session_id
} else {
None
}
}
/// If an active session is present, update any outstanding notifications by
/// checking if notification values have changed.
/// TODO(41703): Take pos_change_interval into account when updating TRACK_POS_CHANGED.
pub fn update_notification_responders(&mut self) {
let state = if let Some(state) = self.get_active_session() {
state.clone()
} else {
return;
};
self.notifications = self
.notifications
.drain()
.map(|(event_id, queue)| {
let curr_value = state.session_info().get_notification_value(&event_id);
(
event_id,
queue
.into_iter()
.filter_map(|notif_data| {
notif_data
.update_responder(&event_id, curr_value.clone())
.unwrap_or(None)
})
.collect(),
)
})
.collect();
fx_vlog!(tag: "avrcp-tg", 1, "After evicting updated responders: {:?}", self.notifications);
}
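// Retention rule for the filter_map above (assumed `update_responder` contract):
// Ok(Some(data)) keeps a responder queued because its value is unchanged,
// Ok(None) means it was answered and is dropped, and Err(_) is likewise dropped
// via unwrap_or(None).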
/// If the entry `id` doesn't exist in the map, create a `MediaState` entry
/// with the control proxy returned by `create_fn`.
/// Update the state with the delta.
/// Update any outstanding notification responders with the change in state.
pub fn create_or_update_session<F>(
&mut self,
discovery: DiscoveryProxy,
id: u64,
delta: SessionInfoDelta,
create_fn: F,
) -> Result<(), Error>
where
F: Fn(DiscoveryProxy, u64) -> Result<SessionControlProxy, Error>,
{
self.map
.entry(id)
.or_insert({
let session_proxy = create_fn(discovery, id)?;
MediaState::new(session_proxy)
})
.update_session_info(delta);
self.update_notification_responders();
Ok(())
}
/// Given a notification `event_id`:
/// 1) insert it into the notifications map.
/// 2) If the queue for `event_id` is full, evict the oldest responder and respond
/// with the current value.
/// 3) Update any outstanding notification responders with any changes in state.
pub fn register_notification(
&mut self,
event_id: fidl_avrcp::NotificationEvent,
current: Notification,
pos_change_interval: u32,
responder: fidl_avrcp::TargetHandlerWatchNotificationResponder,
) -> Result<(), fidl::Error> {
// If the `event_id` is not supported, reject the registration.
if !self.get_supported_notification_events().contains(&event_id) {
return responder.send(&mut Err(fidl_avrcp::TargetAvcError::RejectedInvalidParameter));
}
let data = NotificationData::new(current, pos_change_interval, responder);
let _evicted = self
.notifications
.entry(event_id)
.or_insert(BoundedQueue::new(MAX_NOTIFICATION_EVENT_QUEUE_SIZE))
.insert(data);
// Notify the evicted responder that the TG has removed it from the active list of responders.
// Reply with the current value of the notification.
// This will happen automatically, when `_evicted` is dropped.
// Update outstanding responders with potentially new session data.
self.update_notification_responders();
Ok(())
}
}
/// Creates a session control proxy from the Discovery protocol and connects to
/// the session specified by `id`.
fn create_session_control_proxy(
discovery: DiscoveryProxy,
id: u64,
) -> Result<SessionControlProxy, Error> {
let (session_proxy, session_request_stream) = create_proxy()?;
discovery.connect_to_session(id, session_request_stream)?;
Ok(session_proxy)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::media::media_types::ValidPlayerApplicationSettings;
use fidl::encoding::Decodable as FidlDecodable;
use fidl::endpoints::create_proxy;
use fidl_fuchsia_media::{self as fidl_media_types};
use fidl_fuchsia_media_sessions2::{self as fidl_media, SessionControlMarker};
use fuchsia_async as fasync;
fn create_metadata() -> fidl_media_types::Metadata {
let mut metadata = fidl_media_types::Metadata::new_empty();
let mut property1 = fidl_media_types::Property::new_empty();
property1.label = fidl_media_types::METADATA_LABEL_TITLE.to_string();
let sample_title = "This is a sample title".to_string();
property1.value = sample_title.clone();
metadata.properties = vec![property1];
metadata
}
fn create_player_status() -> fidl_media::PlayerStatus {
let mut player_status = fidl_media::PlayerStatus::new_empty();
let mut timeline_fn = fidl_media_types::TimelineFunction::new_empty();
// Playback started at beginning of media.
timeline_fn.subject_time = 0;
// Monotonic clock time at beginning of media (nanos).
timeline_fn.reference_time = 500000000;
// Playback rate = 1, normal playback.
timeline_fn.subject_delta = 1;
timeline_fn.reference_delta = 1;
player_status.player_state = Some(fidl_media::PlayerState::Playing);
player_status.duration = Some(123456789);
player_status.shuffle_on = Some(true);
player_status.timeline_function = Some(timeline_fn);
player_status
}
#[test]
/// Test that retrieving a notification value correctly gets the current state.
/// 1) Query with an unsupported `event_id`.
/// 2) Query with a supported Event ID, with default state.
/// 3) Query with all supported Event IDs.
fn test_get_notification_value() {
let exec = fasync::Executor::new_with_fake_time().expect("executor should build");
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let media_sessions = MediaSessionsInner::new();
let (session_proxy, _) =
create_proxy::<SessionControlMarker>().expect("Couldn't create fidl proxy.");
let mut media_state = MediaState::new(session_proxy);
// 1. Unsupported ID.
let unsupported_id = fidl_avrcp::NotificationEvent::BattStatusChanged;
let res = media_state.session_info().get_notification_value(&unsupported_id);
assert!(res.is_err());
// 2. Supported ID, `media_state` contains default values.
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::PlaybackStatusChanged);
assert_eq!(res.expect("Should be ok").status, Some(fidl_avrcp::PlaybackStatus::Stopped));
let res = media_state
.session_info()
.get_notification_value(&fidl_avrcp::NotificationEvent::TrackChanged);
assert_eq!(res.expect("Should be ok").track_id, Some(std::u64::MAX));
// 3.
exec.set_fake_time(fasync::Time::from_nanos(555555555));
let mut info = fidl_media::SessionInfoDelta::new_empty();
info.metadata = Some(create_metadata());
info.player_status = Some(create_player_status());
media_state.update_session_info(info);
let expected_play_status = fidl_avrcp::PlaybackStatus::Playing;
let expected_pas = ValidPlayerApplicationSettings::new(
None,
Some(fidl_avrcp::RepeatStatusMode::Off),
Some(fidl_avrcp::ShuffleMode::AllTrackShuffle),
None,
);
// Supported = PAS, Playback, Track, TrackPos
let valid_events = media_sessions.get_supported_notification_events();
let expected_values: Vec<Notification> = vec![
Notification::new(None, None, None, Some(expected_pas), None, None, None),
Notification::new(Some(expected_play_status), None, None, None, None, None, None),
Notification::new(None, Some(0), None, None, None, None, None),
Notification::new(None, None, Some(55), None, None, None, None),
];
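// Note: get_supported_notification_events() currently returns three events (track
// position is still TODO(41703)), so the zip below checks only the first three
// expectations; the Some(55) track-position entry is effectively unreached.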
for (event_id, expected_v) in valid_events.iter().zip(expected_values.iter()) {
assert_eq!(
media_state.session_info().get_notification_value(&event_id).expect("Should work"),
expected_v.clone()
);
}
}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests registering a notification works as expected.
/// 1. Normal case, insertion of a supported notification.
/// 2. Normal case, insertion of a supported notification, with eviction.
/// 3. Normal case, insertion of a supported notification, with change in state,
/// so that `update_notification_responders()` correctly updates inserted notif.
/// 4. Error case, insertion of an unsupported notification.
fn test_register_notification() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests insertion/updating of a new MediaSession into the state map.
/// 1. Test branch where MediaSession already exists, so this is just an update.
/// 2. Test branch where MediaSession doesn't exist, creates a new session and updates it.
fn test_create_or_update_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests that updating any outstanding responders behaves as expected.
fn test_update_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests updating the active session_id correctly changes the currently
/// playing active media session, as well as clears any outstanding notifications
/// if a new MediaSession becomes the active session.
fn test_update_active_session_id() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests sending PlayerChanged response to all outstanding responders behaves
/// as expected, and removes all entries in the Notifications map.
fn test_clear_notification_responders() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests removing a session from the map.
/// Tests clear_session clears all notifications if the MediaSession is the currently
/// active session.
fn test_clear_session() {}
#[test]
// TODO(42623): Implement this test as part of integration test work.
/// Tests clearing the active session_id.
fn test_clear_active_session_id() {}
}
zhtta.rs | }
</style></head>
<body>";
//static mut visitor_count : usize = 0;
struct HTTP_Request {
// Use peer_name as the key to access TcpStream in hashmap.
// (Due to a bug in extra::arc in Rust 0.9, it is very inconvenient to use TcpStream without the "Freeze" bound.
// See issue: https://github.com/mozilla/rust/issues/12139)
peer_name: String,
path: Path,
}
struct WebServer {
ip: String,
port: usize,
www_dir_path: Path,
request_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>,
stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>,//it is a hash map,store the http stream for each ip
visitor_count : Arc<Mutex<usize>>,
thread_sema : Arc<Semaphore>,
cache: Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,//cache content, counter for LRU, modified time
cache_len: Arc<Mutex<usize>>,
notify_rx: Receiver<()>,
notify_tx: Sender<()>,
}
impl WebServer {
fn new(ip: String, port: usize, www_dir: String) -> WebServer {
let (notify_tx, notify_rx) = channel();
let www_dir_path = Path::new(www_dir);
os::change_dir(&www_dir_path);
WebServer {
ip:ip,
port: port,
www_dir_path: www_dir_path,
request_queue_arc: Arc::new(Mutex::new(Vec::new())),
stream_map_arc: Arc::new(Mutex::new(HashMap::new())),
visitor_count:Arc::new(Mutex::new(0)),
thread_sema: Arc::new(Semaphore::new(5)),
cache: Arc::new(RwLock::new(HashMap::new())),
cache_len: Arc::new(Mutex::new(0)),
notify_rx: notify_rx,
notify_tx: notify_tx,
}
}
fn run(&mut self) {
self.listen();
self.dequeue_static_file_request();
}
fn listen(&mut self) {
let addr = String::from_str(format!("{}:{}", self.ip, self.port).as_slice());
let www_dir_path_str = self.www_dir_path.clone();
let request_queue_arc = self.request_queue_arc.clone();
let notify_tx = self.notify_tx.clone();
let stream_map_arc = self.stream_map_arc.clone();
let visitor_count=self.visitor_count.clone();
Thread::spawn(move|| {
let listener = std::old_io::TcpListener::bind(addr.as_slice()).unwrap();
let mut acceptor = listener.listen().unwrap();
println!("{} listening on {} (serving from: {}).",
SERVER_NAME, addr, www_dir_path_str.as_str().unwrap());
for stream_raw in acceptor.incoming() { //for each stream/connection
let (queue_tx, queue_rx) = channel();//build up a channel for sub thread
queue_tx.send(request_queue_arc.clone()); // send the request queue through the channel; it is received inside the child thread
let notify_chan = notify_tx.clone();//notify_chan is a global channel for webserver
let stream_map_arc = stream_map_arc.clone();
let visitor_count=visitor_count.clone();
println!("outer thread:{}",*visitor_count.lock().unwrap());
// Spawn a task to handle the connection.
Thread::spawn(move|| {
let mut vc= visitor_count.lock().unwrap(); // Done
*vc+=1;
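// NOTE: the guard `vc` keeps the visitor-count mutex locked until this
// closure returns (it is read again for the counter page below), so
// connection handling is effectively serialized.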
println!("inner thread:{}",*vc);
let request_queue_arc = queue_rx.recv().unwrap();
let mut stream = match stream_raw {
Ok(s) => {s}
Err(e) => { panic!("Error getting the listener stream! {}", e) }
};
let peer_name = WebServer::get_peer_name(&mut stream);
debug!("Got connection from {}", peer_name);
let mut buf: [u8;500] = [0;500];
stream.read(&mut buf);
let request_str = match str::from_utf8(&buf){
Ok(s) => s,
Err(e)=> panic!("Error reading from the listener stream! {}", e),
};
debug!("Request:\n{}", request_str);
//WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);
let req_group: Vec<&str> = request_str.splitn(3, ' ').collect(); // request line: "GET /path HTTP/1.1"
if req_group.len() > 2 {
let path_str = ".".to_string() + req_group[1];
let mut path_obj = os::getcwd().unwrap();
path_obj.push(path_str.clone());
let ext_str = match path_obj.extension_str() {
Some(e) => e,
None => "",
};
debug!("Requested path: [{}]", path_obj.as_str().expect("error"));
debug!("Requested path: [{}]", path_str);
if path_str.as_slice().eq("./") {
debug!("===== Counter Page request =====");
WebServer::respond_with_counter_page(stream,*vc);
debug!("=====Terminated connection from [{}].=====", peer_name);
} else if !path_obj.exists() || path_obj.is_dir() {
debug!("===== Error page request =====");
WebServer::respond_with_error_page(stream, &path_obj);
debug!("=====Terminated connection from [{}].=====", peer_name);
} else if ext_str == "shtml" { // Dynamic web pages.
debug!("===== Dynamic Page request =====");
WebServer::respond_with_dynamic_page(stream, &path_obj);
debug!("=====Terminated connection from [{}].=====", peer_name);
} else {
debug!("===== Static Page request =====");
if std::fs::metadata(&path_obj).unwrap().len() > CacheLowerBounder {
    WebServer::enqueue_static_file_request(stream, &path_obj, stream_map_arc, request_queue_arc, notify_chan);
} else {
debug!("small file, do it without enqueue!");
let mut file_reader = File::open(&path_obj).unwrap();
stream.write(HTTP_OK.as_bytes());
let mut reader = BufferedReader::new(file_reader);
for line in reader.lines().filter_map(|result| result.ok()) {
let _ = stream.write_all(line.as_bytes());
}
}
}
}
});
}
});
}
fn respond_with_error_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) {
let mut stream = stream;
let msg: String= format!("Cannot open: {}", path.as_str().expect("invalid path"));
stream.write(HTTP_BAD.as_bytes());
stream.write(msg.as_bytes());
}
// Done
fn respond_with_counter_page(stream: std::old_io::net::tcp::TcpStream, visitor_count: usize) {
    let mut stream = stream;
    let response: String =
        format!("{}{}<h1>Greetings, Krusty!</h1><h2>Visitor count: {}</h2></body></html>\r\n",
                HTTP_OK, COUNTER_STYLE, visitor_count);
    debug!("Responding to counter request");
    stream.write(response.as_bytes());
}
// TODO: Streaming file.
// TODO: Application-layer file caching.
fn respond_with_static_file(stream: std::old_io::net::tcp::TcpStream, path: &Path,cache : Arc<RwLock<HashMap<Path,(String,Mutex<usize>,u64)>>>,cache_len :Arc<Mutex<usize>>) {
let mut stream = stream;
let mut cache_str=String::new();
let mut counter=0;
let mut local_cache=cache.clone();
let mut is_modified=false;
{
let metadata=std::fs::metadata(path).unwrap();
let modify_time=metadata.modified();
let read_hash=local_cache.read().unwrap();
if read_hash.contains_key(path)
{
let tuple=read_hash.get(path).unwrap();
let time = tuple.2;
is_modified= (time!=modify_time);
}
}
if is_modified{
debug!("It is modified, delete from cache!");
let mut write_hash=local_cache.write().unwrap();
write_hash.remove(path);
}
//let mut local_cache=cache.clone();
{
debug!("updating counter...");
let read_hash=local_cache.read().unwrap();
for (key,value) in read_hash.iter(){
let mut counter=value.1.lock().unwrap();
*counter+=1;
}
if read_hash.contains_key(path){
debug!("Reading cached file:{}",path.display());
let mut pair=read_hash.get(path).unwrap();
{
*pair.1.lock().unwrap()=0;
}
stream.write(HTTP_OK.as_bytes());
let _ = stream.write_all(pair.0.as_bytes());
return;
}
else{
debug!("reading from disk!");
let mut file_reader = File::open(path).unwrap();
stream.write(HTTP_OK.as_bytes());
/*let mut buf:[u8;1048576]=[0;1048576];
loop{
let size=match file_reader.read(&mut buf){
Err(why) =>0,
Ok(size) =>size,
};
let str_buf=String::from_utf8_lossy(&buf[0..size]);
let _=stream.write(str_buf.as_bytes());
cache_str.push_str(str_buf.as_slice());
debug!("read siez:{}",size);
if(size<1048576){
break;
}
}*/
//better solution
let mut reader = BufferedReader::new(file_reader);
for line in reader.lines().filter_map(|result| result.ok()) {
let _ = stream.write_all(line.as_bytes());
cache_str.push_str(line.as_slice());
}
}
}
let file_size = std::fs::metadata(path).unwrap().len();
if file_size < CacheLowerBounder {
    debug!("file size:{}, don't cache this file (too small)", file_size);
    return;
} else if file_size > CacheUpperBounder {
    debug!("file size:{}, don't cache this file (too large)", file_size);
    return;
}
debug!("updating cache....");
{
let mut write_hash=local_cache.write().unwrap();
let time=std::fs::metadata(path).unwrap().modified();
write_hash.insert(path.clone(),(cache_str,Mutex::new(0),time));
}
*cache_len.lock().unwrap()+=1;
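// LRU bookkeeping: every cached entry's counter was aged (+1) above and the
// entry just served was reset to 0, so the entry with the LARGEST counter is
// the least recently used; once more than 5 entries are cached it is evicted.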
{
let mut write_hash=local_cache.write().unwrap();
let mut to_be_replaced : Path=Path::new("./");
if *cache_len.lock().unwrap()>5{
let mut max_num=0;
//let read_hash=local_cache.write().unwrap();
let mut tmp: &Path=&Path::new("./");
for (key,value) in write_hash.iter(){
let num=*value.1.lock().unwrap();
if num>=max_num{
max_num=num;
tmp=key;
}
}
to_be_replaced=tmp.clone();
}else
{
return;
}
debug!("least recently used is:{}",to_be_replaced.display());
write_hash.remove(&to_be_replaced);
}
}
// TODO: Server-side gashing.
fn respond_with_dynamic_page(stream: std::old_io::net::tcp::TcpStream, path: &Path) {
//scan the shtml to find the ssl tag, extract the command line redirect the command line to
//our file and serve it
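// For example, a page such as index.shtml may contain:
//   <p>Today is <!--#exec cmd="date" --></p>
// The text before the tag, the command's stdout, and the text after the tag
// are written back in that order.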
let mut stream = stream;
let mut file =match File::open(path)
{
Err(why) => panic!("Couldn't open file: {}", why),
Ok(file) => file,
};
let s = match file.read_to_string() {
    Err(why) => panic!("Couldn't read file: {}", why),
    Ok(content) => content,
};
let str_vec: Vec<&str>=s.split_str("<!--#exec cmd=\"").collect();
let cmd_mix:Vec<&str>=str_vec[1].split_str("\" -->").collect();
let cmd=cmd_mix[0].to_string();
let mut args =Vec::new();
args.push("-c");
args.push(&cmd);
let mut gash_command= match Command::new("../main").args(&args).stdout(Stdio::capture()).spawn(){
Err(why) => panic!("Couldn't do command {}",why),
Ok(cmd) => cmd,
};
let mut stdout=gash_command.stdout.unwrap();
let mut output=String::new();
stdout.read_to_string(&mut output);
stream.write(HTTP_OK.as_bytes());
stream.write(str_vec[0].as_bytes());
stream.write(output.as_bytes());
stream.write(cmd_mix[1].as_bytes());
//WebServer::respond_with_static_file(stream, path);
}
fn get_file_size(path: &Path) ->u64 {
let metadata=std::fs::metadata(path).unwrap();
return metadata.len()
}
// TODO: Smarter Scheduling.
fn enqueue_static_file_request(stream: std::old_io::net::tcp::TcpStream, path_obj: &Path, stream_map_arc: Arc<Mutex<HashMap<String, std::old_io::net::tcp::TcpStream>>>, req_queue_arc: Arc<Mutex<Vec<HTTP_Request>>>, notify_chan: Sender<()>) {
// Save stream in hashmap for later response.
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
let (stream_tx, stream_rx) = channel();
stream_tx.send(stream);
let stream = match stream_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the stream channel! {}", e),
};
let local_stream_map = stream_map_arc.clone();
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut local_stream_map = local_stream_map.lock().unwrap();
local_stream_map.insert(peer_name.clone(), stream);
}
// Enqueue the HTTP request.
// TOCHECK: it was ~path_obj.clone(), make sure in which order are ~ and clone() executed
let req = HTTP_Request { peer_name: peer_name.clone(), path: path_obj.clone() };
let (req_tx, req_rx) = channel();
req_tx.send(req);
debug!("Waiting for queue mutex lock.");
let local_req_queue = req_queue_arc.clone();
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut local_req_queue = local_req_queue.lock().unwrap();
let req: HTTP_Request = match req_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the request channel! {}", e),
};
//REORDER the queue in order of the request size
local_req_queue.push(req);
local_req_queue.sort_by(|a, b| WebServer::get_file_size(&a.path).cmp(&WebServer::get_file_size(&b.path)));
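// Sorting by file size makes this shortest-job-first: small responses are
// served ahead of large ones, which lowers mean response time but can starve
// a large request under a steady stream of small ones.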
debug!("A new request enqueued, now the length of queue is {}.", local_req_queue.len());
notify_chan.send(()); // Send incoming notification to responder task.
}
}
// TODO: Smarter Scheduling.
fn dequeue_static_file_request(&mut self) {
let req_queue_get = self.request_queue_arc.clone();
let stream_map_get = self.stream_map_arc.clone();
// Receiver<> cannot be sent to another task, so this task stays the main task
// with access to self.notify_rx.
let (request_tx, request_rx) = channel();
loop {
    self.notify_rx.recv(); // block until a new request has been enqueued
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut req_queue = req_queue_get.lock().unwrap();
if req_queue.len() > 0 {
self.thread_sema.acquire();
let req = req_queue.remove(0);
debug!("A new request dequeued, now the length of queue is {}.", req_queue.len());
request_tx.send(req);
}
}
let request = match request_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the request channel! {}", e),
};
// Get stream from hashmap.
let (stream_tx, stream_rx) = channel();
{ // make sure we request the lock inside a block with different scope, so that we give it back at the end of that block
let mut stream_map = stream_map_get.lock().unwrap();
let stream = stream_map.remove(&request.peer_name).expect("no option tcpstream");
stream_tx.send(stream);
}
// TODO: Spawning more tasks to respond to the dequeued requests concurrently. You may need a semaphore to control the concurrency.
let stream = match stream_rx.recv(){
Ok(s) => s,
Err(e) => panic!("There was an error while receiving from the stream channel! {}", e),
};
let sema=self.thread_sema.clone();
let cache_len=self.cache_len.clone();
let mut cache=self.cache.clone();
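// The semaphore slot acquired in the dequeue loop above (capacity 5, see
// WebServer::new) is released by this worker once the response completes,
// capping the number of concurrent file responses.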
Thread::spawn(move||{
debug!("Processing....");
WebServer::respond_with_static_file(stream, &request.path,cache,cache_len);
debug!("finishing request for {}", request.path.display());
debug!("=====Terminated connection from [{}].=====", request.peer_name);
sema.release();
});
}
}
fn get_peer_name(stream: &mut std::old_io::net::tcp::TcpStream) -> String{
match stream.peer_name(){
Ok(s) => {format!("{}:{}", s.ip, s.port)}
Err(e) => {panic!("Error while getting the stream name! {}", e)}
}
}
}
fn get_args() -> (String, usize, String) {
fn print_usage(program: &str) {
println!("Usage: {} [options]", program);
println!("--ip \tIP address, \"{}\" by default.", IP);
println!("--port \tport number, \"{}\" by default.", PORT);
println!("--www \tworking directory, \"{}\" by default", WWW_DIR);
println!("-h --help \tUsage");
}
/* Begin processing program arguments and initiate the parameters. */
let args = os::args();
let program = args[0].clone();
let opts = [
getopts::optopt("", "ip", "The IP address to bind to", "IP"),
getopts::optopt("", "port", "The Port to bind to", "PORT"),
getopts::optopt("", "www", "The www directory", "WWW_DIR"),
getopts::optflag("h", "help", "Display help"),
];
let matches = match getopts::getopts(args.tail(), &opts) {
Ok(m) => { m }
Err(f) => { panic!(f.to_err_msg()) }
};
if matches.opt_present("h") || matches.opt_present("help") {
print_usage(program.as_slice());
unsafe { libc::exit(1); }
}
let ip_str = if matches.opt_present("ip") {
matches.opt_str("ip").expect("invalid ip address?").to_owned()
} else {
    IP.to_owned()
};
// The remaining options mirror the --ip branch, falling back to the defaults
// documented in print_usage above (PORT, assumed numeric, and WWW_DIR).
let port: usize = if matches.opt_present("port") {
    matches.opt_str("port").expect("invalid port?").as_slice()
        .parse().expect("port is not a number?")
} else {
    PORT
};
let www_dir_str = if matches.opt_present("www") {
    matches.opt_str("www").expect("invalid www directory?").to_owned()
} else {
    WWW_DIR.to_owned()
};
(ip_str, port, www_dir_str)
}
deps.rs | use std::{
collections::{HashMap, HashSet},
path::PathBuf,
};
use crate::{
error::{Error, Result, UserError},
target::Target,
util::ResultIterator,
};
use daggy::{petgraph::visit::IntoNeighborsDirected, NodeIndex as Nx};
type DependencyDag = daggy::Dag<Node, ()>;
type Identifier = PathBuf;
type FileState = (); // TODO
#[derive(Debug)]
enum Node {
Target(Target),
NoRule(Identifier),
}
// TODO think of a better name
pub struct DependencyGraph {
id_to_ix_map: HashMap<Identifier, Nx>,
graph: DependencyDag,
}
impl DependencyGraph {
pub fn construct(targets: Vec<Target>) -> Result<Self> {
let mut graph = DependencyDag::new();
let mut id_to_ix_map = HashMap::new();
// add target nodes
targets
.iter()
.cloned()
.map(|target| {
util::add_target_node(&mut graph, &mut id_to_ix_map, target)
})
.collect::<Result<_>>()?;
// add left dependency nodes - leaf nodes representing actual files
targets
.into_iter()
.map(|target| target.deps)
.flatten()
.for_each(|dep_id| {
util::add_leaf_node(&mut graph, &mut id_to_ix_map, dep_id);
});
// add edges
graph
.graph()
.node_indices()
.map(|target_ix| {
util::add_edges_to_deps(&mut graph, &id_to_ix_map, target_ix)
})
.collect::<Result<_>>()?;
Ok(Self {
graph,
id_to_ix_map,
})
}
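/// Returns the targets that must be rebuilt for `target_id`, deepest
/// dependencies first, so each target comes after the targets it depends on.
///
/// A minimal usage sketch (`Target` fields as exercised in the tests below):
///
/// ```ignore
/// let graph = DependencyGraph::construct(targets)?;
/// for target in graph.get_target_sequence("app".into())? {
///     // run `target.tasks` in order
/// }
/// ```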
pub fn get_target_sequence(
&self,
target_id: Identifier,
) -> Result<Vec<Target>> {
let graph = &self.graph;
let target_ix = *self
.id_to_ix_map
.get(&target_id)
.ok_or_else(|| UserError::NoSuchTarget(target_id))?;
let depth_map = util::generate_depth_map(graph, target_ix);
let obsolete_leaf_nodes =
util::find_obsolete_leaf_nodes(graph.graph())?;
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
util::get_target_sequence(graph.graph(), &depth_map, &obsolete_targets)
}
}
mod util {
use std::collections::{HashMap, VecDeque};
use super::*;
use daggy::petgraph;
use petgraph::prelude::{Direction, Graph};
pub(super) fn get_target_sequence(
graph: &Graph<Node, ()>,
depth_map: &HashMap<Nx, usize>,
obsolete_targets: &HashSet<Nx>,
) -> Result<Vec<Target>> {
// filter out targets which are not in the
// dependency graph of the chosen target
// and sort the targets left by depth in **decreasing** order
let mut obsolete_targets = obsolete_targets
.iter()
.filter(|ix| depth_map.contains_key(ix))
.copied()
.collect::<Vec<_>>();
obsolete_targets.sort_by_key(|ix| depth_map[ix]);
obsolete_targets.reverse();
obsolete_targets
.into_iter()
.map(|target_ix| match &graph[target_ix] {
Node::Target(target) => Ok(target.clone()),
Node::NoRule(_) => Err(Error::internal(line!(), file!())),
})
.collect::<Result<Vec<_>>>()
}
/// Finds all leaf nodes, i.e. nodes with no dependencies of their own:
/// actual files (`NoRule` variant) as well as dependency-less targets
/// (a `Target` with no dependencies is assumed to depend on other factors -
/// time, environment variables, the current directory, etc. - so it is
/// always a rebuild candidate).
pub(super) fn find_obsolete_leaf_nodes(
graph: &Graph<Node, ()>,
) -> Result<HashSet<Nx>> {
graph
.externals(Direction::Outgoing) // get nodes with no outgoing edges
.filter_map(|node_ix| match &graph[node_ix] {
// TODO filter `requires_rebuilding`
Node::Target(_target) => Some(Ok(node_ix)),
Node::NoRule(identifier) => {
// TODO clean up this part
let previous_file_state = ();
let result = has_file_been_modified(
&identifier,
previous_file_state,
);
match result {
Ok(has_been_modified) =>
if has_been_modified {
Some(Ok(node_ix))
} else {
None
},
Err(err) => Some(Err(err)),
}
}
})
.collect::<Result<HashSet<_>>>()
}
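/// Walks edges in reverse (dependency -> dependant) from every obsolete
/// leaf, collecting each target that transitively depends on one. The BFS
/// short-circuits: a target already in the obsolete set is not expanded
/// again, so each node is expanded at most once.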
pub(super) fn find_obsolete_targets(
graph: &Graph<Node, ()>,
obsolete_leaf_nodes: &HashSet<Nx>,
) -> HashSet<Nx> {
// reverse short circuiting bfs:
// skip the dependants of the targets
// that have already been marked as obsolete
let mut queue = VecDeque::<Nx>::new();
let mut obsolete_ixs = HashSet::<Nx>::new();
for leaf_ix in obsolete_leaf_nodes {
// no need to clear the queue since it gets drained
// in the while loop each time
match &graph[*leaf_ix] {
Node::Target(_) => queue.push_back(*leaf_ix),
Node::NoRule(_) => {
let direct_dependants =
graph.neighbors_directed(*leaf_ix, Direction::Incoming);
queue.extend(direct_dependants);
}
}
while let Some(target_ix) = queue.pop_front() {
let has_just_been_found = obsolete_ixs.insert(target_ix);
if has_just_been_found {
let dependants = graph
.neighbors_directed(target_ix, Direction::Incoming);
queue.extend(dependants);
}
}
}
obsolete_ixs
}
pub(super) fn | (
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
dependency_identifier: Identifier,
) {
id_to_ix_map
.entry(dependency_identifier.clone())
.or_insert_with(|| {
// `.add_node()` returns node's index
graph.add_node(Node::NoRule(dependency_identifier))
});
}
pub(super) fn add_target_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
target: Target,
) -> Result<()> {
let identifier = target.identifier.clone();
let node_ix = graph.add_node(Node::Target(target));
let slot = id_to_ix_map.insert(identifier, node_ix);
match slot {
Some(_colliding_target_ix) =>
Err(UserError::DuplicateTarget.into()),
None => Ok(()),
}
}
pub(super) fn add_edges_to_deps(
graph: &mut DependencyDag,
id_to_ix_map: &HashMap<Identifier, Nx>,
target_ix: Nx,
) -> Result<()> {
let deps = match &graph[target_ix] {
Node::Target(target) => target.deps.clone(),
Node::NoRule(_) => return Ok(()), // no deps
};
deps.iter()
.map(|dep_id| {
id_to_ix_map
.get(dep_id)
.ok_or_else(|| Error::internal(line!(), file!()))
})
.map_item(|dep_ix| {
graph
.add_edge(target_ix, *dep_ix, ())
.map(|_| ())
.map_err(|_| UserError::DependencyCycle.into())
})
.map(|result| result.flatten())
.collect::<Result<_>>()
}
pub(super) fn has_file_been_modified(
_identifier: &Identifier,
_previous_state: FileState,
) -> Result<bool> {
Ok(true) // TODO for now it just rebuilds everything
}
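/// Level-order walk from `target_id` that records, for every reachable node,
/// the length of the *longest* path from the target to it (deeper sightings
/// overwrite shallower ones via `max`). `get_target_sequence` sorts by this
/// depth so that shared dependencies are rebuilt before every target that
/// needs them.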
pub(super) fn generate_depth_map<N, E>(
graph: &daggy::Dag<N, E>,
target_id: Nx,
) -> HashMap<Nx, usize> {
let mut depth_map: HashMap<Nx, usize> = HashMap::new();
let mut current_depth = 0;
let mut queue: VecDeque<Vec<_>> = VecDeque::new();
queue.push_front(vec![target_id]);
while let Some(level) = queue.pop_front() {
if level.is_empty() {
break;
}
let mut level_queue = vec![];
for current_node in level {
// update current node's depth
let _ = depth_map
.entry(current_node)
.and_modify(|depth| *depth = (*depth).max(current_depth))
.or_insert(current_depth);
let children =
graph.neighbors_directed(current_node, Direction::Outgoing);
level_queue.extend(children);
}
queue.push_back(level_queue);
current_depth += 1;
}
depth_map
}
}
#[cfg(test)]
mod test {
use super::*;
use daggy::petgraph::graph::node_index as n;
use pretty_assertions::assert_eq;
#[test]
fn test_get_task_sequence() {
// helper functions
let task = |cmd: &str| Task {
command: cmd.into(),
working_dir: None,
};
let target = |id: &str, deps: &[&str]| Target {
identifier: id.into(),
deps: deps.iter().map(|d| d.into()).collect(),
tasks: vec![task(id)],
working_dir: None,
};
let ix = |id: &str, map: &HashMap<_, _>| {
let p: &std::path::Path = id.as_ref();
map[p]
};
// the dependency graph:
//
// a1 a2'
// / / \
// / / \
// b1 b2 b3
// / /
// / /
// l1* l2
//
// a2 is the target (')
// l1 is marked as obsolete (*)
// b2's and a2's tasks must be executed (in that order)
// targets and their dependencies
#[rustfmt::skip]
let targets = vec![
target("a1", &["b1"]),
target("a2", &["b2", "b3"]),
target("b2", &["l1"]),
target("b3", &["l2"]),
];
let DependencyGraph {
graph,
id_to_ix_map: map,
} = DependencyGraph::construct(targets).unwrap();
// depth map
#[rustfmt::skip]
let depth_map = vec![
(ix("a2", &map), 0),
(ix("b2", &map), 1),
(ix("b3", &map), 1),
(ix("l1", &map), 2),
(ix("l2", &map), 2),
].into_iter().collect();
// nodes that have been marked as obsolete
// (in real code it is automated)
let obsolete_leaf_nodes = vec![ix("l1", &map)].into_iter().collect();
// get the sequence of tasks that must be executed
// in specific order
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
let target_sequence = util::get_target_sequence(
graph.graph(),
&depth_map,
&obsolete_targets,
)
.unwrap();
let target_sequence = target_sequence
.into_iter()
.map(|target| target.identifier)
.collect::<Vec<_>>();
let expected_target_sequence: Vec<PathBuf> =
vec!["b2".into(), "a2".into()];
assert_eq!(target_sequence, expected_target_sequence);
}
#[test]
fn test_find_obsolete_targets() {
// helper functions
let target = |id: &str, deps: &[&str]| Target {
identifier: id.into(),
deps: deps.iter().map(|d| d.into()).collect(),
tasks: vec![],
working_dir: None,
};
let ixs = |ids: &[&str], map: &HashMap<_, Nx>| {
ids.iter()
.map(|id| map[&Into::<PathBuf>::into(id)])
.collect()
};
// the dependency graph:
//
// a1 a2
// / \ \
// / \ \
// b1 b2 b3
// / /
// / /
// l1* l2*
//
// l1 and l2 are marked as obsolete
// the function should find b2, a1, b3 & a2
// but not b1
#[rustfmt::skip]
let targets = vec![
target("a1", &["b1", "b2"]),
target("a2", &["b3"]),
target("b2", &["l1"]),
target("b3", &["l2"]),
];
let DependencyGraph {
graph,
id_to_ix_map: map,
} = DependencyGraph::construct(targets).unwrap();
let obsolete_leaf_nodes = ixs(&["l1", "l2"], &map);
let found_targets =
util::find_obsolete_targets(&graph.graph(), &obsolete_leaf_nodes);
let expected_targets = ixs(&["a1", "a2", "b2", "b3"], &map);
assert_eq!(found_targets, expected_targets);
}
#[test]
fn test_generate_depth_map() {
// depth is the length of the longest path from
// the target node to the dependency
#[rustfmt::skip]
let graph: daggy::Dag<(), ()> = daggy::Dag::from_edges(&[
(0, 3), (0, 4),
(1, 3), (1, 4), (1, 6),
(2, 3), (2, 4),
(3, 5), (3, 6), (3, 7),
(4, 5), (4, 6), (4, 7),
(7, 8),
(8, 9),
]).unwrap();
let target = n(1); // target
let depth_map = util::generate_depth_map(&graph, target);
assert!(depth_map.get(&n(0)).is_none());
assert!(depth_map.get(&n(2)).is_none());
assert_eq!(depth_map[&n(1)], 0);
assert_eq!(depth_map[&n(3)], 1);
assert_eq!(depth_map[&n(4)], 1);
assert_eq!(depth_map[&n(5)], 2);
assert_eq!(depth_map[&n(6)], 2);
assert_eq!(depth_map[&n(7)], 2);
assert_eq!(depth_map[&n(8)], 3);
assert_eq!(depth_map[&n(9)], 4);
}
}
| add_leaf_node | identifier_name |
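The `map_item` adapter used in `add_edges_to_deps` above comes from this crate's `util::ResultIterator` extension trait, which is not shown in this excerpt. A minimal, hedged sketch of the assumed behaviour - mapping only the `Ok` value of each item while passing `Err` through untouched - written against plain `std`:

fn main() {
let results = vec![Ok::<i32, String>(1), Err("missing dep".to_string())];
// `.map(|r| r.map(f))` is what `.map_item(f)` is assumed to expand to.
let doubled: Vec<Result<i32, String>> = results
.into_iter()
.map(|r| r.map(|x| x * 2))
.collect();
assert_eq!(doubled, vec![Ok(2), Err("missing dep".to_string())]);
}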
deps.rs | use std::{
collections::{HashMap, HashSet},
path::PathBuf,
};
use crate::{
error::{Error, Result, UserError},
target::Target,
util::ResultIterator,
};
use daggy::{petgraph::visit::IntoNeighborsDirected, NodeIndex as Nx};
type DependencyDag = daggy::Dag<Node, ()>;
type Identifier = PathBuf;
type FileState = (); // TODO
#[derive(Debug)]
enum Node {
Target(Target),
NoRule(Identifier),
}
// TODO think of a better name
pub struct DependencyGraph {
id_to_ix_map: HashMap<Identifier, Nx>,
graph: DependencyDag,
}
impl DependencyGraph {
pub fn construct(targets: Vec<Target>) -> Result<Self> {
let mut graph = DependencyDag::new();
let mut id_to_ix_map = HashMap::new();
// add target nodes
targets
.iter()
.cloned()
.map(|target| {
util::add_target_node(&mut graph, &mut id_to_ix_map, target)
})
.collect::<Result<_>>()?;
// add the leftover dependency nodes - leaf nodes representing actual files
targets
.into_iter()
.flat_map(|target| target.deps)
.for_each(|dep_id| {
util::add_leaf_node(&mut graph, &mut id_to_ix_map, dep_id);
});
// add edges
graph
.graph()
.node_indices()
.map(|target_ix| {
util::add_edges_to_deps(&mut graph, &id_to_ix_map, target_ix)
})
.collect::<Result<_>>()?;
Ok(Self {
graph,
id_to_ix_map,
})
}
pub fn get_target_sequence(
&self,
target_id: Identifier,
) -> Result<Vec<Target>> {
let graph = &self.graph;
let target_ix = *self
.id_to_ix_map
.get(&target_id)
.ok_or_else(|| UserError::NoSuchTarget(target_id))?;
let depth_map = util::generate_depth_map(graph, target_ix);
let obsolete_leaf_nodes =
util::find_obsolete_leaf_nodes(graph.graph())?;
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
util::get_target_sequence(graph.graph(), &depth_map, &obsolete_targets)
}
}
mod util {
use std::collections::{HashMap, VecDeque};
use super::*;
use daggy::petgraph;
use petgraph::prelude::{Direction, Graph};
pub(super) fn get_target_sequence(
graph: &Graph<Node, ()>,
depth_map: &HashMap<Nx, usize>,
obsolete_targets: &HashSet<Nx>,
) -> Result<Vec<Target>> {
// filter out targets which are not in the
// dependency graph of the chosen target
// and sort the targets left by depth in **decreasing** order
let mut obsolete_targets = obsolete_targets
.iter()
.filter(|ix| depth_map.contains_key(ix))
.copied()
.collect::<Vec<_>>();
obsolete_targets.sort_by_key(|ix| depth_map[ix]);
obsolete_targets.reverse();
obsolete_targets
.into_iter()
.map(|target_ix| match &graph[target_ix] {
Node::Target(target) => Ok(target.clone()),
Node::NoRule(_) => Err(Error::internal(line!(), file!())),
})
.collect::<Result<Vec<_>>>()
}
/// Finds all nodes that have no dependencies -
/// both actual files (the `NoRule` variant) and targets
/// (a `Target` with no dependencies is assumed to depend
/// on other factors - time, environment variables, the
/// current working directory, etc.).
pub(super) fn find_obsolete_leaf_nodes(
graph: &Graph<Node, ()>,
) -> Result<HashSet<Nx>> {
graph
.externals(Direction::Outgoing) // get nodes with no outgoing edges
.filter_map(|node_ix| match &graph[node_ix] {
// TODO filter `requires_rebuilding`
Node::Target(_target) => Some(Ok(node_ix)),
Node::NoRule(identifier) => {
// TODO clean up this part
let previous_file_state = ();
let result = has_file_been_modified(
identifier,
previous_file_state,
);
match result {
Ok(has_been_modified) =>
if has_been_modified {
Some(Ok(node_ix))
} else {
None
},
Err(err) => Some(Err(err)),
}
}
})
.collect::<Result<HashSet<_>>>()
}
pub(super) fn find_obsolete_targets(
graph: &Graph<Node, ()>,
obsolete_leaf_nodes: &HashSet<Nx>,
) -> HashSet<Nx> {
// reverse short-circuiting BFS:
// skip the dependants of the targets
// that have already been marked as obsolete
let mut queue = VecDeque::<Nx>::new();
let mut obsolete_ixs = HashSet::<Nx>::new();
for leaf_ix in obsolete_leaf_nodes {
// no need to clear the queue since it gets drained
// in the while loop each time
match &graph[*leaf_ix] {
Node::Target(_) => queue.push_back(*leaf_ix),
Node::NoRule(_) => {
let direct_dependants =
graph.neighbors_directed(*leaf_ix, Direction::Incoming);
queue.extend(direct_dependants);
}
}
while let Some(target_ix) = queue.pop_front() {
let has_just_been_found = obsolete_ixs.insert(target_ix);
if has_just_been_found {
let dependants = graph
.neighbors_directed(target_ix, Direction::Incoming);
queue.extend(dependants);
}
}
}
obsolete_ixs
}
pub(super) fn add_leaf_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
dependency_identifier: Identifier,
) {
id_to_ix_map
.entry(dependency_identifier.clone())
.or_insert_with(|| {
// `.add_node()` returns node's index
graph.add_node(Node::NoRule(dependency_identifier))
});
}
pub(super) fn add_target_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
target: Target,
) -> Result<()> {
let identifier = target.identifier.clone();
let node_ix = graph.add_node(Node::Target(target));
let slot = id_to_ix_map.insert(identifier, node_ix);
match slot {
Some(_colliding_target_ix) =>
Err(UserError::DuplicateTarget.into()),
None => Ok(()),
}
}
pub(super) fn add_edges_to_deps(
graph: &mut DependencyDag,
id_to_ix_map: &HashMap<Identifier, Nx>,
target_ix: Nx,
) -> Result<()> {
let deps = match &graph[target_ix] {
Node::Target(target) => target.deps.clone(),
Node::NoRule(_) => return Ok(()), // no deps
};
deps.iter()
.map(|dep_id| {
id_to_ix_map
.get(dep_id)
.ok_or_else(|| Error::internal(line!(), file!()))
})
.map_item(|dep_ix| {
graph
.add_edge(target_ix, *dep_ix, ())
.map(|_| ())
.map_err(|_| UserError::DependencyCycle.into())
})
.map(|result| result.flatten())
.collect::<Result<_>>()
}
pub(super) fn has_file_been_modified(
_identifier: &Identifier,
_previous_state: FileState,
) -> Result<bool> {
Ok(true) // TODO for now it just rebuilds everything
}
pub(super) fn generate_depth_map<N, E>(
graph: &daggy::Dag<N, E>,
target_id: Nx,
) -> HashMap<Nx, usize> {
let mut depth_map: HashMap<Nx, usize> = HashMap::new();
let mut current_depth = 0;
let mut queue: VecDeque<Vec<_>> = VecDeque::new();
queue.push_front(vec![target_id]);
while let Some(level) = queue.pop_front() {
if level.is_empty() {
break;
}
let mut level_queue = vec![];
for current_node in level {
// update current node's depth
let _ = depth_map
.entry(current_node)
.and_modify(|depth| *depth = (*depth).max(current_depth))
.or_insert(current_depth);
let children =
graph.neighbors_directed(current_node, Direction::Outgoing);
level_queue.extend(children);
}
queue.push_back(level_queue);
current_depth += 1;
}
depth_map
}
}
#[cfg(test)]
mod test {
use super::*;
use daggy::petgraph::graph::node_index as n;
use pretty_assertions::assert_eq;
#[test]
fn test_get_task_sequence() {
// helper functions
let task = |cmd: &str| Task {
command: cmd.into(),
working_dir: None,
};
let target = |id: &str, deps: &[&str]| Target {
identifier: id.into(),
deps: deps.iter().map(|d| d.into()).collect(),
tasks: vec![task(id)],
working_dir: None,
};
let ix = |id: &str, map: &HashMap<_, _>| {
let p: &std::path::Path = id.as_ref();
map[p]
};
// the dependency graph:
//
// a1 a2'
// / / \
// / / \
// b1 b2 b3
// / /
// / /
// l1* l2
//
// a2 is the target (')
// l1 is marked as obsolete (*)
// b2's and a2's tasks must be executed (in that order)
// targets and their dependencies
#[rustfmt::skip]
let targets = vec![
target("a1", &["b1"]),
target("a2", &["b2", "b3"]),
target("b2", &["l1"]),
target("b3", &["l2"]),
];
let DependencyGraph {
graph,
id_to_ix_map: map,
} = DependencyGraph::construct(targets).unwrap();
// depth map
#[rustfmt::skip]
let depth_map = vec![
(ix("a2", &map), 0),
(ix("b2", &map), 1),
(ix("b3", &map), 1),
(ix("l1", &map), 2),
(ix("l2", &map), 2),
].into_iter().collect();
// nodes that have been marked as obsolete
// (in real code it is automated)
let obsolete_leaf_nodes = vec![ix("l1", &map)].into_iter().collect();
// get the sequence of tasks that must be executed
// in specific order
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
let target_sequence = util::get_target_sequence(
graph.graph(),
&depth_map,
&obsolete_targets,
)
.unwrap();
let target_sequence = target_sequence
.into_iter() | .map(|target| target.identifier)
.collect::<Vec<_>>();
let expected_target_sequence: Vec<PathBuf> =
vec!["b2".into(), "a2".into()];
assert_eq!(target_sequence, expected_target_sequence);
}
#[test]
fn test_find_obsolete_targets() {
// helper functions
let target = |id: &str, deps: &[&str]| Target {
identifier: id.into(),
deps: deps.iter().map(|d| d.into()).collect(),
tasks: vec![],
working_dir: None,
};
let ixs = |ids: &[&str], map: &HashMap<_, Nx>| {
ids.iter()
.map(|id| map[&Into::<PathBuf>::into(id)])
.collect()
};
// the dependency graph:
//
// a1 a2
// / \ \
// / \ \
// b1 b2 b3
// / /
// / /
// l1* l2*
//
// l1 and l2 are marked as obsolete
// the function should find b2, a1, b3 & a2
// but not b1
#[rustfmt::skip]
let targets = vec![
target("a1", &["b1", "b2"]),
target("a2", &["b3"]),
target("b2", &["l1"]),
target("b3", &["l2"]),
];
let DependencyGraph {
graph,
id_to_ix_map: map,
} = DependencyGraph::construct(targets).unwrap();
let obsolete_leaf_nodes = ixs(&["l1", "l2"], &map);
let found_targets =
util::find_obsolete_targets(&graph.graph(), &obsolete_leaf_nodes);
let expected_targets = ixs(&["a1", "a2", "b2", "b3"], &map);
assert_eq!(found_targets, expected_targets);
}
#[test]
fn test_generate_depth_map() {
// depth is the length of the longest path from
// the target node to the dependency
#[rustfmt::skip]
let graph: daggy::Dag<(), ()> = daggy::Dag::from_edges(&[
(0, 3), (0, 4),
(1, 3), (1, 4), (1, 6),
(2, 3), (2, 4),
(3, 5), (3, 6), (3, 7),
(4, 5), (4, 6), (4, 7),
(7, 8),
(8, 9),
]).unwrap();
let target = n(1); // target
let depth_map = util::generate_depth_map(&graph, target);
assert!(depth_map.get(&n(0)).is_none());
assert!(depth_map.get(&n(2)).is_none());
assert_eq!(depth_map[&n(1)], 0);
assert_eq!(depth_map[&n(3)], 1);
assert_eq!(depth_map[&n(4)], 1);
assert_eq!(depth_map[&n(5)], 2);
assert_eq!(depth_map[&n(6)], 2);
assert_eq!(depth_map[&n(7)], 2);
assert_eq!(depth_map[&n(8)], 3);
assert_eq!(depth_map[&n(9)], 4);
}
} | random_line_split |
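`add_target_node` above detects duplicate targets purely through the return value of `HashMap::insert`: it returns the previous value when the key was already occupied. A self-contained illustration of that `std` behaviour:

use std::collections::HashMap;

fn main() {
let mut map = HashMap::new();
assert_eq!(map.insert("a", 1), None); // first insert: slot was empty
assert_eq!(map.insert("a", 2), Some(1)); // duplicate key: old value comes back
}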
|
deps.rs | use std::{
collections::{HashMap, HashSet},
path::PathBuf,
};
use crate::{
error::{Error, Result, UserError},
target::Target,
util::ResultIterator,
};
use daggy::{petgraph::visit::IntoNeighborsDirected, NodeIndex as Nx};
type DependencyDag = daggy::Dag<Node, ()>;
type Identifier = PathBuf;
type FileState = (); // TODO
#[derive(Debug)]
enum Node {
Target(Target),
NoRule(Identifier),
}
// TODO think of a better name
pub struct DependencyGraph {
id_to_ix_map: HashMap<Identifier, Nx>,
graph: DependencyDag,
}
impl DependencyGraph {
pub fn construct(targets: Vec<Target>) -> Result<Self> {
let mut graph = DependencyDag::new();
let mut id_to_ix_map = HashMap::new();
// add target nodes
targets
.iter()
.cloned()
.map(|target| {
util::add_target_node(&mut graph, &mut id_to_ix_map, target)
})
.collect::<Result<_>>()?;
// add the leftover dependency nodes - leaf nodes representing actual files
targets
.into_iter()
.flat_map(|target| target.deps)
.for_each(|dep_id| {
util::add_leaf_node(&mut graph, &mut id_to_ix_map, dep_id);
});
// add edges
graph
.graph()
.node_indices()
.map(|target_ix| {
util::add_edges_to_deps(&mut graph, &id_to_ix_map, target_ix)
})
.collect::<Result<_>>()?;
Ok(Self {
graph,
id_to_ix_map,
})
}
pub fn get_target_sequence(
&self,
target_id: Identifier,
) -> Result<Vec<Target>> {
let graph = &self.graph;
let target_ix = *self
.id_to_ix_map
.get(&target_id)
.ok_or_else(|| UserError::NoSuchTarget(target_id))?;
let depth_map = util::generate_depth_map(graph, target_ix);
let obsolete_leaf_nodes =
util::find_obsolete_leaf_nodes(graph.graph())?;
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
util::get_target_sequence(graph.graph(), &depth_map, &obsolete_targets)
}
}
mod util {
use std::collections::{HashMap, VecDeque};
use super::*;
use daggy::petgraph;
use petgraph::prelude::{Direction, Graph};
pub(super) fn get_target_sequence(
graph: &Graph<Node, ()>,
depth_map: &HashMap<Nx, usize>,
obsolete_targets: &HashSet<Nx>,
) -> Result<Vec<Target>> {
// filter out targets which are not in the
// dependency graph of the chosen target
// and sort the targets left by depth in **decreasing** order
let mut obsolete_targets = obsolete_targets
.iter()
.filter(|ix| depth_map.contains_key(ix))
.copied()
.collect::<Vec<_>>();
obsolete_targets.sort_by_key(|ix| depth_map[ix]);
obsolete_targets.reverse();
obsolete_targets
.into_iter()
.map(|target_ix| match &graph[target_ix] {
Node::Target(target) => Ok(target.clone()),
Node::NoRule(_) => Err(Error::internal(line!(), file!())),
})
.collect::<Result<Vec<_>>>()
}
/// Finds all nodes that have no dependencies -
/// both actual files (the `NoRule` variant) and targets
/// (a `Target` with no dependencies is assumed to depend
/// on other factors - time, environment variables, the
/// current working directory, etc.).
pub(super) fn find_obsolete_leaf_nodes(
graph: &Graph<Node, ()>,
) -> Result<HashSet<Nx>> {
graph
.externals(Direction::Outgoing) // get nodes with no outgoing edges
.filter_map(|node_ix| match &graph[node_ix] {
// TODO filter `requires_rebuilding`
Node::Target(_target) => Some(Ok(node_ix)),
Node::NoRule(identifier) => {
// TODO clean up this part
let previous_file_state = ();
let result = has_file_been_modified(
identifier,
previous_file_state,
);
match result {
Ok(has_been_modified) =>
if has_been_modified {
Some(Ok(node_ix))
} else {
None
},
Err(err) => Some(Err(err)),
}
}
})
.collect::<Result<HashSet<_>>>()
}
pub(super) fn find_obsolete_targets(
graph: &Graph<Node, ()>,
obsolete_leaf_nodes: &HashSet<Nx>,
) -> HashSet<Nx> | while let Some(target_ix) = queue.pop_front() {
let has_just_been_found = obsolete_ixs.insert(target_ix);
if has_just_been_found {
let dependants = graph
.neighbors_directed(target_ix, Direction::Incoming);
queue.extend(dependants);
}
}
}
obsolete_ixs
}
pub(super) fn add_leaf_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
dependency_identifier: Identifier,
) {
id_to_ix_map
.entry(dependency_identifier.clone())
.or_insert_with(|| {
// `.add_node()` returns node's index
graph.add_node(Node::NoRule(dependency_identifier))
});
}
pub(super) fn add_target_node(
graph: &mut DependencyDag,
id_to_ix_map: &mut HashMap<Identifier, Nx>,
target: Target,
) -> Result<()> {
let identifier = target.identifier.clone();
let node_ix = graph.add_node(Node::Target(target));
let slot = id_to_ix_map.insert(identifier, node_ix);
match slot {
Some(_colliding_target_ix) =>
Err(UserError::DuplicateTarget.into()),
None => Ok(()),
}
}
pub(super) fn add_edges_to_deps(
graph: &mut DependencyDag,
id_to_ix_map: &HashMap<Identifier, Nx>,
target_ix: Nx,
) -> Result<()> {
let deps = match &graph[target_ix] {
Node::Target(target) => target.deps.clone(),
Node::NoRule(_) => return Ok(()), // no deps
};
deps.iter()
.map(|dep_id| {
id_to_ix_map
.get(dep_id)
.ok_or_else(|| Error::internal(line!(), file!()))
})
.map_item(|dep_ix| {
graph
.add_edge(target_ix, *dep_ix, ())
.map(|_| ())
.map_err(|_| UserError::DependencyCycle.into())
})
.map(|result| result.flatten())
.collect::<Result<_>>()
}
pub(super) fn has_file_been_modified(
_identifier: &Identifier,
_previous_state: FileState,
) -> Result<bool> {
Ok(true) // TODO for now it just rebuilds everything
}
pub(super) fn generate_depth_map<N, E>(
graph: &daggy::Dag<N, E>,
target_id: Nx,
) -> HashMap<Nx, usize> {
let mut depth_map: HashMap<Nx, usize> = HashMap::new();
let mut current_depth = 0;
let mut queue: VecDeque<Vec<_>> = VecDeque::new();
queue.push_front(vec![target_id]);
while let Some(level) = queue.pop_front() {
if level.is_empty() {
break;
}
let mut level_queue = vec![];
for current_node in level {
// update current node's depth
let _ = depth_map
.entry(current_node)
.and_modify(|depth| *depth = (*depth).max(current_depth))
.or_insert(current_depth);
let children =
graph.neighbors_directed(current_node, Direction::Outgoing);
level_queue.extend(children);
}
queue.push_back(level_queue);
current_depth += 1;
}
depth_map
}
}
#[cfg(test)]
mod test {
use super::*;
use daggy::petgraph::graph::node_index as n;
use pretty_assertions::assert_eq;
#[test]
fn test_get_task_sequence() {
// helper functions
let task = |cmd: &str| Task {
command: cmd.into(),
working_dir: None,
};
let target = |id: &str, deps: &[&str]| Target {
identifier: id.into(),
deps: deps.iter().map(|d| d.into()).collect(),
tasks: vec![task(id)],
working_dir: None,
};
let ix = |id: &str, map: &HashMap<_, _>| {
let p: &std::path::Path = id.as_ref();
map[p]
};
// the dependency graph:
//
// a1 a2'
// / / \
// / / \
// b1 b2 b3
// / /
// / /
// l1* l2
//
// a2 is the target (')
// l1 is marked as obsolete (*)
// b2's and a2's tasks must be executed (in that order)
// targets and their dependencies
#[rustfmt::skip]
let targets = vec![
target("a1", &["b1"]),
target("a2", &["b2", "b3"]),
target("b2", &["l1"]),
target("b3", &["l2"]),
];
let DependencyGraph {
graph,
id_to_ix_map: map,
} = DependencyGraph::construct(targets).unwrap();
// depth map
#[rustfmt::skip]
let depth_map = vec![
(ix("a2", &map), 0),
(ix("b2", &map), 1),
(ix("b3", &map), 1),
(ix("l1", &map), 2),
(ix("l2", &map), 2),
].into_iter().collect();
// nodes that have been marked as obsolete
// (in real code it is automated)
let obsolete_leaf_nodes = vec![ix("l1", &map)].into_iter().collect();
// get the sequence of tasks that must be executed
// in specific order
let obsolete_targets =
util::find_obsolete_targets(graph.graph(), &obsolete_leaf_nodes);
let target_sequence = util::get_target_sequence(
graph.graph(),
&depth_map,
&obsolete_targets,
)
.unwrap();
let target_sequence = target_sequence
.into_iter()
.map(|target| target.identifier)
.collect::<Vec<_>>();
let expected_target_sequence: Vec<PathBuf> =
vec!["b2".into(), "a2".into()];
assert_eq!(target_sequence, expected_target_sequence);
}
#[test]
fn test_find_obsolete_targets() {
// helper functions
let target = |id: &str, deps: &[&str]| Target {
identifier: id.into(),
deps: deps.iter().map(|d| d.into()).collect(),
tasks: vec![],
working_dir: None,
};
let ixs = |ids: &[&str], map: &HashMap<_, Nx>| {
ids.iter()
.map(|id| map[&Into::<PathBuf>::into(id)])
.collect()
};
// the dependency graph:
//
// a1 a2
// / \ \
// / \ \
// b1 b2 b3
// / /
// / /
// l1* l2*
//
// l1 and l2 are marked as obsolete
// the function should find b2, a1, b3 & a2
// but not b1
#[rustfmt::skip]
let targets = vec![
target("a1", &["b1", "b2"]),
target("a2", &["b3"]),
target("b2", &["l1"]),
target("b3", &["l2"]),
];
let DependencyGraph {
graph,
id_to_ix_map: map,
} = DependencyGraph::construct(targets).unwrap();
let obsolete_leaf_nodes = ixs(&["l1", "l2"], &map);
let found_targets =
util::find_obsolete_targets(&graph.graph(), &obsolete_leaf_nodes);
let expected_targets = ixs(&["a1", "a2", "b2", "b3"], &map);
assert_eq!(found_targets, expected_targets);
}
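// Not part of the original suite: a hedged micro-test pinning down the
// `entry().and_modify(..).or_insert(..)` pattern that `generate_depth_map`
// relies on to keep the *longest* path to each node.
#[test]
fn test_depth_entry_keeps_longest_path() {
let mut depth: HashMap<Nx, usize> = HashMap::new();
for d in [1, 3, 2] {
depth
.entry(n(0))
.and_modify(|cur| *cur = (*cur).max(d))
.or_insert(d);
}
assert_eq!(depth[&n(0)], 3);
}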
#[test]
fn test_generate_depth_map() {
// depth is the length of the longest path from
// the target node to the dependency
#[rustfmt::skip]
let graph: daggy::Dag<(), ()> = daggy::Dag::from_edges(&[
(0, 3), (0, 4),
(1, 3), (1, 4), (1, 6),
(2, 3), (2, 4),
(3, 5), (3, 6), (3, 7),
(4, 5), (4, 6), (4, 7),
(7, 8),
(8, 9),
]).unwrap();
let target = n(1); // target
let depth_map = util::generate_depth_map(&graph, target);
assert!(depth_map.get(&n(0)).is_none());
assert!(depth_map.get(&n(2)).is_none());
assert_eq!(depth_map[&n(1)], 0);
assert_eq!(depth_map[&n(3)], 1);
assert_eq!(depth_map[&n(4)], 1);
assert_eq!(depth_map[&n(5)], 2);
assert_eq!(depth_map[&n(6)], 2);
assert_eq!(depth_map[&n(7)], 2);
assert_eq!(depth_map[&n(8)], 3);
assert_eq!(depth_map[&n(9)], 4);
}
}
| {
// reverse short-circuiting BFS:
// skip the dependants of the targets
// that have already been marked as obsolete
let mut queue = VecDeque::<Nx>::new();
let mut obsolete_ixs = HashSet::<Nx>::new();
for leaf_ix in obsolete_leaf_nodes {
// no need to clear the queue since it gets drained
// in the while loop each time
match &graph[*leaf_ix] {
Node::Target(_) => queue.push_back(*leaf_ix),
Node::NoRule(_) => {
let direct_dependants =
graph.neighbors_directed(*leaf_ix, Direction::Incoming);
queue.extend(direct_dependants);
}
}
| identifier_body |
host_segfault.rs | // To handle out-of-bounds reads and writes we use segfaults right now. We only
// want to catch a subset of segfaults, however, rather than all segfaults
// happening everywhere. The purpose of this test is to ensure that we *don't*
// catch segfaults if it happens in a random place in the code, but we instead
// bail out of our segfault handler early.
//
// This is sort of hard to test for but the general idea here is that we confirm
// that execution made it to our `segfault` function by printing something, and
// then we also make sure that stderr is empty to confirm that no weird panics
// happened or anything like that.
use std::env;
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::process::{Command, ExitStatus};
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use wasmtime::*;
const VAR_NAME: &str = "__TEST_TO_RUN";
const CONFIRM: &str = "well at least we ran up to the crash";
fn segfault() -> ! {
unsafe {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
*(0x4 as *mut i32) = 3;
unreachable!()
}
}
fn allocate_stack_space() -> ! {
let _a = [0u8; 1024];
for _ in 0..100000 {
allocate_stack_space();
}
unreachable!()
}
fn overrun_the_stack() -> ! {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
allocate_stack_space();
}
fn run_future<F: Future>(future: F) -> F::Output {
let mut f = Pin::from(Box::new(future));
let waker = dummy_waker();
let mut cx = Context::from_waker(&waker);
loop {
match f.as_mut().poll(&mut cx) {
Poll::Ready(val) => break val,
Poll::Pending => {}
}
}
}
fn dummy_waker() -> Waker {
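// `5` is an arbitrary non-null tag pointer: the waker carries no real state,
// and each vtable function below merely asserts that the tag survived intact.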
return unsafe { Waker::from_raw(clone(5 as *const _)) };
unsafe fn clone(ptr: *const ()) -> RawWaker {
assert_eq!(ptr as usize, 5);
const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop);
RawWaker::new(ptr, &VTABLE)
}
unsafe fn wake(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn wake_by_ref(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn drop(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
}
fn main() {
if cfg!(miri) {
return;
}
// Skip this test if it looks like we're in a cross-compiled situation and
// we're emulating this test for a different platform. In that scenario
// emulators (like QEMU) tend to not report signals the same way and such.
if std::env::vars()
.filter(|(k, _v)| k.starts_with("CARGO_TARGET") && k.ends_with("RUNNER"))
.count()
> 0
{
return;
}
let tests: &[(&str, fn(), bool)] = &[
("normal segfault", || segfault(), false),
(
"make instance then segfault",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
segfault();
},
false,
),
(
"make instance then overrun the stack",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
overrun_the_stack();
},
true,
),
(
"segfault in a host function",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, r#"(import "" "" (func)) (start 0)"#).unwrap();
let segfault = Func::wrap(&mut store, || segfault());
Instance::new(&mut store, &module, &[segfault.into()]).unwrap();
unreachable!();
},
false,
),
(
"hit async stack guard page",
|| {
let mut config = Config::default();
config.async_support(true);
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
(
"overrun 8k with misconfigured host",
|| overrun_with_big_module(8 << 10),
true,
),
(
"overrun 32k with misconfigured host",
|| overrun_with_big_module(32 << 10),
true,
),
#[cfg(not(any(target_arch = "riscv64")))]
// Due to `InstanceAllocationStrategy::pooling()` trying to allocate more than 6000G of address space.
// https://gitlab.com/qemu-project/qemu/-/issues/1214
// https://gitlab.com/qemu-project/qemu/-/issues/290
(
"hit async stack guard page with pooling allocator",
|| {
let mut config = Config::default();
config.async_support(true);
config.allocation_strategy(InstanceAllocationStrategy::pooling());
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
];
match env::var(VAR_NAME) {
Ok(s) => {
let test = tests
.iter()
.find(|p| p.0 == s)
.expect("failed to find test")
.1;
test();
}
Err(_) => {
for (name, _test, stack_overflow) in tests {
println!("running {name}");
run_test(name, *stack_overflow);
}
}
}
}
fn run_test(name: &str, stack_overflow: bool) {
let me = env::current_exe().unwrap();
let mut cmd = Command::new(me);
cmd.env(VAR_NAME, name);
let output = cmd.output().expect("failed to spawn subprocess");
let stdout = String::from_utf8_lossy(&output.stdout);
let stderr = String::from_utf8_lossy(&output.stderr);
let mut desc = format!("got status: {}", output.status);
if !stdout.trim().is_empty() {
desc.push_str("\nstdout: ----\n");
desc.push_str(" ");
desc.push_str(&stdout.replace("\n", "\n "));
}
if !stderr.trim().is_empty() {
desc.push_str("\nstderr: ----\n");
desc.push_str(" "); | desc.push_str(&stderr.replace("\n", "\n "));
}
if stack_overflow {
if is_stack_overflow(&output.status, &stderr) {
assert!(
stdout.trim().ends_with(CONFIRM),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a stack overflow on `{}`\n{}\n\n", name, desc);
}
} else {
if is_segfault(&output.status) {
assert!(
stdout.trim().ends_with(CONFIRM) && stderr.is_empty(),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a segfault on `{}`\n{}\n\n", name, desc);
}
}
}
#[cfg(unix)]
fn is_segfault(status: &ExitStatus) -> bool {
use std::os::unix::prelude::*;
match status.signal() {
Some(libc::SIGSEGV) => true,
_ => false,
}
}
#[cfg(unix)]
fn is_stack_overflow(status: &ExitStatus, stderr: &str) -> bool {
use std::os::unix::prelude::*;
// The main thread might overflow or it might be from a fiber stack (SIGSEGV/SIGBUS)
stderr.contains("has overflowed its stack")
|| match status.signal() {
Some(libc::SIGSEGV) | Some(libc::SIGBUS) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_segfault(status: &ExitStatus) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc0000005) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_stack_overflow(status: &ExitStatus, _stderr: &str) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc00000fd) => true,
_ => false,
}
}
fn overrun_with_big_module(approx_stack: usize) {
// Each call to `$get` produces ten 8-byte values which need to be saved
// onto the stack, so divide `approx_stack` by 80 to get
// a rough number of calls needed to consume `approx_stack` bytes of stack.
let n = approx_stack / 10 / 8;
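// e.g. approx_stack = 8 << 10 (8 KiB) gives n = 8192 / 10 / 8 = 102 calls.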
let mut s = String::new();
s.push_str("(module\n");
s.push_str("(func $big_stack\n");
for _ in 0..n {
s.push_str("call $get\n");
}
for _ in 0..n {
s.push_str("call $take\n");
}
s.push_str(")\n");
s.push_str("(func $get (result i64 i64 i64 i64 i64 i64 i64 i64 i64 i64) call $big_stack unreachable)\n");
s.push_str("(func $take (param i64 i64 i64 i64 i64 i64 i64 i64 i64 i64) unreachable)\n");
s.push_str("(func (export \"\") call $big_stack)\n");
s.push_str(")\n");
// Give 100MB of stack to wasm, representing a misconfigured host. Run the
// actual module on a 2MB stack in a child thread to guarantee that the
// module here will overrun the stack. This should deterministically hit the
// guard page.
let mut config = Config::default();
config.max_wasm_stack(100 << 20).async_stack_size(100 << 20);
let engine = Engine::new(&config).unwrap();
let module = Module::new(&engine, &s).unwrap();
let mut store = Store::new(&engine, ());
let i = Instance::new(&mut store, &module, &[]).unwrap();
let f = i.get_typed_func::<(), ()>(&mut store, "").unwrap();
std::thread::Builder::new()
.stack_size(2 << 20)
.spawn(move || {
println!("{CONFIRM}");
f.call(&mut store, ()).unwrap();
})
.unwrap()
.join()
.unwrap();
unreachable!();
} | random_line_split |
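The `dummy_waker` above hand-rolls a no-op waker from a raw vtable. For comparison, a shorter sketch of the same idea built on the std `Wake` trait (stable since Rust 1.51), assuming the extra `Arc` allocation is acceptable:

use std::sync::Arc;
use std::task::{Wake, Waker};

struct NoopWake;

impl Wake for NoopWake {
fn wake(self: Arc<Self>) {} // deliberately does nothing
}

fn noop_waker() -> Waker {
Waker::from(Arc::new(NoopWake))
}

fn main() {
let _w = noop_waker(); // usable anywhere a `&Waker` is needed
}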
|
host_segfault.rs | // To handle out-of-bounds reads and writes we use segfaults right now. We only
// want to catch a subset of segfaults, however, rather than all segfaults
// happening everywhere. The purpose of this test is to ensure that we *don't*
// catch segfaults if it happens in a random place in the code, but we instead
// bail out of our segfault handler early.
//
// This is sort of hard to test for but the general idea here is that we confirm
// that execution made it to our `segfault` function by printing something, and
// then we also make sure that stderr is empty to confirm that no weird panics
// happened or anything like that.
use std::env;
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::process::{Command, ExitStatus};
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use wasmtime::*;
const VAR_NAME: &str = "__TEST_TO_RUN";
const CONFIRM: &str = "well at least we ran up to the crash";
fn segfault() -> ! {
unsafe {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
*(0x4 as *mut i32) = 3;
unreachable!()
}
}
fn allocate_stack_space() -> ! {
let _a = [0u8; 1024];
for _ in 0..100000 {
allocate_stack_space();
}
unreachable!()
}
fn overrun_the_stack() -> ! {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
allocate_stack_space();
}
fn run_future<F: Future>(future: F) -> F::Output {
let mut f = Pin::from(Box::new(future));
let waker = dummy_waker();
let mut cx = Context::from_waker(&waker);
loop {
match f.as_mut().poll(&mut cx) {
Poll::Ready(val) => break val,
Poll::Pending => {}
}
}
}
fn dummy_waker() -> Waker {
return unsafe { Waker::from_raw(clone(5 as *const _)) };
unsafe fn clone(ptr: *const ()) -> RawWaker {
assert_eq!(ptr as usize, 5);
const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop);
RawWaker::new(ptr, &VTABLE)
}
unsafe fn wake(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn wake_by_ref(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn drop(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
}
fn main() {
if cfg!(miri) {
return;
}
// Skip this test if it looks like we're in a cross-compiled situation and
// we're emulating this test for a different platform. In that scenario
// emulators (like QEMU) tend to not report signals the same way and such.
if std::env::vars()
.filter(|(k, _v)| k.starts_with("CARGO_TARGET") && k.ends_with("RUNNER"))
.count()
> 0
{
return;
}
let tests: &[(&str, fn(), bool)] = &[
("normal segfault", || segfault(), false),
(
"make instance then segfault",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
segfault();
},
false,
),
(
"make instance then overrun the stack",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
overrun_the_stack();
},
true,
),
(
"segfault in a host function",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, r#"(import "" "" (func)) (start 0)"#).unwrap();
let segfault = Func::wrap(&mut store, || segfault());
Instance::new(&mut store, &module, &[segfault.into()]).unwrap();
unreachable!();
},
false,
),
(
"hit async stack guard page",
|| {
let mut config = Config::default();
config.async_support(true);
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
(
"overrun 8k with misconfigured host",
|| overrun_with_big_module(8 << 10),
true,
),
(
"overrun 32k with misconfigured host",
|| overrun_with_big_module(32 << 10),
true,
),
#[cfg(not(any(target_arch = "riscv64")))]
// Due to `InstanceAllocationStrategy::pooling()` trying to allocate more than 6000G of address space.
// https://gitlab.com/qemu-project/qemu/-/issues/1214
// https://gitlab.com/qemu-project/qemu/-/issues/290
(
"hit async stack guard page with pooling allocator",
|| {
let mut config = Config::default();
config.async_support(true);
config.allocation_strategy(InstanceAllocationStrategy::pooling());
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
];
match env::var(VAR_NAME) {
Ok(s) => {
let test = tests
.iter()
.find(|p| p.0 == s)
.expect("failed to find test")
.1;
test();
}
Err(_) => {
for (name, _test, stack_overflow) in tests {
println!("running {name}");
run_test(name, *stack_overflow);
}
}
}
}
fn run_test(name: &str, stack_overflow: bool) {
let me = env::current_exe().unwrap();
let mut cmd = Command::new(me);
cmd.env(VAR_NAME, name);
let output = cmd.output().expect("failed to spawn subprocess");
let stdout = String::from_utf8_lossy(&output.stdout);
let stderr = String::from_utf8_lossy(&output.stderr);
let mut desc = format!("got status: {}", output.status);
if !stdout.trim().is_empty() {
desc.push_str("\nstdout: ----\n");
desc.push_str(" ");
desc.push_str(&stdout.replace("\n", "\n "));
}
if !stderr.trim().is_empty() {
desc.push_str("\nstderr: ----\n");
desc.push_str(" ");
desc.push_str(&stderr.replace("\n", "\n "));
}
if stack_overflow {
if is_stack_overflow(&output.status, &stderr) {
assert!(
stdout.trim().ends_with(CONFIRM),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a stack overflow on `{}`\n{}\n\n", name, desc);
}
} else {
if is_segfault(&output.status) {
assert!(
stdout.trim().ends_with(CONFIRM) && stderr.is_empty(),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a segfault on `{}`\n{}\n\n", name, desc);
}
}
}
#[cfg(unix)]
fn is_segfault(status: &ExitStatus) -> bool |
#[cfg(unix)]
fn is_stack_overflow(status: &ExitStatus, stderr: &str) -> bool {
use std::os::unix::prelude::*;
// The main thread might overflow or it might be from a fiber stack (SIGSEGV/SIGBUS)
stderr.contains("has overflowed its stack")
|| match status.signal() {
Some(libc::SIGSEGV) | Some(libc::SIGBUS) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_segfault(status: &ExitStatus) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc0000005) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_stack_overflow(status: &ExitStatus, _stderr: &str) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc00000fd) => true,
_ => false,
}
}
fn overrun_with_big_module(approx_stack: usize) {
// Each call to `$get` produces ten 8-byte values which need to be saved
// onto the stack, so divide `approx_stack` by 80 to get
// a rough number of calls needed to consume `approx_stack` bytes of stack.
let n = approx_stack / 10 / 8;
let mut s = String::new();
s.push_str("(module\n");
s.push_str("(func $big_stack\n");
for _ in 0..n {
s.push_str("call $get\n");
}
for _ in 0..n {
s.push_str("call $take\n");
}
s.push_str(")\n");
s.push_str("(func $get (result i64 i64 i64 i64 i64 i64 i64 i64 i64 i64) call $big_stack unreachable)\n");
s.push_str("(func $take (param i64 i64 i64 i64 i64 i64 i64 i64 i64 i64) unreachable)\n");
s.push_str("(func (export \"\") call $big_stack)\n");
s.push_str(")\n");
// Give 100MB of stack to wasm, representing a misconfigured host. Run the
// actual module on a 2MB stack in a child thread to guarantee that the
// module here will overrun the stack. This should deterministically hit the
// guard page.
let mut config = Config::default();
config.max_wasm_stack(100 << 20).async_stack_size(100 << 20);
let engine = Engine::new(&config).unwrap();
let module = Module::new(&engine, &s).unwrap();
let mut store = Store::new(&engine, ());
let i = Instance::new(&mut store, &module, &[]).unwrap();
let f = i.get_typed_func::<(), ()>(&mut store, "").unwrap();
std::thread::Builder::new()
.stack_size(2 << 20)
.spawn(move || {
println!("{CONFIRM}");
f.call(&mut store, ()).unwrap();
})
.unwrap()
.join()
.unwrap();
unreachable!();
}
| {
use std::os::unix::prelude::*;
match status.signal() {
Some(libc::SIGSEGV) => true,
_ => false,
}
} | identifier_body |
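The Unix `is_segfault` above keys off `ExitStatusExt::signal`. A self-contained, Unix-only sketch of the same check, using a child shell that sends SIGSEGV to itself (it assumes the `libc` crate is available, as it already is for this test file):

#[cfg(unix)]
fn main() {
use std::os::unix::process::ExitStatusExt;
use std::process::Command;

// The shell kills itself with SIGSEGV, so the child dies by signal 11.
let status = Command::new("sh")
.args(["-c", "kill -SEGV $$"])
.status()
.expect("failed to run sh");
assert_eq!(status.signal(), Some(libc::SIGSEGV));
}

#[cfg(not(unix))]
fn main() {}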
host_segfault.rs | // To handle out-of-bounds reads and writes we use segfaults right now. We only
// want to catch a subset of segfaults, however, rather than all segfaults
// happening everywhere. The purpose of this test is to ensure that we *don't*
// catch segfaults if it happens in a random place in the code, but we instead
// bail out of our segfault handler early.
//
// This is sort of hard to test for but the general idea here is that we confirm
// that execution made it to our `segfault` function by printing something, and
// then we also make sure that stderr is empty to confirm that no weird panics
// happened or anything like that.
use std::env;
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::process::{Command, ExitStatus};
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use wasmtime::*;
const VAR_NAME: &str = "__TEST_TO_RUN";
const CONFIRM: &str = "well at least we ran up to the crash";
fn segfault() -> ! {
unsafe {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
*(0x4 as *mut i32) = 3;
unreachable!()
}
}
fn allocate_stack_space() -> ! {
let _a = [0u8; 1024];
for _ in 0..100000 {
allocate_stack_space();
}
unreachable!()
}
fn overrun_the_stack() -> ! {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
allocate_stack_space();
}
fn run_future<F: Future>(future: F) -> F::Output {
let mut f = Pin::from(Box::new(future));
let waker = dummy_waker();
let mut cx = Context::from_waker(&waker);
loop {
match f.as_mut().poll(&mut cx) {
Poll::Ready(val) => break val,
Poll::Pending => {}
}
}
}
fn dummy_waker() -> Waker {
return unsafe { Waker::from_raw(clone(5 as *const _)) };
unsafe fn clone(ptr: *const ()) -> RawWaker {
assert_eq!(ptr as usize, 5);
const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop);
RawWaker::new(ptr, &VTABLE)
}
unsafe fn wake(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn wake_by_ref(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn drop(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
}
fn main() {
if cfg!(miri) {
return;
}
// Skip this test if it looks like we're in a cross-compiled situation and
// we're emulating this test for a different platform. In that scenario
// emulators (like QEMU) tend to not report signals the same way and such.
if std::env::vars()
.filter(|(k, _v)| k.starts_with("CARGO_TARGET") && k.ends_with("RUNNER"))
.count()
> 0
{
return;
}
let tests: &[(&str, fn(), bool)] = &[
("normal segfault", || segfault(), false),
(
"make instance then segfault",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
segfault();
},
false,
),
(
"make instance then overrun the stack",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
overrun_the_stack();
},
true,
),
(
"segfault in a host function",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, r#"(import "" "" (func)) (start 0)"#).unwrap();
let segfault = Func::wrap(&mut store, || segfault());
Instance::new(&mut store, &module, &[segfault.into()]).unwrap();
unreachable!();
},
false,
),
(
"hit async stack guard page",
|| {
let mut config = Config::default();
config.async_support(true);
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
(
"overrun 8k with misconfigured host",
|| overrun_with_big_module(8 << 10),
true,
),
(
"overrun 32k with misconfigured host",
|| overrun_with_big_module(32 << 10),
true,
),
#[cfg(not(any(target_arch = "riscv64")))]
// Due to `InstanceAllocationStrategy::pooling()` trying to allocate more than 6000G of address space.
// https://gitlab.com/qemu-project/qemu/-/issues/1214
// https://gitlab.com/qemu-project/qemu/-/issues/290
(
"hit async stack guard page with pooling allocator",
|| {
let mut config = Config::default();
config.async_support(true);
config.allocation_strategy(InstanceAllocationStrategy::pooling());
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
];
match env::var(VAR_NAME) {
Ok(s) => {
let test = tests
.iter()
.find(|p| p.0 == s)
.expect("failed to find test")
.1;
test();
}
Err(_) => {
for (name, _test, stack_overflow) in tests {
println!("running {name}");
run_test(name, *stack_overflow);
}
}
}
}
fn run_test(name: &str, stack_overflow: bool) {
let me = env::current_exe().unwrap();
let mut cmd = Command::new(me);
cmd.env(VAR_NAME, name);
let output = cmd.output().expect("failed to spawn subprocess");
let stdout = String::from_utf8_lossy(&output.stdout);
let stderr = String::from_utf8_lossy(&output.stderr);
let mut desc = format!("got status: {}", output.status);
if !stdout.trim().is_empty() {
desc.push_str("\nstdout: ----\n");
desc.push_str(" ");
desc.push_str(&stdout.replace("\n", "\n "));
}
if !stderr.trim().is_empty() {
desc.push_str("\nstderr: ----\n");
desc.push_str(" ");
desc.push_str(&stderr.replace("\n", "\n "));
}
if stack_overflow {
if is_stack_overflow(&output.status, &stderr) {
assert!(
stdout.trim().ends_with(CONFIRM),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a stack overflow on `{}`\n{}\n\n", name, desc);
}
} else |
}
#[cfg(unix)]
fn is_segfault(status: &ExitStatus) -> bool {
use std::os::unix::prelude::*;
match status.signal() {
Some(libc::SIGSEGV) => true,
_ => false,
}
}
#[cfg(unix)]
fn is_stack_overflow(status: &ExitStatus, stderr: &str) -> bool {
use std::os::unix::prelude::*;
// The main thread might overflow or it might be from a fiber stack (SIGSEGV/SIGBUS)
stderr.contains("has overflowed its stack")
|| match status.signal() {
Some(libc::SIGSEGV) | Some(libc::SIGBUS) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_segfault(status: &ExitStatus) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc0000005) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_stack_overflow(status: &ExitStatus, _stderr: &str) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc00000fd) => true,
_ => false,
}
}
fn overrun_with_big_module(approx_stack: usize) {
// Each call to `$get` produces ten 8-byte values which need to be saved
// onto the stack, so divide `approx_stack` by 80 to get
// a rough number of calls needed to consume `approx_stack` bytes of stack.
let n = approx_stack / 10 / 8;
let mut s = String::new();
s.push_str("(module\n");
s.push_str("(func $big_stack\n");
for _ in 0..n {
s.push_str("call $get\n");
}
for _ in 0..n {
s.push_str("call $take\n");
}
s.push_str(")\n");
s.push_str("(func $get (result i64 i64 i64 i64 i64 i64 i64 i64 i64 i64) call $big_stack unreachable)\n");
s.push_str("(func $take (param i64 i64 i64 i64 i64 i64 i64 i64 i64 i64) unreachable)\n");
s.push_str("(func (export \"\") call $big_stack)\n");
s.push_str(")\n");
// Give 100MB of stack to wasm, representing a misconfigured host. Run the
// actual module on a 2MB stack in a child thread to guarantee that the
// module here will overrun the stack. This should deterministically hit the
// guard page.
let mut config = Config::default();
config.max_wasm_stack(100 << 20).async_stack_size(100 << 20);
let engine = Engine::new(&config).unwrap();
let module = Module::new(&engine, &s).unwrap();
let mut store = Store::new(&engine, ());
let i = Instance::new(&mut store, &module, &[]).unwrap();
let f = i.get_typed_func::<(), ()>(&mut store, "").unwrap();
std::thread::Builder::new()
.stack_size(2 << 20)
.spawn(move || {
println!("{CONFIRM}");
f.call(&mut store, ()).unwrap();
})
.unwrap()
.join()
.unwrap();
unreachable!();
}
| {
if is_segfault(&output.status) {
assert!(
stdout.trim().ends_with(CONFIRM) && stderr.is_empty(),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a segfault on `{}`\n{}\n\n", name, desc);
}
} | conditional_block |
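`main` and `run_test` above implement a re-exec harness: the parent walks the case table and runs every case in a fresh subprocess selected via an environment variable, so a crashing case cannot take the parent down. A stripped-down sketch of just that mechanism (the names here are illustrative, not from the original):

use std::{env, process::Command};

const CASE_VAR: &str = "__CASE_TO_RUN"; // hypothetical variable name

fn main() {
match env::var(CASE_VAR) {
// Child: run exactly one case; it is allowed to crash.
Ok(name) => run_case(&name),
// Parent: re-exec ourselves with the variable set, then inspect the status.
Err(_) => {
let me = env::current_exe().unwrap();
let status = Command::new(me)
.env(CASE_VAR, "abort")
.status()
.expect("failed to spawn child");
println!("child exited with: {status}");
}
}
}

fn run_case(name: &str) {
if name == "abort" {
std::process::abort();
}
}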
host_segfault.rs | // To handle out-of-bounds reads and writes we use segfaults right now. We only
// want to catch a subset of segfaults, however, rather than all segfaults
// happening everywhere. The purpose of this test is to ensure that we *don't*
// catch segfaults if it happens in a random place in the code, but we instead
// bail out of our segfault handler early.
//
// This is sort of hard to test for but the general idea here is that we confirm
// that execution made it to our `segfault` function by printing something, and
// then we also make sure that stderr is empty to confirm that no weird panics
// happened or anything like that.
use std::env;
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::process::{Command, ExitStatus};
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use wasmtime::*;
const VAR_NAME: &str = "__TEST_TO_RUN";
const CONFIRM: &str = "well at least we ran up to the crash";
fn segfault() -> ! {
unsafe {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
*(0x4 as *mut i32) = 3;
unreachable!()
}
}
fn allocate_stack_space() -> ! {
let _a = [0u8; 1024];
for _ in 0..100000 {
allocate_stack_space();
}
unreachable!()
}
fn overrun_the_stack() -> ! {
println!("{}", CONFIRM);
io::stdout().flush().unwrap();
allocate_stack_space();
}
fn run_future<F: Future>(future: F) -> F::Output {
let mut f = Pin::from(Box::new(future));
let waker = dummy_waker();
let mut cx = Context::from_waker(&waker);
loop {
match f.as_mut().poll(&mut cx) {
Poll::Ready(val) => break val,
Poll::Pending => {}
}
}
}
fn dummy_waker() -> Waker {
return unsafe { Waker::from_raw(clone(5 as *const _)) };
unsafe fn clone(ptr: *const ()) -> RawWaker {
assert_eq!(ptr as usize, 5);
const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop);
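// NB: `RawWakerVTable::new` takes its functions in the fixed order
// (clone, wake, wake_by_ref, drop).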
RawWaker::new(ptr, &VTABLE)
}
unsafe fn wake(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn wake_by_ref(ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
unsafe fn | (ptr: *const ()) {
assert_eq!(ptr as usize, 5);
}
}
fn main() {
if cfg!(miri) {
return;
}
// Skip this test if it looks like we're in a cross-compiled situation and
// we're emulating this test for a different platform. In that scenario
// emulators (like QEMU) tend to not report signals the same way and such.
if std::env::vars()
.filter(|(k, _v)| k.starts_with("CARGO_TARGET") && k.ends_with("RUNNER"))
.count()
> 0
{
return;
}
let tests: &[(&str, fn(), bool)] = &[
("normal segfault", || segfault(), false),
(
"make instance then segfault",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
segfault();
},
false,
),
(
"make instance then overrun the stack",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, "(module)").unwrap();
let _instance = Instance::new(&mut store, &module, &[]).unwrap();
overrun_the_stack();
},
true,
),
(
"segfault in a host function",
|| {
let engine = Engine::default();
let mut store = Store::new(&engine, ());
let module = Module::new(&engine, r#"(import "" "" (func)) (start 0)"#).unwrap();
let segfault = Func::wrap(&mut store, || segfault());
Instance::new(&mut store, &module, &[segfault.into()]).unwrap();
unreachable!();
},
false,
),
(
"hit async stack guard page",
|| {
let mut config = Config::default();
config.async_support(true);
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
(
"overrun 8k with misconfigured host",
|| overrun_with_big_module(8 << 10),
true,
),
(
"overrun 32k with misconfigured host",
|| overrun_with_big_module(32 << 10),
true,
),
#[cfg(not(any(target_arch = "riscv64")))]
// Due to `InstanceAllocationStrategy::pooling()` trying to allocate more than 6000G of address space.
// https://gitlab.com/qemu-project/qemu/-/issues/1214
// https://gitlab.com/qemu-project/qemu/-/issues/290
(
"hit async stack guard page with pooling allocator",
|| {
let mut config = Config::default();
config.async_support(true);
config.allocation_strategy(InstanceAllocationStrategy::pooling());
let engine = Engine::new(&config).unwrap();
let mut store = Store::new(&engine, ());
let f = Func::wrap0_async(&mut store, |_| {
Box::new(async {
overrun_the_stack();
})
});
run_future(f.call_async(&mut store, &[], &mut [])).unwrap();
unreachable!();
},
true,
),
];
match env::var(VAR_NAME) {
Ok(s) => {
let test = tests
.iter()
.find(|p| p.0 == s)
.expect("failed to find test")
.1;
test();
}
Err(_) => {
for (name, _test, stack_overflow) in tests {
println!("running {name}");
run_test(name, *stack_overflow);
}
}
}
}
fn run_test(name: &str, stack_overflow: bool) {
let me = env::current_exe().unwrap();
let mut cmd = Command::new(me);
cmd.env(VAR_NAME, name);
let output = cmd.output().expect("failed to spawn subprocess");
let stdout = String::from_utf8_lossy(&output.stdout);
let stderr = String::from_utf8_lossy(&output.stderr);
let mut desc = format!("got status: {}", output.status);
if !stdout.trim().is_empty() {
desc.push_str("\nstdout: ----\n");
desc.push_str(" ");
desc.push_str(&stdout.replace("\n", "\n "));
}
if !stderr.trim().is_empty() {
desc.push_str("\nstderr: ----\n");
desc.push_str(" ");
desc.push_str(&stderr.replace("\n", "\n "));
}
if stack_overflow {
if is_stack_overflow(&output.status, &stderr) {
assert!(
stdout.trim().ends_with(CONFIRM),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a stack overflow on `{}`\n{}\n\n", name, desc);
}
} else {
if is_segfault(&output.status) {
assert!(
stdout.trim().ends_with(CONFIRM) && stderr.is_empty(),
"failed to find confirmation in test `{}`\n{}",
name,
desc
);
} else {
panic!("\n\nexpected a segfault on `{}`\n{}\n\n", name, desc);
}
}
}
#[cfg(unix)]
fn is_segfault(status: &ExitStatus) -> bool {
use std::os::unix::prelude::*;
match status.signal() {
Some(libc::SIGSEGV) => true,
_ => false,
}
}
#[cfg(unix)]
fn is_stack_overflow(status: &ExitStatus, stderr: &str) -> bool {
use std::os::unix::prelude::*;
// The main thread might overflow or it might be from a fiber stack (SIGSEGV/SIGBUS)
stderr.contains("has overflowed its stack")
|| match status.signal() {
Some(libc::SIGSEGV) | Some(libc::SIGBUS) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_segfault(status: &ExitStatus) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc0000005) => true,
_ => false,
}
}
#[cfg(windows)]
fn is_stack_overflow(status: &ExitStatus, _stderr: &str) -> bool {
match status.code().map(|s| s as u32) {
Some(0xc00000fd) => true,
_ => false,
}
}
fn overrun_with_big_module(approx_stack: usize) {
// Each call to `$get` produces ten 8-byte values which need to be saved
// onto the stack, so divide `approx_stack` by 80 to get
// a rough number of calls needed to consume `approx_stack` bytes of stack.
let n = approx_stack / 10 / 8;
let mut s = String::new();
s.push_str("(module\n");
s.push_str("(func $big_stack\n");
for _ in 0..n {
s.push_str("call $get\n");
}
for _ in 0..n {
s.push_str("call $take\n");
}
s.push_str(")\n");
s.push_str("(func $get (result i64 i64 i64 i64 i64 i64 i64 i64 i64 i64) call $big_stack unreachable)\n");
s.push_str("(func $take (param i64 i64 i64 i64 i64 i64 i64 i64 i64 i64) unreachable)\n");
s.push_str("(func (export \"\") call $big_stack)\n");
s.push_str(")\n");
// Give 100MB of stack to wasm, representing a misconfigured host. Run the
// actual module on a 2MB stack in a child thread to guarantee that the
// module here will overrun the stack. This should deterministically hit the
// guard page.
let mut config = Config::default();
config.max_wasm_stack(100 << 20).async_stack_size(100 << 20);
let engine = Engine::new(&config).unwrap();
let module = Module::new(&engine, &s).unwrap();
let mut store = Store::new(&engine, ());
let i = Instance::new(&mut store, &module, &[]).unwrap();
let f = i.get_typed_func::<(), ()>(&mut store, "").unwrap();
std::thread::Builder::new()
.stack_size(2 << 20)
.spawn(move || {
println!("{CONFIRM}");
f.call(&mut store, ()).unwrap();
})
.unwrap()
.join()
.unwrap();
unreachable!();
}
| drop | identifier_name |
board.rs | //! The central part of this crate, uses all modules to load and run our world in memory.
//!
//! The `Board` struct is technically all you need to start your world but then you wouldn't be able to see it!
//! Graphics are provided by the [graphics] module, although you could implement your own.
//!
//! TODO: documentation.
extern crate bincode;
extern crate rand;
#[cfg(multithreading)]
extern crate rayon;
use crate::brain::{Brain, GenerateRandom, NeuralNet, RecombinationInfinite};
use crate::climate::Climate;
use crate::constants::*;
use crate::sbip::SoftBodiesInPositions;
use crate::softbody::{HLSoftBody, SoftBody};
use crate::terrain::Terrain;
/// The number of times per year an object is updated.
///
/// TODO: eliminate this variable because it's not needed.
const OBJECT_TIMESTEPS_PER_YEAR: f64 = 100.0;
const _POPULATION_HISTORY_LENGTH: usize = 200;
pub type BoardSize = (usize, usize);
pub type BoardCoordinate = (usize, usize);
#[derive(Clone)]
pub struct BoardPreciseCoordinate(pub f64, pub f64);
impl BoardPreciseCoordinate {
pub fn unpack(&self) -> (f64, f64) {
return (self.0, self.1);
}
}
impl From<BoardPreciseCoordinate> for BoardCoordinate {
fn from(bpc: BoardPreciseCoordinate) -> BoardCoordinate {
let (x, y) = bpc.unpack();
(x.floor() as usize, y.floor() as usize)
}
}
pub struct SelectedCreature<B: NeuralNet>(pub Option<HLSoftBody<B>>);
impl<B: NeuralNet> Default for SelectedCreature<B> {
fn default() -> Self {
SelectedCreature(None)
}
}
impl<B: NeuralNet> SelectedCreature<B> {
/// Checks if the given creature was selected and if so, removes it by setting `self.0` to `None`.
pub fn unselect_if_dead(&mut self, creature: HLSoftBody<B>) {
if let Some(sel_creature) = &self.0 {
// If `creature` isn't the same as `self.selected_creature`.
if *sel_creature != creature {
// Then don't change to `None`.
return;
}
// Else go on
}
self.0 = None;
}
pub fn select(&mut self, creature: HLSoftBody<B>) {
self.0 = Some(creature);
}
pub fn deselect(&mut self) {
self.0 = None;
}
}
pub struct Board<B: NeuralNet = Brain> {
// Fields relevant for the board itself.
board_width: usize,
board_height: usize,
pub terrain: Terrain,
// Fields relevant for the creatures.
creature_minimum: usize,
pub soft_bodies_in_positions: SoftBodiesInPositions<B>,
pub creatures: Vec<HLSoftBody<B>>,
creature_id_up_to: usize,
// _creature_rank_metric: usize,
// Fields relevant for time or history
year: f64,
// Fields relevant for temperature
pub climate: Climate,
// Miscellaneous
pub selected_creature: SelectedCreature<B>,
}
impl<B: NeuralNet + GenerateRandom> Default for Board<B> {
fn default() -> Self {
let board_size = DEFAULT_BOARD_SIZE;
let noise_step_size = DEFAULT_NOISE_STEP_SIZE;
let creature_minimum = DEFAULT_CREATURE_MINIMUM;
let min_temp = DEFAULT_MIN_TEMP;
let max_temp = DEFAULT_MAX_TEMP;
return Board::new_random(
board_size,
noise_step_size,
creature_minimum,
min_temp,
max_temp,
);
}
}
impl<B: NeuralNet> Board<B> {
pub fn new(board_width: usize, board_height: usize, terrain: Terrain, creature_minimum: usize, soft_bodies_in_positions: SoftBodiesInPositions<B>,
creatures: Vec<HLSoftBody<B>>, creature_id_up_to: usize, year: f64, climate: Climate, selected_creature: SelectedCreature<B>) -> Board<B>{
Board {
board_width,
board_height,
terrain,
creature_minimum,
soft_bodies_in_positions,
creatures,
creature_id_up_to,
year,
climate,
selected_creature,
}
}
}
impl<B: NeuralNet + GenerateRandom> Board<B> {
/// Randomly generates a new `Board`.
pub fn new_random(
board_size: BoardSize,
noise_step_size: f64,
creature_minimum: usize,
min_temp: f64,
max_temp: f64,
) -> Self {
let creatures = Vec::with_capacity(creature_minimum);
// Initialize climate.
let mut climate = Climate::new(min_temp, max_temp);
climate.update(0.0);
let mut board = Board {
board_width: board_size.0,
board_height: board_size.1,
terrain: Terrain::generate_perlin(board_size, noise_step_size),
creature_minimum,
soft_bodies_in_positions: SoftBodiesInPositions::new_allocated(board_size),
creatures,
creature_id_up_to: 0,
year: 0.0,
climate,
selected_creature: SelectedCreature::default(),
};
// Initialize creatures.
board.maintain_creature_minimum();
return board;
}
/// Maintains the creature minimum by adding random creatures until there are at least `self.creature_minimum` creatures.
///
/// # Processing equivalent
/// This function is the equivalent of *Board.pde/maintainCreatureMinimum* with *choosePreexisting* set to false.
fn maintain_creature_minimum(&mut self) {
while self.creatures.len() < self.creature_minimum {
let board_size = self.get_board_size();
let creature = HLSoftBody::from(SoftBody::new_random(board_size, self.year));
// Initialize in `SoftBodiesInPositions` as well.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
// Just to set the prevSBIP variables.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
self.creatures.push(creature);
self.creature_id_up_to += 1;
}
}
}
impl<B: NeuralNet + RecombinationInfinite + GenerateRandom> Board<B> {
pub fn update(&mut self, time_step: f64) {
self.year += time_step;
self.climate.update(self.year);
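// Compare the growth-rate change just before and just after this frame;
// a sign flip means the temperature curve passed an extremum this frame.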
let temp_change_into_frame =
self.climate.get_temperature() - self.climate.get_growth_rate(self.year - time_step);
let temp_change_out_of_frame =
self.climate.get_growth_rate(self.year + time_step) - self.climate.get_temperature();
if temp_change_into_frame * temp_change_out_of_frame < 0.0 {
// Temperature change flipped direction
self.terrain.update_all(self.year, &self.climate);
}
self.update_creatures(time_step);
// Kill weak creatures.
self.remove_dead_creatures();
// Let creatures reproduce
self.creatures_reproduce();
// Experimental: this was moved from above to always keep the creature minimum.
self.maintain_creature_minimum();
// Move the creatures around on the board
self.move_creatures(time_step);
}
}
impl<B: NeuralNet + RecombinationInfinite> Board<B> {
fn creatures_reproduce(&mut self) {
let mut babies = Vec::new();
// Keep the borrow checker happy
{
let time = self.get_time();
let board_size = self.get_board_size();
let sbip = &mut self.soft_bodies_in_positions;
for c in &mut self.creatures {
let maybe_baby = c.try_reproduce(time, sbip, board_size);
if let Some(baby) = maybe_baby {
babies.push(baby);
}
}
}
babies.into_iter().for_each(|c| self.creatures.push(c));
}
}
impl<B: NeuralNet> Board<B> {
/// Selects the oldest creature still alive.
pub fn select_oldest(&mut self) {
let oldest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_birth_time() < c_old.borrow().get_birth_time() {
c
} else {
c_old
}
});
self.selected_creature.select(oldest.clone());
}
/// Selects the biggest creature.
pub fn select_biggest(&mut self) {
let biggest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_energy() > c_old.borrow().get_energy() {
c
} else {
c_old
}
});
self.selected_creature.select(biggest.clone());
}
#[cfg(not(multithreading))]
fn update_brains(&mut self) {
self.creatures
.iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let creature: &mut SoftBody<B> = &mut c;
let env = crate::brain::Environment::new(&self.terrain, &creature.base);
creature.brain.run_with(&env);
});
}
#[cfg(multithreading)]
fn update_brains(&mut self) {
self.creatures
.par_iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let env = crate::brain::Environment::new(&self.terrain, &c.base);
c.brain.run_with(&env);
});
}
pub fn update_creatures(&mut self, time_step: f64) {
use crate::brain::EnvironmentMut;
let time = self.year;
let board_size = self.get_board_size();
for c_rc in &self.creatures {
// These functions call `borrow_mut()`
c_rc.collide(&self.soft_bodies_in_positions);
let mut c = c_rc.borrow_mut();
c.record_energy();
c.metabolize(time_step, time);
}
self.update_brains();
let use_output = true;
if use_output {
for c_rc in &self.creatures {
let creature: &mut SoftBody<B> = &mut c_rc.borrow_mut();
let mut env = EnvironmentMut::new(
&mut self.terrain,
&mut creature.base,
board_size,
time,
&self.climate,
&self.soft_bodies_in_positions,
c_rc.clone(),
);
creature.brain.use_output(&mut env, time_step);
}
}
}
// #[cfg(multithreading)]
pub fn move_creatures(&mut self, time_step: f64) {
let board_size = self.get_board_size();
for c in &self.creatures {
c.apply_motions(
time_step * OBJECT_TIMESTEPS_PER_YEAR,
board_size,
&self.terrain,
&mut self.soft_bodies_in_positions,
);
}
}
pub fn prepare_for_drawing(&mut self) {
self.terrain.update_all(self.year, &self.climate);
}
/// Checks for all creatures whether they are fit enough to live and kills them off if they're not.
///
/// Utilizes the `should_die` function of `SoftBody`.
fn remove_dead_creatures(&mut self) {
let time = self.get_time();
let board_size = self.get_board_size();
let terrain = &mut self.terrain;
let climate = &self.climate;
let sbip = &mut self.soft_bodies_in_positions;
// TODO: possibly optimise code
let mut i = 0;
while i < self.creatures.len() {
// let creature = &mut self.creatures[i];
if self.creatures[i].borrow().should_die() {
self.creatures[i].return_to_earth(time, board_size, terrain, climate, sbip);
self.selected_creature
.unselect_if_dead(self.creatures[i].clone());
self.creatures.remove(i);
// println!("Dead!");
} else |
}
}
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_growth_since(&self, last_updated: f64) -> f64 {
return self
.climate
.get_growth_over_time_range(self.year, last_updated);
}
/// Returns the current growth rate (temperature) based on the season.
///
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_current_growth_rate(&self) -> f64 {
self.climate.get_growth_rate(self.year)
}
/// Returns the current time, i.e. `self.year`.
pub fn get_time(&self) -> f64 {
return self.year;
}
/// Returns a tuple with the width and height of this `Board`.
///
/// Equivalent to `(board.get_board_width(), board.get_board_height())`.
pub fn get_board_size(&self) -> (usize, usize) {
return (self.board_width, self.board_height);
}
/// Returns the width of the board.
pub fn get_board_width(&self) -> usize {
return self.board_width;
}
/// Returns the height of the board.
pub fn get_board_height(&self) -> usize {
return self.board_height;
}
/// Returns the minimum number of creatures that should be on the `Board`.
///
/// When the population drops below this, `maintain_creature_minimum()` spawns new creatures to fill the gap.
pub fn get_creature_minimum(&self) -> usize {
self.creature_minimum
}
/// Returns `self.creature_id_up_to`
pub fn get_creature_id_up_to(&self) -> usize {
self.creature_id_up_to
}
/// Gets the size of the current population; i.e. how many creatures are currently alive.
pub fn get_population_size(&self) -> usize {
return self.creatures.len();
}
/// Returns a `String` representing the current season.
///
/// Can be either "Winter", "Spring", "Summer" or "Autumn".
pub fn get_season(&self) -> String {
const SEASONS: [&str; 4] = ["Winter", "Spring", "Summer", "Autumn"];
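// The fractional part of `year` selects the quarter: [0.0, 0.25) is Winter, [0.25, 0.5) is Spring, and so on.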
let season: usize = ((self.year % 1.0) * 4.0).floor() as usize;
return SEASONS[season].to_string();
}
}
impl<B: NeuralNet + serde::de::DeserializeOwned> Board<B> {
pub fn load_from<P: AsRef<std::path::Path>>(path: P) -> Result<Board<B>, Box<std::error::Error>> {
let file = std::fs::File::open(path)?;
Ok({
use crate::serde_structs::board::BoardSerde;
let ir: BoardSerde<B> = bincode::deserialize_from(file)?;
ir.into()
})
}
}
impl<B: NeuralNet + serde::Serialize> Board<B> {
pub fn save_to<P: AsRef<std::path::Path>>(
self,
path: P,
) -> Result<(), Box<std::error::Error>> {
let file = std::fs::File::create(path)?;
bincode::serialize_into(file, &crate::serde_structs::board::BoardSerde::from(self))?;
Ok(())
}
}
| {
i += 1;
} | conditional_block |
board.rs | //! The central part of this crate, uses all modules to load and run our world in memory.
//!
//! The `Board` struct is technically all you need to start your world but then you wouldn't be able to see it!
//! Graphics are provided by the [graphics] module, although you could implement your own.
//!
//! TODO: documentation.
extern crate bincode;
extern crate rand;
#[cfg(multithreading)]
extern crate rayon;
use crate::brain::{Brain, GenerateRandom, NeuralNet, RecombinationInfinite};
use crate::climate::Climate;
use crate::constants::*;
use crate::sbip::SoftBodiesInPositions;
use crate::softbody::{HLSoftBody, SoftBody};
use crate::terrain::Terrain;
/// The number of times per year an object is updated.
///
/// TODO: eliminate this variable because it's not needed.
const OBJECT_TIMESTEPS_PER_YEAR: f64 = 100.0;
const _POPULATION_HISTORY_LENGTH: usize = 200;
pub type BoardSize = (usize, usize);
pub type BoardCoordinate = (usize, usize);
#[derive(Clone)]
pub struct BoardPreciseCoordinate(pub f64, pub f64);
impl BoardPreciseCoordinate {
pub fn unpack(&self) -> (f64, f64) {
return (self.0, self.1);
}
}
impl From<BoardPreciseCoordinate> for BoardCoordinate {
fn from(bpc: BoardPreciseCoordinate) -> BoardCoordinate {
let (x, y) = bpc.unpack();
(x.floor() as usize, y.floor() as usize)
}
}
pub struct SelectedCreature<B: NeuralNet>(pub Option<HLSoftBody<B>>);
impl<B: NeuralNet> Default for SelectedCreature<B> {
fn default() -> Self {
SelectedCreature(None)
}
}
impl<B: NeuralNet> SelectedCreature<B> {
/// Checks if the given creature was selected and if so, removes it by setting `self.0` to `None`.
pub fn unselect_if_dead(&mut self, creature: HLSoftBody<B>) {
if let Some(sel_creature) = &self.0 {
// If `creature` isn't the same as `self.selected_creature`.
if *sel_creature != creature {
// Then don't change to `None`.
return;
}
// Else go on
}
self.0 = None;
}
pub fn select(&mut self, creature: HLSoftBody<B>) {
self.0 = Some(creature);
}
pub fn deselect(&mut self) {
self.0 = None;
}
}
pub struct Board<B: NeuralNet = Brain> {
// Fields relevant for the board itself.
board_width: usize,
board_height: usize,
pub terrain: Terrain,
// Fields relevant for the creatures.
creature_minimum: usize,
pub soft_bodies_in_positions: SoftBodiesInPositions<B>,
pub creatures: Vec<HLSoftBody<B>>,
creature_id_up_to: usize,
// _creature_rank_metric: usize,
// Fields relevant for time or history
year: f64,
// Fields relevant for temperature
pub climate: Climate,
// Miscellaneous
pub selected_creature: SelectedCreature<B>,
}
impl<B: NeuralNet + GenerateRandom> Default for Board<B> {
fn default() -> Self {
let board_size = DEFAULT_BOARD_SIZE;
let noise_step_size = DEFAULT_NOISE_STEP_SIZE;
let creature_minimum = DEFAULT_CREATURE_MINIMUM;
let min_temp = DEFAULT_MIN_TEMP;
let max_temp = DEFAULT_MAX_TEMP;
return Board::new_random(
board_size,
noise_step_size,
creature_minimum,
min_temp,
max_temp,
);
}
}
impl<B: NeuralNet> Board<B> {
pub fn new(board_width: usize, board_height: usize, terrain: Terrain, creature_minimum: usize, soft_bodies_in_positions: SoftBodiesInPositions<B>,
creatures: Vec<HLSoftBody<B>>, creature_id_up_to: usize, year: f64, climate: Climate, selected_creature: SelectedCreature<B>) -> Board<B>{
Board {
board_width,
board_height,
terrain,
creature_minimum,
soft_bodies_in_positions,
creatures,
creature_id_up_to,
year,
climate,
selected_creature,
}
}
}
impl<B: NeuralNet + GenerateRandom> Board<B> {
/// Randomly generates a new `Board`.
pub fn new_random(
board_size: BoardSize,
noise_step_size: f64,
creature_minimum: usize,
min_temp: f64,
max_temp: f64,
) -> Self {
let creatures = Vec::with_capacity(creature_minimum);
// Initialize climate.
let mut climate = Climate::new(min_temp, max_temp);
climate.update(0.0);
let mut board = Board {
board_width: board_size.0,
board_height: board_size.1,
terrain: Terrain::generate_perlin(board_size, noise_step_size),
creature_minimum,
soft_bodies_in_positions: SoftBodiesInPositions::new_allocated(board_size),
creatures,
creature_id_up_to: 0,
year: 0.0,
climate,
selected_creature: SelectedCreature::default(),
};
// Initialize creatures. | board.maintain_creature_minimum();
return board;
}
/// Maintains the creature minimum by adding random creatures until there are at least `self.creature_minimum` creatures.
///
/// # Processing equivalent
/// This function is the equivalent of *Board.pde/maintainCreatureMinimum* with *choosePreexisting* set to false.
fn maintain_creature_minimum(&mut self) {
while self.creatures.len() < self.creature_minimum {
let board_size = self.get_board_size();
let creature = HLSoftBody::from(SoftBody::new_random(board_size, self.year));
// Initialize in `SoftBodiesInPositions` as well.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
// Just to set the prevSBIP variables.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
self.creatures.push(creature);
self.creature_id_up_to += 1;
}
}
}
impl<B: NeuralNet + RecombinationInfinite + GenerateRandom> Board<B> {
pub fn update(&mut self, time_step: f64) {
self.year += time_step;
self.climate.update(self.year);
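// Compare the growth-rate change just before and just after this frame;
// a sign flip means the temperature curve passed an extremum this frame.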
let temp_change_into_frame =
self.climate.get_temperature() - self.climate.get_growth_rate(self.year - time_step);
let temp_change_out_of_frame =
self.climate.get_growth_rate(self.year + time_step) - self.climate.get_temperature();
if temp_change_into_frame * temp_change_out_of_frame < 0.0 {
// Temperature change flipped direction
self.terrain.update_all(self.year, &self.climate);
}
self.update_creatures(time_step);
// Kill weak creatures.
self.remove_dead_creatures();
// Let creatures reproduce
self.creatures_reproduce();
// Experimental: this was moved from above to always keep the creature minimum.
self.maintain_creature_minimum();
// Move the creatures around on the board
self.move_creatures(time_step);
}
}
impl<B: NeuralNet + RecombinationInfinite> Board<B> {
fn creatures_reproduce(&mut self) {
let mut babies = Vec::new();
// Keep the borrow checker happy
{
let time = self.get_time();
let board_size = self.get_board_size();
let sbip = &mut self.soft_bodies_in_positions;
for c in &mut self.creatures {
let maybe_baby = c.try_reproduce(time, sbip, board_size);
if let Some(baby) = maybe_baby {
babies.push(baby);
}
}
}
babies.into_iter().for_each(|c| self.creatures.push(c));
}
}
impl<B: NeuralNet> Board<B> {
/// Selects the oldest creature still alive.
pub fn select_oldest(&mut self) {
let oldest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_birth_time() < c_old.borrow().get_birth_time() {
c
} else {
c_old
}
});
self.selected_creature.select(oldest.clone());
}
/// Selects the biggest creature.
pub fn select_biggest(&mut self) {
let biggest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_energy() > c_old.borrow().get_energy() {
c
} else {
c_old
}
});
self.selected_creature.select(biggest.clone());
}
#[cfg(not(multithreading))]
fn update_brains(&mut self) {
self.creatures
.iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let creature: &mut SoftBody<B> = &mut c;
let env = crate::brain::Environment::new(&self.terrain, &creature.base);
creature.brain.run_with(&env);
});
}
#[cfg(multithreading)]
fn update_brains(&mut self) {
self.creatures
.par_iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let env = crate::brain::Environment::new(&self.terrain, &c.base);
c.brain.run_with(&env);
});
}
pub fn update_creatures(&mut self, time_step: f64) {
use crate::brain::EnvironmentMut;
let time = self.year;
let board_size = self.get_board_size();
for c_rc in &self.creatures {
// These functions call `borrow_mut()`
c_rc.collide(&self.soft_bodies_in_positions);
let mut c = c_rc.borrow_mut();
c.record_energy();
c.metabolize(time_step, time);
}
self.update_brains();
let use_output = true;
if use_output {
for c_rc in &self.creatures {
let creature: &mut SoftBody<B> = &mut c_rc.borrow_mut();
let mut env = EnvironmentMut::new(
&mut self.terrain,
&mut creature.base,
board_size,
time,
&self.climate,
&self.soft_bodies_in_positions,
c_rc.clone(),
);
creature.brain.use_output(&mut env, time_step);
}
}
}
// #[cfg(multithreading)]
pub fn move_creatures(&mut self, time_step: f64) {
let board_size = self.get_board_size();
for c in &self.creatures {
c.apply_motions(
time_step * OBJECT_TIMESTEPS_PER_YEAR,
board_size,
&self.terrain,
&mut self.soft_bodies_in_positions,
);
}
}
pub fn prepare_for_drawing(&mut self) {
self.terrain.update_all(self.year, &self.climate);
}
/// Checks for all creatures whether they are fit enough to live and kills them off if they're not.
///
/// Utilizes the `should_die` function of `SoftBody`.
fn remove_dead_creatures(&mut self) {
let time = self.get_time();
let board_size = self.get_board_size();
let terrain = &mut self.terrain;
let climate = &self.climate;
let sbip = &mut self.soft_bodies_in_positions;
// TODO: possibly optimise code
let mut i = 0;
while i < self.creatures.len() {
// let creature = &mut self.creatures[i];
if self.creatures[i].borrow().should_die() {
self.creatures[i].return_to_earth(time, board_size, terrain, climate, sbip);
self.selected_creature
.unselect_if_dead(self.creatures[i].clone());
self.creatures.remove(i);
// println!("Dead!");
} else {
i += 1;
}
}
}
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_growth_since(&self, last_updated: f64) -> f64 {
return self
.climate
.get_growth_over_time_range(self.year, last_updated);
}
/// Returns the current growth rate (temperature) based on the season.
///
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_current_growth_rate(&self) -> f64 {
self.climate.get_growth_rate(self.year)
}
/// Returns the current time, i.e. `self.year`.
pub fn get_time(&self) -> f64 {
return self.year;
}
/// Returns a tuple with the width and height of this `Board`.
///
/// Equivalent to `(board.get_board_width(), board.get_board_height())`.
pub fn get_board_size(&self) -> (usize, usize) {
return (self.board_width, self.board_height);
}
/// Returns the width of the board.
pub fn get_board_width(&self) -> usize {
return self.board_width;
}
/// Returns the height of the board.
pub fn get_board_height(&self) -> usize {
return self.board_height;
}
/// Returns the minimum number of creatures that should be on the `Board`.
///
/// When the population drops below this, `maintain_creature_minimum()` spawns new creatures to fill the gap.
pub fn get_creature_minimum(&self) -> usize {
self.creature_minimum
}
/// Returns `self.creature_id_up_to`
pub fn get_creature_id_up_to(&self) -> usize {
self.creature_id_up_to
}
/// Gets the size of the current population; i.e. how many creatures are currently alive.
pub fn get_population_size(&self) -> usize {
return self.creatures.len();
}
/// Returns a `String` representing the current season.
///
/// Can be either "Winter", "Spring", "Summer" or "Autumn".
pub fn get_season(&self) -> String {
const SEASONS: [&str; 4] = ["Winter", "Spring", "Summer", "Autumn"];
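// The fractional part of `year` selects the quarter: [0.0, 0.25) is Winter, [0.25, 0.5) is Spring, and so on.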
let season: usize = ((self.year % 1.0) * 4.0).floor() as usize;
return SEASONS[season].to_string();
}
}
impl<B: NeuralNet + serde::de::DeserializeOwned> Board<B> {
pub fn load_from<P: AsRef<std::path::Path>>(path: P) -> Result<Board<B>, Box<std::error::Error>> {
let file = std::fs::File::open(path)?;
Ok({
use crate::serde_structs::board::BoardSerde;
let ir: BoardSerde<B> = bincode::deserialize_from(file)?;
ir.into()
})
}
}
impl<B: NeuralNet + serde::Serialize> Board<B> {
pub fn save_to<P: AsRef<std::path::Path>>(
self,
path: P,
) -> Result<(), Box<std::error::Error>> {
let file = std::fs::File::create(path)?;
bincode::serialize_into(file, &crate::serde_structs::board::BoardSerde::from(self))?;
Ok(())
}
} | random_line_split |
|
board.rs | //! The central part of this crate, uses all modules to load and run our world in memory.
//!
//! The `Board` struct is technically all you need to start your world but then you wouldn't be able to see it!
//! Graphics are provided by the [graphics] module, although you could implement your own.
//!
//! TODO: documentation.
extern crate bincode;
extern crate rand;
#[cfg(multithreading)]
extern crate rayon;
use crate::brain::{Brain, GenerateRandom, NeuralNet, RecombinationInfinite};
use crate::climate::Climate;
use crate::constants::*;
use crate::sbip::SoftBodiesInPositions;
use crate::softbody::{HLSoftBody, SoftBody};
use crate::terrain::Terrain;
/// The number of times per year an object is updated.
///
/// TODO: eliminate this variable because it's not needed.
const OBJECT_TIMESTEPS_PER_YEAR: f64 = 100.0;
const _POPULATION_HISTORY_LENGTH: usize = 200;
pub type BoardSize = (usize, usize);
pub type BoardCoordinate = (usize, usize);
#[derive(Clone)]
pub struct BoardPreciseCoordinate(pub f64, pub f64);
impl BoardPreciseCoordinate {
pub fn unpack(&self) -> (f64, f64) {
return (self.0, self.1);
}
}
impl From<BoardPreciseCoordinate> for BoardCoordinate {
fn from(bpc: BoardPreciseCoordinate) -> BoardCoordinate {
let (x, y) = bpc.unpack();
(x.floor() as usize, y.floor() as usize)
}
}
pub struct SelectedCreature<B: NeuralNet>(pub Option<HLSoftBody<B>>);
impl<B: NeuralNet> Default for SelectedCreature<B> {
fn default() -> Self {
SelectedCreature(None)
}
}
impl<B: NeuralNet> SelectedCreature<B> {
/// Checks if the given creature was selected and if so, removes it by setting `self.0` to `None`.
pub fn unselect_if_dead(&mut self, creature: HLSoftBody<B>) {
if let Some(sel_creature) = &self.0 {
// If `creature` isn't the same as `self.selected_creature`.
if *sel_creature != creature {
// Then don't change to `None`.
return;
}
// Else go on
}
self.0 = None;
}
pub fn select(&mut self, creature: HLSoftBody<B>) {
self.0 = Some(creature);
}
pub fn deselect(&mut self) {
self.0 = None;
}
}
pub struct Board<B: NeuralNet = Brain> {
// Fields relevant for the board itself.
board_width: usize,
board_height: usize,
pub terrain: Terrain,
// Fields relevant for the creatures.
creature_minimum: usize,
pub soft_bodies_in_positions: SoftBodiesInPositions<B>,
pub creatures: Vec<HLSoftBody<B>>,
creature_id_up_to: usize,
// _creature_rank_metric: usize,
// Fields relevant for time or history
year: f64,
// Fields relevant for temperature
pub climate: Climate,
// Miscelanious
pub selected_creature: SelectedCreature<B>,
}
impl<B: NeuralNet + GenerateRandom> Default for Board<B> {
fn default() -> Self {
let board_size = DEFAULT_BOARD_SIZE;
let noise_step_size = DEFAULT_NOISE_STEP_SIZE;
let creature_minimum = DEFAULT_CREATURE_MINIMUM;
let min_temp = DEFAULT_MIN_TEMP;
let max_temp = DEFAULT_MAX_TEMP;
return Board::new_random(
board_size,
noise_step_size,
creature_minimum,
min_temp,
max_temp,
);
}
}
impl<B: NeuralNet> Board<B> {
pub fn new(board_width: usize, board_height: usize, terrain: Terrain, creature_minimum: usize, soft_bodies_in_positions: SoftBodiesInPositions<B>,
creatures: Vec<HLSoftBody<B>>, creature_id_up_to: usize, year: f64, climate: Climate, selected_creature: SelectedCreature<B>) -> Board<B>{
Board {
board_width,
board_height,
terrain,
creature_minimum,
soft_bodies_in_positions,
creatures,
creature_id_up_to,
year,
climate,
selected_creature,
}
}
}
impl<B: NeuralNet + GenerateRandom> Board<B> {
/// Randomly generates a new `Board`.
pub fn new_random(
board_size: BoardSize,
noise_step_size: f64,
creature_minimum: usize,
min_temp: f64,
max_temp: f64,
) -> Self {
let creatures = Vec::with_capacity(creature_minimum);
// Initialize climate.
let mut climate = Climate::new(min_temp, max_temp);
climate.update(0.0);
let mut board = Board {
board_width: board_size.0,
board_height: board_size.1,
terrain: Terrain::generate_perlin(board_size, noise_step_size),
creature_minimum,
soft_bodies_in_positions: SoftBodiesInPositions::new_allocated(board_size),
creatures,
creature_id_up_to: 0,
year: 0.0,
climate,
selected_creature: SelectedCreature::default(),
};
// Initialize creatures.
board.maintain_creature_minimum();
return board;
}
/// Maintains the creature minimum by adding random creatures until there are at least `self.creature_minimum` creatures.
///
/// # Processing equivalent
/// This function is the equivalent of *Board.pde/maintainCreatureMinimum* with *choosePreexisting* set to false.
fn maintain_creature_minimum(&mut self) {
while self.creatures.len() < self.creature_minimum {
let board_size = self.get_board_size();
let creature = HLSoftBody::from(SoftBody::new_random(board_size, self.year));
// Initialize in `SoftBodiesInPositions` as well.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
// Just to set the prevSBIP variables.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
self.creatures.push(creature);
self.creature_id_up_to += 1;
}
}
}
impl<B: NeuralNet + RecombinationInfinite + GenerateRandom> Board<B> {
pub fn update(&mut self, time_step: f64) {
self.year += time_step;
self.climate.update(self.year);
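// Compare the growth-rate change just before and just after this frame;
// a sign flip means the temperature curve passed an extremum this frame.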
let temp_change_into_frame =
self.climate.get_temperature() - self.climate.get_growth_rate(self.year - time_step);
let temp_change_out_of_frame =
self.climate.get_growth_rate(self.year + time_step) - self.climate.get_temperature();
if temp_change_into_frame * temp_change_out_of_frame < 0.0 {
// Temperature change flipped direction
self.terrain.update_all(self.year, &self.climate);
}
self.update_creatures(time_step);
// Kill weak creatures.
self.remove_dead_creatures();
// Let creatures reproduce
self.creatures_reproduce();
// Experimental: this was moved from above to always keep the creature minimum.
self.maintain_creature_minimum();
// Move the creatures around on the board
self.move_creatures(time_step);
}
}
impl<B: NeuralNet + RecombinationInfinite> Board<B> {
fn creatures_reproduce(&mut self) {
let mut babies = Vec::new();
// Keep the borrow checker happy
{
let time = self.get_time();
let board_size = self.get_board_size();
let sbip = &mut self.soft_bodies_in_positions;
for c in &mut self.creatures {
let maybe_baby = c.try_reproduce(time, sbip, board_size);
if let Some(baby) = maybe_baby {
babies.push(baby);
}
}
}
babies.into_iter().for_each(|c| self.creatures.push(c));
}
}
impl<B: NeuralNet> Board<B> {
/// Selects the oldest creature still alive.
pub fn select_oldest(&mut self) {
let oldest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_birth_time() < c_old.borrow().get_birth_time() {
c
} else {
c_old
}
});
self.selected_creature.select(oldest.clone());
}
/// Selects the biggest creature.
pub fn select_biggest(&mut self) {
let biggest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_energy() > c_old.borrow().get_energy() {
c
} else {
c_old
}
});
self.selected_creature.select(biggest.clone());
}
#[cfg(not(multithreading))]
fn update_brains(&mut self) {
self.creatures
.iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let creature: &mut SoftBody<B> = &mut c;
let env = crate::brain::Environment::new(&self.terrain, &creature.base);
creature.brain.run_with(&env);
});
}
#[cfg(multithreading)]
fn update_brains(&mut self) {
self.creatures
.par_iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let env = crate::brain::Environment::new(&self.terrain, &c.base);
c.brain.run_with(&env);
});
}
pub fn update_creatures(&mut self, time_step: f64) {
use crate::brain::EnvironmentMut;
let time = self.year;
let board_size = self.get_board_size();
for c_rc in &self.creatures {
// These functions call `borrow_mut()`
c_rc.collide(&self.soft_bodies_in_positions);
let mut c = c_rc.borrow_mut();
c.record_energy();
c.metabolize(time_step, time);
}
self.update_brains();
let use_output = true;
if use_output {
for c_rc in &self.creatures {
let creature: &mut SoftBody<B> = &mut c_rc.borrow_mut();
let mut env = EnvironmentMut::new(
&mut self.terrain,
&mut creature.base,
board_size,
time,
&self.climate,
&self.soft_bodies_in_positions,
c_rc.clone(),
);
creature.brain.use_output(&mut env, time_step);
}
}
}
// #[cfg(multithreading)]
pub fn move_creatures(&mut self, time_step: f64) {
let board_size = self.get_board_size();
for c in &self.creatures {
c.apply_motions(
time_step * OBJECT_TIMESTEPS_PER_YEAR,
board_size,
&self.terrain,
&mut self.soft_bodies_in_positions,
);
}
}
pub fn prepare_for_drawing(&mut self) {
self.terrain.update_all(self.year, &self.climate);
}
/// Checks for all creatures whether they are fit enough to live and kills them off if they're not.
///
/// Utilizes the `should_die` function of `SoftBody`.
fn remove_dead_creatures(&mut self) {
let time = self.get_time();
let board_size = self.get_board_size();
let terrain = &mut self.terrain;
let climate = &self.climate;
let sbip = &mut self.soft_bodies_in_positions;
// TODO: possibly optimise code
let mut i = 0;
while i < self.creatures.len() {
// let creature = &mut self.creatures[i];
if self.creatures[i].borrow().should_die() {
self.creatures[i].return_to_earth(time, board_size, terrain, climate, sbip);
self.selected_creature
.unselect_if_dead(self.creatures[i].clone());
self.creatures.remove(i);
// println!("Dead!");
} else {
i += 1;
}
}
}
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_growth_since(&self, last_updated: f64) -> f64 {
return self
.climate
.get_growth_over_time_range(self.year, last_updated);
}
/// Returns the current growth rate (temperature) based on the season.
///
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn | (&self) -> f64 {
self.climate.get_growth_rate(self.year)
}
/// Returns the current time, i.e. `self.year`.
pub fn get_time(&self) -> f64 {
return self.year;
}
/// Returns a tuple with the width and height of this `Board`.
///
/// Equivalent to `(board.get_board_width(), board.get_board_height())`.
pub fn get_board_size(&self) -> (usize, usize) {
return (self.board_width, self.board_height);
}
/// Returns the width of the board.
pub fn get_board_width(&self) -> usize {
return self.board_width;
}
/// Returns the height of the board.
pub fn get_board_height(&self) -> usize {
return self.board_height;
}
/// Returns the minimum number of creatures that should be on the `Board`.
///
/// When the population drops below this, `maintain_creature_minimum()` spawns new creatures to fill the gap.
pub fn get_creature_minimum(&self) -> usize {
self.creature_minimum
}
/// Returns `self.creature_id_up_to`
pub fn get_creature_id_up_to(&self) -> usize {
self.creature_id_up_to
}
/// Gets the size of the current population; i.e. how many creatures are currently alive.
pub fn get_population_size(&self) -> usize {
return self.creatures.len();
}
/// Returns a `String` representing the current season.
///
/// Can be either "Winter", "Spring", "Summer" or "Autumn".
pub fn get_season(&self) -> String {
const SEASONS: [&str; 4] = ["Winter", "Spring", "Summer", "Autumn"];
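// The fractional part of `year` selects the quarter: [0.0, 0.25) is Winter, [0.25, 0.5) is Spring, and so on.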
let season: usize = ((self.year % 1.0) * 4.0).floor() as usize;
return SEASONS[season].to_string();
}
}
impl<B: NeuralNet + serde::de::DeserializeOwned> Board<B> {
pub fn load_from<P: AsRef<std::path::Path>>(path: P) -> Result<Board<B>, Box<std::error::Error>> {
let file = std::fs::File::open(path)?;
Ok({
use crate::serde_structs::board::BoardSerde;
let ir: BoardSerde<B> = bincode::deserialize_from(file)?;
ir.into()
})
}
}
impl<B: NeuralNet + serde::Serialize> Board<B> {
pub fn save_to<P: AsRef<std::path::Path>>(
self,
path: P,
) -> Result<(), Box<std::error::Error>> {
let file = std::fs::File::create(path)?;
bincode::serialize_into(file, &crate::serde_structs::board::BoardSerde::from(self))?;
Ok(())
}
}
| get_current_growth_rate | identifier_name |
board.rs | //! The central part of this crate, uses all modules to load and run our world in memory.
//!
//! The `Board` struct is technically all you need to start your world but then you wouldn't be able to see it!
//! Graphics are provided by the [graphics] module, although you could implement your own.
//!
//! TODO: documentation.
extern crate bincode;
extern crate rand;
#[cfg(multithreading)]
extern crate rayon;
use crate::brain::{Brain, GenerateRandom, NeuralNet, RecombinationInfinite};
use crate::climate::Climate;
use crate::constants::*;
use crate::sbip::SoftBodiesInPositions;
use crate::softbody::{HLSoftBody, SoftBody};
use crate::terrain::Terrain;
/// The number of times per year an object is updated.
///
/// TODO: eliminate this variable because it's not needed.
const OBJECT_TIMESTEPS_PER_YEAR: f64 = 100.0;
const _POPULATION_HISTORY_LENGTH: usize = 200;
pub type BoardSize = (usize, usize);
pub type BoardCoordinate = (usize, usize);
#[derive(Clone)]
pub struct BoardPreciseCoordinate(pub f64, pub f64);
impl BoardPreciseCoordinate {
pub fn unpack(&self) -> (f64, f64) {
return (self.0, self.1);
}
}
impl From<BoardPreciseCoordinate> for BoardCoordinate {
fn from(bpc: BoardPreciseCoordinate) -> BoardCoordinate {
let (x, y) = bpc.unpack();
(x.floor() as usize, y.floor() as usize)
}
}
pub struct SelectedCreature<B: NeuralNet>(pub Option<HLSoftBody<B>>);
impl<B: NeuralNet> Default for SelectedCreature<B> {
fn default() -> Self {
SelectedCreature(None)
}
}
impl<B: NeuralNet> SelectedCreature<B> {
/// Checks if the given creature was selected and if so, removes it by setting `self.0` to `None`.
pub fn unselect_if_dead(&mut self, creature: HLSoftBody<B>) {
if let Some(sel_creature) = &self.0 {
// If `creature` isn't the same as `self.selected_creature`.
if *sel_creature != creature {
// Then don't change to `None`.
return;
}
// Else go on
}
self.0 = None;
}
pub fn select(&mut self, creature: HLSoftBody<B>) {
self.0 = Some(creature);
}
pub fn deselect(&mut self) {
self.0 = None;
}
}
pub struct Board<B: NeuralNet = Brain> {
// Fields relevant for the board itself.
board_width: usize,
board_height: usize,
pub terrain: Terrain,
// Fields relevant for the creatures.
creature_minimum: usize,
pub soft_bodies_in_positions: SoftBodiesInPositions<B>,
pub creatures: Vec<HLSoftBody<B>>,
creature_id_up_to: usize,
// _creature_rank_metric: usize,
// Fields relevant for time or history
year: f64,
// Fields relevant for temperature
pub climate: Climate,
// Miscelanious
pub selected_creature: SelectedCreature<B>,
}
impl<B: NeuralNet + GenerateRandom> Default for Board<B> {
fn default() -> Self {
let board_size = DEFAULT_BOARD_SIZE;
let noise_step_size = DEFAULT_NOISE_STEP_SIZE;
let creature_minimum = DEFAULT_CREATURE_MINIMUM;
let min_temp = DEFAULT_MIN_TEMP;
let max_temp = DEFAULT_MAX_TEMP;
return Board::new_random(
board_size,
noise_step_size,
creature_minimum,
min_temp,
max_temp,
);
}
}
impl<B: NeuralNet> Board<B> {
pub fn new(board_width: usize, board_height: usize, terrain: Terrain, creature_minimum: usize, soft_bodies_in_positions: SoftBodiesInPositions<B>,
creatures: Vec<HLSoftBody<B>>, creature_id_up_to: usize, year: f64, climate: Climate, selected_creature: SelectedCreature<B>) -> Board<B>{
Board {
board_width,
board_height,
terrain,
creature_minimum,
soft_bodies_in_positions,
creatures,
creature_id_up_to,
year,
climate,
selected_creature,
}
}
}
impl<B: NeuralNet + GenerateRandom> Board<B> {
/// Randomly generates a new `Board`.
pub fn new_random(
board_size: BoardSize,
noise_step_size: f64,
creature_minimum: usize,
min_temp: f64,
max_temp: f64,
) -> Self {
let creatures = Vec::with_capacity(creature_minimum);
// Initialize climate.
let mut climate = Climate::new(min_temp, max_temp);
climate.update(0.0);
let mut board = Board {
board_width: board_size.0,
board_height: board_size.1,
terrain: Terrain::generate_perlin(board_size, noise_step_size),
creature_minimum,
soft_bodies_in_positions: SoftBodiesInPositions::new_allocated(board_size),
creatures,
creature_id_up_to: 0,
year: 0.0,
climate,
selected_creature: SelectedCreature::default(),
};
// Initialize creatures.
board.maintain_creature_minimum();
return board;
}
/// Maintains the creature minimum by adding random creatures until there are at least `self.creature_minimum` creatures.
///
/// # Processing equivalent
/// This function is the equivalent of *Board.pde/maintainCreatureMinimum* with *choosePreexisting* set to false.
fn maintain_creature_minimum(&mut self) {
while self.creatures.len() < self.creature_minimum {
let board_size = self.get_board_size();
let creature = HLSoftBody::from(SoftBody::new_random(board_size, self.year));
// Initialize in `SoftBodiesInPositions` as well.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
// Just to set the prevSBIP variables.
creature.set_sbip(&mut self.soft_bodies_in_positions, board_size);
self.creatures.push(creature);
self.creature_id_up_to += 1;
}
}
}
impl<B: NeuralNet + RecombinationInfinite + GenerateRandom> Board<B> {
pub fn update(&mut self, time_step: f64) {
self.year += time_step;
self.climate.update(self.year);
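// Compare the growth-rate change just before and just after this frame;
// a sign flip means the temperature curve passed an extremum this frame.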
let temp_change_into_frame =
self.climate.get_temperature() - self.climate.get_growth_rate(self.year - time_step);
let temp_change_out_of_frame =
self.climate.get_growth_rate(self.year + time_step) - self.climate.get_temperature();
if temp_change_into_frame * temp_change_out_of_frame < 0.0 {
// Temperature change flipped direction
self.terrain.update_all(self.year, &self.climate);
}
self.update_creatures(time_step);
// Kill weak creatures.
self.remove_dead_creatures();
// Let creatures reproduce
self.creatures_reproduce();
// Experimental: this was moved from above to always keep the creature minimum.
self.maintain_creature_minimum();
// Move the creatures around on the board
self.move_creatures(time_step);
}
}
impl<B: NeuralNet + RecombinationInfinite> Board<B> {
fn creatures_reproduce(&mut self) {
let mut babies = Vec::new();
// Keep the borrow checker happy
{
let time = self.get_time();
let board_size = self.get_board_size();
let sbip = &mut self.soft_bodies_in_positions;
for c in &mut self.creatures {
let maybe_baby = c.try_reproduce(time, sbip, board_size);
if let Some(baby) = maybe_baby {
babies.push(baby);
}
}
}
babies.into_iter().for_each(|c| self.creatures.push(c));
}
}
impl<B: NeuralNet> Board<B> {
/// Selects the oldest creature still alive.
pub fn select_oldest(&mut self) {
let oldest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_birth_time() < c_old.borrow().get_birth_time() {
c
} else {
c_old
}
});
self.selected_creature.select(oldest.clone());
}
/// Selects the biggest creature.
pub fn select_biggest(&mut self) {
let biggest = self.creatures.iter().fold(&self.creatures[0], |c_old, c| {
if c.borrow().get_energy() > c_old.borrow().get_energy() {
c
} else {
c_old
}
});
self.selected_creature.select(biggest.clone());
}
#[cfg(not(multithreading))]
fn update_brains(&mut self) {
self.creatures
.iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let creature: &mut SoftBody<B> = &mut c;
let env = crate::brain::Environment::new(&self.terrain, &creature.base);
creature.brain.run_with(&env);
});
}
#[cfg(multithreading)]
fn update_brains(&mut self) {
self.creatures
.par_iter()
.map(|c| c.borrow_mut())
.for_each(|mut c| {
let env = crate::brain::Environment::new(&self.terrain, &c.base);
c.brain.run_with(&env);
});
}
pub fn update_creatures(&mut self, time_step: f64) {
use crate::brain::EnvironmentMut;
let time = self.year;
let board_size = self.get_board_size();
for c_rc in &self.creatures {
// These functions call `borrow_mut()`
c_rc.collide(&self.soft_bodies_in_positions);
let mut c = c_rc.borrow_mut();
c.record_energy();
c.metabolize(time_step, time);
}
self.update_brains();
let use_output = true;
if use_output {
for c_rc in &self.creatures {
let creature: &mut SoftBody<B> = &mut c_rc.borrow_mut();
let mut env = EnvironmentMut::new(
&mut self.terrain,
&mut creature.base,
board_size,
time,
&self.climate,
&self.soft_bodies_in_positions,
c_rc.clone(),
);
creature.brain.use_output(&mut env, time_step);
}
}
}
// #[cfg(multithreading)]
pub fn move_creatures(&mut self, time_step: f64) {
let board_size = self.get_board_size();
for c in &self.creatures {
c.apply_motions(
time_step * OBJECT_TIMESTEPS_PER_YEAR,
board_size,
&self.terrain,
&mut self.soft_bodies_in_positions,
);
}
}
pub fn prepare_for_drawing(&mut self) {
self.terrain.update_all(self.year, &self.climate);
}
/// Checks for all creatures whether they are fit enough to live and kills them off if they're not.
///
/// Utilizes the `should_die` function of `SoftBody`.
fn remove_dead_creatures(&mut self) {
let time = self.get_time();
let board_size = self.get_board_size();
let terrain = &mut self.terrain;
let climate = &self.climate;
let sbip = &mut self.soft_bodies_in_positions;
// TODO: possibly optimise code
let mut i = 0;
while i < self.creatures.len() {
// let creature = &mut self.creatures[i];
if self.creatures[i].borrow().should_die() {
self.creatures[i].return_to_earth(time, board_size, terrain, climate, sbip);
self.selected_creature
.unselect_if_dead(self.creatures[i].clone());
self.creatures.remove(i);
// println!("Dead!");
} else {
i += 1;
}
}
}
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_growth_since(&self, last_updated: f64) -> f64 {
return self
.climate
.get_growth_over_time_range(self.year, last_updated);
}
/// Returns the current growth rate (temperature) based on the season.
///
/// Performs the same function on `self.climate`, filling in `self.year`.
pub fn get_current_growth_rate(&self) -> f64 {
self.climate.get_growth_rate(self.year)
}
/// Returns the current time, i.e. `self.year`.
pub fn get_time(&self) -> f64 {
return self.year;
}
/// Returns a tuple with the width and height of this `Board`.
///
/// Equivalent to `(board.get_board_width(), board.get_board_height())`.
pub fn get_board_size(&self) -> (usize, usize) {
return (self.board_width, self.board_height);
}
/// Returns the width of the board.
pub fn get_board_width(&self) -> usize {
return self.board_width;
}
/// Returns the height of the board.
pub fn get_board_height(&self) -> usize {
return self.board_height;
}
/// Returns the minimum number of creatures that should be on the `Board`.
///
/// When the population drops below this, `maintain_creature_minimum()` spawns new creatures to fill the gap.
pub fn get_creature_minimum(&self) -> usize |
/// Returns `self.creature_id_up_to`
pub fn get_creature_id_up_to(&self) -> usize {
self.creature_id_up_to
}
/// Gets the size of the current population; i.e. how many creatures are currently alive.
pub fn get_population_size(&self) -> usize {
return self.creatures.len();
}
/// Returns a `String` representing the current season.
///
/// Can be either "Winter", "Spring", "Summer" or "Autumn".
pub fn get_season(&self) -> String {
const SEASONS: [&str; 4] = ["Winter", "Spring", "Summer", "Autumn"];
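// The fractional part of `year` selects the quarter: [0.0, 0.25) is Winter, [0.25, 0.5) is Spring, and so on.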
let season: usize = ((self.year % 1.0) * 4.0).floor() as usize;
return SEASONS[season].to_string();
}
}
impl<B: NeuralNet + serde::de::DeserializeOwned> Board<B> {
pub fn load_from<P: AsRef<std::path::Path>>(path: P) -> Result<Board<B>, Box<std::error::Error>> {
let file = std::fs::File::open(path)?;
Ok({
use crate::serde_structs::board::BoardSerde;
let ir: BoardSerde<B> = bincode::deserialize_from(file)?;
ir.into()
})
}
}
impl<B: NeuralNet + serde::Serialize> Board<B> {
pub fn save_to<P: AsRef<std::path::Path>>(
self,
path: P,
) -> Result<(), Box<std::error::Error>> {
let file = std::fs::File::create(path)?;
bincode::serialize_into(file, &crate::serde_structs::board::BoardSerde::from(self))?;
Ok(())
}
}
| {
self.creature_minimum
} | identifier_body |
stream.rs | #[cfg(uds_peercred)]
use super::util::get_peer_ucred;
#[cfg(uds_supported)]
use super::util::raw_shutdown;
#[cfg(unix)]
use super::super::{close_by_error, handle_fd_error};
use super::{
imports::*,
util::{
check_ancillary_unsound, enable_passcred, mk_msghdr_r, mk_msghdr_w, raw_get_nonblocking,
raw_set_nonblocking,
},
AncillaryData, AncillaryDataBuf, EncodedAncillaryData, ToUdSocketPath, UdSocketPath,
};
use std::{
fmt::{self, Debug, Formatter},
io::{self, IoSlice, IoSliceMut, Read, Write},
iter,
mem::size_of,
net::Shutdown,
};
use to_method::To;
/// A Unix domain socket byte stream, obtained either from [`UdStreamListener`](super::UdStreamListener) or by connecting to an existing server.
///
/// # Examples
/// Basic example:
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # #[cfg(unix)] {
/// use interprocess::os::unix::udsocket::UdStream;
/// use std::io::prelude::*;
///
/// let mut conn = UdStream::connect("/tmp/example1.sock")?;
/// conn.write_all(b"Hello from client!")?;
/// let mut string_buffer = String::new();
/// conn.read_to_string(&mut string_buffer)?;
/// println!("Server answered: {}", string_buffer);
/// # }
/// # Ok(()) }
/// ```
pub struct UdStream {
fd: FdOps,
}
impl UdStream {
/// Connects to a Unix domain socket server at the specified path.
///
/// See [`ToUdSocketPath`] for an example of using various string types to specify socket paths.
///
/// # System calls
/// - `socket`
/// - `connect`
pub fn connect<'a>(path: impl ToUdSocketPath<'a>) -> io::Result<Self> {
Self::_connect(path.to_socket_path()?)
}
fn _connect(path: UdSocketPath<'_>) -> io::Result<Self> | if !success {
unsafe { return Err(handle_fd_error(socket)) };
}
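// Enable credential passing (SO_PASSCRED) so peer credentials can be received as ancillary data.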
unsafe { enable_passcred(socket).map_err(close_by_error(socket))? };
Ok(unsafe { Self::from_raw_fd(socket) })
}
/// Receives bytes from the socket stream.
///
/// # System calls
/// - `read`
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
/// Receives bytes from the socket stream, making use of [scatter input] for the main data.
///
/// # System calls
/// - `readv`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn recv_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.fd.read_vectored(bufs)
}
/// Receives both bytes and ancillary data from the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
pub fn recv_ancillary<'a: 'b, 'b>(
&self,
buf: &mut [u8],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.recv_ancillary_vectored(&mut [IoSliceMut::new(buf)], abuf)
}
/// Receives bytes and ancillary data from the socket stream, making use of [scatter input] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
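/// On success, returns the number of main-data bytes received and the size of the received ancillary data, in bytes.
///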
/// # System calls
/// - `recvmsg`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn recv_ancillary_vectored<'a: 'b, 'b>(
&self,
bufs: &mut [IoSliceMut<'_>],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let mut hdr = mk_msghdr_r(bufs, abuf.as_mut())?;
let (success, bytes_read) = unsafe {
let result = libc::recvmsg(self.as_raw_fd(), &mut hdr as *mut _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_read, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Sends bytes into the socket stream.
///
/// # System calls
/// - `write`
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
/// Sends bytes into the socket stream, making use of [gather output] for the main data.
///
/// # System calls
/// - `writev`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.fd.write_vectored(bufs)
}
/// Sends bytes and ancillary data into the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
pub fn send_ancillary<'a>(
&self,
buf: &[u8],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.send_ancillary_vectored(&[IoSlice::new(buf)], ancillary_data)
}
/// Sends bytes and ancillary data into the socket stream, making use of [gather output] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
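/// On success, returns the number of main-data bytes sent and the size of the sent ancillary data, in bytes.
///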
/// # System calls
/// - `sendmsg`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn send_ancillary_vectored<'a>(
&self,
bufs: &[IoSlice<'_>],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let abuf = ancillary_data
.into_iter()
.collect::<EncodedAncillaryData<'_>>();
let hdr = mk_msghdr_w(bufs, abuf.as_ref())?;
let (success, bytes_written) = unsafe {
let result = libc::sendmsg(self.as_raw_fd(), &hdr as *const _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_written, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Shuts down the read, write, or both halves of the stream. See [`Shutdown`].
///
/// Attempting to call this method with the same `how` argument multiple times may return `Ok(())` every time or it may return an error the second time it is called, depending on the platform. You must either avoid using the same value twice or ignore the error entirely.
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
unsafe { raw_shutdown(self.as_raw_fd(), how) }
}
/// Enables or disables the nonblocking mode for the stream. By default, it is disabled.
///
/// In nonblocking mode, calls to the `recv…` methods and the `Read` trait methods will never wait for at least one byte of data to become available; calls to `send…` methods and the `Write` trait methods will never wait for the other side to remove enough bytes from the buffer for the write operation to be performed. Those operations will instead return a [`WouldBlock`] error immediately, allowing the thread to perform other useful operations in the meantime.
///
/// [`accept`]: #method.accept " "
/// [`incoming`]: #method.incoming " "
/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock " "
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
unsafe { raw_set_nonblocking(self.fd.0, nonblocking) }
}
/// Checks whether the stream is currently in nonblocking mode or not.
pub fn is_nonblocking(&self) -> io::Result<bool> {
unsafe { raw_get_nonblocking(self.fd.0) }
}
/// Fetches the credentials of the other end of the connection without using ancillary data. The returned structure contains the process identifier, user identifier and group identifier of the peer.
#[cfg(any(doc, uds_peercred))]
#[cfg_attr( // uds_peercred template
feature = "doc_cfg",
doc(cfg(any(
all(
target_os = "linux",
any(
target_env = "gnu",
target_env = "musl",
target_env = "musleabi",
target_env = "musleabihf"
)
),
target_os = "emscripten",
target_os = "redox",
target_os = "haiku"
)))
)]
pub fn get_peer_credentials(&self) -> io::Result<ucred> {
unsafe { get_peer_ucred(self.fd.0) }
}
}
impl Read for UdStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut abuf = AncillaryDataBuf::Owned(Vec::new());
self.recv_ancillary_vectored(bufs, &mut abuf).map(|x| x.0)
}
}
impl Write for UdStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.send_ancillary_vectored(bufs, iter::empty())
.map(|x| x.0)
}
fn flush(&mut self) -> io::Result<()> {
// You cannot flush a socket
Ok(())
}
}
impl Debug for UdStream {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("UdStream")
.field("file_descriptor", &self.as_raw_fd())
.finish()
}
}
#[cfg(unix)]
impl AsRawFd for UdStream {
fn as_raw_fd(&self) -> c_int {
self.fd.as_raw_fd()
}
}
#[cfg(unix)]
impl IntoRawFd for UdStream {
fn into_raw_fd(self) -> c_int {
self.fd.into_raw_fd()
}
}
#[cfg(unix)]
impl FromRawFd for UdStream {
unsafe fn from_raw_fd(fd: c_int) -> Self {
Self { fd: FdOps::new(fd) }
}
}
| {
let addr = path.try_to::<sockaddr_un>()?;
let socket = {
let (success, fd) = unsafe {
let result = libc::socket(AF_UNIX, SOCK_STREAM, 0);
(result != -1, result)
};
if success {
fd
} else {
return Err(io::Error::last_os_error());
}
};
let success = unsafe {
libc::connect(
socket,
&addr as *const _ as *const _,
size_of::<sockaddr_un>() as u32,
)
} != -1; | identifier_body
stream.rs | #[cfg(uds_peercred)]
use super::util::get_peer_ucred;
#[cfg(uds_supported)]
use super::util::raw_shutdown;
#[cfg(unix)]
use super::super::{close_by_error, handle_fd_error};
use super::{
imports::*,
util::{
check_ancillary_unsound, enable_passcred, mk_msghdr_r, mk_msghdr_w, raw_get_nonblocking,
raw_set_nonblocking,
},
AncillaryData, AncillaryDataBuf, EncodedAncillaryData, ToUdSocketPath, UdSocketPath,
};
use std::{
fmt::{self, Debug, Formatter},
io::{self, IoSlice, IoSliceMut, Read, Write},
iter,
mem::size_of,
net::Shutdown,
};
use to_method::To;
/// A Unix domain socket byte stream, obtained either from [`UdStreamListener`](super::UdStreamListener) or by connecting to an existing server.
///
/// # Examples
/// Basic example:
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # #[cfg(unix)] {
/// use interprocess::os::unix::udsocket::UdStream;
/// use std::io::prelude::*;
///
/// let mut conn = UdStream::connect("/tmp/example1.sock")?;
/// conn.write_all(b"Hello from client!")?;
/// let mut string_buffer = String::new();
/// conn.read_to_string(&mut string_buffer)?;
/// println!("Server answered: {}", string_buffer);
/// # }
/// # Ok(()) }
/// ```
pub struct UdStream {
fd: FdOps,
}
impl UdStream {
/// Connects to a Unix domain socket server at the specified path.
///
/// See [`ToUdSocketPath`] for an example of using various string types to specify socket paths.
///
/// # System calls
/// - `socket`
/// - `connect`
pub fn connect<'a>(path: impl ToUdSocketPath<'a>) -> io::Result<Self> {
Self::_connect(path.to_socket_path()?)
}
fn _connect(path: UdSocketPath<'_>) -> io::Result<Self> {
let addr = path.try_to::<sockaddr_un>()?;
let socket = {
let (success, fd) = unsafe {
let result = libc::socket(AF_UNIX, SOCK_STREAM, 0);
(result != -1, result)
};
if success {
fd
} else {
return Err(io::Error::last_os_error());
}
};
let success = unsafe {
libc::connect(
socket,
&addr as *const _ as *const _,
size_of::<sockaddr_un>() as u32,
)
} != -1;
if !success {
unsafe { return Err(handle_fd_error(socket)) };
}
unsafe { enable_passcred(socket).map_err(close_by_error(socket))? };
Ok(unsafe { Self::from_raw_fd(socket) })
}
/// Receives bytes from the socket stream.
///
/// # System calls
/// - `read`
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
/// Receives bytes from the socket stream, making use of [scatter input] for the main data.
///
/// # System calls
/// - `readv`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn | (&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.fd.read_vectored(bufs)
}
/// Receives both bytes and ancillary data from the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
pub fn recv_ancillary<'a: 'b, 'b>(
&self,
buf: &mut [u8],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.recv_ancillary_vectored(&mut [IoSliceMut::new(buf)], abuf)
}
/// Receives bytes and ancillary data from the socket stream, making use of [scatter input] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn recv_ancillary_vectored<'a: 'b, 'b>(
&self,
bufs: &mut [IoSliceMut<'_>],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let mut hdr = mk_msghdr_r(bufs, abuf.as_mut())?;
let (success, bytes_read) = unsafe {
let result = libc::recvmsg(self.as_raw_fd(), &mut hdr as *mut _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_read, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Sends bytes into the socket stream.
///
/// # System calls
/// - `write`
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
/// Sends bytes into the socket stream, making use of [gather output] for the main data.
///
/// # System calls
/// - `writev`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.fd.write_vectored(bufs)
}
/// Sends bytes and ancillary data into the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
pub fn send_ancillary<'a>(
&self,
buf: &[u8],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.send_ancillary_vectored(&[IoSlice::new(buf)], ancillary_data)
}
/// Sends bytes and ancillary data into the socket stream, making use of [gather output] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn send_ancillary_vectored<'a>(
&self,
bufs: &[IoSlice<'_>],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let abuf = ancillary_data
.into_iter()
.collect::<EncodedAncillaryData<'_>>();
let hdr = mk_msghdr_w(bufs, abuf.as_ref())?;
let (success, bytes_written) = unsafe {
let result = libc::sendmsg(self.as_raw_fd(), &hdr as *const _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_written, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Shuts down the read, write, or both halves of the stream. See [`Shutdown`].
///
/// Attempting to call this method with the same `how` argument multiple times may return `Ok(())` every time or it may return an error the second time it is called, depending on the platform. You must either avoid using the same value twice or ignore the error entirely.
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
unsafe { raw_shutdown(self.as_raw_fd(), how) }
}
/// Enables or disables the nonblocking mode for the stream. By default, it is disabled.
///
/// In nonblocking mode, calls to the `recv…` methods and the `Read` trait methods will never wait for at least one byte of data to become available; calls to `send…` methods and the `Write` trait methods will never wait for the other side to remove enough bytes from the buffer for the write operation to be performed. Those operations will instead return a [`WouldBlock`] error immediately, allowing the thread to perform other useful operations in the meantime.
///
/// [`accept`]: #method.accept " "
/// [`incoming`]: #method.incoming " "
/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock " "
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
unsafe { raw_set_nonblocking(self.fd.0, nonblocking) }
}
/// Checks whether the stream is currently in nonblocking mode or not.
pub fn is_nonblocking(&self) -> io::Result<bool> {
unsafe { raw_get_nonblocking(self.fd.0) }
}
/// Fetches the credentials of the other end of the connection without using ancillary data. The returned structure contains the process identifier, user identifier and group identifier of the peer.
#[cfg(any(doc, uds_peercred))]
#[cfg_attr( // uds_peercred template
feature = "doc_cfg",
doc(cfg(any(
all(
target_os = "linux",
any(
target_env = "gnu",
target_env = "musl",
target_env = "musleabi",
target_env = "musleabihf"
)
),
target_os = "emscripten",
target_os = "redox",
target_os = "haiku"
)))
)]
pub fn get_peer_credentials(&self) -> io::Result<ucred> {
unsafe { get_peer_ucred(self.fd.0) }
}
}
impl Read for UdStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut abuf = AncillaryDataBuf::Owned(Vec::new());
self.recv_ancillary_vectored(bufs, &mut abuf).map(|x| x.0)
}
}
impl Write for UdStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.send_ancillary_vectored(bufs, iter::empty())
.map(|x| x.0)
}
fn flush(&mut self) -> io::Result<()> {
// You cannot flush a socket
Ok(())
}
}
impl Debug for UdStream {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("UdStream")
.field("file_descriptor", &self.as_raw_fd())
.finish()
}
}
#[cfg(unix)]
impl AsRawFd for UdStream {
fn as_raw_fd(&self) -> c_int {
self.fd.as_raw_fd()
}
}
#[cfg(unix)]
impl IntoRawFd for UdStream {
fn into_raw_fd(self) -> c_int {
self.fd.into_raw_fd()
}
}
#[cfg(unix)]
impl FromRawFd for UdStream {
unsafe fn from_raw_fd(fd: c_int) -> Self {
Self { fd: FdOps::new(fd) }
}
}
| recv_vectored | identifier_name |
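The hole above resolves to `recv_vectored`. As a hedged illustration of the scatter-input idea with standard-library types only (buffer sizes here are arbitrary, an editor's sketch):

```rust
use std::io::{IoSliceMut, Read, Result};

// Fill a fixed header buffer and a payload buffer with one readv(2)-style call.
fn read_split<R: Read>(src: &mut R) -> Result<usize> {
    let mut header = [0u8; 4];
    let mut payload = [0u8; 60];
    let mut bufs = [IoSliceMut::new(&mut header), IoSliceMut::new(&mut payload)];
    src.read_vectored(&mut bufs)
}
```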
stream.rs | #[cfg(uds_peercred)]
use super::util::get_peer_ucred;
#[cfg(uds_supported)]
use super::util::raw_shutdown;
#[cfg(unix)]
use super::super::{close_by_error, handle_fd_error};
use super::{
imports::*,
util::{
check_ancillary_unsound, enable_passcred, mk_msghdr_r, mk_msghdr_w, raw_get_nonblocking,
raw_set_nonblocking,
},
AncillaryData, AncillaryDataBuf, EncodedAncillaryData, ToUdSocketPath, UdSocketPath,
};
use std::{
fmt::{self, Debug, Formatter},
io::{self, IoSlice, IoSliceMut, Read, Write},
iter,
mem::size_of,
net::Shutdown,
};
use to_method::To;
/// A Unix domain socket byte stream, obtained either from [`UdStreamListener`](super::UdStreamListener) or by connecting to an existing server.
///
/// # Examples
/// Basic example:
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # #[cfg(unix)] {
/// use interprocess::os::unix::udsocket::UdStream;
/// use std::io::prelude::*;
///
/// let mut conn = UdStream::connect("/tmp/example1.sock")?;
/// conn.write_all(b"Hello from client!")?;
/// let mut string_buffer = String::new();
/// conn.read_to_string(&mut string_buffer)?;
/// println!("Server answered: {}", string_buffer);
/// # }
/// # Ok(()) }
/// ```
pub struct UdStream {
fd: FdOps,
}
impl UdStream {
/// Connects to a Unix domain socket server at the specified path.
///
/// See [`ToUdSocketPath`] for an example of using various string types to specify socket paths.
///
/// # System calls
/// - `socket`
/// - `connect`
pub fn connect<'a>(path: impl ToUdSocketPath<'a>) -> io::Result<Self> {
Self::_connect(path.to_socket_path()?)
}
fn _connect(path: UdSocketPath<'_>) -> io::Result<Self> {
let addr = path.try_to::<sockaddr_un>()?;
let socket = {
let (success, fd) = unsafe {
let result = libc::socket(AF_UNIX, SOCK_STREAM, 0);
(result != -1, result)
};
if success {
fd
} else |
};
let success = unsafe {
libc::connect(
socket,
&addr as *const _ as *const _,
size_of::<sockaddr_un>() as u32,
)
} != -1;
if !success {
unsafe { return Err(handle_fd_error(socket)) };
}
unsafe { enable_passcred(socket).map_err(close_by_error(socket))? };
Ok(unsafe { Self::from_raw_fd(socket) })
}
/// Receives bytes from the socket stream.
///
/// # System calls
/// - `read`
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
/// Receives bytes from the socket stream, making use of [scatter input] for the main data.
///
/// # System calls
/// - `readv`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn recv_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.fd.read_vectored(bufs)
}
/// Receives both bytes and ancillary data from the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
pub fn recv_ancillary<'a: 'b, 'b>(
&self,
buf: &mut [u8],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.recv_ancillary_vectored(&mut [IoSliceMut::new(buf)], abuf)
}
/// Receives bytes and ancillary data from the socket stream, making use of [scatter input] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn recv_ancillary_vectored<'a: 'b, 'b>(
&self,
bufs: &mut [IoSliceMut<'_>],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let mut hdr = mk_msghdr_r(bufs, abuf.as_mut())?;
let (success, bytes_read) = unsafe {
let result = libc::recvmsg(self.as_raw_fd(), &mut hdr as *mut _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_read, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Sends bytes into the socket stream.
///
/// # System calls
/// - `write`
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
/// Sends bytes into the socket stream, making use of [gather output] for the main data.
///
/// # System calls
/// - `writev`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.fd.write_vectored(bufs)
}
/// Sends bytes and ancillary data into the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
pub fn send_ancillary<'a>(
&self,
buf: &[u8],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.send_ancillary_vectored(&[IoSlice::new(buf)], ancillary_data)
}
/// Sends bytes and ancillary data into the socket stream, making use of [gather output] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn send_ancillary_vectored<'a>(
&self,
bufs: &[IoSlice<'_>],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let abuf = ancillary_data
.into_iter()
.collect::<EncodedAncillaryData<'_>>();
let hdr = mk_msghdr_w(bufs, abuf.as_ref())?;
let (success, bytes_written) = unsafe {
let result = libc::sendmsg(self.as_raw_fd(), &hdr as *const _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_written, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Shuts down the read, write, or both halves of the stream. See [`Shutdown`].
///
/// Attempting to call this method with the same `how` argument multiple times may return `Ok(())` every time or it may return an error the second time it is called, depending on the platform. You must either avoid using the same value twice or ignore the error entirely.
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
unsafe { raw_shutdown(self.as_raw_fd(), how) }
}
/// Enables or disables the nonblocking mode for the stream. By default, it is disabled.
///
/// In nonblocking mode, calls to the `recv…` methods and the `Read` trait methods will never wait for at least one byte of data to become available; calls to `send…` methods and the `Write` trait methods will never wait for the other side to remove enough bytes from the buffer for the write operation to be performed. Those operations will instead return a [`WouldBlock`] error immediately, allowing the thread to perform other useful operations in the meantime.
///
/// [`accept`]: #method.accept " "
/// [`incoming`]: #method.incoming " "
/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock " "
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
unsafe { raw_set_nonblocking(self.fd.0, nonblocking) }
}
/// Checks whether the stream is currently in nonblocking mode or not.
pub fn is_nonblocking(&self) -> io::Result<bool> {
unsafe { raw_get_nonblocking(self.fd.0) }
}
/// Fetches the credentials of the other end of the connection without using ancillary data. The returned structure contains the process identifier, user identifier and group identifier of the peer.
#[cfg(any(doc, uds_peercred))]
#[cfg_attr( // uds_peercred template
feature = "doc_cfg",
doc(cfg(any(
all(
target_os = "linux",
any(
target_env = "gnu",
target_env = "musl",
target_env = "musleabi",
target_env = "musleabihf"
)
),
target_os = "emscripten",
target_os = "redox",
target_os = "haiku"
)))
)]
pub fn get_peer_credentials(&self) -> io::Result<ucred> {
unsafe { get_peer_ucred(self.fd.0) }
}
}
impl Read for UdStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut abuf = AncillaryDataBuf::Owned(Vec::new());
self.recv_ancillary_vectored(bufs, &mut abuf).map(|x| x.0)
}
}
impl Write for UdStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.send_ancillary_vectored(bufs, iter::empty())
.map(|x| x.0)
}
fn flush(&mut self) -> io::Result<()> {
// You cannot flush a socket
Ok(())
}
}
impl Debug for UdStream {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("UdStream")
.field("file_descriptor", &self.as_raw_fd())
.finish()
}
}
#[cfg(unix)]
impl AsRawFd for UdStream {
fn as_raw_fd(&self) -> c_int {
self.fd.as_raw_fd()
}
}
#[cfg(unix)]
impl IntoRawFd for UdStream {
fn into_raw_fd(self) -> c_int {
self.fd.into_raw_fd()
}
}
#[cfg(unix)]
impl FromRawFd for UdStream {
unsafe fn from_raw_fd(fd: c_int) -> Self {
Self { fd: FdOps::new(fd) }
}
}
| {
return Err(io::Error::last_os_error());
} | conditional_block |
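The filled conditional above is the error arm of a return-value check that recurs throughout this file. A hedged sketch of the same pattern factored into a helper; the name `cvt` is the editor's, not the crate's:

```rust
use std::io;

// Convert a -1/errno style libc return value into an io::Result.
fn cvt(ret: libc::c_int) -> io::Result<libc::c_int> {
    if ret == -1 {
        Err(io::Error::last_os_error())
    } else {
        Ok(ret)
    }
}
```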
stream.rs | #[cfg(uds_peercred)]
use super::util::get_peer_ucred;
#[cfg(uds_supported)]
use super::util::raw_shutdown;
#[cfg(unix)]
use super::super::{close_by_error, handle_fd_error};
use super::{
imports::*,
util::{
check_ancillary_unsound, enable_passcred, mk_msghdr_r, mk_msghdr_w, raw_get_nonblocking,
raw_set_nonblocking,
},
AncillaryData, AncillaryDataBuf, EncodedAncillaryData, ToUdSocketPath, UdSocketPath,
};
use std::{
fmt::{self, Debug, Formatter},
io::{self, IoSlice, IoSliceMut, Read, Write},
iter,
mem::size_of,
net::Shutdown,
};
use to_method::To;
/// A Unix domain socket byte stream, obtained either from [`UdStreamListener`](super::UdStreamListener) or by connecting to an existing server.
///
/// # Examples
/// Basic example:
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # #[cfg(unix)] {
/// use interprocess::os::unix::udsocket::UdStream;
/// use std::io::prelude::*;
/// | /// conn.read_to_string(&mut string_buffer)?;
/// println!("Server answered: {}", string_buffer);
/// # }
/// # Ok(()) }
/// ```
pub struct UdStream {
fd: FdOps,
}
impl UdStream {
/// Connects to a Unix domain socket server at the specified path.
///
/// See [`ToUdSocketPath`] for an example of using various string types to specify socket paths.
///
/// # System calls
/// - `socket`
/// - `connect`
pub fn connect<'a>(path: impl ToUdSocketPath<'a>) -> io::Result<Self> {
Self::_connect(path.to_socket_path()?)
}
fn _connect(path: UdSocketPath<'_>) -> io::Result<Self> {
let addr = path.try_to::<sockaddr_un>()?;
let socket = {
let (success, fd) = unsafe {
let result = libc::socket(AF_UNIX, SOCK_STREAM, 0);
(result != -1, result)
};
if success {
fd
} else {
return Err(io::Error::last_os_error());
}
};
let success = unsafe {
libc::connect(
socket,
&addr as *const _ as *const _,
size_of::<sockaddr_un>() as u32,
)
} != -1;
if !success {
unsafe { return Err(handle_fd_error(socket)) };
}
unsafe { enable_passcred(socket).map_err(close_by_error(socket))? };
Ok(unsafe { Self::from_raw_fd(socket) })
}
/// Receives bytes from the socket stream.
///
/// # System calls
/// - `read`
pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
/// Receives bytes from the socket stream, making use of [scatter input] for the main data.
///
/// # System calls
/// - `readv`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn recv_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.fd.read_vectored(bufs)
}
/// Receives both bytes and ancillary data from the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
pub fn recv_ancillary<'a: 'b, 'b>(
&self,
buf: &mut [u8],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.recv_ancillary_vectored(&mut [IoSliceMut::new(buf)], abuf)
}
/// Receives bytes and ancillary data from the socket stream, making use of [scatter input] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, mutable slices of bytes (`u8` values) can be passed directly.
///
/// # System calls
/// - `recvmsg`
///
/// [scatter input]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn recv_ancillary_vectored<'a: 'b, 'b>(
&self,
bufs: &mut [IoSliceMut<'_>],
abuf: &'b mut AncillaryDataBuf<'a>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let mut hdr = mk_msghdr_r(bufs, abuf.as_mut())?;
let (success, bytes_read) = unsafe {
let result = libc::recvmsg(self.as_raw_fd(), &mut hdr as *mut _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_read, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Sends bytes into the socket stream.
///
/// # System calls
/// - `write`
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
/// Sends bytes into the socket stream, making use of [gather output] for the main data.
///
/// # System calls
/// - `writev`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.fd.write_vectored(bufs)
}
/// Sends bytes and ancillary data into the socket stream.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
pub fn send_ancillary<'a>(
&self,
buf: &[u8],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
self.send_ancillary_vectored(&[IoSlice::new(buf)], ancillary_data)
}
/// Sends bytes and ancillary data into the socket stream, making use of [gather output] for the main data.
///
/// The ancillary data buffer is automatically converted from the supplied value, if possible. For that reason, slices and `Vec`s of `AncillaryData` can be passed directly.
///
/// # System calls
/// - `sendmsg`
///
/// [gather output]: https://en.wikipedia.org/wiki/Vectored_I/O " "
#[allow(clippy::useless_conversion)]
pub fn send_ancillary_vectored<'a>(
&self,
bufs: &[IoSlice<'_>],
ancillary_data: impl IntoIterator<Item = AncillaryData<'a>>,
) -> io::Result<(usize, usize)> {
check_ancillary_unsound()?;
let abuf = ancillary_data
.into_iter()
.collect::<EncodedAncillaryData<'_>>();
let hdr = mk_msghdr_w(bufs, abuf.as_ref())?;
let (success, bytes_written) = unsafe {
let result = libc::sendmsg(self.as_raw_fd(), &hdr as *const _, 0);
(result != -1, result as usize)
};
if success {
Ok((bytes_written, hdr.msg_controllen as _))
} else {
Err(io::Error::last_os_error())
}
}
/// Shuts down the read, write, or both halves of the stream. See [`Shutdown`].
///
/// Attempting to call this method with the same `how` argument multiple times may return `Ok(())` every time or it may return an error the second time it is called, depending on the platform. You must either avoid using the same value twice or ignore the error entirely.
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
unsafe { raw_shutdown(self.as_raw_fd(), how) }
}
/// Enables or disables the nonblocking mode for the stream. By default, it is disabled.
///
/// In nonblocking mode, calls to the `recv…` methods and the `Read` trait methods will never wait for at least one byte of data to become available; calls to `send…` methods and the `Write` trait methods will never wait for the other side to remove enough bytes from the buffer for the write operation to be performed. Those operations will instead return a [`WouldBlock`] error immediately, allowing the thread to perform other useful operations in the meantime.
///
/// [`accept`]: #method.accept " "
/// [`incoming`]: #method.incoming " "
/// [`WouldBlock`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.WouldBlock " "
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
unsafe { raw_set_nonblocking(self.fd.0, nonblocking) }
}
/// Checks whether the stream is currently in nonblocking mode or not.
pub fn is_nonblocking(&self) -> io::Result<bool> {
unsafe { raw_get_nonblocking(self.fd.0) }
}
/// Fetches the credentials of the other end of the connection without using ancillary data. The returned structure contains the process identifier, user identifier and group identifier of the peer.
#[cfg(any(doc, uds_peercred))]
#[cfg_attr( // uds_peercred template
feature = "doc_cfg",
doc(cfg(any(
all(
target_os = "linux",
any(
target_env = "gnu",
target_env = "musl",
target_env = "musleabi",
target_env = "musleabihf"
)
),
target_os = "emscripten",
target_os = "redox",
target_os = "haiku"
)))
)]
pub fn get_peer_credentials(&self) -> io::Result<ucred> {
unsafe { get_peer_ucred(self.fd.0) }
}
}
impl Read for UdStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.fd.read(buf)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut abuf = AncillaryDataBuf::Owned(Vec::new());
self.recv_ancillary_vectored(bufs, &mut abuf).map(|x| x.0)
}
}
impl Write for UdStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.fd.write(buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.send_ancillary_vectored(bufs, iter::empty())
.map(|x| x.0)
}
fn flush(&mut self) -> io::Result<()> {
// You cannot flush a socket
Ok(())
}
}
impl Debug for UdStream {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("UdStream")
.field("file_descriptor", &self.as_raw_fd())
.finish()
}
}
#[cfg(unix)]
impl AsRawFd for UdStream {
fn as_raw_fd(&self) -> c_int {
self.fd.as_raw_fd()
}
}
#[cfg(unix)]
impl IntoRawFd for UdStream {
fn into_raw_fd(self) -> c_int {
self.fd.into_raw_fd()
}
}
#[cfg(unix)]
impl FromRawFd for UdStream {
unsafe fn from_raw_fd(fd: c_int) -> Self {
Self { fd: FdOps::new(fd) }
}
} | /// let mut conn = UdStream::connect("/tmp/example1.sock")?;
/// conn.write_all(b"Hello from client!")?;
/// let mut string_buffer = String::new(); | random_line_split |
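The split lines above belong to the client-side doc example. For completeness, a hedged server-side counterpart using only std's UnixListener (an editor's sketch, not this crate's UdStreamListener; the path matches the client example):

```rust
use std::io::{Read, Write};
use std::os::unix::net::UnixListener;

fn main() -> std::io::Result<()> {
    let listener = UnixListener::bind("/tmp/example1.sock")?;
    let (mut conn, _addr) = listener.accept()?;
    let mut buf = [0u8; 128];
    let _n = conn.read(&mut buf)?;
    conn.write_all(b"Hello from server!")?;
    Ok(()) // dropping `conn` closes the stream, ending the client's read_to_string
}
```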
client.rs | crate::u2client::types::{RssInfo, TorrentInfo};
use super::Result;
#[derive(Clone)]
pub struct U2client {
uid: String,
passkey: String,
container: reqwest::Client,
torrentClient: TransClient,
tempSpace: String,
workSpace: String,
}
impl U2client {
pub async fn new(
cookie: &str,
proxy: &Option<String>,
RpcURL: &str,
RpcUsername: &str,
RpcPassword: &str,
workRoot: &str,
) -> Result<U2client> {
let mut headers = reqwest::header::HeaderMap::new();
headers.insert(
reqwest::header::COOKIE,
format!("nexusphp_u2={}", cookie).parse()?,
);
let mut container = reqwest::Client::builder()
.cookie_store(true)
.default_headers(headers);
if let Some(ref x) = proxy {
let proxy = reqwest::Proxy::http(x)?;
container = container.proxy(proxy);
let proxy = reqwest::Proxy::https(x)?;
container = container.proxy(proxy);
}
let container = container.build()?;
let x = container
.get("https://u2.dmhy.org/index.php")
.send()
.await?;
if x.url().path() == "/index.php" {
let context = x.text().await?;
let uid = Document::from(context.as_str())
.find(Name("a"))
.filter(|x| match x.attr("class") {
Some(x) => x.split_at(x.len() - 5).1 == "_Name",
_ => false,
})
.filter_map(|n| n.attr("href"))
.map(|x| x.to_string())
.next()
.ok_or("get uid failed")?
.split('=')
.last()
.ok_or("get uid failed")?
.to_string();
let tempSpace = format!("{}/temp", workRoot);
if !Path::new(&tempSpace).exists() {
std::fs::create_dir(&tempSpace)?;
}
let workSpace = format!("{}/work", workRoot);
if !Path::new(&workSpace).exists() {
std::fs::create_dir(&workSpace)?;
}
let basic_auth = BasicAuth {
user: RpcUsername.to_string(),
password: RpcPassword.to_string(),
};
let res = container
.post("https://u2.dmhy.org/getrss.php")
.form(&[
("inclbookmarked", 0),
("inclautochecked", 1),
("trackerssl", 1),
("showrows", 10),
("search_mode", 1),
])
.send()
.await?
.text()
.await?;
let res = Document::from(res.as_str())
.find(Name("a"))
.find(|x| match x.attr("class") {
Some(str) => {
if str == "faqlink" {
match x.attr("rel") {
Some(str) => str == "nofollow noopener noreferer",
_ => false,
}
} else {
false
}
}
_ => false,
})
.unwrap()
.text();
let passkey = U2client::matchRegex(&res, "passkey=([0-9a-z]*)")?;
Ok(U2client {
uid,
passkey,
container,
torrentClient: TransClient::with_auth(&RpcURL, basic_auth),
tempSpace,
workSpace,
})
} else {
Err("illegal cookie".into())
}
}
pub async fn removeTorrent(&self, id: String) -> Result<()> {
let _ = self
.torrentClient
.torrent_remove(vec![Id::Hash(id)], true)
.await?;
Ok(())
}
pub async fn addTorrent(&self, url: &str) -> Result<()> {
let s = self.container.get(url).send().await?;
let contentDisposition = s
.headers()
.get("content-disposition")
.ok_or("addTorrent:can not find content-disposition header")?
.to_str()?;
let filename = U2client::matchRegex(contentDisposition, "filename=%5BU2%5D.(.+)")?;
let to = format!("{}/{}", self.tempSpace, filename);
let toPath = Path::new(&to);
let content = s.bytes().await?;
if toPath.exists() {
std::fs::remove_file(&toPath)?;
}
let mut file = std::fs::File::create(&toPath)?;
file.write_all(&*content)?;
let add: TorrentAddArgs = TorrentAddArgs {
filename: Some(to),
download_dir: Some(self.workSpace.clone()),
..TorrentAddArgs::default()
};
let _ = self.torrentClient.torrent_add(add).await?;
Ok(())
}
pub async fn getTransmissionSession(&self) -> Result<SessionGet> {
Ok(self.torrentClient.session_get().await?.arguments)
}
pub async fn performActionOnTorrent(&self, id: String, op: TorrentAction) -> Result<()> {
let _ = self
.torrentClient
.torrent_action(op, vec![Id::Hash(id)])
.await?;
Ok(())
}
pub async fn getWorkingTorrent(&self) -> Result<Torrents<Torrent>> {
Ok(self.torrentClient.torrent_get(None, None).await?.arguments)
}
pub async fn getStats(&self) -> Result<SessionStats> {
Ok(self.torrentClient.session_stats().await?.arguments)
}
pub async fn getFreeSpace(&self, d: String) -> Result<FreeSpace> {
Ok(self.torrentClient.free_space(d).await?.arguments)
}
pub async fn getDownloadList(&self) -> Result<Vec<RssInfo>> {
Ok(self.getTorrent().await?)
}
pub async fn getRemove(&self) -> Result<Vec<Torrent>> |
pub async fn getUserInfo(&self) -> Result<UserInfo> {
let context = self
.get(format!(
"https://u2.dmhy.org/userdetails.php?id={}",
self.uid
))
.await?;
let username = Document::from(context.as_str())
.find(Name("a"))
.find(|x| match x.attr("class") {
Some(x) => x.split_at(x.len() - 5).1 == "_Name",
_ => false,
})
.ok_or("getUserInfo:can not find username node")?
.text();
let body: HashMap<String, String> = U2client::parseHtml(&context, 2)?;
let t = U2client::reduceToText(&body, "BT时间")?;
let timeRate = U2client::matchRegex(&t, "做种/下载时间比率:[' ']*([0-9.]+)")?;
let uploadTime = U2client::matchRegex(&t, "做种时间:[' ']*([天0-9:' ']+[0-9])")?;
let downloadTime = U2client::matchRegex(&t, "下载时间:[' ']*([天0-9:' ']+[0-9])")?;
let t = U2client::reduceToText(&body, "传输[历史]")?;
let shareRate = U2client::matchRegex(&t, "分享率:[' ']*([0-9.]+)")?;
let upload = U2client::matchRegex(&t, "上传量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let download = U2client::matchRegex(&t, "下载量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualUpload = U2client::matchRegex(&t, "实际上传:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualDownload = U2client::matchRegex(&t, "实际下载:[' ']*([0-9.' ']+[TGMK]iB)")?;
let t = U2client::reduceToText(&body, "UCoin[详情]")?;
let coin = U2client::matchRegex(&t, "[(]([0-9.,]+)[)]")?;
Ok(UserInfo {
username,
download,
upload,
shareRate,
actualDownload,
actualUpload,
coin,
downloadTime,
uploadTime,
timeRate,
})
}
/// 2 => Free
/// 3 => 2x
/// 4 => 2xFree
/// 5 => 50%off
/// 6 => 2x50%off
/// 7 => 30%off
pub async fn applyMagic(&self, uid: &str, time: i32, magic: i32) -> Result<()> {
let time = time.max(24);
let url = format!(
"https://u2.dmhy.org/promotion.php?action=magic&torrent={}",
uid
);
let post = [
("action", "magic".to_string()),
("torrent", uid.to_string()),
("user", "SELF".to_string()),
("hours", time.to_string()),
("promotion", magic.to_string()),
];
let res = self.container.post(&url).form(&post).send().await?;
if res.status().as_u16() == 200 {
Ok(())
} else {
Err("apply magic failed:network failed".into())
}
}
pub async fn getTorrent(&self) -> Result<Vec<RssInfo>> {
let url = format!(
"https://u2.dmhy.org/torrentrss.php?rows=50&trackerssl=1&passkey={}",
self.passkey
);
let content = self.get(url).await?.into_bytes();
let channel = Channel::read_from(&content[..])?;
let res = channel.items.iter().map(async move |x| -> Result<RssInfo> {
let title = x.title.clone().ok_or("getTorrent:bad rss feed")?;
let url = x.enclosure.clone().ok_or("getTorrent:bad rss feed")?.url;
let cat = x.categories[0].name.clone();
let uid = U2client::matchRegex(url.as_str(), "id=([0-9]+)")?;
let U2Info = self.getTorrentInfo(&uid).await?;
Ok(RssInfo {
title,
url,
cat,
uid,
U2Info,
})
});
let res: Vec<Result<RssInfo>> = futures::future::join_all(res).await;
let mut ret = Vec::new();
for x in res.into_iter() {
ret.push(x?);
}
Ok(ret)
}
pub async fn getTorrentInfo(&self, idx: &str) -> Result<TorrentInfo> {
let toNumber = |x: &str| -> Result<f32> {
Ok(U2client::matchRegex(&x.to_string(), "([0-9.]+)")?.parse::<f32>()?)
};
let context = self
.get(format!("https://u2.dmhy.org/details.php?id={}", idx))
.await?;
let body: HashMap<String, String> = U2client::parseHtml(&context, 1)?;
let doc = Document::from(
body.get("流量优惠")
.ok_or("getTorrentInfo:bad html")?
.as_str(),
);
let sink = doc
.find(select::predicate::Any)
.next()
.ok_or("getTorrentInfo:can find main table")?;
let typeNode = sink.find(Name("img")).next();
let (uploadFX, downloadFX) = if let Some(typeNode) = typeNode {
let typeNode = typeNode
.attr("alt")
.ok_or("getTorrentInfo:can find alt for fx")?;
match typeNode {
"FREE" => (1.0, 0.0),
"2X Free" => (2.0, 0.0),
"30%" => (1.0, 0.3),
"2X 50%" => (2.0, 0.5),
"50%" => (1.0, 0.5),
"2X" => (2.0, 1.0),
"Promotion" => {
let mut iters = sink.find(Name("b"));
let f = toNumber(
&*iters
.next()
.ok_or("getTorrentInfo:can find promotion")?
.text(),
)?;
let s = toNumber(
&*iters
.next()
.ok_or("getTorrentInfo:can find promotion")?
.text(),
)?;
(f, s)
}
_ => (1.0, 1.0),
}
} else {
(1.0, 1.0)
};
let s = U2client::reduceToText(&body, "基本信息")?;
let size = U2client::matchRegex(&s, "大小:[' ']*([0-9.' ']+[TGMK]iB)")?;
let number = toNumber(&*size)?;
let GbSize = match size
.chars()
.nth(size.len() - 3)
.ok_or("getTorrentInfo:bad torrent size")?
{
'T' => number * 1024.0,
'G' => number,
'M' => number / 1024.0,
_ => number / 1024.0 / 1024.0,
};
let s = U2client::reduceToText(&body, "同伴[查看列表][隐藏列表]")?;
let seeder = U2client::matchRegex(&s, "([0-9]+)[' ']*个做种者")?.parse::<i32>()?;
let leecher = U2client::matchRegex(&s, "([0-9]+)[' ']*个下载者")?.parse::<i32>()?;
let s = U2client::reduceToText(&body, "活力度")?;
let avgProgress = U2client::matchRegex(&s, "平均进度:[' ']*[(]([0-9]+%)[)]")
.unwrap_or_else(|_| String::from("100%"));
let avgProgress = toNumber(&avgProgress)? / 100.0;
let s = U2client::reduceToText(&body, "种子信息")?;
let Hash = U2client::matchRegex(&s, "种子散列值:[' ']*([0-9a-z]*)[' ']*")?;
Ok(TorrentInfo {
GbSize,
uploadFX,
downloadFX,
seeder,
leecher,
avgProgress,
Hash,
})
}
async fn get<T>(&self, url: T) -> Result<String>
where
T: IntoUrl,
{
let ret = self.container.get(url).send().await?;
if ret.status().as_u16() == 200 {
Ok(ret.text().await?)
} else {
Err(ret.text().await?.into())
}
}
fn matchRegex(src: &str, reg: &str) -> Result<String> {
Ok(Regex::new(reg)?
.captures_iter(src)
.next()
.ok_or("matchRegex:regex match failed")?
.get(1)
.ok_or("matchRegex:regex match failed")?
.as_str()
.to_string())
}
fn reduceToText(mp: &HashMap<String, String>, idx: &str) -> Result<String> {
let str = mp.get(idx).ok_or("reduceToText:broken html")?.as_str();
let ret = Document::from(str)
.find(select::predicate::Any)
.next()
.ok_or("reduceToText:can not find Any Node")?
.text();
Ok(Regex::new("([\u{00ad}\u{00a0}])")?
.replace_all(&*ret, "")
.to_string())
}
fn parseHtml(context: &str, timesOfReduce: i32) -> Result<HashMap<String, String>> {
let doc = Document::from(context);
let mut outer = doc
.find(Name("td"))
.find(|x| match x.attr("class") {
Some(x) => x == "outer",
_ => false,
})
.ok_or("parseHtml:parse failed")?;
for _ in 0..timesOfReduce {
outer = outer
.find(Name("tbody"))
.next()
.ok_or("parseHtml:reduce failed")?;
}
Ok(outer
.children()
.filter_map(|x| {
let mut V = Vec::new();
for i in x.children() {
let s = i.text();
| {
let mut torrent = self.getWorkingTorrent().await?;
torrent.torrents.sort_by_key(|x| {
(
x.peers_getting_from_us.unwrap_or(0),
x.added_date.unwrap_or(0),
)
});
Ok(torrent.torrents.into_iter().take(5).collect())
} | identifier_body |
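`getRemove` above picks eviction candidates by sorting on a tuple key and keeping the first five. A self-contained, hedged sketch of that selection logic (types simplified by the editor):

```rust
// Keep the five torrents with the fewest active peers, oldest first on ties.
fn pick_removals(mut torrents: Vec<(u32, i64, String)>) -> Vec<String> {
    torrents.sort_by_key(|&(peers, added, _)| (peers, added));
    torrents.into_iter().take(5).map(|(_, _, name)| name).collect()
}
```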
client.rs | crate::u2client::types::{RssInfo, TorrentInfo};
use super::Result;
#[derive(Clone)]
pub struct U2client {
uid: String,
passkey: String,
container: reqwest::Client,
torrentClient: TransClient,
tempSpace: String,
workSpace: String,
}
impl U2client {
pub async fn new(
cookie: &str,
proxy: &Option<String>,
RpcURL: &str,
RpcUsername: &str,
RpcPassword: &str,
workRoot: &str,
) -> Result<U2client> {
let mut headers = reqwest::header::HeaderMap::new();
headers.insert(
reqwest::header::COOKIE,
format!("nexusphp_u2={}", cookie).parse()?,
);
let mut container = reqwest::Client::builder()
.cookie_store(true)
.default_headers(headers);
if let Some(ref x) = proxy {
let proxy = reqwest::Proxy::http(x)?;
container = container.proxy(proxy);
let proxy = reqwest::Proxy::https(x)?;
container = container.proxy(proxy);
}
let container = container.build()?;
let x = container
.get("https://u2.dmhy.org/index.php")
.send()
.await?;
if x.url().path() == "/index.php" {
let context = x.text().await?;
let uid = Document::from(context.as_str())
.find(Name("a"))
.filter(|x| match x.attr("class") {
Some(x) => x.split_at(x.len() - 5).1 == "_Name",
_ => false,
})
.filter_map(|n| n.attr("href"))
.map(|x| x.to_string())
.next()
.ok_or("get uid failed")?
.split('=')
.last()
.ok_or("get uid failed")?
.to_string();
let tempSpace = format!("{}/temp", workRoot);
if !Path::new(&tempSpace).exists() {
std::fs::create_dir(&tempSpace)?;
}
let workSpace = format!("{}/work", workRoot);
if !Path::new(&workSpace).exists() {
std::fs::create_dir(&workSpace)?;
}
let basic_auth = BasicAuth {
user: RpcUsername.to_string(),
password: RpcPassword.to_string(),
};
let res = container
.post("https://u2.dmhy.org/getrss.php")
.form(&[
("inclbookmarked", 0),
("inclautochecked", 1),
("trackerssl", 1),
("showrows", 10),
("search_mode", 1),
])
.send()
.await?
.text()
.await?;
let res = Document::from(res.as_str())
.find(Name("a"))
.find(|x| match x.attr("class") {
Some(str) => {
if str == "faqlink" {
match x.attr("rel") {
Some(str) => str == "nofollow noopener noreferer",
_ => false,
}
} else {
false
}
}
_ => false,
})
.unwrap()
.text();
let passkey = U2client::matchRegex(&res, "passkey=([0-9a-z]*)")?;
Ok(U2client {
uid,
passkey,
container,
torrentClient: TransClient::with_auth(&RpcURL, basic_auth),
tempSpace,
workSpace,
})
} else {
Err("illegal cookie".into())
}
}
pub async fn removeTorrent(&self, id: String) -> Result<()> {
let _ = self
.torrentClient
.torrent_remove(vec![Id::Hash(id)], true)
.await?;
Ok(())
}
pub async fn addTorrent(&self, url: &str) -> Result<()> {
let s = self.container.get(url).send().await?;
let contentDisposition = s
.headers()
.get("content-disposition")
.ok_or("addTorrent:can not find content-disposition header")?
.to_str()?;
let filename = U2client::matchRegex(contentDisposition, "filename=%5BU2%5D.(.+)")?;
let to = format!("{}/{}", self.tempSpace, filename);
let toPath = Path::new(&to);
let content = s.bytes().await?;
if toPath.exists() {
std::fs::remove_file(&toPath)?;
}
let mut file = std::fs::File::create(&toPath)?;
file.write_all(&*content)?;
let add: TorrentAddArgs = TorrentAddArgs {
filename: Some(to),
download_dir: Some(self.workSpace.clone()),
..TorrentAddArgs::default()
};
let _ = self.torrentClient.torrent_add(add).await?;
Ok(())
}
pub async fn getTransmissionSession(&self) -> Result<SessionGet> {
Ok(self.torrentClient.session_get().await?.arguments)
}
pub async fn performActionOnTorrent(&self, id: String, op: TorrentAction) -> Result<()> {
let _ = self
.torrentClient
.torrent_action(op, vec![Id::Hash(id)])
.await?;
Ok(())
}
pub async fn getWorkingTorrent(&self) -> Result<Torrents<Torrent>> {
Ok(self.torrentClient.torrent_get(None, None).await?.arguments)
}
pub async fn getStats(&self) -> Result<SessionStats> {
Ok(self.torrentClient.session_stats().await?.arguments)
}
pub async fn getFreeSpace(&self, d: String) -> Result<FreeSpace> {
Ok(self.torrentClient.free_space(d).await?.arguments)
}
pub async fn getDownloadList(&self) -> Result<Vec<RssInfo>> {
Ok(self.getTorrent().await?)
}
pub async fn getRemove(&self) -> Result<Vec<Torrent>> {
let mut torrent = self.getWorkingTorrent().await?;
torrent.torrents.sort_by_key(|x| {
(
x.peers_getting_from_us.unwrap_or(0),
x.added_date.unwrap_or(0),
)
});
Ok(torrent.torrents.into_iter().take(5).collect())
}
pub async fn getUserInfo(&self) -> Result<UserInfo> {
let context = self
.get(format!(
"https://u2.dmhy.org/userdetails.php?id={}",
self.uid
))
.await?;
let username = Document::from(context.as_str())
.find(Name("a"))
.find(|x| match x.attr("class") {
Some(x) => x.split_at(x.len() - 5).1 == "_Name",
_ => false,
})
.ok_or("getUserInfo:can not find username node")?
.text();
let body: HashMap<String, String> = U2client::parseHtml(&context, 2)?;
let t = U2client::reduceToText(&body, "BT时间")?;
let timeRate = U2client::matchRegex(&t, "做种/下载时间比率:[' ']*([0-9.]+)")?;
let uploadTime = U2client::matchRegex(&t, "做种时间:[' ']*([天0-9:' ']+[0-9])")?;
let downloadTime = U2client::matchRegex(&t, "下载时间:[' ']*([天0-9:' ']+[0-9])")?;
let t = U2client::reduceToText(&body, "传输[历史]")?;
let shareRate = U2client::matchRegex(&t, "分享率:[' ']*([0-9.]+)")?;
let upload = U2client::matchRegex(&t, "上传量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let download = U2client::matchRegex(&t, "下载量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualUpload = U2client::matchRegex(&t, "实际上传:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualDownload = U2client::matchRegex(&t, "实际下载:[' ']*([0-9.' ']+[TGMK]iB)")?;
let t = U2client::reduceToText(&body, "UCoin[详情]")?;
let coin = U2client::matchRegex(&t, "[(]([0-9.,]+)[)]")?;
Ok(UserInfo {
username,
download,
upload,
shareRate,
actualDownload,
actualUpload,
coin,
downloadTime,
uploadTime,
timeRate,
})
}
/// 2 => Free
/// 3 => 2x
/// 4 => 2xFree
/// 5 => 50%off
/// 6 => 2x50%off
/// 7 => 30%off
pub async fn applyMagic(&self, uid: &str, time: i32, magic: i32) -> Result<()> {
let time = time.max(24);
let url = format!(
"https://u2.dmhy.org/promotion.php?action=magic&torrent={}",
uid
);
let post = [
("action", "magic".to_string()),
("torrent", uid.to_string()),
("user", "SELF".to_string()),
("hours", time.to_string()),
("promotion", magic.to_string()),
];
let res = self.container.post(&url).form(&post).send().await?;
if res.status().as_u16() == 200 {
Ok(())
} else {
Err("apply magic failed:network failed".into())
}
}
pub async fn getTorrent(&self) -> Result<Vec<RssInfo>> {
let url = format!(
"https://u2.dmhy.org/torrentrss.php?rows=50&trackerssl=1&passkey={}",
self.passkey
);
let content = self.get(url).await?.into_bytes();
let channel = Channel::read_from(&content[..])?;
let res = channel.items.iter().map(async move |x| -> Result<RssInfo> {
let title = x.title.clone().ok_or("getTorrent:bad rss feed")?;
let url = x.enclosure.clone().ok_or("getTorrent:bad rss feed")?.url;
let cat = x.categories[0].name.clone();
let uid = U2client::matchRegex(url.as_str(), "id=([0-9]+)")?;
let U2Info = self.getTorrentInfo(&uid).await?;
Ok(RssInfo {
title,
url,
cat,
uid,
U2Info,
})
});
let res: Vec<Result<RssInfo>> = futures::future::join_all(res).await;
let mut ret = Vec::new();
for x in res.into_iter() {
ret.push(x?);
}
Ok(ret)
}
pub async fn getTorrentInfo(&self, idx: &str) -> Result<TorrentInfo> {
let toNumber = |x: &str| -> Result<f32> {
Ok(U2client::matchRegex(&x.to_string(), "([0-9.]+)")?.parse::<f32>()?)
};
let context = self
.get(format!("https://u2.dmhy.org/details.php?id={}", idx))
.await?;
let body: HashMap<String, String> = U2client::parseHtml(&context, 1)?;
let doc = Document::from(
body.get("流量优惠")
.ok_or("getTorrentInfo:bad html")?
.as_str(),
);
let sink = doc
.find(select::predicate::Any)
.next()
.ok_or("getTorrentInfo:can find main table")?;
let typeNode = sink.find(Name("img")).next();
let (uploadFX, downloadFX) = if let Some(typeNode) = typeNode {
let typeNode = typeNode
.attr("alt")
.ok_or("getTorrentInfo:can find alt for fx")?;
match typeNode {
"FREE" => (1.0, 0.0),
"2X Free" => (2.0, 0.0),
"30%" => (1.0, 0.3),
"2X 50%" => (2.0, 0.5),
"50%" => (1.0, 0.5),
"2X" => (2.0, 1.0),
"Promotion" => {
let mut iters = sink.find(Name("b"));
let f = toNumber(
&*iters
.next()
.ok_or("getTorrentInfo:can find promotion")?
.text(),
)?;
let s = toNumber(
&*iters
.next()
.ok_or("getTorrentInfo:can find promotion")?
.text(),
)?;
(f, s)
}
_ => (1.0, 1.0),
}
} else {
(1.0, 1.0)
};
let s = U2client::reduceToText(&body, "基本信息")?;
let size = U2client::matchRegex(&s, "大小:[' ']*([0-9.' ']+[TGMK]iB)")?;
let number = toNumber(&*size)?;
let GbSize = match size
.chars()
.nth(size.len() - 3)
.ok_or("getTorrentInfo:bad torrent size")?
{
'T' => number * 1024.0,
'G' => number,
'M' => number / 1024.0,
_ => number / 1024.0 / 1024.0,
};
let s = U2client::reduceToText(&body, "同伴[查看列表][隐藏列表]")?;
let seeder = U2client::matchRegex(&s, "([0-9]+)[' ']*个做种者")?.parse::<i32>()?;
let leecher = U2client::matchRegex(&s, "([0-9]+)[' ']*个下载者")?.parse::<i32>()?;
let s = U2client::reduceToText(&body, "活力度")?;
let avgProgress = U2client::matchRegex(&s, "平均进度:[' ']*[(]([0-9]+%)[)]")
.unwrap_or_else(|_| String::from("100%"));
let avgProgress = toNumber(&avgProgress)? / 100.0;
let s = U2client::reduceToText(&body, "种子信息")?;
let Hash = U2client::matchRegex(&s, "种子散列值:[' ']*([0-9a-z]*)[' ']*")?;
Ok(TorrentInfo {
GbSize,
uploadFX,
downloadFX,
seeder,
leecher,
avgProgress,
Hash,
})
}
async fn get<T>(&self, url: T) -> Result<String>
where
T: IntoUrl,
{
let ret = self.container.get(url).send().await?;
if ret.status().as_u16() == 200 {
Ok(ret.text().await?)
} else {
Err(ret.text().await?.into())
}
}
fn matchRegex(src: &str, reg: &str) -> Result<String> {
Ok(Regex::new(reg)?
.captures_iter(src)
.next()
.ok_or("matchRegex:regex match failed")?
.get(1)
.ok_or("matchRegex:regex match failed")?
.as_str()
.to_string())
}
fn reduceToText(mp: &HashMap<String, String>, idx: &str) -> Result<String> {
let str = mp.get(idx).ok_or("reduceToText:broken html")?.as_str();
let ret = Document::from(str)
.find(select::predicate::Any)
.next()
.ok_or("reduceToText:can not find Any Node")?
.text();
Ok(Regex::new("([\u{00ad}\u{00a0}])")?
.replace_all(&*ret, "")
.to_string())
}
fn parseHtml(context: &str, timesOfReduce: i32) -> Result<HashMap<String, String>> {
let doc = Document::from(context);
let mut outer = doc
.find(Nam | .find(|x| match x.attr("class") {
Some(x) => x == "outer",
_ => false,
})
.ok_or("parseHtml:parse failed")?;
for _ in 0..timesOfReduce {
outer = outer
.find(Name("tbody"))
.next()
.ok_or("parseHtml:reduce failed")?;
}
Ok(outer
.children()
.filter_map(|x| {
let mut V = Vec::new();
for i in x.children() {
let s = i.text();
| e("td"))
| identifier_name |
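`matchRegex` in the rows above wraps a first-capture-group lookup. A hedged standalone version with the `regex` crate, with the error type simplified to `String` by the editor:

```rust
use regex::Regex;

fn first_capture(src: &str, pattern: &str) -> Result<String, String> {
    let re = Regex::new(pattern).map_err(|e| e.to_string())?;
    re.captures(src)
        .and_then(|caps| caps.get(1))
        .map(|m| m.as_str().to_string())
        .ok_or_else(|| "regex match failed".to_string())
}

// e.g. first_capture("details.php?id=12345", "id=([0-9]+)") == Ok("12345".to_string())
```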
client.rs |
use crate::u2client::types::{RssInfo, TorrentInfo};
use super::Result;
#[derive(Clone)]
pub struct U2client {
uid: String,
passkey: String,
container: reqwest::Client,
torrentClient: TransClient,
tempSpace: String,
workSpace: String,
}
impl U2client {
pub async fn new(
cookie: &str,
proxy: &Option<String>,
RpcURL: &str,
RpcUsername: &str,
RpcPassword: &str,
workRoot: &str,
) -> Result<U2client> {
let mut headers = reqwest::header::HeaderMap::new();
headers.insert(
reqwest::header::COOKIE,
format!("nexusphp_u2={}", cookie).parse()?,
);
let mut container = reqwest::Client::builder()
.cookie_store(true)
.default_headers(headers);
if let Some(ref x) = proxy {
let proxy = reqwest::Proxy::http(x)?;
container = container.proxy(proxy);
let proxy = reqwest::Proxy::https(x)?;
container = container.proxy(proxy);
}
let container = container.build()?;
| .get("https://u2.dmhy.org/index.php")
.send()
.await?;
if x.url().path() == "/index.php" {
let context = x.text().await?;
let uid = Document::from(context.as_str())
.find(Name("a"))
.filter(|x| match x.attr("class") {
Some(x) => x.split_at(x.len() - 5).1 == "_Name",
_ => false,
})
.filter_map(|n| n.attr("href"))
.map(|x| x.to_string())
.next()
.ok_or("get uid failed")?
.split('=')
.last()
.ok_or("get uid failed")?
.to_string();
let tempSpace = format!("{}/temp", workRoot);
if !Path::new(&tempSpace).exists() {
std::fs::create_dir(&tempSpace)?;
}
let workSpace = format!("{}/work", workRoot);
if !Path::new(&workSpace).exists() {
std::fs::create_dir(&workSpace)?;
}
let basic_auth = BasicAuth {
user: RpcUsername.to_string(),
password: RpcPassword.to_string(),
};
let res = container
.post("https://u2.dmhy.org/getrss.php")
.form(&[
("inclbookmarked", 0),
("inclautochecked", 1),
("trackerssl", 1),
("showrows", 10),
("search_mode", 1),
])
.send()
.await?
.text()
.await?;
let res = Document::from(res.as_str())
.find(Name("a"))
.find(|x| match x.attr("class") {
Some(str) => {
if str == "faqlink" {
match x.attr("rel") {
Some(str) => str == "nofollow noopener noreferer",
_ => false,
}
} else {
false
}
}
_ => false,
})
.unwrap()
.text();
let passkey = U2client::matchRegex(&res, "passkey=([0-9a-z]*)")?;
Ok(U2client {
uid,
passkey,
container,
torrentClient: TransClient::with_auth(&RpcURL, basic_auth),
tempSpace,
workSpace,
})
} else {
Err("illegal cookie".into())
}
}
pub async fn removeTorrent(&self, id: String) -> Result<()> {
let _ = self
.torrentClient
.torrent_remove(vec![Id::Hash(id)], true)
.await?;
Ok(())
}
pub async fn addTorrent(&self, url: &str) -> Result<()> {
let s = self.container.get(url).send().await?;
let contentDisposition = s
.headers()
.get("content-disposition")
.ok_or("addTorrent:can not find content-disposition header")?
.to_str()?;
let filename = U2client::matchRegex(contentDisposition, "filename=%5BU2%5D.(.+)")?;
let to = format!("{}/{}", self.tempSpace, filename);
let toPath = Path::new(&to);
let content = s.bytes().await?;
if toPath.exists() {
std::fs::remove_file(&toPath)?;
}
let mut file = std::fs::File::create(&toPath)?;
file.write_all(&*content)?;
let add: TorrentAddArgs = TorrentAddArgs {
filename: Some(to),
download_dir: Some(self.workSpace.clone()),
..TorrentAddArgs::default()
};
let _ = self.torrentClient.torrent_add(add).await?;
Ok(())
}
pub async fn getTransmissionSession(&self) -> Result<SessionGet> {
Ok(self.torrentClient.session_get().await?.arguments)
}
pub async fn performActionOnTorrent(&self, id: String, op: TorrentAction) -> Result<()> {
let _ = self
.torrentClient
.torrent_action(op, vec![Id::Hash(id)])
.await?;
Ok(())
}
pub async fn getWorkingTorrent(&self) -> Result<Torrents<Torrent>> {
Ok(self.torrentClient.torrent_get(None, None).await?.arguments)
}
pub async fn getStats(&self) -> Result<SessionStats> {
Ok(self.torrentClient.session_stats().await?.arguments)
}
pub async fn getFreeSpace(&self, d: String) -> Result<FreeSpace> {
Ok(self.torrentClient.free_space(d).await?.arguments)
}
pub async fn getDownloadList(&self) -> Result<Vec<RssInfo>> {
Ok(self.getTorrent().await?)
}
pub async fn getRemove(&self) -> Result<Vec<Torrent>> {
let mut torrent = self.getWorkingTorrent().await?;
torrent.torrents.sort_by_key(|x| {
(
x.peers_getting_from_us.unwrap_or(0),
x.added_date.unwrap_or(0),
)
});
Ok(torrent.torrents.into_iter().take(5).collect())
}
pub async fn getUserInfo(&self) -> Result<UserInfo> {
let context = self
.get(format!(
"https://u2.dmhy.org/userdetails.php?id={}",
self.uid
))
.await?;
let username = Document::from(context.as_str())
.find(Name("a"))
.find(|x| match x.attr("class") {
Some(x) => x.split_at(x.len() - 5).1 == "_Name",
_ => false,
})
.ok_or("getUserInfo:can not find username node")?
.text();
let body: HashMap<String, String> = U2client::parseHtml(&context, 2)?;
let t = U2client::reduceToText(&body, "BT时间")?;
let timeRate = U2client::matchRegex(&t, "做种/下载时间比率:[' ']*([0-9.]+)")?;
let uploadTime = U2client::matchRegex(&t, "做种时间:[' ']*([天0-9:' ']+[0-9])")?;
let downloadTime = U2client::matchRegex(&t, "下载时间:[' ']*([天0-9:' ']+[0-9])")?;
let t = U2client::reduceToText(&body, "传输[历史]")?;
let shareRate = U2client::matchRegex(&t, "分享率:[' ']*([0-9.]+)")?;
let upload = U2client::matchRegex(&t, "上传量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let download = U2client::matchRegex(&t, "下载量:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualUpload = U2client::matchRegex(&t, "实际上传:[' ']*([0-9.' ']+[TGMK]iB)")?;
let actualDownload = U2client::matchRegex(&t, "实际下载:[' ']*([0-9.' ']+[TGMK]iB)")?;
let t = U2client::reduceToText(&body, "UCoin[详情]")?;
let coin = U2client::matchRegex(&t, "[(]([0-9.,]+)[)]")?;
Ok(UserInfo {
username,
download,
upload,
shareRate,
actualDownload,
actualUpload,
coin,
downloadTime,
uploadTime,
timeRate,
})
}
/// 2 => Free
/// 3 => 2x
/// 4 => 2xFree
/// 5 => 50%off
/// 6 => 2x50%off
/// 7 => 30%off
pub async fn applyMagic(&self, uid: &str, time: i32, magic: i32) -> Result<()> {
let time = time.max(24);
let url = format!(
"https://u2.dmhy.org/promotion.php?action=magic&torrent={}",
uid
);
let post = [
("action", "magic".to_string()),
("torrent", uid.to_string()),
("user", "SELF".to_string()),
("hours", time.to_string()),
("promotion", magic.to_string()),
];
let res = self.container.post(&url).form(&post).send().await?;
if res.status().as_u16() == 200 {
Ok(())
} else {
Err("apply magic failed:network failed".into())
}
}
pub async fn getTorrent(&self) -> Result<Vec<RssInfo>> {
let url = format!(
"https://u2.dmhy.org/torrentrss.php?rows=50&trackerssl=1&passkey={}",
self.passkey
);
let content = self.get(url).await?.into_bytes();
let channel = Channel::read_from(&content[..])?;
let res = channel.items.iter().map(async move |x| -> Result<RssInfo> {
let title = x.title.clone().ok_or("getTorrent:bad rss feed")?;
let url = x.enclosure.clone().ok_or("getTorrent:bad rss feed")?.url;
let cat = x.categories[0].name.clone();
let uid = U2client::matchRegex(url.as_str(), "id=([0-9]+)")?;
let U2Info = self.getTorrentInfo(&uid).await?;
Ok(RssInfo {
title,
url,
cat,
uid,
U2Info,
})
});
let res: Vec<Result<RssInfo>> = futures::future::join_all(res).await;
let mut ret = Vec::new();
for x in res.into_iter() {
ret.push(x?);
}
Ok(ret)
}
pub async fn getTorrentInfo(&self, idx: &str) -> Result<TorrentInfo> {
let toNumber = |x: &str| -> Result<f32> {
Ok(U2client::matchRegex(&x.to_string(), "([0-9.]+)")?.parse::<f32>()?)
};
let context = self
.get(format!("https://u2.dmhy.org/details.php?id={}", idx))
.await?;
let body: HashMap<String, String> = U2client::parseHtml(&context, 1)?;
let doc = Document::from(
body.get("流量优惠")
.ok_or("getTorrentInfo:bad html")?
.as_str(),
);
let sink = doc
.find(select::predicate::Any)
.next()
.ok_or("getTorrentInfo:can find main table")?;
let typeNode = sink.find(Name("img")).next();
let (uploadFX, downloadFX) = if let Some(typeNode) = typeNode {
let typeNode = typeNode
.attr("alt")
.ok_or("getTorrentInfo:can find alt for fx")?;
match typeNode {
"FREE" => (1.0, 0.0),
"2X Free" => (2.0, 0.0),
"30%" => (1.0, 0.3),
"2X 50%" => (2.0, 0.5),
"50%" => (1.0, 0.5),
"2X" => (2.0, 1.0),
"Promotion" => {
let mut iters = sink.find(Name("b"));
let f = toNumber(
&*iters
.next()
.ok_or("getTorrentInfo:can find promotion")?
.text(),
)?;
let s = toNumber(
&*iters
.next()
.ok_or("getTorrentInfo:can find promotion")?
.text(),
)?;
(f, s)
}
_ => (1.0, 1.0),
}
} else {
(1.0, 1.0)
};
let s = U2client::reduceToText(&body, "基本信息")?;
let size = U2client::matchRegex(&s, "大小:[' ']*([0-9.' ']+[TGMK]iB)")?;
let number = toNumber(&*size)?;
let GbSize = match size
.chars()
.nth(size.len() - 3)
.ok_or("getTorrentInfo:bad torrent size")?
{
'T' => number * 1024.0,
'G' => number,
'M' => number / 1024.0,
_ => number / 1024.0 / 1024.0,
};
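// Unit conversion example: a size string of "3.5 TiB" yields 3.5 * 1024 =
// 3584.0 GiB, while anything below MiB falls through to the KiB branch.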
let s = U2client::reduceToText(&body, "同伴[查看列表][隐藏列表]")?;
let seeder = U2client::matchRegex(&s, "([0-9]+)[' ']*个做种者")?.parse::<i32>()?;
let leecher = U2client::matchRegex(&s, "([0-9]+)[' ']*个下载者")?.parse::<i32>()?;
let s = U2client::reduceToText(&body, "活力度")?;
let avgProgress = U2client::matchRegex(&s, "平均进度:[' ']*[(]([0-9]+%)[)]")
.unwrap_or_else(|_| String::from("100%"));
let avgProgress = toNumber(&avgProgress)? / 100.0;
let s = U2client::reduceToText(&body, "种子信息")?;
let Hash = U2client::matchRegex(&s, "种子散列值:[' ']*([0-9a-z]*)[' ']*")?;
Ok(TorrentInfo {
GbSize,
uploadFX,
downloadFX,
seeder,
leecher,
avgProgress,
Hash,
})
}
async fn get<T>(&self, url: T) -> Result<String>
where
T: IntoUrl,
{
let ret = self.container.get(url).send().await?;
if ret.status().as_u16() == 200 {
Ok(ret.text().await?)
} else {
Err(ret.text().await?.into())
}
}
fn matchRegex(src: &str, reg: &str) -> Result<String> {
Ok(Regex::new(reg)?
.captures_iter(src)
.next()
.ok_or("matchRegex:regex match failed")?
.get(1)
.ok_or("matchRegex:regex match failed")?
.as_str()
.to_string())
}
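// For example, matchRegex("id=42857&hit=1", "id=([0-9]+)") returns
// Ok("42857"): only the first capture group of the first match is kept.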
fn reduceToText(mp: &HashMap<String, String>, idx: &str) -> Result<String> {
let str = mp.get(idx).ok_or("reduceToText:broken html")?.as_str();
let ret = Document::from(str)
.find(select::predicate::Any)
.next()
.ok_or("reduceToText:cannot find an Any node")?
.text();
Ok(Regex::new("([\u{00ad}\u{00a0}])")?
.replace_all(&*ret, "")
.to_string())
}
fn parseHtml(context: &str, timesOfReduce: i32) -> Result<HashMap<String, String>> {
let doc = Document::from(context);
let mut outer = doc
.find(Name("td"))
.find(|x| match x.attr("class") {
Some(x) => x == "outer",
_ => false,
})
.ok_or("parseHtml:parse failed")?;
for _ in 0..timesOfReduce {
outer = outer
.find(Name("tbody"))
.next()
.ok_or("parseHtml:reduce failed")?;
}
Ok(outer
.children()
.filter_map(|x| {
let mut V = Vec::new();
for i in x.children() {
let s = i.text();
| let x = container | random_line_split |
tiles.rs | use std::cmp::min;
use std::fs::File;
use std::path::Path;
use std::io::{Read, Write, BufWriter, Error};
use game::base::*;
use io::base::*;
use map::constants::*;
use map::material::*;
pub type Tiles = Vec<Tile>;
pub type PosUnit = i32;
const CHUNK_TILES_X: PosUnit = 8;
const CHUNK_TILES_Y: PosUnit = 8;
const CHUNK_TILES_Z: PosUnit = 1;
//TODO Clean up unwraps
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
pub struct Tile {
//Single map unit
pub material: MaterialID,
pub mode: Mode,
pub marked: bool,
}
#[derive(Clone)]
pub struct Map {
//Holds the terrain info as a vector of tiles
tiles: Tiles,
pub materials: Materials,
xlen: PosUnit,
ylen: PosUnit,
zlen: PosUnit,
}
#[derive(Debug, Clone)]
pub struct MapSnapshot {
//Represents a slice of the map
//to be delivered to the rendering engine
pub tiles: Tiles,
pub xlen: PosUnit,
pub ylen: PosUnit,
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct MapChunk {
pub tiles: Tiles,
pub pos: Pos,
pub xlen: PosUnit,
pub ylen: PosUnit,
pub zlen: PosUnit,
}
pub fn init_map(root: &Path) -> Map {
info!("Initializing map");
let test_path = root.join("static/inc/maps/smol_map_excel.sfm.csv");
let path_str = test_path
.to_str()
.expect("Unicode decode error");
// Load materials properties file
let materials = init_materials(root);
load_map(path_str, materials).expect("Could not load map")
}
impl Tile {
fn new(material: MaterialID, mode: Mode) -> Tile {
Tile {
material: material,
mode: mode,
marked: false,
}
}
}
impl Map {
#[allow(dead_code)]
pub fn print(&self) {
//Debug print method
//[debug] func
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
match self.get_tile((x, y, z)) {
Some(tile) => print!("{0}", tile.material % 10),
None => print!(" "),
}
}
println!();
}
println!();
}
}
pub fn size(&self) -> Pos {
(self.xlen, self.ylen, self.zlen)
}
// Resize map as given with blank tiles
pub fn resize(&mut self, pos: Pos) |
pub fn get_chunk(&self, pos: Pos, size: Pos) -> MapChunk {
let (x0, y0, z0) = pos;
let (xlen, ylen, zlen) = size;
let mut tiles = Tiles::new();
for x in x0..(x0 + xlen) {
for y in y0..(y0 + ylen) {
for z in z0..(z0 + zlen) {
let index = self.coords_to_index((x, y, z));
tiles.push(self.tiles[index]);
}
}
}
MapChunk {
tiles: tiles,
pos: pos,
xlen: xlen,
ylen: ylen,
zlen: zlen,
}
}
// TODO Add duplication factor
pub fn to_chunks(&self) -> Vec<MapChunk> {
let mut chunks = Vec::<MapChunk>::new();
let x_chunks = Map::get_num_chunks(self.xlen, CHUNK_TILES_X);
let y_chunks = Map::get_num_chunks(self.ylen, CHUNK_TILES_Y);
let z_chunks = Map::get_num_chunks(self.zlen, CHUNK_TILES_Z);
for dx in 0..x_chunks {
for dy in 0..y_chunks {
for dz in 0..z_chunks {
let x = dx * CHUNK_TILES_X;
let y = dy * CHUNK_TILES_Y;
let z = dz * CHUNK_TILES_Z;
let pos = (x, y, z);
let xlen = min(CHUNK_TILES_X, self.xlen - dx * CHUNK_TILES_X);
let ylen = min(CHUNK_TILES_Y, self.ylen - dy * CHUNK_TILES_Y);
let zlen = min(CHUNK_TILES_Z, self.zlen - dz * CHUNK_TILES_Z);
let size = (xlen, ylen, zlen);
chunks.push(self.get_chunk(pos, size))
}
}
}
chunks
}
fn get_num_chunks(map_len: PosUnit, chunk_len: PosUnit) -> PosUnit {
if map_len % chunk_len == 0 {
map_len / chunk_len
} else {
map_len / chunk_len + 1
}
}
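// This is ceiling division: e.g. a 20-tile axis with 8-tile chunks needs
// 20 / 8 + 1 = 3 chunks (two full and one partial).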
pub fn apply_chunk(&mut self, chunk: &MapChunk) {
let (x0, y0, z0) = chunk.pos;
let mut chunk_i = 0;
for x in 0..chunk.xlen {
for y in 0..chunk.ylen {
for z in 0..chunk.zlen {
let mx = x + x0;
let my = (y + y0) * self.xlen;
let mz = (z + z0) * self.xlen * self.ylen;
let map_i = (mx + my + mz) as usize;
self.tiles[map_i] = chunk.tiles[chunk_i];
chunk_i += 1;
}
}
}
}
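// Note: the x/y/z nesting here must mirror get_chunk above, since chunk_i
// replays the exact flattening order used when the chunk was built.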
/// Tile accessor method
pub fn get_tile(&self, pos: Pos) -> Option<Tile> {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
Some(self.tiles[index])
} else {
None
}
}
/// Perform some mutable operation to a tile
fn apply_tile_func<F>(&mut self, pos: Pos, func: F)
where F: Fn(&mut Tile) {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
func(&mut self.tiles[index]);
}
}
fn in_bounds(&self, pos: Pos) -> bool {
let (x, y, z) = pos;
!(0 > x || 0 > y || 0 > z || x >= self.xlen || y >= self.ylen || z >= self.zlen)
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, z) = pos;
(x + y * self.xlen + z * self.xlen * self.ylen) as usize
}
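// Worked example with hypothetical dimensions: for xlen = 4 and ylen = 3,
// position (1, 2, 1) maps to 1 + 2 * 4 + 1 * 4 * 3 = 21 in the flat vector.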
pub fn update_tile(&mut self, new_tile: Tile, pos: Pos) {
self.apply_tile_func(pos, |tile| {
tile.material = new_tile.material;
tile.marked = false;
});
}
pub fn dig(&mut self, pos: Pos) {
let alt = self.get_alt(pos);
self.apply_tile_func(pos, |tile| tile.material = alt);
}
pub fn mark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = true);
}
#[allow(dead_code)]
pub fn unmark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = false);
}
fn grab_material(&self, pos: Pos) -> Option<Material> {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
Some(material.clone())
} else {
None
}
} else {
None
}
}
pub fn get_alt(&self, pos: Pos) -> MaterialID {
if let Some(material) = self.grab_material(pos) {
material.alt
} else {
0
}
}
pub fn diggable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
material.diggable && !tile.marked
} else {
false
}
} else {
false
}
}
pub fn passable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
match tile.mode {
Mode::Block => false,
_ => true,
}
} else {
false
}
}
#[allow(dead_code)]
pub fn save(&self, path: &str) -> Result<(), Error> {
//Saves map as file. Currently unversioned, so take heed.
let f = try!(File::create(&path));
let mut writer = BufWriter::new(&f);
try!(write!(&mut writer, "{} {} {}\n", self.xlen, self.ylen, self.zlen));
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
try!(write!(&mut writer, "{} ",
self.get_tile((x, y, z)).expect("Malformed map").material));
}
try!(write!(&mut writer, "\n"));
}
try!(write!(&mut writer, "\n"));
}
Ok(())
}
}
impl MapSnapshot {
#[allow(dead_code)]
pub fn print(&self) {
//MapSnapshot debug
//[debug] func
for y in 0..self.ylen {
for x in 0..self.xlen {
let index = self.coords_to_index((x, y, 0));
print!("{0}", self.tiles[index].material % 10);
}
println!();
}
println!();
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, _) = pos;
(x + y * self.xlen) as usize
}
}
pub fn handle_to_snapshot(handle: &CameraHandle, map: &Map) -> MapSnapshot {
//Uses handle and map to generate 2D snapshot
//Eventually 3D snapshots may be enabled
//Base interface method between rendering engine and map
let mut tiles = Vec::with_capacity((handle.xlen * handle.ylen) as usize);
for y in handle.y..handle.y + handle.ylen {
for x in handle.x..handle.x + handle.xlen {
match map.get_tile((x, y, handle.z)) {
//Get_tile returns valid tile
Some(tile) => tiles.push(tile),
//Otherwise display as air
None => tiles.push(AIR_TILE),
}
}
}
MapSnapshot {tiles: tiles, xlen: handle.xlen, ylen: handle.ylen}
}
pub fn blank_map(root: &Path) -> Map {
// Load materials properties file
let materials = init_materials(root);
Map {
tiles: Tiles::new(),
materials: materials,
xlen: 0,
ylen: 0,
zlen: 0,
}
}
pub fn load_map(path: &str, materials: Materials) -> Result<Map, Error> {
// Load map from file. Currently unversioned so take heed.
// Map validation is not performed.
let mut f = try!(File::open(&path));
let mut contents = String::new();
try!(f.read_to_string(&mut contents));
let mut tiles = Vec::new();
let (mut xlen, mut ylen, mut zlen) = (0i32, 0i32, 0i32);
for (i, line) in contents.lines().enumerate() {
if i == 0 {
let mut split_line = line.split(",");
let version: i32 = split_line.next().unwrap().parse().unwrap();
assert!(version >= 1);
xlen = split_line.next().unwrap().parse().unwrap();
ylen = split_line.next().unwrap().parse().unwrap();
zlen = split_line.next().unwrap().parse().unwrap();
} else {
for word in line.split(",") {
let mut word_parts = word.split(";");
if let Some(material_str) = word_parts.next() {
if material_str.len() > 0 {
let material: u16 = material_str.parse().unwrap();
let mode: Mode = match word_parts.next() {
Some(mode_str) => to_mode(mode_str.parse().unwrap()).unwrap(),
None => Mode::Block,
};
tiles.push(Tile::new(material, mode));
}
}
}
}
}
Ok(Map {tiles: tiles, materials: materials, xlen: xlen, ylen: ylen, zlen: zlen})
}
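// A minimal sketch of the expected layout (hypothetical values), inferred
// from the parser above -- a "version,xlen,ylen,zlen" header followed by one
// "material" or "material;mode" entry per tile:
//
//     1,2,1,1
//     3;0,3;1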
| {
let (x, y, z) = pos;
self.tiles = vec![AIR_TILE; (x * y * z) as usize];
self.xlen = x;
self.ylen = y;
self.zlen = z;
} | identifier_body |
tiles.rs | use std::cmp::min;
use std::fs::File;
use std::path::Path;
use std::io::{Read, Write, BufWriter, Error};
use game::base::*;
use io::base::*;
use map::constants::*;
use map::material::*;
pub type Tiles = Vec<Tile>;
pub type PosUnit = i32;
const CHUNK_TILES_X: PosUnit = 8;
const CHUNK_TILES_Y: PosUnit = 8;
const CHUNK_TILES_Z: PosUnit = 1;
//TODO Clean up unwraps
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
pub struct Tile {
//Single map unit
pub material: MaterialID,
pub mode: Mode,
pub marked: bool,
}
#[derive(Clone)]
pub struct Map {
//Holds the terrain info as a vector of tiles
tiles: Tiles,
pub materials: Materials,
xlen: PosUnit,
ylen: PosUnit,
zlen: PosUnit,
}
#[derive(Debug, Clone)]
pub struct MapSnapshot {
//Represents a slice of the map
//to be delivered to the rendering engine
pub tiles: Tiles,
pub xlen: PosUnit,
pub ylen: PosUnit,
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct MapChunk {
pub tiles: Tiles,
pub pos: Pos,
pub xlen: PosUnit,
pub ylen: PosUnit,
pub zlen: PosUnit,
}
pub fn init_map(root: &Path) -> Map {
info!("Initializing map");
let test_path = root.join("static/inc/maps/smol_map_excel.sfm.csv");
let path_str = test_path
.to_str()
.expect("Unicode decode error");
// Load materials properties file
let materials = init_materials(root);
load_map(path_str, materials).expect("Could not load map")
}
impl Tile {
fn new(material: MaterialID, mode: Mode) -> Tile {
Tile {
material: material,
mode: mode,
marked: false,
}
}
}
impl Map {
#[allow(dead_code)]
pub fn print(&self) {
//Debug print method
//[debug] func
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
match self.get_tile((x, y, z)) {
Some(tile) => print!("{0}", tile.material % 10),
None => print!(" "),
}
}
println!();
}
println!();
}
}
pub fn size(&self) -> Pos {
(self.xlen, self.ylen, self.zlen)
}
// Resize map as given with blank tiles
pub fn resize(&mut self, pos: Pos) {
let (x, y, z) = pos;
self.tiles = vec![AIR_TILE; (x * y * z) as usize];
self.xlen = x;
self.ylen = y;
self.zlen = z;
}
pub fn get_chunk(&self, pos: Pos, size: Pos) -> MapChunk {
let (x0, y0, z0) = pos;
let (xlen, ylen, zlen) = size;
let mut tiles = Tiles::new();
for x in x0..(x0 + xlen) {
for y in y0..(y0 + ylen) {
for z in z0..(z0 + zlen) {
let index = self.coords_to_index((x, y, z));
tiles.push(self.tiles[index]);
}
}
}
MapChunk {
tiles: tiles,
pos: pos,
xlen: xlen,
ylen: ylen,
zlen: zlen,
}
}
// TODO Add duplication factor
pub fn to_chunks(&self) -> Vec<MapChunk> {
let mut chunks = Vec::<MapChunk>::new();
let x_chunks = Map::get_num_chunks(self.xlen, CHUNK_TILES_X);
let y_chunks = Map::get_num_chunks(self.ylen, CHUNK_TILES_Y);
let z_chunks = Map::get_num_chunks(self.zlen, CHUNK_TILES_Z);
for dx in 0..x_chunks {
for dy in 0..y_chunks {
for dz in 0..z_chunks {
let x = dx * CHUNK_TILES_X;
let y = dy * CHUNK_TILES_Y; | let zlen = min(CHUNK_TILES_Z, self.zlen - dz * CHUNK_TILES_Z);
let size = (xlen, ylen, zlen);
chunks.push(self.get_chunk(pos, size))
}
}
}
chunks
}
fn get_num_chunks(map_len: PosUnit, chunk_len: PosUnit) -> PosUnit {
if map_len % chunk_len == 0 {
map_len / chunk_len
} else {
map_len / chunk_len + 1
}
}
pub fn apply_chunk(&mut self, chunk: &MapChunk) {
let (x0, y0, z0) = chunk.pos;
let mut chunk_i = 0;
for x in 0..chunk.xlen {
for y in 0..chunk.ylen {
for z in 0..chunk.zlen {
let mx = x + x0;
let my = (y + y0) * self.xlen;
let mz = (z + z0) * self.xlen * self.ylen;
let map_i = (mx + my + mz) as usize;
self.tiles[map_i] = chunk.tiles[chunk_i];
chunk_i += 1;
}
}
}
}
/// Tile accessor method
pub fn get_tile(&self, pos: Pos) -> Option<Tile> {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
Some(self.tiles[index])
} else {
None
}
}
/// Perform some mutable operation to a tile
fn apply_tile_func<F>(&mut self, pos: Pos, func: F)
where F: Fn(&mut Tile) {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
func(&mut self.tiles[index]);
}
}
fn in_bounds(&self, pos: Pos) -> bool {
let (x, y, z) = pos;
!(0 > x || 0 > y || 0 > z || x >= self.xlen || y >= self.ylen || z >= self.zlen)
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, z) = pos;
(x + y * self.xlen + z * self.xlen * self.ylen) as usize
}
pub fn update_tile(&mut self, new_tile: Tile, pos: Pos) {
self.apply_tile_func(pos, |tile| {
tile.material = new_tile.material;
tile.marked = false;
});
}
pub fn dig(&mut self, pos: Pos) {
let alt = self.get_alt(pos);
self.apply_tile_func(pos, |tile| tile.material = alt);
}
pub fn mark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = true);
}
#[allow(dead_code)]
pub fn unmark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = false);
}
fn grab_material(&self, pos: Pos) -> Option<Material> {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
Some(material.clone())
} else {
None
}
} else {
None
}
}
pub fn get_alt(&self, pos: Pos) -> MaterialID {
if let Some(material) = self.grab_material(pos) {
material.alt
} else {
0
}
}
pub fn diggable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
material.diggable && !tile.marked
} else {
false
}
} else {
false
}
}
pub fn passable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
match tile.mode {
Mode::Block => false,
_ => true,
}
} else {
false
}
}
#[allow(dead_code)]
pub fn save(&self, path: &str) -> Result<(), Error> {
//Saves map as file. Currently unversioned, so take heed.
let f = try!(File::create(&path));
let mut writer = BufWriter::new(&f);
try!(write!(&mut writer, "{} {} {}\n", self.xlen, self.ylen, self.zlen));
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
try!(write!(&mut writer, "{} ",
self.get_tile((x, y, z)).expect("Malformed map").material));
}
try!(write!(&mut writer, "\n"));
}
try!(write!(&mut writer, "\n"));
}
Ok(())
}
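// Caveat: this writer emits a space-separated dump with a bare size header,
// so its output is not readable by load_map below, which expects the
// comma-separated, versioned format.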
}
impl MapSnapshot {
#[allow(dead_code)]
pub fn print(&self) {
//MapSnapshot debug
//[debug] func
for y in 0..self.ylen {
for x in 0..self.xlen {
let index = self.coords_to_index((x, y, 0));
print!("{0}", self.tiles[index].material % 10);
}
println!();
}
println!();
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, _) = pos;
(x + y * self.xlen) as usize
}
}
pub fn handle_to_snapshot(handle: &CameraHandle, map: &Map) -> MapSnapshot {
//Uses handle and map to generate 2D snapshot
//Eventually 3D snapshots may be enabled
//Base interface method between rendering engine and map
let mut tiles = Vec::with_capacity((handle.xlen * handle.ylen) as usize);
for y in handle.y..handle.y + handle.ylen {
for x in handle.x..handle.x + handle.xlen {
match map.get_tile((x, y, handle.z)) {
//Get_tile returns valid tile
Some(tile) => tiles.push(tile),
//Otherwise display as air
None => tiles.push(AIR_TILE),
}
}
}
MapSnapshot {tiles: tiles, xlen: handle.xlen, ylen: handle.ylen}
}
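// A minimal usage sketch, assuming a CameraHandle exposing the x, y, z,
// xlen and ylen fields read above:
//     let snap = handle_to_snapshot(&handle, &map);
//     snap.print(); // one digit per visible tile, out-of-bounds drawn as air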
pub fn blank_map(root: &Path) -> Map {
// Load materials properties file
let materials = init_materials(root);
Map {
tiles: Tiles::new(),
materials: materials,
xlen: 0,
ylen: 0,
zlen: 0,
}
}
pub fn load_map(path: &str, materials: Materials) -> Result<Map, Error> {
// Load map from file. Currently unversioned so take heed.
// Map validation is not performed.
let mut f = try!(File::open(&path));
let mut contents = String::new();
try!(f.read_to_string(&mut contents));
let mut tiles = Vec::new();
let (mut xlen, mut ylen, mut zlen) = (0i32, 0i32, 0i32);
for (i, line) in contents.lines().enumerate() {
if i == 0 {
let mut split_line = line.split(",");
let version: i32 = split_line.next().unwrap().parse().unwrap();
assert!(version >= 1);
xlen = split_line.next().unwrap().parse().unwrap();
ylen = split_line.next().unwrap().parse().unwrap();
zlen = split_line.next().unwrap().parse().unwrap();
} else {
for word in line.split(",") {
let mut word_parts = word.split(";");
if let Some(material_str) = word_parts.next() {
if material_str.len() > 0 {
let material: u16 = material_str.parse().unwrap();
let mode: Mode = match word_parts.next() {
Some(mode_str) => to_mode(mode_str.parse().unwrap()).unwrap(),
None => Mode::Block,
};
tiles.push(Tile::new(material, mode));
}
}
}
}
}
Ok(Map {tiles: tiles, materials: materials, xlen: xlen, ylen: ylen, zlen: zlen})
} | let z = dz * CHUNK_TILES_Z;
let pos = (x, y, z);
let xlen = min(CHUNK_TILES_X, self.xlen - dx * CHUNK_TILES_X);
let ylen = min(CHUNK_TILES_Y, self.ylen - dy * CHUNK_TILES_Y); | random_line_split |
tiles.rs | use std::cmp::min;
use std::fs::File;
use std::path::Path;
use std::io::{Read, Write, BufWriter, Error};
use game::base::*;
use io::base::*;
use map::constants::*;
use map::material::*;
pub type Tiles = Vec<Tile>;
pub type PosUnit = i32;
const CHUNK_TILES_X: PosUnit = 8;
const CHUNK_TILES_Y: PosUnit = 8;
const CHUNK_TILES_Z: PosUnit = 1;
//TODO Clean up unwraps
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
pub struct Tile {
//Single map unit
pub material: MaterialID,
pub mode: Mode,
pub marked: bool,
}
#[derive(Clone)]
pub struct Map {
//Holds the terrain info as a vector of tiles
tiles: Tiles,
pub materials: Materials,
xlen: PosUnit,
ylen: PosUnit,
zlen: PosUnit,
}
#[derive(Debug, Clone)]
pub struct MapSnapshot {
//Represents a slice of the map
//to be delivered to the rendering engine
pub tiles: Tiles,
pub xlen: PosUnit,
pub ylen: PosUnit,
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct | {
pub tiles: Tiles,
pub pos: Pos,
pub xlen: PosUnit,
pub ylen: PosUnit,
pub zlen: PosUnit,
}
pub fn init_map(root: &Path) -> Map {
info!("Initializing map");
let test_path = root.join("static/inc/maps/smol_map_excel.sfm.csv");
let path_str = test_path
.to_str()
.expect("Unicode decode error");
// Load materials properties file
let materials = init_materials(root);
load_map(path_str, materials).expect("Could not load map")
}
impl Tile {
fn new(material: MaterialID, mode: Mode) -> Tile {
Tile {
material: material,
mode: mode,
marked: false,
}
}
}
impl Map {
#[allow(dead_code)]
pub fn print(&self) {
//Debug print method
//[debug] func
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
match self.get_tile((x, y, z)) {
Some(tile) => print!("{0}", tile.material % 10),
None => print!(" "),
}
}
println!();
}
println!();
}
}
pub fn size(&self) -> Pos {
(self.xlen, self.ylen, self.zlen)
}
// Resize map as given with blank tiles
pub fn resize(&mut self, pos: Pos) {
let (x, y, z) = pos;
self.tiles = vec![AIR_TILE; (x * y * z) as usize];
self.xlen = x;
self.ylen = y;
self.zlen = z;
}
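// Note: resize discards any existing contents and refills the whole map
// with AIR_TILE; it is a reset, not a copying resize.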
pub fn get_chunk(&self, pos: Pos, size: Pos) -> MapChunk {
let (x0, y0, z0) = pos;
let (xlen, ylen, zlen) = size;
let mut tiles = Tiles::new();
for x in x0..(x0 + xlen) {
for y in y0..(y0 + ylen) {
for z in z0..(z0 + zlen) {
let index = self.coords_to_index((x, y, z));
tiles.push(self.tiles[index]);
}
}
}
MapChunk {
tiles: tiles,
pos: pos,
xlen: xlen,
ylen: ylen,
zlen: zlen,
}
}
// TODO Add duplication factor
pub fn to_chunks(&self) -> Vec<MapChunk> {
let mut chunks = Vec::<MapChunk>::new();
let x_chunks = Map::get_num_chunks(self.xlen, CHUNK_TILES_X);
let y_chunks = Map::get_num_chunks(self.ylen, CHUNK_TILES_Y);
let z_chunks = Map::get_num_chunks(self.zlen, CHUNK_TILES_Z);
for dx in 0..x_chunks {
for dy in 0..y_chunks {
for dz in 0..z_chunks {
let x = dx * CHUNK_TILES_X;
let y = dy * CHUNK_TILES_Y;
let z = dz * CHUNK_TILES_Z;
let pos = (x, y, z);
let xlen = min(CHUNK_TILES_X, self.xlen - dx * CHUNK_TILES_X);
let ylen = min(CHUNK_TILES_Y, self.ylen - dy * CHUNK_TILES_Y);
let zlen = min(CHUNK_TILES_Z, self.zlen - dz * CHUNK_TILES_Z);
let size = (xlen, ylen, zlen);
chunks.push(self.get_chunk(pos, size))
}
}
}
chunks
}
fn get_num_chunks(map_len: PosUnit, chunk_len: PosUnit) -> PosUnit {
if map_len % chunk_len == 0 {
map_len / chunk_len
} else {
map_len / chunk_len + 1
}
}
pub fn apply_chunk(&mut self, chunk: &MapChunk) {
let (x0, y0, z0) = chunk.pos;
let mut chunk_i = 0;
for x in 0..chunk.xlen {
for y in 0..chunk.ylen {
for z in 0..chunk.zlen {
let mx = x + x0;
let my = (y + y0) * self.xlen;
let mz = (z + z0) * self.xlen * self.ylen;
let map_i = (mx + my + mz) as usize;
self.tiles[map_i] = chunk.tiles[chunk_i];
chunk_i += 1;
}
}
}
}
/// Tile accessor method
pub fn get_tile(&self, pos: Pos) -> Option<Tile> {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
Some(self.tiles[index])
} else {
None
}
}
/// Perform some mutable operation to a tile
fn apply_tile_func<F>(&mut self, pos: Pos, func: F)
where F: Fn(&mut Tile) {
if self.in_bounds(pos) {
let index = self.coords_to_index(pos);
func(&mut self.tiles[index]);
}
}
fn in_bounds(&self, pos: Pos) -> bool {
let (x, y, z) = pos;
!(0 > x || 0 > y || 0 > z || x >= self.xlen || y >= self.ylen || z >= self.zlen)
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, z) = pos;
(x + y * self.xlen + z * self.xlen * self.ylen) as usize
}
pub fn update_tile(&mut self, new_tile: Tile, pos: Pos) {
self.apply_tile_func(pos, |tile| {
tile.material = new_tile.material;
tile.marked = false;
});
}
pub fn dig(&mut self, pos: Pos) {
let alt = self.get_alt(pos);
self.apply_tile_func(pos, |tile| tile.material = alt);
}
pub fn mark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = true);
}
#[allow(dead_code)]
pub fn unmark(&mut self, pos: Pos) {
self.apply_tile_func(pos, |tile| tile.marked = false);
}
fn grab_material(&self, pos: Pos) -> Option<Material> {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
Some(material.clone())
} else {
None
}
} else {
None
}
}
pub fn get_alt(&self, pos: Pos) -> MaterialID {
if let Some(material) = self.grab_material(pos) {
material.alt
} else {
0
}
}
pub fn diggable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
if let Some(material) = self.materials.get(&tile.material) {
material.diggable && !tile.marked
} else {
false
}
} else {
false
}
}
pub fn passable(&self, pos: Pos) -> bool {
if let Some(tile) = self.get_tile(pos) {
match tile.mode {
Mode::Block => false,
_ => true,
}
} else {
false
}
}
#[allow(dead_code)]
pub fn save(&self, path: &str) -> Result<(), Error> {
//Saves map as file. Currently unversioned, so take heed.
let f = try!(File::create(&path));
let mut writer = BufWriter::new(&f);
try!(write!(&mut writer, "{} {} {}\n", self.xlen, self.ylen, self.zlen));
for z in 0..self.zlen {
for y in 0..self.ylen {
for x in 0..self.xlen {
try!(write!(&mut writer, "{} ",
self.get_tile((x, y, z)).expect("Malformed map").material));
}
try!(write!(&mut writer, "\n"));
}
try!(write!(&mut writer, "\n"));
}
Ok(())
}
}
impl MapSnapshot {
#[allow(dead_code)]
pub fn print(&self) {
//MapSnapshot debug
//[debug] func
for y in 0..self.ylen {
for x in 0..self.xlen {
let index = self.coords_to_index((x, y, 0));
print!("{0}", self.tiles[index].material % 10);
}
println!();
}
println!();
}
fn coords_to_index(&self, pos: Pos) -> usize {
let (x, y, _) = pos;
(x + y * self.xlen) as usize
}
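// 2D analogue of the map indexing: with xlen = 5, position (2, 1) maps to
// 2 + 1 * 5 = 7; the z component is ignored for snapshots.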
}
pub fn handle_to_snapshot(handle: &CameraHandle, map: &Map) -> MapSnapshot {
//Uses handle and map to generate 2D snapshot
//Eventually 3D snapshots may be enabled
//Base interface method between rendering engine and map
let mut tiles = Vec::with_capacity((handle.xlen * handle.ylen) as usize);
for y in handle.y..handle.y + handle.ylen {
for x in handle.x..handle.x + handle.xlen {
match map.get_tile((x, y, handle.z)) {
//Get_tile returns valid tile
Some(tile) => tiles.push(tile),
//Otherwise display as air
None => tiles.push(AIR_TILE),
}
}
}
MapSnapshot {tiles: tiles, xlen: handle.xlen, ylen: handle.ylen}
}
pub fn blank_map(root: &Path) -> Map {
// Load materials properties file
let materials = init_materials(root);
Map {
tiles: Tiles::new(),
materials: materials,
xlen: 0,
ylen: 0,
zlen: 0,
}
}
pub fn load_map(path: &str, materials: Materials) -> Result<Map, Error> {
// Load map from file. Currently unversioned so take heed.
// Map validation is not performed.
let mut f = try!(File::open(&path));
let mut contents = String::new();
try!(f.read_to_string(&mut contents));
let mut tiles = Vec::new();
let (mut xlen, mut ylen, mut zlen) = (0i32, 0i32, 0i32);
for (i, line) in contents.lines().enumerate() {
if i == 0 {
let mut split_line = line.split(",");
let version: i32 = split_line.next().unwrap().parse().unwrap();
assert!(version >= 1);
xlen = split_line.next().unwrap().parse().unwrap();
ylen = split_line.next().unwrap().parse().unwrap();
zlen = split_line.next().unwrap().parse().unwrap();
} else {
for word in line.split(",") {
let mut word_parts = word.split(";");
if let Some(material_str) = word_parts.next() {
if material_str.len() > 0 {
let material: u16 = material_str.parse().unwrap();
let mode: Mode = match word_parts.next() {
Some(mode_str) => to_mode(mode_str.parse().unwrap()).unwrap(),
None => Mode::Block,
};
tiles.push(Tile::new(material, mode));
}
}
}
}
}
Ok(Map {tiles: tiles, materials: materials, xlen: xlen, ylen: ylen, zlen: zlen})
}
| MapChunk | identifier_name |
main.rs |
// Get the size of the given type in bytes
fn size_of<T>() -> i32 {
mem::size_of::<T>() as i32
}
// Get an offset in bytes for n units of type T
fn offset<T>(n: u32) -> *const c_void {
(n * mem::size_of::<T>() as u32) as *const T as *const c_void
}
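// e.g. offset::<f32>(3) produces a byte offset of 12, since each f32 is 4 bytes.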
fn read_triangles_from_file() -> Result<Vec<f32>, ()> {
// Reads an arbitrary number of triangles from a file
let mut vertices: Vec<f32>;
match File::open(".\\src\\triangles.txt") {
Ok(mut file) => {
let mut content = String::new();
// Read all the file content into a variable
file.read_to_string(&mut content).unwrap();
vertices = content
.split(" ")
.map(|x| x.parse::<f32>().unwrap())
.collect();
println!("{}", content);
Ok(vertices)
}
// Error handling
Err(error) => {
println!("Error message: {}", error);
std::process::exit(1);
}
}
}
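// The file is expected to hold space-separated floats on a single line, nine
// per triangle, e.g. (hypothetical contents):
//     -0.6 -0.6 0.0 0.6 -0.6 0.0 0.0 0.6 0.0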
// Get a null pointer (equivalent to an offset of 0)
// ptr::null()
// let p = 0 as *const c_void
// == // Modify and complete the function below for the first task
unsafe fn init_vao(vertices: &Vec<f32>, indices: &Vec<u32>, colors: &Vec<f32>) -> u32 {
// Returns the ID of the newly instantiated vertex array object upon its creation
// VAO - a way to bind the VBO with its specification
let mut vao: u32 = 0; // Create
gl::GenVertexArrays(1, &mut vao); // Generate
gl::BindVertexArray(vao); // Bind
// VBO - buffer for the vertices/positions
let mut vbo: u32 = 0;
gl::GenBuffers(1, &mut vbo); // creates buffer, generates an id for the vertex buffer - stored on vram
gl::BindBuffer(gl::ARRAY_BUFFER, vbo); // Binding is sort of like creating layers in photoshop
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&vertices),
pointer_to_array(&vertices),
gl::STATIC_DRAW,
);
// Vaa = Vertex attrib array
gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, 0, 0 as *const c_void);
gl::EnableVertexAttribArray(0);
// CBO - vbo for the color buffer, RGBA
let mut cbo: u32 = 1;
gl::GenBuffers(1, &mut cbo);
gl::BindBuffer(gl::ARRAY_BUFFER, cbo);
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&colors),
pointer_to_array(&colors),
gl::STATIC_DRAW,
);
// 2nd attribute buffer is for colors
gl::VertexAttribPointer(1, 4, gl::FLOAT, gl::FALSE, size_of::<f32>() * 4, 0 as *const c_void);
gl::EnableVertexAttribArray(1);
// Index buffer object = connect the dots; multiple use cases for the same vertices.
let mut ibo: u32 = 0;
gl::GenBuffers(1, &mut ibo);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ibo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
byte_size_of_array(&indices),
pointer_to_array(&indices),
gl::STATIC_DRAW,
);
vao
}
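// Layout summary: attribute 0 carries vec3 positions from `vertices`,
// attribute 1 carries vec4 RGBA from `colors`, and `indices` is bound as the
// element buffer, so DrawElements can index into both.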
fn main() {
// Set up the necessary objects to deal with windows and event handling
let el = glutin::event_loop::EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("Gloom-rs")
.with_resizable(false)
.with_inner_size(glutin::dpi::LogicalSize::new(SCREEN_W, SCREEN_H));
let cb = glutin::ContextBuilder::new().with_vsync(true);
let windowed_context = cb.build_windowed(wb, &el).unwrap();
// Uncomment these if you want to use the mouse for controls, but want it to be confined to the screen and/or invisible.
// windowed_context.window().set_cursor_grab(true).expect("failed to grab cursor");
// windowed_context.window().set_cursor_visible(false);
// Set up a shared vector for keeping track of currently pressed keys
let arc_pressed_keys = Arc::new(Mutex::new(Vec::<VirtualKeyCode>::with_capacity(10)));
// Make a reference of this vector to send to the render thread
let pressed_keys = Arc::clone(&arc_pressed_keys);
// Set up shared tuple for tracking mouse movement between frames
let arc_mouse_delta = Arc::new(Mutex::new((0f32, 0f32)));
// Make a reference of this tuple to send to the render thread
let mouse_delta = Arc::clone(&arc_mouse_delta);
// Spawn a separate thread for rendering, so event handling doesn't block rendering
let render_thread = thread::spawn(move || {
// Acquire the OpenGL Context and load the function pointers. This has to be done inside of the rendering thread, because
// an active OpenGL context cannot safely traverse a thread boundary
let context = unsafe {
let c = windowed_context.make_current().unwrap();
gl::load_with(|symbol| c.get_proc_address(symbol) as *const _);
c
};
// Set up openGL
unsafe {
gl::Enable(gl::DEPTH_TEST);
gl::DepthFunc(gl::LESS);
gl::Enable(gl::CULL_FACE);
gl::Disable(gl::MULTISAMPLE);
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
gl::Enable(gl::DEBUG_OUTPUT_SYNCHRONOUS);
gl::DebugMessageCallback(Some(util::debug_callback), ptr::null());
// Print some diagnostics
println!(
"{}: {}",
util::get_gl_string(gl::VENDOR),
util::get_gl_string(gl::RENDERER)
);
println!("OpenGL\t: {}", util::get_gl_string(gl::VERSION));
println!(
"GLSL\t: {}",
util::get_gl_string(gl::SHADING_LANGUAGE_VERSION)
);
}
let c: Vec<f32> = vec![
-0.8, -0.6, 0.0,
-0.5, -0.6, 0.0,
-0.65, -0.2, 0.0,
0.5, -0.6, 0.0,
0.8, -0.6, 0.0,
0.65, -0.2, 0.0,
-0.2, 0.3, 0.0,
0.2, 0.6, 0.0,
0.0, 0.6, 0.0,
];
let i: Vec<u32> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8];
let col: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
];
let overLappingCoordinates: Vec<f32> = vec![
-0.3, 0.0, 0.7,
0.3, 0.0, 0.7,
0.0, 0.5, 0.7,
-0.1, 0.3, 0.8,
0.3, 0.0, 0.8,
0.3, 0.6, 0.8,
-0.4, 0.6, 0.6,
-0.4, 0.0, 0.6,
0.2, 0.3, 0.6
];
let overLappingColors: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.9,
0.0, 0.0, 1.0, 0.9,
0.0, 0.0, 1.0, 0.9,
];
let coordinates: Vec<f32> = vec![
-0.6, -0.6, 0.0,
0.6, -0.6, 0.0,
0.0, 0.6, 0.0
];
let triangle_indices: Vec<u32> = vec![0, 1, 2];
let colors: Vec<f32> = vec![
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0
];
// == // Set up your VAO here
unsafe {
let vao = init_vao(&overLappingCoordinates, &i, &overLappingColors);
}
// Setup uniform locations
let trans_loc: i32;
let time_loc: i32;
let opacity_loc: i32;
unsafe {
// Builds the shader: attach_file returns self, so the calls chain, and link() joins the stages at the end
let shdr = shader::ShaderBuilder::new()
.attach_file(".\\shaders\\simple.vert")
.attach_file(".\\shaders\\simple.frag")
.link();
// Get uniform locations
trans_loc = shdr.get_uniform_location("transformation");
time_loc = shdr.get_uniform_location("time");
opacity_loc = shdr.get_uniform_location("opacity");
shdr.activate();
}
// Used to demonstrate keyboard handling -- feel free to remove
let mut _arbitrary_number = 0.0;
let first_frame_time = std::time::Instant::now();
let mut last_frame_time = first_frame_time;
// The main rendering loop
let persp_mat: glm::Mat4 = glm::perspective(
(SCREEN_H as f32) / (SCREEN_W as f32),
90.0,
1.0,
100.0
);
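// Caveat (assuming nalgebra-glm semantics): glm::perspective takes fovy in
// radians, so 90.0 here was likely meant as 90.0_f32.to_radians(); note too
// that the first argument is the aspect ratio, passed as H/W above.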
let persp_trans: glm::Mat4 = glm::translation(
&glm::vec3(0.0, 0.0, -2.0)
);
let mut proj: glm::Mat4 = persp_mat * persp_trans;
let model: glm::Mat4 = glm::identity();
let mut trans_matrix: glm::Mat4 = glm::identity();
let mut rot_x = 0.0;
let mut rot_y = 0.0;
let rot_step: f32 = 2.0;
let mut opacity: f32 = 0.0;
let mut v_time:f32 = 0.0;
let mut trans_x = 0.0;
let mut trans_y = 0.0;
let mut trans_z = -4.0;
let trans_step: f32 = 0.1;
let mut view: glm::Mat4 = glm::identity();
loop {
let now = std::time::Instant::now();
let elapsed = now.duration_since(first_frame_time).as_secs_f32();
let delta_time = now.duration_since(last_frame_time).as_secs_f32();
last_frame_time = now;
// Handle keyboard input
if let Ok(keys) = pressed_keys.lock() {
for key in keys.iter() {
// I'm using WASDEQ to handle inputs
// Also use arrow keys for rotation
match key {
VirtualKeyCode::W => {
trans_z += trans_step;
},
VirtualKeyCode::A => {
trans_x += trans_step;
},
VirtualKeyCode::S => {
trans_z -= trans_step;
},
VirtualKeyCode::D => {
trans_x -= trans_step;
},
VirtualKeyCode::E => {
trans_y -= trans_step;
},
VirtualKeyCode::Q => {
trans_y += trans_step;
},
VirtualKeyCode::R => {
// Reset camera
view = glm::identity();
},
VirtualKeyCode::Up => {
rot_x -= rot_step;
},
VirtualKeyCode::Down => {
rot_x += rot_step;
},
VirtualKeyCode::Left => {
rot_y -= rot_step;
},
VirtualKeyCode::Right => {
rot_y += rot_step;
},
_ => {}
}
}
}
// Handle mouse movement. delta contains the x and y movement of the mouse since last frame in pixels
if let Ok(mut delta) = mouse_delta.lock() {
*delta = (0.0, 0.0);
}
opacity = (elapsed * 10.0).sin() / 2.0 + 0.6;
v_time = elapsed.sin();
let trans: glm::Mat4 = glm::translation(&glm::vec3(trans_x, trans_y, trans_z));
let rot: glm::Mat4 = glm::rotation(rot_x.to_radians(), &glm::vec3(1.0, 0.0, 0.0)) * glm::rotation(rot_y.to_radians(), &glm::vec3(0.0, 1.0, 0.0));
let scale: glm::Mat4 = glm::identity();
view = rot * trans * view;
let mut mod_view = view * model;
// Transmat here becomes MVP matrix after getting built up by model,
// view ( rotation, translation ), and projection
let trans_mat = proj * mod_view;
//Billboard task
/*
mod_view.m11 = 1.0;
mod_view.m12 = 0.0;
mod_view.m13 = 0.0;
mod_view.m21 = 0.0;
mod_view.m22 = 1.0;
mod_view.m23 = 0.0;
mod_view.m31 = 0.0;
mod_view.m32 = 0.0;
mod_view.m33 = 1.0;
*/
// Reset values
trans_x = 0.0;
trans_y = 0.0;
trans_z = 0.0;
rot_y = 0.0;
rot_x = 0.0;
unsafe {
gl::ClearColor(0.76862745, 0.71372549, 0.94901961, 1.0); // moon raker, full opacity
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
// Now we can use these uniforms in our shaders
gl::Uniform1f(opacity_loc, opacity);
gl::Uniform1f(time_loc, v_time);
gl::UniformMatrix4fv(trans_loc, 1, gl::FALSE, trans_mat.as_ptr());
// Issue the necessary commands to draw your scene here
// Three indices per triangle; three triangles are drawn here, so 3 * 3 = 9 indices
let num_of_indices = 3 * 3;
let num_of_square_indices = 6;
gl::DrawElements(
gl::TRIANGLES,
num_of_indices,
gl::UNSIGNED_INT,
ptr::null(),
);
}
context.swap_buffers().unwrap();
}
});
// Keep track of the health of the rendering thread
let render_thread_healthy = Arc::new(RwLock::new(true));
let render_thread_watchdog = Arc::clone(&render_thread_healthy);
thread::spawn(move || {
if !render_thread.join().is_ok() {
if let Ok(mut health) = render_thread_watchdog.write() {
println!("Render thread panicked!");
*health = false;
}
}
});
// Start the event loop -- This is where window events get handled
el.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Wait;
// Terminate program if render thread panics
if let Ok(health) = render_thread_healthy.read() {
if *health == false {
*control_flow = ControlFlow::Exit;
}
}
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
// Keep track of currently pressed keys to send to the rendering thread
Event::WindowEvent {
event:
WindowEvent::KeyboardInput {
input:
KeyboardInput {
state: key_state,
virtual_keycode: Some(keycode),
..
},
..
},
..
} => {
if let Ok(mut keys) = arc_pressed_keys.lock() {
match key_state {
Released => {
if keys.contains(&keycode) {
let i = keys.iter().position(|&k| k == keycode).unwrap();
| {
&val[0] as *const T as *const c_void
} | identifier_body |
|
main.rs | ] as *const T as *const c_void
}
// Get the size of the given type in bytes
fn size_of<T>() -> i32 {
mem::size_of::<T>() as i32
}
// Get an offset in bytes for n units of type T
fn offset<T>(n: u32) -> *const c_void {
(n * mem::size_of::<T>() as u32) as *const T as *const c_void
}
fn read_triangles_from_file() -> Result<Vec<f32>, ()> {
// Reads an arbitrary number of triangles from a file
let mut vertices: Vec<f32>;
match File::open(".\\src\\triangles.txt") {
Ok(mut file) => {
let mut content = String::new();
// Read all the file content into a variable
file.read_to_string(&mut content).unwrap();
vertices = content
.split(" ")
.map(|x| x.parse::<f32>().unwrap())
.collect();
println!("{}", content);
Ok(vertices)
}
// Error handling
Err(error) => |
}
}
// Get a null pointer (equivalent to an offset of 0)
// ptr::null()
// let p = 0 as *const c_void
// == // Modify and complete the function below for the first task
unsafe fn init_vao(vertices: &Vec<f32>, indices: &Vec<u32>, colors: &Vec<f32>) -> u32 {
// Returns the ID of the newly instantiated vertex array object upon its creation
// VAO - a way to bind the VBO with its specification
let mut vao: u32 = 0; // Create
gl::GenVertexArrays(1, &mut vao); // Generate
gl::BindVertexArray(vao); // Bind
// VBO - buffer for the vertices/positions
let mut vbo: u32 = 0;
gl::GenBuffers(1, &mut vbo); // creates buffer, generates an id for the vertex buffer - stored on vram
gl::BindBuffer(gl::ARRAY_BUFFER, vbo); // Binding is sort of like creating layers in photoshop
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&vertices),
pointer_to_array(&vertices),
gl::STATIC_DRAW,
);
// Vaa = Vertex attrib array
gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, 0, 0 as *const c_void);
gl::EnableVertexAttribArray(0);
// CBO - vbo for the color buffer, RGBA
let mut cbo: u32 = 1;
gl::GenBuffers(1, &mut cbo);
gl::BindBuffer(gl::ARRAY_BUFFER, cbo);
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&colors),
pointer_to_array(&colors),
gl::STATIC_DRAW,
);
// 2nd attribute buffer is for colors
gl::VertexAttribPointer(1, 4, gl::FLOAT, gl::FALSE, size_of::<f32>() * 4, 0 as *const c_void);
gl::EnableVertexAttribArray(1);
// Index buffer object = connect the dots; multiple use cases for the same vertices.
let mut ibo: u32 = 0;
gl::GenBuffers(1, &mut ibo);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ibo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
byte_size_of_array(&indices),
pointer_to_array(&indices),
gl::STATIC_DRAW,
);
vao
}
fn main() {
// Set up the necessary objects to deal with windows and event handling
let el = glutin::event_loop::EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("Gloom-rs")
.with_resizable(false)
.with_inner_size(glutin::dpi::LogicalSize::new(SCREEN_W, SCREEN_H));
let cb = glutin::ContextBuilder::new().with_vsync(true);
let windowed_context = cb.build_windowed(wb, &el).unwrap();
// Uncomment these if you want to use the mouse for controls, but want it to be confined to the screen and/or invisible.
// windowed_context.window().set_cursor_grab(true).expect("failed to grab cursor");
// windowed_context.window().set_cursor_visible(false);
// Set up a shared vector for keeping track of currently pressed keys
let arc_pressed_keys = Arc::new(Mutex::new(Vec::<VirtualKeyCode>::with_capacity(10)));
// Make a reference of this vector to send to the render thread
let pressed_keys = Arc::clone(&arc_pressed_keys);
// Set up shared tuple for tracking mouse movement between frames
let arc_mouse_delta = Arc::new(Mutex::new((0f32, 0f32)));
// Make a reference of this tuple to send to the render thread
let mouse_delta = Arc::clone(&arc_mouse_delta);
// Spawn a separate thread for rendering, so event handling doesn't block rendering
let render_thread = thread::spawn(move || {
// Acquire the OpenGL Context and load the function pointers. This has to be done inside of the rendering thread, because
// an active OpenGL context cannot safely traverse a thread boundary
let context = unsafe {
let c = windowed_context.make_current().unwrap();
gl::load_with(|symbol| c.get_proc_address(symbol) as *const _);
c
};
// Set up openGL
unsafe {
gl::Enable(gl::DEPTH_TEST);
gl::DepthFunc(gl::LESS);
gl::Enable(gl::CULL_FACE);
gl::Disable(gl::MULTISAMPLE);
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
gl::Enable(gl::DEBUG_OUTPUT_SYNCHRONOUS);
gl::DebugMessageCallback(Some(util::debug_callback), ptr::null());
// Print some diagnostics
println!(
"{}: {}",
util::get_gl_string(gl::VENDOR),
util::get_gl_string(gl::RENDERER)
);
println!("OpenGL\t: {}", util::get_gl_string(gl::VERSION));
println!(
"GLSL\t: {}",
util::get_gl_string(gl::SHADING_LANGUAGE_VERSION)
);
}
let c: Vec<f32> = vec![
-0.8, -0.6, 0.0,
-0.5, -0.6, 0.0,
-0.65, -0.2, 0.0,
0.5, -0.6, 0.0,
0.8, -0.6, 0.0,
0.65, -0.2, 0.0,
-0.2, 0.3, 0.0,
0.2, 0.6, 0.0,
0.0, 0.6, 0.0,
];
let i: Vec<u32> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8];
let col: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
];
let overLappingCoordinates: Vec<f32> = vec![
-0.3, 0.0, 0.7,
0.3, 0.0, 0.7,
0.0, 0.5, 0.7,
-0.1, 0.3, 0.8,
0.3, 0.0, 0.8,
0.3, 0.6, 0.8,
-0.4, 0.6, 0.6,
-0.4, 0.0, 0.6,
0.2, 0.3, 0.6
];
let overLappingColors: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.9,
0.0, 0.0, 1.0, 0.9,
0.0, 0.0, 1.0, 0.9,
];
let coordinates: Vec<f32> = vec![
-0.6, -0.6, 0.0,
0.6, -0.6, 0.0,
0.0, 0.6, 0.0
];
let triangle_indices: Vec<u32> = vec![0, 1, 2];
let colors: Vec<f32> = vec![
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0
];
// == // Set up your VAO here
unsafe {
let vao = init_vao(&overLappingCoordinates, &i, &overLappingColors);
}
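// The returned id is dropped at the end of this block; the draw call below
// relies on the VAO still being bound inside init_vao rather than on `vao`.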
// Setup uniform locations
let trans_loc: i32;
let time_loc: i32;
let opacity_loc: i32;
unsafe {
// Builds the shader: attach_file returns self, so the calls chain, and link() joins the stages at the end
let shdr = shader::ShaderBuilder::new()
.attach_file(".\\shaders\\simple.vert")
.attach_file(".\\shaders\\simple.frag")
.link();
// Get uniform locations
trans_loc = shdr.get_uniform_location("transformation");
time_loc = shdr.get_uniform_location("time");
opacity_loc = shdr.get_uniform_location("opacity");
shdr.activate();
}
// Used to demonstrate keyboard handling -- feel free to remove
let mut _arbitrary_number = 0.0;
let first_frame_time = std::time::Instant::now();
let mut last_frame_time = first_frame_time;
// The main rendering loop
let persp_mat: glm::Mat4 = glm::perspective(
(SCREEN_H as f32) / (SCREEN_W as f32),
90.0,
1.0,
100.0
);
let persp_trans: glm::Mat4 = glm::translation(
&glm::vec3(0.0, 0.0, -2.0)
);
let mut proj: glm::Mat4 = persp_mat * persp_trans;
let model: glm::Mat4 = glm::identity();
let mut trans_matrix: glm::Mat4 = glm::identity();
let mut rot_x = 0.0;
let mut rot_y = 0.0;
let rot_step: f32 = 2.0;
let mut opacity: f32 = 0.0;
let mut v_time:f32 = 0.0;
let mut trans_x = 0.0;
let mut trans_y = 0.0;
let mut trans_z = -4.0;
let trans_step: f32 = 0.1;
let mut view: glm::Mat4 = glm::identity();
loop {
let now = std::time::Instant::now();
let elapsed = now.duration_since(first_frame_time).as_secs_f32();
let delta_time = now.duration_since(last_frame_time).as_secs_f32();
last_frame_time = now;
// Handle keyboard input
if let Ok(keys) = pressed_keys.lock() {
for key in keys.iter() {
// I'm using WASDEQ to handle inputs
// Also use arrow keys for rotation
match key {
VirtualKeyCode::W => {
trans_z += trans_step;
},
VirtualKeyCode::A => {
trans_x += trans_step;
},
VirtualKeyCode::S => {
trans_z -= trans_step;
},
VirtualKeyCode::D => {
trans_x -= trans_step;
},
VirtualKeyCode::E => {
trans_y -= trans_step;
},
VirtualKeyCode::Q => {
trans_y += trans_step;
},
VirtualKeyCode::R => {
// Reset camera
view = glm::identity();
},
VirtualKeyCode::Up => {
rot_x -= rot_step;
},
VirtualKeyCode::Down => {
rot_x += rot_step;
},
VirtualKeyCode::Left => {
rot_y -= rot_step;
},
VirtualKeyCode::Right => {
rot_y += rot_step;
},
_ => {}
}
}
}
// Handle mouse movement. delta contains the x and y movement of the mouse since last frame in pixels
if let Ok(mut delta) = mouse_delta.lock() {
*delta = (0.0, 0.0);
}
opacity = (elapsed * 10.0).sin() / 2.0 + 0.6;
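// opacity pulses in roughly [0.1, 1.1]; alpha above 1.0 simply saturates, so
// the triangles briefly read as fully opaque at the top of the swing.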
v_time = elapsed.sin();
let trans: glm::Mat4 = glm::translation(&glm::vec3(trans_x, trans_y, trans_z));
let rot: glm::Mat4 = glm::rotation(rot_x.to_radians(), &glm::vec3(1.0, 0.0, 0.0)) * glm::rotation(rot_y.to_radians(), &glm::vec3(0.0, 1.0, 0.0));
let scale: glm::Mat4 = glm::identity();
view = rot * trans * view;
let mut mod_view = view * model;
// Transmat here becomes MVP matrix after getting built up by model,
// view ( rotation, translation ), and projection
let trans_mat = proj * mod_view;
//Billboard task
/*
mod_view.m11 = 1.0;
mod_view.m12 = 0.0;
mod_view.m13 = 0.0;
mod_view.m21 = 0.0;
mod_view.m22 = 1.0;
mod_view.m23 = 0.0;
mod_view.m31 = 0.0;
mod_view.m32 = 0.0;
mod_view.m33 = 1.0;
*/
// Reset values
trans_x = 0.0;
trans_y = 0.0;
trans_z = 0.0;
rot_y = 0.0;
rot_x = 0.0;
unsafe {
gl::ClearColor(0.76862745, 0.71372549, 0.94901961, 1.0); // moon raker, full opacity
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
// Now we can use these uniforms in our shaders
gl::Uniform1f(opacity_loc, opacity);
gl::Uniform1f(time_loc, v_time);
gl::UniformMatrix4fv(trans_loc, 1, gl::FALSE, trans_mat.as_ptr());
// Issue the necessary commands to draw your scene here
// Three indices per triangle; three triangles are drawn here, so 3 * 3 = 9 indices
let num_of_indices = 3 * 3;
let num_of_square_indices = 6;
gl::DrawElements(
gl::TRIANGLES,
num_of_indices,
gl::UNSIGNED_INT,
ptr::null(),
);
}
context.swap_buffers().unwrap();
}
});
// Keep track of the health of the rendering thread
let render_thread_healthy = Arc::new(RwLock::new(true));
let render_thread_watchdog = Arc::clone(&render_thread_healthy);
thread::spawn(move || {
if !render_thread.join().is_ok() {
if let Ok(mut health) = render_thread_watchdog.write() {
println!("Render thread panicked!");
*health = false;
}
}
});
// Start the event loop -- This is where window events get handled
el.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Wait;
// Terminate program if render thread panics
if let Ok(health) = render_thread_healthy.read() {
if *health == false {
*control_flow = ControlFlow::Exit;
}
}
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
// Keep track of currently pressed keys to send to the rendering thread
Event::WindowEvent {
event:
WindowEvent::KeyboardInput {
input:
KeyboardInput {
state: key_state,
virtual_keycode: Some(keycode),
..
},
..
},
..
} => {
if let Ok(mut keys) = arc_pressed_keys.lock() {
match key_state {
Released => {
if keys.contains(&keycode) {
let i = keys.iter().position(|&k| k == keycode).unwrap();
keys | {
println!("Error message: {}", error);
std::process::exit(1);
} | conditional_block |
main.rs | 0] as *const T as *const c_void
}
// Get the size of the given type in bytes
fn size_of<T>() -> i32 {
mem::size_of::<T>() as i32
}
// Get an offset in bytes for n units of type T
fn offset<T>(n: u32) -> *const c_void {
(n * mem::size_of::<T>() as u32) as *const T as *const c_void
}
fn read_triangles_from_file() -> Result<Vec<f32>, ()> {
// Reads an arbitrary number of triangles from a file
let mut vertices: Vec<f32>;
match File::open(".\\src\\triangles.txt") {
Ok(mut file) => {
let mut content = String::new();
// Read all the file content into a variable
file.read_to_string(&mut content).unwrap();
vertices = content
.split(" ")
.map(|x| x.parse::<f32>().unwrap())
.collect();
println!("{}", content);
Ok(vertices)
}
// Error handling
Err(error) => {
println!("Error message: {}", error);
std::process::exit(1);
}
}
}
// Get a null pointer (equivalent to an offset of 0)
// ptr::null()
// let p = 0 as *const c_void
// == // Modify and complete the function below for the first task
unsafe fn init_vao(vertices: &Vec<f32>, indices: &Vec<u32>, colors: &Vec<f32>) -> u32 {
// Returns the ID of the newly instantiated vertex array object upon its creation
// VAO - a way to bind the VBO with its specification
let mut vao: u32 = 0; // Create
gl::GenVertexArrays(1, &mut vao); // Generate
gl::BindVertexArray(vao); // Bind
// VBO - buffer for the vertices/positions
let mut vbo: u32 = 0;
gl::GenBuffers(1, &mut vbo); // creates buffer, generates an id for the vertex buffer - stored on vram
gl::BindBuffer(gl::ARRAY_BUFFER, vbo); // Binding is sort of like creating layers in photoshop
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&vertices),
pointer_to_array(&vertices),
gl::STATIC_DRAW,
);
// Vaa = Vertex attrib array
gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, 0, 0 as *const c_void);
gl::EnableVertexAttribArray(0);
// CBO - vbo for the color buffer, RGBA
let mut cbo: u32 = 1;
gl::GenBuffers(1, &mut cbo);
gl::BindBuffer(gl::ARRAY_BUFFER, cbo);
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&colors),
pointer_to_array(&colors),
gl::STATIC_DRAW,
);
// 2nd attribute buffer is for colors
gl::VertexAttribPointer(1, 4, gl::FLOAT, gl::FALSE, size_of::<f32>() * 4, 0 as *const c_void);
gl::EnableVertexAttribArray(1);
// Index buffer object = connect the dots; multiple use cases for the same vertices.
let mut ibo: u32 = 0;
gl::GenBuffers(1, &mut ibo);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ibo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
byte_size_of_array(&indices),
pointer_to_array(&indices),
gl::STATIC_DRAW,
);
vao
}
fn main() {
// Set up the necessary objects to deal with windows and event handling
let el = glutin::event_loop::EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("Gloom-rs")
.with_resizable(false)
.with_inner_size(glutin::dpi::LogicalSize::new(SCREEN_W, SCREEN_H));
let cb = glutin::ContextBuilder::new().with_vsync(true);
let windowed_context = cb.build_windowed(wb, &el).unwrap();
// Uncomment these if you want to use the mouse for controls, but want it to be confined to the screen and/or invisible.
// windowed_context.window().set_cursor_grab(true).expect("failed to grab cursor");
// windowed_context.window().set_cursor_visible(false);
// Set up a shared vector for keeping track of currently pressed keys
let arc_pressed_keys = Arc::new(Mutex::new(Vec::<VirtualKeyCode>::with_capacity(10)));
// Make a reference of this vector to send to the render thread
let pressed_keys = Arc::clone(&arc_pressed_keys);
// Set up shared tuple for tracking mouse movement between frames
let arc_mouse_delta = Arc::new(Mutex::new((0f32, 0f32)));
// Make a reference of this tuple to send to the render thread
let mouse_delta = Arc::clone(&arc_mouse_delta);
// Spawn a separate thread for rendering, so event handling doesn't block rendering
let render_thread = thread::spawn(move || {
// Acquire the OpenGL Context and load the function pointers. This has to be done inside of the rendering thread, because
// an active OpenGL context cannot safely traverse a thread boundary
let context = unsafe {
let c = windowed_context.make_current().unwrap();
gl::load_with(|symbol| c.get_proc_address(symbol) as *const _);
c
};
// Set up openGL
unsafe {
gl::Enable(gl::DEPTH_TEST);
gl::DepthFunc(gl::LESS);
gl::Enable(gl::CULL_FACE);
gl::Disable(gl::MULTISAMPLE);
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
gl::Enable(gl::DEBUG_OUTPUT_SYNCHRONOUS);
gl::DebugMessageCallback(Some(util::debug_callback), ptr::null());
// Print some diagnostics
println!(
"{}: {}",
util::get_gl_string(gl::VENDOR),
util::get_gl_string(gl::RENDERER)
);
println!("OpenGL\t: {}", util::get_gl_string(gl::VERSION));
println!(
"GLSL\t: {}",
util::get_gl_string(gl::SHADING_LANGUAGE_VERSION)
);
}
let c: Vec<f32> = vec![
-0.8, -0.6, 0.0,
-0.5, -0.6, 0.0,
-0.65, -0.2, 0.0,
0.5, -0.6, 0.0,
0.8, -0.6, 0.0,
0.65, -0.2, 0.0,
-0.2, 0.3, 0.0,
0.2, 0.6, 0.0,
0.0, 0.6, 0.0,
];
let i: Vec<u32> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8];
let col: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
];
let overLappingCoordinates: Vec<f32> = vec![
-0.3, 0.0, 0.7,
0.3, 0.0, 0.7,
0.0, 0.5, 0.7,
-0.1, 0.3, 0.8,
0.3, 0.0, 0.8,
0.3, 0.6, 0.8,
-0.4, 0.6, 0.6,
-0.4, 0.0, 0.6,
0.2, 0.3, 0.6
];
let overLappingColors: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.9,
0.0, 0.0, 1.0, 0.9,
0.0, 0.0, 1.0, 0.9,
];
let coordinates: Vec<f32> = vec![
-0.6, -0.6, 0.0,
0.6, -0.6, 0.0,
0.0, 0.6, 0.0
];
let triangle_indices: Vec<u32> = vec![0, 1, 2];
let colors: Vec<f32> = vec![
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0
];
// == // Set up your VAO here
unsafe {
let vao = init_vao(&overLappingCoordinates, &i, &overLappingColors);
}
// Setup uniform locations
let trans_loc: i32;
let time_loc: i32;
let opacity_loc: i32;
unsafe {
// Builds the shader: attach_file returns self, so the calls chain, and link() joins the stages at the end
let shdr = shader::ShaderBuilder::new()
.attach_file(".\\shaders\\simple.vert")
.attach_file(".\\shaders\\simple.frag")
.link();
// Get uniform locations
trans_loc = shdr.get_uniform_location("transformation");
time_loc = shdr.get_uniform_location("time");
opacity_loc = shdr.get_uniform_location("opacity");
shdr.activate();
}
// Used to demonstrate keyboard handling -- feel free to remove
let mut _arbitrary_number = 0.0;
let first_frame_time = std::time::Instant::now();
let mut last_frame_time = first_frame_time;
// The main rendering loop
let persp_mat: glm::Mat4 = glm::perspective(
(SCREEN_H as f32) / (SCREEN_W as f32),
90.0,
1.0,
100.0
);
let persp_trans: glm::Mat4 = glm::translation(
&glm::vec3(0.0, 0.0, -2.0)
);
let mut proj: glm::Mat4 = persp_mat * persp_trans;
let model: glm::Mat4 = glm::identity();
let mut trans_matrix: glm::Mat4 = glm::identity();
let mut rot_x = 0.0;
let mut rot_y = 0.0;
let rot_step: f32 = 2.0;
let mut opacity: f32 = 0.0;
let mut v_time:f32 = 0.0;
let mut trans_x = 0.0;
let mut trans_y = 0.0;
let mut trans_z = -4.0;
let trans_step: f32 = 0.1;
let mut view: glm::Mat4 = glm::identity();
loop {
let now = std::time::Instant::now();
let elapsed = now.duration_since(first_frame_time).as_secs_f32();
let delta_time = now.duration_since(last_frame_time).as_secs_f32();
last_frame_time = now;
// Handle keyboard input
if let Ok(keys) = pressed_keys.lock() {
for key in keys.iter() {
// I'm using WASDEQ to handle inputs
// Also use arrowkeys for rotation
match key {
VirtualKeyCode::W => {
trans_z += trans_step;
},
VirtualKeyCode::A => {
trans_x += trans_step;
},
VirtualKeyCode::S => {
trans_z -= trans_step;
},
VirtualKeyCode::D => {
trans_x -= trans_step;
},
VirtualKeyCode::E => {
trans_y -= trans_step;
},
VirtualKeyCode::Q => {
trans_y += trans_step;
},
VirtualKeyCode::R => {
// Reset camera
view = glm::identity();
},
VirtualKeyCode::Up => {
rot_x -= rot_step;
},
VirtualKeyCode::Down => {
rot_x += rot_step;
},
VirtualKeyCode::Left => {
rot_y -= rot_step;
},
VirtualKeyCode::Right => {
rot_y += rot_step;
},
_ => {}
}
}
}
// Handle mouse movement. delta contains the x and y movement of the mouse since last frame in pixels
if let Ok(mut delta) = mouse_delta.lock() {
*delta = (0.0, 0.0);
}
opacity = (elapsed * 10.0).sin() / 2.0 + 0.6;
v_time = elapsed.sin();
let trans: glm::Mat4 = glm::translation(&glm::vec3(trans_x, trans_y, trans_z));
let rot: glm::Mat4 = glm::rotation(rot_x.to_radians(), &glm::vec3(1.0, 0.0, 0.0)) * glm::rotation(rot_y.to_radians(), &glm::vec3(0.0, 1.0, 0.0));
let scale: glm::Mat4 = glm::identity();
view = rot * trans * view;
let mut mod_view = view * model;
// Transmat here becomes MVP matrix after getting built up by model,
// view ( rotation, translation ), and projection
let trans_mat = proj * mod_view;
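// Explanatory note (added, not in the original): glm matrices multiply
// column vectors from the right, so `proj * view * model` applies the model
// transform first, then the accumulated camera transform, then the
// projection. Pre-multiplying `view` with this frame's `rot * trans` above
// makes each frame's input move the camera relative to its current
// orientation rather than the world axes.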
//Billboard task
/*
mod_view.m11 = 1.0;
mod_view.m12 = 0.0;
mod_view.m13 = 0.0;
mod_view.m21 = 0.0;
mod_view.m22 = 1.0;
mod_view.m23 = 0.0;
mod_view.m31 = 0.0;
mod_view.m32 = 0.0;
mod_view.m33 = 1.0;
*/
// Reset values
trans_x = 0.0;
trans_y = 0.0;
trans_z = 0.0;
rot_y = 0.0;
rot_x = 0.0;
unsafe {
gl::ClearColor(0.76862745, 0.71372549, 0.94901961, 1.0); // moon raker, full opacity
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
// Now we can use these uniforms in our shaders
gl::Uniform1f(opacity_loc, opacity);
gl::Uniform1f(time_loc, v_time);
gl::UniformMatrix4fv(trans_loc, 1, gl::FALSE, trans_mat.as_ptr());
// Issue the necessary commands to draw your scene here
// We have 9 indices for the 3 triangles, 3 per triangle
let num_of_indices = 3 * 3;
let num_of_square_indices = 6;
gl::DrawElements(
gl::TRIANGLES,
num_of_indices,
gl::UNSIGNED_INT,
ptr::null(),
);
}
context.swap_buffers().unwrap();
}
});
// Keep track of the health of the rendering thread
let render_thread_healthy = Arc::new(RwLock::new(true));
let render_thread_watchdog = Arc::clone(&render_thread_healthy);
thread::spawn(move || {
if !render_thread.join().is_ok() {
if let Ok(mut health) = render_thread_watchdog.write() {
println!("Render thread panicked!");
*health = false;
}
}
});
// Start the event loop -- This is where window events get handled
el.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Wait;
// Terminate program if render thread panics
if let Ok(health) = render_thread_healthy.read() {
if !*health {
*control_flow = ControlFlow::Exit;
}
}
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
// Keep track of currently pressed keys to send to the rendering thread
Event::WindowEvent {
event:
WindowEvent::KeyboardInput {
input:
KeyboardInput {
state: key_state,
virtual_keycode: Some(keycode),
..
},
..
},
..
} => {
if let Ok(mut keys) = arc_pressed_keys.lock() {
match key_state {
Released => {
if keys.contains(&keycode) {
let i = keys.iter().position(|&k| k == keycode).unwrap();
keys.remove(i);
main.rs
// Reconstructed head of this helper (the dump truncated it mid-line); it
// returns a raw pointer to the first element of the array:
fn pointer_to_array<T>(val: &Vec<T>) -> *const c_void {
&val[0] as *const T as *const c_void
}
// Get the size of the given type in bytes
fn size_of<T>() -> i32 {
mem::size_of::<T>() as i32
}
// Get an offset in bytes for n units of type T
fn offset<T>(n: u32) -> *const c_void {
(n * mem::size_of::<T>() as u32) as *const T as *const c_void
}
fn read_triangles_from_file() -> Result<Vec<f32>, ()> {
// Reads an arbitrary number of triangles from a file
let mut vertices: Vec<f32>;
match File::open(".\\src\\triangles.txt") {
Ok(mut file) => {
let mut content = String::new();
// Read all the file content into a variable
file.read_to_string(&mut content).unwrap();
vertices = content
.split(" ")
.map(|x| x.parse::<f32>().unwrap())
.collect();
println!("{}", content);
Ok(vertices)
}
// Error handling
Err(error) => {
println!("Error message: {}", error);
std::process::exit(1);
}
}
}
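// A possible alternative (sketch, not from the original): propagate the IO
// error to the caller with `?` instead of exiting the process, so callers
// can decide how to handle a missing file. Assumes the same
// whitespace-separated float format.
fn _read_triangles_v2(path: &str) -> Result<Vec<f32>, std::io::Error> {
    let content = std::fs::read_to_string(path)?;
    Ok(content
        .split_whitespace()
        .map(|x| x.parse::<f32>().unwrap())
        .collect())
}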
// Get a null pointer (equivalent to an offset of 0)
// ptr::null()
// let p = 0 as *const c_void
// == // Modify and complete the function below for the first task
unsafe fn init_vao(vertices: &Vec<f32>, indices: &Vec<u32>, colors: &Vec<f32>) -> u32 {
// Returns the ID of the newly instantiated vertex array object upon its creation
// VAO - binds the VBO together with its attribute specification
let mut vao: u32 = 0; // Create
gl::GenVertexArrays(1, &mut vao); // Generate
gl::BindVertexArray(vao); // Bind
// VBO - buffer for the vertices/positions
let mut vbo: u32 = 0;
gl::GenBuffers(1, &mut vbo); // creates buffer, generates an id for the vertex buffer - stored on vram
gl::BindBuffer(gl::ARRAY_BUFFER, vbo); // Binding is sort of like creating layers in photoshop
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&vertices),
pointer_to_array(&vertices),
gl::STATIC_DRAW,
);
// Vaa = Vertex attrib array
gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, 0, 0 as *const c_void);
gl::EnableVertexAttribArray(0);
// CBO - vbo for the color buffer, RGBA
let mut cbo: u32 = 1;
gl::GenBuffers(1, &mut cbo);
gl::BindBuffer(gl::ARRAY_BUFFER, cbo);
gl::BufferData(
gl::ARRAY_BUFFER,
byte_size_of_array(&colors),
pointer_to_array(&colors),
gl::STATIC_DRAW,
);
// 2nd attribute buffer is for colors
gl::VertexAttribPointer(1, 4, gl::FLOAT, gl::FALSE, size_of::<f32>() * 4, 0 as *const c_void);
gl::EnableVertexAttribArray(1);
// Index buffer object = connect the dots, multiple usecases for same vertices.
let mut ibo: u32 = 0;
gl::GenBuffers(1, &mut ibo);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ibo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
byte_size_of_array(&indices),
pointer_to_array(&indices),
gl::STATIC_DRAW,
);
vao
}
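// Usage sketch (illustrative, not part of the original): uploading one
// triangle through init_vao and drawing it. Assumes a current OpenGL context
// and an activated shader program.
unsafe fn _draw_one_triangle_example() {
    let vertices: Vec<f32> = vec![-0.5, -0.5, 0.0, 0.5, -0.5, 0.0, 0.0, 0.5, 0.0];
    let indices: Vec<u32> = vec![0, 1, 2];
    let colors: Vec<f32> = vec![
        1.0, 0.0, 0.0, 1.0,
        0.0, 1.0, 0.0, 1.0,
        0.0, 0.0, 1.0, 1.0,
    ];
    let vao = init_vao(&vertices, &indices, &colors);
    gl::BindVertexArray(vao);
    gl::DrawElements(gl::TRIANGLES, 3, gl::UNSIGNED_INT, ptr::null());
}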
fn main() {
// Set up the necessary objects to deal with windows and event handling
let el = glutin::event_loop::EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("Gloom-rs")
.with_resizable(false)
.with_inner_size(glutin::dpi::LogicalSize::new(SCREEN_W, SCREEN_H));
let cb = glutin::ContextBuilder::new().with_vsync(true);
let windowed_context = cb.build_windowed(wb, &el).unwrap();
// Uncomment these if you want to use the mouse for controls, but want it to be confined to the screen and/or invisible.
// windowed_context.window().set_cursor_grab(true).expect("failed to grab cursor");
// windowed_context.window().set_cursor_visible(false);
// Set up a shared vector for keeping track of currently pressed keys
let arc_pressed_keys = Arc::new(Mutex::new(Vec::<VirtualKeyCode>::with_capacity(10)));
// Make a reference of this vector to send to the render thread
let pressed_keys = Arc::clone(&arc_pressed_keys);
// Set up shared tuple for tracking mouse movement between frames
let arc_mouse_delta = Arc::new(Mutex::new((0f32, 0f32)));
// Make a reference of this tuple to send to the render thread
let mouse_delta = Arc::clone(&arc_mouse_delta);
// Spawn a separate thread for rendering, so event handling doesn't block rendering
let render_thread = thread::spawn(move || {
// Acquire the OpenGL Context and load the function pointers. This has to be done inside of the rendering thread, because
// an active OpenGL context cannot safely traverse a thread boundary
let context = unsafe {
let c = windowed_context.make_current().unwrap();
gl::load_with(|symbol| c.get_proc_address(symbol) as *const _);
c
};
// Set up OpenGL
unsafe {
gl::Enable(gl::DEPTH_TEST);
gl::DepthFunc(gl::LESS);
gl::Enable(gl::CULL_FACE);
gl::Disable(gl::MULTISAMPLE);
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
gl::Enable(gl::DEBUG_OUTPUT_SYNCHRONOUS);
gl::DebugMessageCallback(Some(util::debug_callback), ptr::null());
// Print some diagnostics
println!(
"{}: {}",
util::get_gl_string(gl::VENDOR),
util::get_gl_string(gl::RENDERER)
);
println!("OpenGL\t: {}", util::get_gl_string(gl::VERSION));
println!(
"GLSL\t: {}",
util::get_gl_string(gl::SHADING_LANGUAGE_VERSION)
);
}
let c: Vec<f32> = vec![
-0.8, -0.6, 0.0,
-0.5, -0.6, 0.0,
-0.65, -0.2, 0.0,
0.5, -0.6, 0.0,
0.8, -0.6, 0.0,
0.65, -0.2, 0.0,
-0.2, 0.3, 0.0,
0.2, 0.6, 0.0,
0.0, 0.6, 0.0,
];
let i: Vec<u32> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8];
let col: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
1.0, 0.0, 0.0, 0.9,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
0.0, 0.0, 1.0, 0.7,
];
let overLappingCoordinates: Vec<f32> = vec![
-0.3, 0.0, 0.7,
0.3, 0.0, 0.7,
0.0, 0.5, 0.7,
-0.1, 0.3, 0.8,
0.3, 0.0, 0.8,
0.3, 0.6, 0.8,
-0.4, 0.6, 0.6,
-0.4, 0.0, 0.6,
0.2, 0.3, 0.6
];
let overLappingColors: Vec<f32> = vec![
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
1.0, 0.0, 0.0, 0.6,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 1.0, 0.0, 0.8,
0.0, 0.0, 1.0, 0.9,
0.0, 0.0, 1.0, 0.9,
0.0, 0.0, 1.0, 0.9,
];
let coordinates: Vec<f32> = vec![
-0.6, -0.6, 0.0,
0.6, -0.6, 0.0,
0.0, 0.6, 0.0
];
let triangle_indices: Vec<u32> = vec![0, 1, 2];
let colors: Vec<f32> = vec![
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0,
0.0, 0.0, 1.0, 1.0
];
// == // Set up your VAO here
unsafe {
let vao = init_vao(&overLappingCoordinates, &i, &overLappingColors);
}
// Setup uniform locations
let trans_loc: i32;
let time_loc: i32;
let opacity_loc: i32;
unsafe {
// Creates the shader, using multiple attach calls since they return self, and links them all together at the end
let shdr = shader::ShaderBuilder::new()
.attach_file(".\\shaders\\simple.vert")
.attach_file(".\\shaders\\simple.frag")
.link();
// Get uniform locations
trans_loc = shdr.get_uniform_location("transformation");
time_loc = shdr.get_uniform_location("time");
opacity_loc = shdr.get_uniform_location("opacity");
shdr.activate();
}
// Used to demonstrate keyboard handling -- feel free to remove
let mut _arbitrary_number = 0.0;
let first_frame_time = std::time::Instant::now();
let mut last_frame_time = first_frame_time;
// The main rendering loop
let persp_mat: glm::Mat4 = glm::perspective(
(SCREEN_H as f32) / (SCREEN_W as f32),
90.0,
1.0,
100.0
);
let persp_trans: glm::Mat4 = glm::translation(
&glm::vec3(0.0, 0.0, -2.0)
);
let mut proj: glm::Mat4 = persp_mat * persp_trans;
let model: glm::Mat4 = glm::identity();
let mut trans_matrix: glm::Mat4 = glm::identity();
let mut rot_x = 0.0;
let mut rot_y = 0.0;
let rot_step: f32 = 2.0;
let mut opacity: f32 = 0.0;
let mut v_time:f32 = 0.0;
let mut trans_x = 0.0;
let mut trans_y = 0.0;
let mut trans_z = -4.0;
let trans_step: f32 = 0.1;
let mut view: glm::Mat4 = glm::identity();
loop {
let now = std::time::Instant::now();
let elapsed = now.duration_since(first_frame_time).as_secs_f32();
let delta_time = now.duration_since(last_frame_time).as_secs_f32();
last_frame_time = now;
// Handle keyboard input
if let Ok(keys) = pressed_keys.lock() {
for key in keys.iter() {
// I'm using WASDEQ to handle inputs
// Also use arrowkeys for rotation
match key {
VirtualKeyCode::W => {
trans_z += trans_step;
},
VirtualKeyCode::A => {
trans_x += trans_step;
},
VirtualKeyCode::S => {
trans_z -= trans_step;
},
VirtualKeyCode::D => {
trans_x -= trans_step;
},
VirtualKeyCode::E => {
trans_y -= trans_step;
},
VirtualKeyCode::Q => {
trans_y += trans_step;
},
VirtualKeyCode::R => {
// Reset camera
view = glm::identity();
},
VirtualKeyCode::Up => {
rot_x -= rot_step;
},
VirtualKeyCode::Down => {
rot_x += rot_step;
},
VirtualKeyCode::Left => {
rot_y -= rot_step;
},
VirtualKeyCode::Right => {
rot_y += rot_step;
},
_ => {}
}
}
}
// Handle mouse movement. delta contains the x and y movement of the mouse since last frame in pixels
if let Ok(mut delta) = mouse_delta.lock() {
*delta = (0.0, 0.0);
}
opacity = (elapsed * 10.0).sin() / 2.0 + 0.6;
v_time = elapsed.sin();
let trans: glm::Mat4 = glm::translation(&glm::vec3(trans_x, trans_y, trans_z));
let rot: glm::Mat4 = glm::rotation(rot_x.to_radians(), &glm::vec3(1.0, 0.0, 0.0)) * glm::rotation(rot_y.to_radians(), &glm::vec3(0.0, 1.0, 0.0));
let scale: glm::Mat4 = glm::identity();
view = rot * trans * view;
let mut mod_view = view * model;
// Transmat here becomes MVP matrix after getting built up by model,
// view ( rotation, translation ), and projection
let trans_mat = proj * mod_view;
//Billboard task
/*
mod_view.m11 = 1.0;
mod_view.m12 = 0.0;
mod_view.m13 = 0.0;
mod_view.m21 = 0.0;
mod_view.m22 = 1.0;
mod_view.m23 = 0.0;
mod_view.m31 = 0.0;
mod_view.m32 = 0.0;
mod_view.m33 = 1.0;
*/
// Reset values
trans_x = 0.0;
trans_y = 0.0;
trans_z = 0.0;
rot_y = 0.0;
rot_x = 0.0;
unsafe {
gl::ClearColor(0.76862745, 0.71372549, 0.94901961, 1.0); // moon raker, full opacity
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
// Now we can use these uniforms in our shaders
gl::Uniform1f(opacity_loc, opacity);
gl::Uniform1f(time_loc, v_time);
gl::UniformMatrix4fv(trans_loc, 1, gl::FALSE, trans_mat.as_ptr());
// Issue the necessary commands to draw your scene here
// We have 9 indices for the 3 triangles, 3 per triangle
let num_of_indices = 3 * 3;
let num_of_square_indices = 6;
gl::DrawElements(
gl::TRIANGLES,
num_of_indices,
gl::UNSIGNED_INT,
ptr::null(),
);
}
context.swap_buffers().unwrap();
}
});
// Keep track of the health of the rendering thread
let render_thread_healthy = Arc::new(RwLock::new(true));
let render_thread_watchdog = Arc::clone(&render_thread_healthy);
thread::spawn(move || {
if !render_thread.join().is_ok() {
if let Ok(mut health) = render_thread_watchdog.write() {
println!("Render thread panicked!");
*health = false;
}
}
});
// Start the event loop -- This is where window events get handled
el.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Wait;
// Terminate program if render thread panics
if let Ok(health) = render_thread_healthy.read() {
if !*health {
*control_flow = ControlFlow::Exit;
}
}
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
// Keep track of currently pressed keys to send to the rendering thread
Event::WindowEvent {
event:
WindowEvent::KeyboardInput {
input:
KeyboardInput {
state: key_state,
virtual_keycode: Some(keycode),
..
},
..
},
..
} => {
if let Ok(mut keys) = arc_pressed_keys.lock() {
match key_state {
Released => {
if keys.contains(&keycode) {
let i = keys.iter().position(|&k| k == keycode).unwrap();
keys.remove(i);
cfg.rs
use anyhow::{bail, Context, Result};
use expanduser::expanduser;
use log::{debug, warn};
use std::collections::HashMap;
use std::default::Default;
use std::fmt::Display;
use std::fs::{read_dir, read_to_string};
use std::path::PathBuf;
use yaml_rust::{yaml::Hash, Yaml, YamlLoader};
use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS};
const CONTROLS_ENHANCED: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b'<').add(b'>').add(b'`');
use crate::util::*;
/// The main configuration
#[derive(Debug)]
pub struct Config {
/// Authentication settings to use if no host-specific authentication settings specified.
pub auth: Auth,
/// Default host to upload to.
default_host: Option<String>,
/// List of all configured hosts.
hosts: HashMap<String, Host>,
/// Expire the uploaded file after the given amount of time via `at`-scheduled remote job.
///
/// Select files newer than the given duration. Durations can be: seconds (sec, s), minutes
/// (min, m), days (d), weeks (w), months (M) or years (y).
///
/// Minimum time till expiration is a minute.
pub expire: Option<String>,
/// Length of prefix to use unless overwritten in host
pub prefix_length: u8,
/// Compute hash on remote side after upload to verify.
pub verify_via_hash: bool,
}
/// Authentication configuration
#[derive(Debug, Clone)]
pub struct Auth {
/// Try to use auth information for the given host from openssh settings
pub from_openssh: bool,
/// Perform interactive authentication (if private key is set password will be used for private
/// key instead).
pub interactive: bool,
/// Perform authentication via explicit private key
pub private_key_file: Option<String>,
/// Explicit password for private key (unsafe)
pub private_key_file_password: Option<String>,
/// Perform agent authentication
pub use_agent: bool,
}
/// A host entry
#[derive(Debug)]
pub struct Host {
/// Alias under which the host is known
pub alias: String,
/// Overwrite global authentication settings for this host.
pub auth: Auth,
/// Expire the uploaded file after the given amount of time via `at`-scheduled remote job.
///
/// Select files newer than the given duration. Durations can be: seconds (sec, s), minutes
/// (min, m), days (d), weeks (w), months (M) or years (y).
///
/// Minimum time till expiration is a minute.
///
/// Overrides the global setting.
pub expire: Option<String>,
/// In which folder do we store files on the host.
pub folder: PathBuf,
/// In case files on the remote site need to have a special group setting in order to be
/// readable by the webserver.
pub group: Option<String>,
/// Self-explanatory (if not set alias will be used)
pub hostname: Option<String>,
/// If the user REALLY REALLY wants to, a plaintext password can be provided (but it is not
/// recommended!).
pub password: Option<String>,
/// Length of prefix to use
pub prefix_length: u8,
/// url-prefix to apply to file link
pub url: String,
/// The user to sign in, otherwise ssh config will be used.
pub user: Option<String>,
}
fn default_config_directories() -> Vec<&'static str> {
vec!["~/.config/asfa", "/etc/asfa"]
}
pub fn load<T: AsRef<str> + Display>(path: &Option<T>) -> Result<Config> {
let possible_paths: Vec<&str> = match path {
Some(path) => vec![path.as_ref()],
None => default_config_directories(),
};
for path in possible_paths.iter() {
match Config::load(path)? {
None => continue,
Some(cfg) => return Ok(cfg),
}
}
bail!("Did not find valid configuration!");
}
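// Usage sketch (illustrative, not part of the original file): resolving the
// configuration either from an explicit directory or from the default
// search path (~/.config/asfa, then /etc/asfa).
#[allow(dead_code)]
fn _load_config_example() -> Result<Config> {
    // An explicit path short-circuits the search:
    let _explicit = load(&Some("~/.config/asfa"))?;
    // With no path, the default directories are tried in order:
    load::<&str>(&None)
}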
#[allow(dead_code)]
pub fn dummy_host_str() -> &'static str {
include_str!("dummy_host.yml")
}
#[allow(dead_code)]
pub fn dummy_host() -> Host {
Host::from_yaml(
"dummy_host".to_string(),
&YamlLoader::load_from_str(dummy_host_str()).unwrap()[0],
)
.unwrap()
}
impl Default for Config {
fn default() -> Self {
Config {
auth: Auth::default(),
default_host: None,
expire: None,
hosts: HashMap::new(),
prefix_length: 32,
verify_via_hash: true,
}
}
}
impl Config {
pub fn load<T: AsRef<str> + Display>(dir: T) -> Result<Option<Config>> {
let config_dir = match expanduser(dir.as_ref()) {
Ok(p) => p,
Err(e) => {
bail!("Error when expanding path to config file: {}", e);
}
};
let global = {
let mut global = config_dir.clone();
global.push("config.yaml");
global
};
let raw: String = match read_to_string(&global) {
Err(e) => {
debug!(
"Could not read configuration file '{}', error: {}",
global.to_str().unwrap_or("invalid"),
e
);
return Ok(None);
}
Ok(raw) => raw,
};
let mut config = Self::from_yaml(&raw)?;
let hosts_dir = {
let mut hosts_dir = config_dir;
hosts_dir.push("hosts");
hosts_dir
};
if hosts_dir.is_dir() {
for entry in read_dir(&hosts_dir)? {
let possible_host = entry?.path();
match possible_host.extension() {
None => {
continue;
}
Some(ext) => {
if ext != "yaml" {
continue;
}
}
};
let alias = match possible_host.file_stem() {
None => {
warn!(
"Could not extract file stem for: {}",
possible_host.display()
);
continue;
}
Some(alias) => alias
.to_str()
.context("Could not convert host file name to String.")?
.to_string(),
};
if config.hosts.contains_key(&alias) {
bail!("Host {} configured in config.yaml and as host-file.", alias);
};
let host_yaml = YamlLoader::load_from_str(&read_to_string(&possible_host)?)?;
let error = format!("Invalid host-file for host {}", &alias);
let host =
Host::from_yaml_with_config(alias, &host_yaml[0], &config).context(error)?;
config.hosts.insert(host.alias.clone(), host);
}
}
Ok(Some(config))
}
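// Expected on-disk layout for the loader above (illustrative):
//
//   ~/.config/asfa/config.yaml          global settings, may define hosts inline
//   ~/.config/asfa/hosts/site-a.yaml    extra host, alias taken from the file stem
//   ~/.config/asfa/hosts/site-b.yaml    another host file
//
// Defining the same alias both inline and as a host-file is rejected above.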
pub fn from_yaml(input: &str) -> Result<Config> {
let documents = match YamlLoader::load_from_str(input) {
Ok(data) => data,
Err(e) => {
bail!("Error while loading config file: {}", e);
}
};
let mut config = Config::default();
let config_yaml = match &documents[0] {
Yaml::Hash(h) => h,
_ => {
bail!("Root object in configuration file is no dictionary!");
}
};
config.prefix_length = {
let length = get_int_from(config_yaml, "prefix_length")?
.cloned()
.unwrap_or(config.prefix_length as i64);
check_prefix_length(length)?;
length as u8
};
config.auth = if let Some(Yaml::Hash(auth)) = config_yaml.get(&yaml_string("auth")) {
match Auth::from_yaml(&auth, None) {
Ok(auth) => auth,
Err(e) => {
bail!("Could not read global authentication settings: {}", e);
}
}
} else {
config.auth
};
config.default_host =
std::env::var("ASFA_HOST")
.ok()
.or(get_string_from(config_yaml, "default_host")?.cloned());
config.expire = get_string_from(config_yaml, "expire")?.cloned();
config.verify_via_hash = get_bool_from(config_yaml, "verify_via_hash")?
.cloned()
.unwrap_or(config.verify_via_hash);
match config_yaml.get(&yaml_string("hosts")) {
Some(Yaml::Hash(dict)) => {
for entry in dict.clone().entries() {
let alias = match entry.key() {
Yaml::String(alias) => alias.to_string(),
invalid => {
warn!("Found invalid alias for host entry: {:?}", invalid);
continue;
}
};
let host_yaml = entry.get();
let host = Host::from_yaml_with_config(alias.clone(), host_yaml, &config)?;
config.hosts.insert(alias, host);
}
}
// Some(Yaml::Array(a)) => a,
Some(_) => {
bail!("'hosts' entry in config file needs to be dictionary mapping host-alias to configuration!");
}
None => {
debug!("No 'hosts'-entry in config file.");
}
};
Ok(config)
}
pub fn get_host<T: AsRef<str>>(&self, alias: Option<T>) -> Result<&Host> {
match alias
.as_ref()
.map(|a| a.as_ref())
.or_else(|| self.default_host.as_deref())
{
None => match self.hosts.len() {
0 => {
bail!("No hosts configured, define some!");
}
1 => Ok(self.hosts.values().next().unwrap()),
_ => {
bail!("More than one host entry defined but neither `default_host` set in config or --config given via command line.");
}
},
Some(alias) => Ok(self
.hosts
.get(alias)
.with_context(|| format!("Did not find alias: {}", alias))?),
}
}
}
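// Illustrative sketch (not part of the original): how host resolution
// behaves. With a single configured host no alias is needed; with several,
// either ASFA_HOST/default_host or an explicit alias must pick one. The
// alias "my-remote-site" is the one used in the example config below.
#[allow(dead_code)]
fn _select_host_example(config: &Config) -> Result<()> {
    let by_alias = config.get_host(Some("my-remote-site"))?;
    println!("uploading to {}", by_alias.alias);
    let by_default = config.get_host::<&str>(None)?;
    println!("default host: {}", by_default.alias);
    Ok(())
}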
impl Host {
fn from_yaml(alias: String, input: &Yaml) -> Result<Host> {
Self::from_yaml_with_config(alias, input, &Config::default())
}
fn from_yaml_with_config(alias: String, input: &Yaml, config: &Config) -> Result<Host> {
log::trace!("Reading host: {}", alias);
if let Yaml::Hash(dict) = input {
let url = get_required(dict, "url", get_string_from)?.clone();
let hostname = get_string_from(dict, "hostname")?.cloned();
let user = get_string_from(dict, "user")?.cloned();
let expire = get_string_from(dict, "expire")?
.cloned()
.or_else(|| config.expire.clone());
let folder = expanduser(get_required(dict, "folder", get_string_from)?)?;
let group = get_string_from(dict, "group")?.cloned();
let auth = match get_dict_from(dict, "auth")? {
Some(auth) => Auth::from_yaml(auth, Some(&config.auth))?,
None => config.auth.clone(),
};
let prefix_length = match get_int_from(dict, "prefix_length")? {
Some(prefix) => {
check_prefix_length(*prefix)?;
*prefix as u8
}
None => config.prefix_length,
};
let password = get_string_from(dict, "password")?.cloned();
Ok(Host {
alias,
auth,
expire,
folder,
group,
hostname,
password,
prefix_length,
url,
user,
})
} else {
bail!("Invalid yaml data for Host-alias '{}'", alias);
}
}
pub fn get_url(&self, file: &str) -> Result<String> {
Ok(format!(
"{}/{}",
&self.url,
utf8_percent_encode(file, CONTROLS_ENHANCED)
))
}
}
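// Example of the URL escaping above (sketch): with `url` set to
// "https://example.com/files" (an assumed value), a file named
// "my upload.txt" becomes "https://example.com/files/my%20upload.txt";
// spaces, quotes, angle brackets, backticks and control characters are
// percent-encoded, everything else passes through unchanged.
#[allow(dead_code)]
fn _url_example(host: &Host) -> Result<()> {
    let link = host.get_url("my upload.txt")?;
    println!("{}", link);
    Ok(())
}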
impl Auth {
fn from_yaml(dict: &Hash, default: Option<&Auth>) -> Result<Auth, InvalidYamlTypeError> {
let auth_default = Self::default();
let default = default.unwrap_or(&auth_default);
let use_agent = get_bool_from(dict, "use_agent")?
.cloned()
.unwrap_or(default.use_agent);
let interactive = get_bool_from(dict, "interactive")?
.cloned()
.unwrap_or(default.interactive);
let private_key_file = get_string_from(dict, "private_key_file")?
.cloned()
.or_else(|| default.private_key_file.clone());
let private_key_file_password = get_string_from(dict, "private_key_file_password")?
.cloned()
.or_else(|| default.private_key_file_password.clone());
let from_openssh = get_bool_from(dict, "from_openssh")?
.cloned()
.unwrap_or(default.from_openssh);
Ok(Auth {
from_openssh,
interactive,
private_key_file,
private_key_file_password,
use_agent,
})
}
}
impl Default for Auth {
fn default() -> Self {
Auth {
from_openssh: true,
interactive: true,
private_key_file: None,
private_key_file_password: None,
use_agent: true,
}
}
}
fn check_prefix_length(length: i64) -> Result<()> {
if !(8..=128).contains(&length) {
bail! {"Prefix needs to be between 8 and 128 characters."};
}
Ok(())
}
#[cfg(test)]
mod tests {
use crate::util;
#[test]
fn load_example_config() {
util::test::init().unwrap();
let cfg = crate::cfg::Config::load("example-config/asfa")
.unwrap()
.unwrap();
log::debug!("Loaded: {:?}", cfg);
assert_eq!(&cfg.hosts.len(), &2);
assert_eq!(&cfg.default_host.clone().unwrap(), &"my-remote-site");
assert_eq!(
&cfg.get_host(Some("my-remote-site-2")).unwrap().hostname,
&Some("my-hostname-2.eu".to_string())
);
}
}
cfg.rs
use anyhow::{bail, Context, Result};
use expanduser::expanduser;
use log::{debug, warn};
use std::collections::HashMap;
use std::default::Default;
use std::fmt::Display;
use std::fs::{read_dir, read_to_string};
use std::path::PathBuf;
use yaml_rust::{yaml::Hash, Yaml, YamlLoader};
use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS};
const CONTROLS_ENHANCED: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b'<').add(b'>').add(b'`');
use crate::util::*;
/// The main configuration
#[derive(Debug)]
pub struct Config {
/// Authentication settings to use if no host-specific authentication settings specified.
pub auth: Auth,
/// Default host to upload to.
default_host: Option<String>,
/// List of all configured hosts.
hosts: HashMap<String, Host>,
/// Expire the uploaded file after the given amount of time via `at`-scheduled remote job.
///
/// Select files newer than the given duration. Durations can be: seconds (sec, s), minutes
/// (min, m), days (d), weeks (w), months (M) or years (y).
///
/// Minimum time till expiration is a minute.
pub expire: Option<String>,
/// Length of prefix to use unless overwritten in host
pub prefix_length: u8,
/// Compute hash on remote side after upload to verify.
pub verify_via_hash: bool,
}
/// Authentication configuration
#[derive(Debug, Clone)]
pub struct Auth {
/// Try to use auth information for the given host from openssh settings
pub from_openssh: bool,
/// Perform interactive authentication (if private key is set password will be used for private
/// key instead).
pub interactive: bool,
/// Perform authentication via explicit private key
pub private_key_file: Option<String>,
/// Explicit password for private key (unsafe)
pub private_key_file_password: Option<String>,
/// Perform agent authentication
pub use_agent: bool,
}
/// A host entry
#[derive(Debug)]
pub struct Host {
/// Alias under which the host is known
pub alias: String,
/// Overwrite global authentication settings for this host.
pub auth: Auth,
/// Expire the uploaded file after the given amount of time via `at`-scheduled remote job.
///
/// Select files newer than the given duration. Durations can be: seconds (sec, s), minutes
/// (min, m), days (d), weeks (w), months (M) or years (y).
///
/// Minimum time till expiration is a minute.
///
/// Overrides the global setting.
pub expire: Option<String>,
/// In which folder do we store files on the host.
pub folder: PathBuf,
/// In case files on the remote site need to have a special group setting in order to be
/// readable by the webserver.
pub group: Option<String>,
/// Self-explanatory (if not set alias will be used)
pub hostname: Option<String>,
/// If the user REALLY REALLY wants to, a plaintext password can be provided (but it is not
/// recommended!).
pub password: Option<String>,
/// Length of prefix to use
pub prefix_length: u8,
/// url-prefix to apply to file link
pub url: String,
/// The user to sign in, otherwise ssh config will be used.
pub user: Option<String>,
}
fn default_config_directories() -> Vec<&'static str> {
vec!["~/.config/asfa", "/etc/asfa"]
}
pub fn load<T: AsRef<str> + Display>(path: &Option<T>) -> Result<Config> {
let possible_paths: Vec<&str> = match path {
Some(path) => vec![path.as_ref()],
None => default_config_directories(),
};
for path in possible_paths.iter() {
match Config::load(path)? {
None => continue,
Some(cfg) => return Ok(cfg),
}
}
bail!("Did not find valid configuration!");
}
#[allow(dead_code)]
pub fn dummy_host_str() -> &'static str {
include_str!("dummy_host.yml")
}
#[allow(dead_code)]
pub fn dummy_host() -> Host {
Host::from_yaml(
"dummy_host".to_string(),
&YamlLoader::load_from_str(dummy_host_str()).unwrap()[0],
)
.unwrap()
}
impl Default for Config {
fn default() -> Self {
Config {
auth: Auth::default(),
default_host: None,
expire: None,
hosts: HashMap::new(),
prefix_length: 32,
verify_via_hash: true,
}
}
}
impl Config {
pub fn load<T: AsRef<str> + Display>(dir: T) -> Result<Option<Config>> {
let config_dir = match expanduser(dir.as_ref()) {
Ok(p) => p,
Err(e) => {
bail!("Error when expanding path to config file: {}", e);
}
};
let global = {
let mut global = config_dir.clone();
global.push("config.yaml");
global
};
let raw: String = match read_to_string(&global) {
Err(e) => {
debug!(
"Could not read configuration file '{}', error: {}",
global.to_str().unwrap_or("invalid"),
e
);
return Ok(None);
}
Ok(raw) => raw,
};
let mut config = Self::from_yaml(&raw)?;
let hosts_dir = {
let mut hosts_dir = config_dir;
hosts_dir.push("hosts");
hosts_dir
};
if hosts_dir.is_dir() {
for entry in read_dir(&hosts_dir)? {
let possible_host = entry?.path();
match possible_host.extension() {
None => {
continue;
}
Some(ext) => {
if ext != "yaml" {
continue;
}
}
};
let alias = match possible_host.file_stem() {
None => {
warn!(
"Could not extract file stem for: {}",
possible_host.display()
);
continue;
}
Some(alias) => alias
.to_str()
.context("Could not convert host file name to String.")?
.to_string(),
};
if config.hosts.contains_key(&alias) {
bail!("Host {} configured in config.yaml and as host-file.", alias);
};
let host_yaml = YamlLoader::load_from_str(&read_to_string(&possible_host)?)?;
let error = format!("Invalid host-file for host {}", &alias);
let host =
Host::from_yaml_with_config(alias, &host_yaml[0], &config).context(error)?;
config.hosts.insert(host.alias.clone(), host);
}
}
Ok(Some(config))
}
pub fn from_yaml(input: &str) -> Result<Config> {
let documents = match YamlLoader::load_from_str(input) {
Ok(data) => data,
Err(e) => {
bail!("Error while loading config file: {}", e);
}
};
let mut config = Config::default();
let config_yaml = match &documents[0] {
Yaml::Hash(h) => h,
_ => {
bail!("Root object in configuration file is no dictionary!");
}
};
config.prefix_length = {
let length = get_int_from(config_yaml, "prefix_length")?
.cloned()
.unwrap_or(config.prefix_length as i64);
check_prefix_length(length)?;
length as u8
};
config.auth = if let Some(Yaml::Hash(auth)) = config_yaml.get(&yaml_string("auth")) {
match Auth::from_yaml(&auth, None) {
Ok(auth) => auth,
Err(e) => {
bail!("Could not read global authentication settings: {}", e);
}
}
} else {
config.auth
};
config.default_host =
std::env::var("ASFA_HOST")
.ok()
.or(get_string_from(config_yaml, "default_host")?.cloned());
config.expire = get_string_from(config_yaml, "expire")?.cloned();
config.verify_via_hash = get_bool_from(config_yaml, "verify_via_hash")?
.cloned()
.unwrap_or(config.verify_via_hash);
match config_yaml.get(&yaml_string("hosts")) {
Some(Yaml::Hash(dict)) => {
for entry in dict.clone().entries() {
let alias = match entry.key() {
Yaml::String(alias) => alias.to_string(),
invalid => {
warn!("Found invalid alias for host entry: {:?}", invalid);
continue;
}
};
let host_yaml = entry.get();
let host = Host::from_yaml_with_config(alias.clone(), host_yaml, &config)?;
config.hosts.insert(alias, host);
}
}
// Some(Yaml::Array(a)) => a,
Some(_) => {
bail!("'hosts' entry in config file needs to be dictionary mapping host-alias to configuration!");
}
None => {
debug!("No 'hosts'-entry in config file.");
}
};
Ok(config)
}
pub fn get_host<T: AsRef<str>>(&self, alias: Option<T>) -> Result<&Host> {
match alias
.as_ref()
.map(|a| a.as_ref())
.or_else(|| self.default_host.as_deref())
{
None => match self.hosts.len() {
0 => {
bail!("No hosts configured, define some!");
}
1 => Ok(self.hosts.values().next().unwrap()),
_ => {
bail!("More than one host entry defined but neither `default_host` set in config or --config given via command line.");
}
},
Some(alias) => Ok(self
.hosts
.get(alias)
.with_context(|| format!("Did not find alias: {}", alias))?),
}
}
}
impl Host {
fn from_yaml(alias: String, input: &Yaml) -> Result<Host> {
Self::from_yaml_with_config(alias, input, &Config::default())
}
fn from_yaml_with_config(alias: String, input: &Yaml, config: &Config) -> Result<Host> {
log::trace!("Reading host: {}", alias);
if let Yaml::Hash(dict) = input {
let url = get_required(dict, "url", get_string_from)?.clone();
let hostname = get_string_from(dict, "hostname")?.cloned();
let user = get_string_from(dict, "user")?.cloned();
let expire = get_string_from(dict, "expire")?
.cloned()
.or_else(|| config.expire.clone());
let folder = expanduser(get_required(dict, "folder", get_string_from)?)?;
let group = get_string_from(dict, "group")?.cloned();
let auth = match get_dict_from(dict, "auth")? {
Some(auth) => Auth::from_yaml(auth, Some(&config.auth))?,
None => config.auth.clone(),
};
let prefix_length = match get_int_from(dict, "prefix_length")? {
Some(prefix) => {
check_prefix_length(*prefix)?;
*prefix as u8
}
None => config.prefix_length,
};
let password = get_string_from(dict, "password")?.cloned();
Ok(Host {
alias,
auth,
expire,
folder,
group,
hostname,
password,
prefix_length,
url,
user,
})
} else {
bail!("Invalid yaml data for Host-alias '{}'", alias);
}
}
pub fn get_url(&self, file: &str) -> Result<String> {
Ok(format!(
"{}/{}",
&self.url,
utf8_percent_encode(file, CONTROLS_ENHANCED)
))
}
}
impl Auth {
fn from_yaml(dict: &Hash, default: Option<&Auth>) -> Result<Auth, InvalidYamlTypeError> {
let auth_default = Self::default();
let default = default.unwrap_or(&auth_default);
let use_agent = get_bool_from(dict, "use_agent")?
.cloned()
.unwrap_or(default.use_agent);
let interactive = get_bool_from(dict, "interactive")?
.cloned()
.unwrap_or(default.interactive);
let private_key_file = get_string_from(dict, "private_key_file")?
.cloned()
.or_else(|| default.private_key_file.clone());
let private_key_file_password = get_string_from(dict, "private_key_file_password")?
.cloned()
.or_else(|| default.private_key_file_password.clone());
let from_openssh = get_bool_from(dict, "from_openssh")?
.cloned()
.unwrap_or(default.from_openssh);
Ok(Auth {
from_openssh,
interactive,
private_key_file,
private_key_file_password,
use_agent,
})
}
}
impl Default for Auth {
fn default() -> Self {
Auth {
from_openssh: true,
interactive: true,
private_key_file: None,
private_key_file_password: None,
use_agent: true,
}
}
}
fn check_prefix_length(length: i64) -> Result<()> {
if !(8..=128).contains(&length) {
bail! {"Prefix needs to be between 8 and 128 characters."};
}
Ok(())
}
#[cfg(test)]
mod tests {
use crate::util;
#[test]
fn load_example_config() {
util::test::init().unwrap();
let cfg = crate::cfg::Config::load("example-config/asfa")
.unwrap()
.unwrap();
log::debug!("Loaded: {:?}", cfg);
assert_eq!(&cfg.hosts.len(), &2);
assert_eq!(&cfg.default_host.clone().unwrap(), &"my-remote-site");
assert_eq!(
&cfg.get_host(Some("my-remote-site-2")).unwrap().hostname,
&Some("my-hostname-2.eu".to_string())
);
}
}
| {
Config {
auth: Auth::default(),
default_host: None,
expire: None,
hosts: HashMap::new(),
prefix_length: 32,
verify_via_hash: true,
}
} | identifier_body |
cfg.rs
use anyhow::{bail, Context, Result};
use expanduser::expanduser;
use log::{debug, warn};
use std::collections::HashMap;
use std::default::Default;
use std::fmt::Display;
use std::fs::{read_dir, read_to_string};
use std::path::PathBuf;
use yaml_rust::{yaml::Hash, Yaml, YamlLoader};
use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS};
const CONTROLS_ENHANCED: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b'<').add(b'>').add(b'`');
use crate::util::*;
/// The main configuration
#[derive(Debug)]
pub struct Config {
/// Authentication settings to use if no host-specific authentication settings specified.
pub auth: Auth,
/// Default host to upload to.
default_host: Option<String>,
/// List of all configured hosts.
hosts: HashMap<String, Host>,
/// Expire the uploaded file after the given amount of time via `at`-scheduled remote job.
///
/// Select files newer than the given duration. Durations can be: seconds (sec, s), minutes
/// (min, m), days (d), weeks (w), months (M) or years (y).
///
/// Minimum time till expiration is a minute.
pub expire: Option<String>,
/// Length of prefix to use unless overwritten in host
pub prefix_length: u8,
/// Compute hash on remote side after upload to verify.
pub verify_via_hash: bool,
}
/// Authentication configuration
#[derive(Debug, Clone)]
pub struct Auth {
/// Try to use auth information for the given host from openssh settings
pub from_openssh: bool,
/// Perform interactive authentication (if private key is set password will be used for private
/// key instead).
pub interactive: bool,
/// Perform authentication via explicit private key
pub private_key_file: Option<String>,
/// Explicit password for private key (unsafe)
pub private_key_file_password: Option<String>,
/// Perform agent authentication
pub use_agent: bool,
}
/// A host entry
#[derive(Debug)]
pub struct Host {
/// Alias under which the host is known
pub alias: String,
/// Overwrite global authentication settings for this host.
pub auth: Auth,
/// Expire the uploaded file after the given amount of time via `at`-scheduled remote job.
///
/// Select files newer than the given duration. Durations can be: seconds (sec, s), minutes
/// (min, m), days (d), weeks (w), months (M) or years (y).
///
/// Minimum time till expiration is a minute.
///
/// Overrides the global setting.
pub expire: Option<String>,
/// In which folder do we store files on the host.
pub folder: PathBuf,
/// In case files on the remote site need to have a special group setting in order to be
/// readable by the webserver.
pub group: Option<String>,
/// Self-explanatory (if not set alias will be used)
pub hostname: Option<String>,
/// If the user REALLY REALLY wants to, a plaintext password can be provided (but it is not
/// recommended!).
pub password: Option<String>,
/// Length of prefix to use
pub prefix_length: u8,
/// url-prefix to apply to file link
pub url: String,
/// The user to sign in, otherwise ssh config will be used.
pub user: Option<String>,
}
fn default_config_directories() -> Vec<&'static str> {
vec!["~/.config/asfa", "/etc/asfa"]
}
pub fn load<T: AsRef<str> + Display>(path: &Option<T>) -> Result<Config> {
let possible_paths: Vec<&str> = match path {
Some(path) => vec![path.as_ref()],
None => default_config_directories(),
};
for path in possible_paths.iter() {
match Config::load(path)? {
None => continue,
Some(cfg) => return Ok(cfg),
}
}
bail!("Did not find valid configuration!");
}
#[allow(dead_code)]
pub fn dummy_host_str() -> &'static str {
include_str!("dummy_host.yml")
}
#[allow(dead_code)]
pub fn dummy_host() -> Host {
Host::from_yaml(
"dummy_host".to_string(),
&YamlLoader::load_from_str(dummy_host_str()).unwrap()[0],
)
.unwrap()
}
impl Default for Config {
fn default() -> Self {
Config {
auth: Auth::default(),
default_host: None,
expire: None,
hosts: HashMap::new(),
prefix_length: 32,
verify_via_hash: true,
}
}
}
impl Config {
pub fn load<T: AsRef<str> + Display>(dir: T) -> Result<Option<Config>> {
let config_dir = match expanduser(dir.as_ref()) {
Ok(p) => p,
Err(e) => {
bail!("Error when expanding path to config file: {}", e);
}
};
let global = {
let mut global = config_dir.clone();
global.push("config.yaml");
global
};
let raw: String = match read_to_string(&global) {
Err(e) => {
debug!(
"Could not read configuration file '{}', error: {}",
global.to_str().unwrap_or("invalid"),
e
);
return Ok(None);
}
Ok(raw) => raw,
};
let mut config = Self::from_yaml(&raw)?;
let hosts_dir = {
let mut hosts_dir = config_dir;
hosts_dir.push("hosts");
hosts_dir
};
if hosts_dir.is_dir() {
for entry in read_dir(&hosts_dir)? {
let possible_host = entry?.path();
match possible_host.extension() {
None => {
continue;
}
Some(ext) => {
if ext != "yaml" {
continue;
}
}
};
let alias = match possible_host.file_stem() {
None => {
warn!(
"Could not extract file stem for: {}",
possible_host.display()
);
continue;
}
Some(alias) => alias
.to_str()
.context("Could not convert host file name to String.")?
.to_string(),
};
if config.hosts.contains_key(&alias) {
bail!("Host {} configured in config.yaml and as host-file.", alias);
};
let host_yaml = YamlLoader::load_from_str(&read_to_string(&possible_host)?)?;
let error = format!("Invalid host-file for host {}", &alias);
let host =
Host::from_yaml_with_config(alias, &host_yaml[0], &config).context(error)?;
config.hosts.insert(host.alias.clone(), host);
}
}
Ok(Some(config))
}
pub fn from_yaml(input: &str) -> Result<Config> {
let documents = match YamlLoader::load_from_str(input) {
Ok(data) => data,
Err(e) => {
bail!("Error while loading config file: {}", e);
}
};
let mut config = Config::default();
let config_yaml = match &documents[0] {
Yaml::Hash(h) => h,
_ => {
bail!("Root object in configuration file is no dictionary!");
}
};
config.prefix_length = {
let length = get_int_from(config_yaml, "prefix_length")?
.cloned()
.unwrap_or(config.prefix_length as i64);
check_prefix_length(length)?;
length as u8
};
config.auth = if let Some(Yaml::Hash(auth)) = config_yaml.get(&yaml_string("auth")) {
match Auth::from_yaml(&auth, None) {
Ok(auth) => auth,
Err(e) => {
bail!("Could not read global authentication settings: {}", e);
}
}
} else {
config.auth
};
config.default_host =
std::env::var("ASFA_HOST")
.ok()
.or(get_string_from(config_yaml, "default_host")?.cloned());
config.expire = get_string_from(config_yaml, "expire")?.cloned();
config.verify_via_hash = get_bool_from(config_yaml, "verify_via_hash")?
.cloned()
.unwrap_or(config.verify_via_hash);
match config_yaml.get(&yaml_string("hosts")) {
Some(Yaml::Hash(dict)) => {
for entry in dict.clone().entries() {
let alias = match entry.key() {
Yaml::String(alias) => alias.to_string(),
invalid => {
warn!("Found invalid alias for host entry: {:?}", invalid);
continue;
}
};
let host_yaml = entry.get();
let host = Host::from_yaml_with_config(alias.clone(), host_yaml, &config)?;
config.hosts.insert(alias, host);
}
}
// Some(Yaml::Array(a)) => a,
Some(_) => {
bail!("'hosts' entry in config file needs to be dictionary mapping host-alias to configuration!");
}
None => {
debug!("No 'hosts'-entry in config file.");
}
};
Ok(config)
}
pub fn get_host<T: AsRef<str>>(&self, alias: Option<T>) -> Result<&Host> {
match alias
.as_ref()
.map(|a| a.as_ref())
.or_else(|| self.default_host.as_deref())
{
None => match self.hosts.len() {
0 => {
bail!("No hosts configured, define some!");
}
1 => Ok(self.hosts.values().next().unwrap()),
_ => {
bail!("More than one host entry defined but neither `default_host` set in config or --config given via command line.");
}
},
Some(alias) => Ok(self
.hosts
.get(alias)
.with_context(|| format!("Did not find alias: {}", alias))?),
}
}
}
impl Host {
fn from_yaml(alias: String, input: &Yaml) -> Result<Host> {
Self::from_yaml_with_config(alias, input, &Config::default())
}
fn from_yaml_with_config(alias: String, input: &Yaml, config: &Config) -> Result<Host> {
log::trace!("Reading host: {}", alias);
if let Yaml::Hash(dict) = input {
let url = get_required(dict, "url", get_string_from)?.clone();
let hostname = get_string_from(dict, "hostname")?.cloned();
let user = get_string_from(dict, "user")?.cloned();
let expire = get_string_from(dict, "expire")?
.cloned()
.or_else(|| config.expire.clone());
let folder = expanduser(get_required(dict, "folder", get_string_from)?)?;
let group = get_string_from(dict, "group")?.cloned();
let auth = match get_dict_from(dict, "auth")? {
Some(auth) => Auth::from_yaml(auth, Some(&config.auth))?,
None => config.auth.clone(),
};
let prefix_length = match get_int_from(dict, "prefix_length")? {
Some(prefix) => {
check_prefix_length(*prefix)?;
*prefix as u8
}
None => config.prefix_length,
};
let password = get_string_from(dict, "password")?.cloned();
Ok(Host {
alias,
auth,
expire,
folder,
group,
hostname,
password,
prefix_length,
url,
user,
})
} else {
bail!("Invalid yaml data for Host-alias '{}'", alias);
}
}
pub fn get_url(&self, file: &str) -> Result<String> {
Ok(format!(
"{}/{}",
&self.url,
utf8_percent_encode(file, CONTROLS_ENHANCED)
))
}
}
impl Auth {
fn from_yaml(dict: &Hash, default: Option<&Auth>) -> Result<Auth, InvalidYamlTypeError> {
let auth_default = Self::default();
let default = default.unwrap_or(&auth_default);
let use_agent = get_bool_from(dict, "use_agent")?
.cloned()
.unwrap_or(default.use_agent);
let interactive = get_bool_from(dict, "interactive")?
.cloned()
.unwrap_or(default.interactive);
let private_key_file = get_string_from(dict, "private_key_file")?
.cloned()
.or_else(|| default.private_key_file.clone());
let private_key_file_password = get_string_from(dict, "private_key_file_password")?
.cloned()
.or_else(|| default.private_key_file_password.clone());
let from_openssh = get_bool_from(dict, "from_openssh")?
.cloned()
.unwrap_or(default.from_openssh);
Ok(Auth {
from_openssh,
interactive,
private_key_file,
private_key_file_password,
use_agent,
})
}
}
impl Default for Auth {
fn default() -> Self {
Auth {
from_openssh: true,
interactive: true,
private_key_file: None,
private_key_file_password: None,
use_agent: true,
}
}
}
fn check_prefix_length(length: i64) -> Result<()> {
if !(8..=128).contains(&length) {
bail! {"Prefix needs to be between 8 and 128 characters."};
}
Ok(())
}
#[cfg(test)]
mod tests {
use crate::util;
#[test]
fn load_example_config() {
util::test::init().unwrap();
let cfg = crate::cfg::Config::load("example-config/asfa")
.unwrap()
.unwrap();
log::debug!("Loaded: {:?}", cfg);
assert_eq!(&cfg.hosts.len(), &2);
assert_eq!(&cfg.default_host.clone().unwrap(), &"my-remote-site");
assert_eq!(
&cfg.get_host(Some("my-remote-site-2")).unwrap().hostname,
&Some("my-hostname-2.eu".to_string())
);
}
}
error.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use core::{convert::TryInto, fmt, ptr::NonNull, task::Poll};
use errno::{errno, Errno};
use libc::c_char;
use s2n_tls_sys::*;
use std::{convert::TryFrom, ffi::CStr};
#[non_exhaustive]
#[derive(Debug, PartialEq)]
pub enum ErrorType {
UnknownErrorType,
NoError,
IOError,
ConnectionClosed,
Blocked,
Alert,
ProtocolError,
InternalError,
UsageError,
Application,
}
#[non_exhaustive]
#[derive(Debug, PartialEq)]
pub enum ErrorSource {
Library,
Bindings,
Application,
}
impl From<libc::c_int> for ErrorType {
fn from(input: libc::c_int) -> Self {
match input as s2n_error_type::Type {
s2n_error_type::OK => ErrorType::NoError,
s2n_error_type::IO => ErrorType::IOError,
s2n_error_type::CLOSED => ErrorType::ConnectionClosed,
s2n_error_type::BLOCKED => ErrorType::Blocked,
s2n_error_type::ALERT => ErrorType::Alert,
s2n_error_type::PROTO => ErrorType::ProtocolError,
s2n_error_type::INTERNAL => ErrorType::InternalError,
s2n_error_type::USAGE => ErrorType::UsageError,
_ => ErrorType::UnknownErrorType,
}
}
}
enum Context {
InvalidInput,
MissingWaker,
Code(s2n_status_code::Type, Errno),
Application(Box<dyn std::error::Error + Send + Sync + 'static>),
}
pub struct Error(Context);
pub trait Fallible {
type Output;
fn into_result(self) -> Result<Self::Output, Error>;
}
impl Fallible for s2n_status_code::Type {
type Output = s2n_status_code::Type;
fn into_result(self) -> Result<Self::Output, Error> {
if self >= s2n_status_code::SUCCESS {
Ok(self)
} else {
Err(Error::capture())
}
}
}
impl Fallible for isize {
type Output = usize;
fn into_result(self) -> Result<Self::Output, Error> {
// Negative values can't be converted to a real size
// and instead indicate an error.
self.try_into().map_err(|_| Error::capture())
}
}
impl Fallible for u64 {
type Output = Self;
/// Converts a u64 to a Result by checking for u64::MAX.
///
/// If a method that returns an unsigned int is fallible,
/// then the -1 error result wraps around to u64::MAX.
///
/// For a u64 to be Fallible, a result of u64::MAX must not be
/// possible without an error. For example, [`s2n_connection_get_delay`]
/// can't return u64::MAX as a valid result because
/// s2n-tls blinding delays are limited to 30s, or a return value of 3^10 ns,
/// which is significantly less than u64::MAX. [`s2n_connection_get_delay`]
/// would therefore only return u64::MAX for a -1 error result.
fn into_result(self) -> Result<Self::Output, Error> {
if self != Self::MAX {
Ok(self)
} else {
Err(Error::capture())
}
}
}
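// Illustrative check (not part of the original): u64::MAX is the only value
// treated as an error sentinel, mirroring the -1 wraparound described above.
#[cfg(test)]
mod fallible_u64_example {
    use super::*;

    #[test]
    fn max_is_error() {
        assert!(42u64.into_result().is_ok());
        assert!(u64::MAX.into_result().is_err());
    }
}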
impl<T> Fallible for *mut T {
type Output = NonNull<T>;
fn into_result(self) -> Result<Self::Output, Error> {
if let Some(value) = NonNull::new(self) {
Ok(value)
} else {
Err(Error::capture())
}
}
}
impl<T> Fallible for *const T {
type Output = *const T;
fn into_result(self) -> Result<Self::Output, Error> {
if !self.is_null() {
Ok(self)
} else {
Err(Error::capture())
}
}
}
pub trait Pollable {
type Output;
fn into_poll(self) -> Poll<Result<Self::Output, Error>>;
}
impl<T: Fallible> Pollable for T {
type Output = T::Output;
fn into_poll(self) -> Poll<Result<Self::Output, Error>> {
match self.into_result() {
Ok(r) => Ok(r).into(),
Err(err) if err.is_retryable() => Poll::Pending,
Err(err) => Err(err).into(),
}
}
}
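// Sketch of the intended use (illustrative): a retryable/blocked result
// surfaces as Poll::Pending, success as Poll::Ready(Ok(..)), and any other
// failure as Poll::Ready(Err(..)). Here with the isize impl used for byte
// counts returned by s2n read/write calls.
#[allow(dead_code)]
fn _poll_example(bytes_written: isize) -> Poll<Result<usize, Error>> {
    bytes_written.into_poll()
}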
impl Error {
pub(crate) const INVALID_INPUT: Error = Self(Context::InvalidInput);
pub(crate) const MISSING_WAKER: Error = Self(Context::MissingWaker);
/// Converts an io::Error into an s2n-tls Error
pub fn io_error(err: std::io::Error) -> Error {
let errno = err.raw_os_error().unwrap_or(1);
errno::set_errno(errno::Errno(errno));
s2n_status_code::FAILURE.into_result().unwrap_err()
}
/// An error occurred while running application code.
///
/// Can be emitted from [`crate::callbacks::ConnectionFuture::poll()`] to indicate
/// async task failure.
pub fn application(error: Box<dyn std::error::Error + Send + Sync + 'static>) -> Self {
Self(Context::Application(error))
}
fn capture() -> Self {
unsafe {
let s2n_errno = s2n_errno_location();
let code = *s2n_errno;
// https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#error-handling
//# To avoid possible confusion, s2n_errno should be cleared after processing
//# an error: s2n_errno = S2N_ERR_T_OK
*s2n_errno = s2n_error_type::OK as _;
Self(Context::Code(code, errno()))
}
}
pub fn name(&self) -> &'static str {
match self.0 {
Context::InvalidInput => "InvalidInput",
Context::MissingWaker => "MissingWaker",
Context::Application(_) => "ApplicationError",
Context::Code(code, _) => unsafe {
// Safety: we assume the string has a valid encoding coming from s2n
cstr_to_str(s2n_strerror_name(code))
},
}
}
pub fn message(&self) -> &'static str {
match self.0 {
Context::InvalidInput => "A parameter was incorrect",
Context::MissingWaker => {
"Tried to perform an asynchronous operation without a configured waker"
}
Context::Application(_) => "An error occurred while executing application code",
Context::Code(code, _) => unsafe {
// Safety: we assume the string has a valid encoding coming from s2n
cstr_to_str(s2n_strerror(code, core::ptr::null()))
},
}
}
pub fn debug(&self) -> Option<&'static str> {
match self.0 {
Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
Context::Code(code, _) => unsafe {
let debug_info = s2n_strerror_debug(code, core::ptr::null());
// The debug string should be set to a constant static string
// when an error occurs, but because it starts out as NULL
// we should defend against mistakes.
if debug_info.is_null() {
None
} else {
// If the string is not null, then we can assume that
// it is constant and static.
Some(cstr_to_str(debug_info))
}
},
}
}
pub fn kind(&self) -> ErrorType {
match self.0 {
Context::InvalidInput | Context::MissingWaker => ErrorType::UsageError,
Context::Application(_) => ErrorType::Application,
Context::Code(code, _) => unsafe { ErrorType::from(s2n_error_get_type(code)) },
}
}
pub fn source(&self) -> ErrorSource {
match self.0 {
Context::InvalidInput | Context::MissingWaker => ErrorSource::Bindings,
Context::Application(_) => ErrorSource::Application,
Context::Code(_, _) => ErrorSource::Library,
}
}
#[allow(clippy::borrowed_box)]
/// Returns an [`std::error::Error`] if the error source was [`ErrorSource::Application`],
/// otherwise returns None.
pub fn application_error(&self) -> Option<&Box<dyn std::error::Error + Send + Sync + 'static>> {
if let Self(Context::Application(err)) = self {
Some(err)
} else {
None
}
}
pub fn is_retryable(&self) -> bool {
matches!(self.kind(), ErrorType::Blocked)
}
}
#[cfg(feature = "quic")]
impl Error {
/// s2n-tls does not send specific errors.
///
/// However, we can attempt to map local errors into the alerts
/// that we would have sent if we sent alerts.
///
/// This API is currently incomplete and should not be relied upon.
pub fn alert(&self) -> Option<u8> {
match self.0 {
Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
Context::Code(code, _) => {
let mut alert = 0;
let r = unsafe { s2n_error_get_alert(code, &mut alert) };
match r.into_result() {
Ok(_) => Some(alert),
Err(_) => None,
}
}
}
}
}
/// # Safety
///
/// The caller must ensure the char pointer must contain a valid
/// UTF-8 string from a trusted source
unsafe fn cstr_to_str(v: *const c_char) -> &'static str {
let slice = CStr::from_ptr(v);
let bytes = slice.to_bytes();
core::str::from_utf8_unchecked(bytes)
}
impl TryFrom<std::io::Error> for Error {
type Error = Error;
fn try_from(value: std::io::Error) -> Result<Self, Self::Error> {
let io_inner = value.into_inner().ok_or(Error::INVALID_INPUT)?;
io_inner
.downcast::<Self>()
.map(|error| *error)
.map_err(|_| Error::INVALID_INPUT)
}
}
impl From<Error> for std::io::Error {
fn from(input: Error) -> Self {
if let Context::Code(_, errno) = input.0 {
if ErrorType::IOError == input.kind() {
let bare = std::io::Error::from_raw_os_error(errno.0);
return std::io::Error::new(bare.kind(), input);
}
}
std::io::Error::new(std::io::ErrorKind::Other, input)
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut s = f.debug_struct("Error");
if let Context::Code(code, _) = self.0 {
s.field("code", &code);
}
s.field("name", &self.name());
s.field("message", &self.message());
s.field("kind", &self.kind());
s.field("source", &self.source());
if let Some(debug) = self.debug() {
s.field("debug", &debug);
}
// "errno" is only known to be meaningful for IOErrors.
// However, it has occasionally proved useful for debugging
// other errors, so include it for all errors.
if let Context::Code(_, errno) = self.0 {
s.field("errno", &errno.to_string());
}
s.finish()
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Self(Context::Application(err)) = self {
err.fmt(f)
} else {
f.write_str(self.message())
}
}
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
// implement `source` in the same way `std::io::Error` implements it:
// https://doc.rust-lang.org/std/io/struct.Error.html#method.source
if let Self(Context::Application(err)) = self {
err.source()
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{enums::Version, testing::client_hello::CustomError};
use errno::set_errno;
const FAILURE: isize = -1;
// This relies on an implementation detail of s2n-tls errors,
// and could make these tests brittle. However, the alternative
// is a real handshake producing a real IO error, so just updating
// this value if the definition of an IO error changes might be easier.
const S2N_IO_ERROR_CODE: s2n_status_code::Type = 1 << 26;
#[test]
fn s2n_io_error_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
set_errno(Errno(libc::ECONNRESET));
unsafe {
let s2n_errno_ptr = s2n_errno_location();
*s2n_errno_ptr = S2N_IO_ERROR_CODE;
}
let s2n_error = FAILURE.into_result().unwrap_err();
assert_eq!(ErrorType::IOError, s2n_error.kind());
let io_error = std::io::Error::from(s2n_error);
assert_eq!(std::io::ErrorKind::ConnectionReset, io_error.kind());
assert!(io_error.into_inner().is_some());
Ok(())
}
#[test]
fn s2n_error_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
set_errno(Errno(libc::ECONNRESET));
unsafe {
let s2n_errno_ptr = s2n_errno_location();
*s2n_errno_ptr = S2N_IO_ERROR_CODE - 1;
}
let s2n_error = FAILURE.into_result().unwrap_err();
assert_ne!(ErrorType::IOError, s2n_error.kind());
let io_error = std::io::Error::from(s2n_error);
assert_eq!(std::io::ErrorKind::Other, io_error.kind());
assert!(io_error.into_inner().is_some());
Ok(())
}
#[test]
fn invalid_input_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
let s2n_error = Version::try_from(0).unwrap_err();
assert_eq!(ErrorType::UsageError, s2n_error.kind());
let io_error = std::io::Error::from(s2n_error);
assert_eq!(std::io::ErrorKind::Other, io_error.kind());
assert!(io_error.into_inner().is_some());
Ok(())
}
#[test]
fn error_source() -> Result<(), Box<dyn std::error::Error>> {
let bindings_error = Version::try_from(0).unwrap_err();
assert_eq!(ErrorSource::Bindings, bindings_error.source());
let library_error = FAILURE.into_result().unwrap_err();
assert_eq!(ErrorSource::Library, library_error.source());
Ok(())
}
#[test]
fn application_error() {
// test single level errors
{
let error = Error::application(Box::new(CustomError));
let app_error = error.application_error().unwrap();
let _custom_error = app_error.downcast_ref::<CustomError>().unwrap();
}
// make sure nested errors work
{
let io_error = std::io::Error::new(std::io::ErrorKind::Other, CustomError);
let error = Error::application(Box::new(io_error));
let app_error = error.application_error().unwrap();
let io_error = app_error.downcast_ref::<std::io::Error>().unwrap();
let _custom_error = io_error
.get_ref()
.unwrap()
.downcast_ref::<CustomError>()
.unwrap();
}
}
} |
pub fn kind(&self) -> ErrorType { | random_line_split |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use core::{convert::TryInto, fmt, ptr::NonNull, task::Poll};
use errno::{errno, Errno};
use libc::c_char;
use s2n_tls_sys::*;
use std::{convert::TryFrom, ffi::CStr};
#[non_exhaustive]
#[derive(Debug, PartialEq)]
pub enum ErrorType {
UnknownErrorType,
NoError,
IOError,
ConnectionClosed,
Blocked,
Alert,
ProtocolError,
InternalError,
UsageError,
Application,
}
#[non_exhaustive]
#[derive(Debug, PartialEq)]
pub enum ErrorSource {
Library,
Bindings,
Application,
}
impl From<libc::c_int> for ErrorType {
    fn from(input: libc::c_int) -> Self {
        match input as s2n_error_type::Type {
            s2n_error_type::OK => ErrorType::NoError,
            s2n_error_type::IO => ErrorType::IOError,
            s2n_error_type::CLOSED => ErrorType::ConnectionClosed,
            s2n_error_type::BLOCKED => ErrorType::Blocked,
            s2n_error_type::ALERT => ErrorType::Alert,
            s2n_error_type::PROTO => ErrorType::ProtocolError,
            s2n_error_type::INTERNAL => ErrorType::InternalError,
            s2n_error_type::USAGE => ErrorType::UsageError,
            _ => ErrorType::UnknownErrorType,
        }
    }
}
enum Context {
InvalidInput,
MissingWaker,
Code(s2n_status_code::Type, Errno),
    Application(Box<dyn std::error::Error + Send + Sync + 'static>),
}
pub struct Error(Context);
pub trait Fallible {
type Output;
fn into_result(self) -> Result<Self::Output, Error>;
}
impl Fallible for s2n_status_code::Type {
type Output = s2n_status_code::Type;
fn into_result(self) -> Result<Self::Output, Error> {
if self >= s2n_status_code::SUCCESS {
Ok(self)
} else {
Err(Error::capture())
}
}
}
impl Fallible for isize {
type Output = usize;
fn into_result(self) -> Result<Self::Output, Error> {
// Negative values can't be converted to a real size
// and instead indicate an error.
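        // Hedged sketch of the mapping (illustrative only):
        //   (5isize).into_result()  == Ok(5usize)
        //   (-1isize).into_result() == Err(..), capturing the current s2n_errno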
self.try_into().map_err(|_| Error::capture())
}
}
impl Fallible for u64 {
type Output = Self;
/// Converts a u64 to a Result by checking for u64::MAX.
///
/// If a method that returns an unsigned int is fallible,
/// then the -1 error result wraps around to u64::MAX.
///
/// For a u64 to be Fallible, a result of u64::MAX must not be
/// possible without an error. For example, [`s2n_connection_get_delay`]
/// can't return u64::MAX as a valid result because
    /// s2n-tls blinding delays are limited to 30s, or a return value of 3 * 10^10 ns,
/// which is significantly less than u64::MAX. [`s2n_connection_get_delay`]
/// would therefore only return u64::MAX for a -1 error result.
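    ///
    /// A minimal sketch of the expected mapping (illustrative, not run as a
    /// doctest; assumes the `Fallible` trait is in scope):
    ///
    /// ```ignore
    /// // Any value below u64::MAX passes through unchanged.
    /// assert_eq!(42u64.into_result().unwrap(), 42);
    /// // u64::MAX is the wrapped-around -1 error sentinel.
    /// assert!(u64::MAX.into_result().is_err());
    /// ```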
fn into_result(self) -> Result<Self::Output, Error> {
        if self != Self::MAX {
Ok(self)
} else {
Err(Error::capture())
}
}
}
impl<T> Fallible for *mut T {
type Output = NonNull<T>;
fn into_result(self) -> Result<Self::Output, Error> {
if let Some(value) = NonNull::new(self) {
Ok(value)
} else {
Err(Error::capture())
}
}
}
impl<T> Fallible for *const T {
type Output = *const T;
fn into_result(self) -> Result<Self::Output, Error> {
        if !self.is_null() {
Ok(self)
} else {
Err(Error::capture())
}
}
}
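/// Converts a [`Fallible`] result into a [`Poll`], mapping retryable
/// ([`ErrorType::Blocked`]) errors to [`Poll::Pending`].
///
/// A hedged usage sketch (illustrative only; `fallible_call` is an assumed
/// stand-in for any `Fallible` operation):
///
/// ```ignore
/// match fallible_call().into_poll() {
///     Poll::Ready(Ok(value)) => { /* operation completed */ }
///     Poll::Ready(Err(error)) => { /* fatal, non-retryable error */ }
///     Poll::Pending => { /* blocked: retry once I/O is ready again */ }
/// }
/// ```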
pub trait Pollable {
type Output;
fn into_poll(self) -> Poll<Result<Self::Output, Error>>;
}
impl<T: Fallible> Pollable for T {
type Output = T::Output;
fn into_poll(self) -> Poll<Result<Self::Output, Error>> {
match self.into_result() {
Ok(r) => Ok(r).into(),
Err(err) if err.is_retryable() => Poll::Pending,
Err(err) => Err(err).into(),
}
}
}
impl Error {
pub(crate) const INVALID_INPUT: Error = Self(Context::InvalidInput);
pub(crate) const MISSING_WAKER: Error = Self(Context::MissingWaker);
/// Converts an io::Error into an s2n-tls Error
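    ///
    /// Hedged sketch (illustrative only):
    ///
    /// ```ignore
    /// let io_err = std::io::Error::from_raw_os_error(libc::ECONNRESET);
    /// let error = Error::io_error(io_err);
    /// // `error` now records ECONNRESET as its errno.
    /// ```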
pub fn io_error(err: std::io::Error) -> Error {
let errno = err.raw_os_error().unwrap_or(1);
errno::set_errno(errno::Errno(errno));
s2n_status_code::FAILURE.into_result().unwrap_err()
}
/// An error occurred while running application code.
///
/// Can be emitted from [`crate::callbacks::ConnectionFuture::poll()`] to indicate
/// async task failure.
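    ///
    /// A minimal sketch (`MyTaskError` is an assumed application error type):
    ///
    /// ```ignore
    /// #[derive(Debug)]
    /// struct MyTaskError;
    ///
    /// impl std::fmt::Display for MyTaskError {
    ///     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    ///         f.write_str("application task failed")
    ///     }
    /// }
    ///
    /// impl std::error::Error for MyTaskError {}
    ///
    /// let error = Error::application(Box::new(MyTaskError));
    /// assert_eq!(ErrorType::Application, error.kind());
    /// ```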
    pub fn application(error: Box<dyn std::error::Error + Send + Sync + 'static>) -> Self {
Self(Context::Application(error))
}
fn capture() -> Self {
unsafe {
let s2n_errno = s2n_errno_location();
let code = *s2n_errno;
// https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#error-handling
//# To avoid possible confusion, s2n_errno should be cleared after processing
//# an error: s2n_errno = S2N_ERR_T_OK
*s2n_errno = s2n_error_type::OK as _;
Self(Context::Code(code, errno()))
}
}
pub fn name(&self) -> &'static str {
match self.0 {
Context::InvalidInput => "InvalidInput",
Context::MissingWaker => "MissingWaker",
Context::Application(_) => "ApplicationError",
Context::Code(code, _) => unsafe {
// Safety: we assume the string has a valid encoding coming from s2n
cstr_to_str(s2n_strerror_name(code))
},
}
}
pub fn message(&self) -> &'static str {
match self.0 {
Context::InvalidInput => "A parameter was incorrect",
Context::MissingWaker => {
"Tried to perform an asynchronous operation without a configured waker"
}
Context::Application(_) => "An error occurred while executing application code",
Context::Code(code, _) => unsafe {
// Safety: we assume the string has a valid encoding coming from s2n
cstr_to_str(s2n_strerror(code, core::ptr::null()))
},
}
}
pub fn debug(&self) -> Option<&'static str> {
match self.0 {
Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
Context::Code(code, _) => unsafe {
let debug_info = s2n_strerror_debug(code, core::ptr::null());
// The debug string should be set to a constant static string
// when an error occurs, but because it starts out as NULL
// we should defend against mistakes.
if debug_info.is_null() {
None
} else {
// If the string is not null, then we can assume that
// it is constant and static.
Some(cstr_to_str(debug_info))
}
},
}
}
pub fn kind(&self) -> ErrorType {
match self.0 {
Context::InvalidInput | Context::MissingWaker => ErrorType::UsageError,
Context::Application(_) => ErrorType::Application,
Context::Code(code, _) => unsafe { ErrorType::from(s2n_error_get_type(code)) },
}
}
pub fn source(&self) -> ErrorSource {
match self.0 {
Context::InvalidInput | Context::MissingWaker => ErrorSource::Bindings,
Context::Application(_) => ErrorSource::Application,
Context::Code(_, _) => ErrorSource::Library,
}
}
#[allow(clippy::borrowed_box)]
/// Returns an [`std::error::Error`] if the error source was [`ErrorSource::Application`],
/// otherwise returns None.
    pub fn application_error(&self) -> Option<&Box<dyn std::error::Error + Send + Sync + 'static>> {
if let Self(Context::Application(err)) = self {
Some(err)
} else {
None
}
}
pub fn is_retryable(&self) -> bool {
matches!(self.kind(), ErrorType::Blocked)
}
}
#[cfg(feature = "quic")]
impl Error {
/// s2n-tls does not send specific errors.
///
/// However, we can attempt to map local errors into the alerts
/// that we would have sent if we sent alerts.
///
/// This API is currently incomplete and should not be relied upon.
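    ///
    /// Hedged sketch (whether an alert is available depends on the error code):
    ///
    /// ```ignore
    /// if let Some(alert) = error.alert() {
    ///     // e.g. forward `alert` in a QUIC CONNECTION_CLOSE frame
    /// }
    /// ```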
pub fn alert(&self) -> Option<u8> {
match self.0 {
Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
Context::Code(code, _) => {
let mut alert = 0;
let r = unsafe { s2n_error_get_alert(code, &mut alert) };
match r.into_result() {
Ok(_) => Some(alert),
Err(_) => None,
}
}
}
}
}
/// # Safety
///
/// The caller must ensure the char pointer points to a valid
/// UTF-8 string from a trusted source.
unsafe fn cstr_to_str(v: *const c_char) -> &'static str {
let slice = CStr::from_ptr(v);
let bytes = slice.to_bytes();
core::str::from_utf8_unchecked(bytes)
}
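// Attempts to recover an s2n-tls `Error` previously wrapped into a
// `std::io::Error` by the `From<Error> for std::io::Error` impl below.
// Hedged sketch (illustrative only):
//
//     let io_error: std::io::Error = s2n_error.into();
//     let recovered: Result<Error, Error> = Error::try_from(io_error);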
impl TryFrom<std::io::Error> for Error {
type Error = Error;
fn try_from(value: std::io::Error) -> Result<Self, Self::Error> {
let io_inner = value.into_inner().ok_or(Error::INVALID_INPUT)?;
io_inner
.downcast::<Self>()
.map(|error| *error)
.map_err(|_| Error::INVALID_INPUT)
}
}
impl From<Error> for std::io::Error {
fn from(input: Error) -> Self {
if let Context::Code(_, errno) = input.0 {
if ErrorType::IOError == input.kind() {
let bare = std::io::Error::from_raw_os_error(errno.0);
return std::io::Error::new(bare.kind(), input);
}
}
std::io::Error::new(std::io::ErrorKind::Other, input)
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut s = f.debug_struct("Error");
if let Context::Code(code, _) = self.0 {
s.field("code", &code);
}
s.field("name", &self.name());
s.field("message", &self.message());
s.field("kind", &self.kind());
s.field("source", &self.source());
if let Some(debug) = self.debug() {
s.field("debug", &debug);
}
// "errno" is only known to be meaningful for IOErrors.
// However, it has occasionally proved useful for debugging
// other errors, so include it for all errors.
if let Context::Code(_, errno) = self.0 {
s.field("errno", &errno.to_string());
}
s.finish()
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Self(Context::Application(err)) = self {
err.fmt(f)
} else {
f.write_str(self.message())
}
}
}
impl std::error::Error for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
// implement `source` in the same way `std::io::Error` implements it:
// https://doc.rust-lang.org/std/io/struct.Error.html#method.source
if let Self(Context::Application(err)) = self {
err.source()
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{enums::Version, testing::client_hello::CustomError};
use errno::set_errno;
const FAILURE: isize = -1;
// This relies on an implementation detail of s2n-tls errors,
// and could make these tests brittle. However, the alternative
// is a real handshake producing a real IO error, so just updating
// this value if the definition of an IO error changes might be easier.
const S2N_IO_ERROR_CODE: s2n_status_code::Type = 1 << 26;
#[test]
fn s2n_io_error_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
set_errno(Errno(libc::ECONNRESET));
unsafe {
let s2n_errno_ptr = s2n_errno_location();
*s2n_errno_ptr = S2N_IO_ERROR_CODE;
}
let s2n_error = FAILURE.into_result().unwrap_err();
assert_eq!(ErrorType::IOError, s2n_error.kind());
let io_error = std::io::Error::from(s2n_error);
assert_eq!(std::io::ErrorKind::ConnectionReset, io_error.kind());
assert!(io_error.into_inner().is_some());
Ok(())
}
#[test]
fn s2n_error_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
set_errno(Errno(libc::ECONNRESET));
unsafe {
let s2n_errno_ptr = s2n_errno_location();
*s2n_errno_ptr = S2N_IO_ERROR_CODE - 1;
}
let s2n_error = FAILURE.into_result().unwrap_err();
assert_ne!(ErrorType::IOError, s2n_error.kind());
let io_error = std::io::Error::from(s2n_error);
assert_eq!(std::io::ErrorKind::Other, io_error.kind());
assert!(io_error.into_inner().is_some());
Ok(())
}
#[test]
fn invalid_input_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
let s2n_error = Version::try_from(0).unwrap_err();
assert_eq!(ErrorType::UsageError, s2n_error.kind());
let io_error = std::io::Error::from(s2n_error);
assert_eq!(std::io::ErrorKind::Other, io_error.kind());
assert!(io_error.into_inner().is_some());
Ok(())
}
#[test]
fn error_source() -> Result<(), Box<dyn std::error::Error>> {
let bindings_error = Version::try_from(0).unwrap_err();
assert_eq!(ErrorSource::Bindings, bindings_error.source());
let library_error = FAILURE.into_result().unwrap_err();
assert_eq!(ErrorSource::Library, library_error.source());
Ok(())
}
#[test]
fn application_error() {
// test single level errors
{
let error = Error::application(Box::new(CustomError));
let app_error = error.application_error().unwrap();
let _custom_error = app_error.downcast_ref::<CustomError>().unwrap();
}
// make sure nested errors work
{
let io_error = std::io::Error::new(std::io::ErrorKind::Other, CustomError);
let error = Error::application(Box::new(io_error));
let app_error = error.application_error().unwrap();
let io_error = app_error.downcast_ref::<std::io::Error>().unwrap();
let _custom_error = io_error
.get_ref()
.unwrap()
.downcast_ref::<CustomError>()
.unwrap();
}
}
}
| {
match input as s2n_error_type::Type {
s2n_error_type::OK => ErrorType::NoError,
s2n_error_type::IO => ErrorType::IOError,
s2n_error_type::CLOSED => ErrorType::ConnectionClosed,
s2n_error_type::BLOCKED => ErrorType::Blocked,
s2n_error_type::ALERT => ErrorType::Alert,
s2n_error_type::PROTO => ErrorType::ProtocolError,
s2n_error_type::INTERNAL => ErrorType::InternalError,
s2n_error_type::USAGE => ErrorType::UsageError,
_ => ErrorType::UnknownErrorType,
}
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.