lib.rs
//! Adler-32 checksum implementation.
//!
//! This implementation features:
//!
//! - Permissively licensed (0BSD) clean-room implementation.
//! - Zero dependencies.
//! - Decent performance (3-4 GB/s).
//! - `#![no_std]` support (with `default-features = false`).
#![doc(html_root_url = "https://docs.rs/adler/0.2.2")]
// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default
#![doc(test(attr(deny(unused_imports, unused_must_use))))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_debug_implementations)]
#![forbid(unsafe_code)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
extern crate core as std;
use std::hash::Hasher;
use std::ops::{AddAssign, MulAssign, RemAssign};
#[cfg(feature = "std")]
use std::io::{self, BufRead};
/// Adler-32 checksum calculator.
///
/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default
/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via
/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`].
///
/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any
/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although
/// that is not recommended (while every checksum is a hash, they are not necessarily good at being
/// one).
///
/// [`new`]: #method.new
/// [`from_checksum`]: #method.from_checksum
/// [`checksum`]: #method.checksum
#[derive(Debug, Copy, Clone)]
pub struct Adler32 {
a: u16,
b: u16,
}
impl Adler32 {
/// Creates a new Adler-32 instance with default state.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
///
/// This allows resuming checksum calculation without having to keep the `Adler32` instance
/// around.
///
/// # Example
///
/// ```
/// # use adler::Adler32;
/// let parts = [
/// "rust",
/// "acean",
/// ];
/// let whole = adler::adler32_slice(b"rustacean");
///
/// let mut sum = Adler32::new();
/// sum.write_slice(parts[0].as_bytes());
/// let partial = sum.checksum();
///
/// //...later
///
/// let mut sum = Adler32::from_checksum(partial);
/// sum.write_slice(parts[1].as_bytes());
/// assert_eq!(sum.checksum(), whole);
/// ```
#[inline]
pub fn from_checksum(sum: u32) -> Self {
Adler32 {
a: sum as u16,
b: (sum >> 16) as u16,
}
}
/// Returns the calculated checksum at this point in time.
#[inline]
pub fn checksum(&self) -> u32 {
(u32::from(self.b) << 16) | u32::from(self.a)
}
/// Adds `bytes` to the checksum calculation.
///
/// If efficiency matters, this should be called with Byte slices that contain at least a few
/// thousand Bytes.
pub fn write_slice(&mut self, bytes: &[u8]) {
// The basic algorithm is, for every byte:
// a = (a + byte) % MOD
// b = (b + a) % MOD
// where MOD = 65521.
//
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
// - We use 32-bit arithmetic in this function.
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
// operation.
//
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
// b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521)
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
// for the previous values of a and b, as well as treat every input Byte as being 255:
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
// Or in other words:
// b_inc = n*65520 + n(n+1)/2*255
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
// 2^32-65521 = n*65520 + n(n+1)/2*255
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
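// (Sanity check without the solver: the constraint rearranges to the quadratic
// 127.5*n^2 + 65647.5*n - 4294901775 = 0, whose positive root via the quadratic
// formula is n = (-65647.5 + sqrt(65647.5^2 + 4*127.5*4294901775)) / 255 ≈ 5552.18.)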
//
// On top of the optimization outlined above, the algorithm can also be parallelized with a
// bit more work:
//
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
//
// If we fix some value k<N and rewrite indices 1, ..., N as
//
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
//
// then we can express a and b in terms of sums of smaller sequences kb and ka:
//
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
//
// a = ka(1) + ka(2) + ... + ka(k) + 1
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
//
// We use this insight to unroll the main loop and process k=4 bytes at a time.
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
// stem from increased pipeline parallelism rather than auto-vectorization.
//
// This technique is described in-depth [here](https://software.intel.com/content/www/us/
// en/develop/articles/fast-computation-of-fletcher-checksums.html).
const MOD: u32 = 65521;
const CHUNK_SIZE: usize = 5552 * 4;
let mut a = u32::from(self.a);
let mut b = u32::from(self.b);
let mut a_vec = U32X4([0; 4]);
let mut b_vec = a_vec;
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
// iterate over 4 bytes at a time
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
let remainder_chunk = chunk_iter.remainder();
for chunk in chunk_iter {
for byte_vec in chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += CHUNK_SIZE as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
}
// special-case the final chunk because it may be shorter than the rest
for byte_vec in remainder_chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += remainder_chunk.len() as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
// combine the sub-sum results into the main sum
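// (the -i*ka(i+1) subtractions below are performed as additions of
// i*(MOD - ka(i+1)), which is congruent mod MOD but cannot underflow
// the unsigned values)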
b_vec *= 4;
b_vec.0[1] += MOD - a_vec.0[1];
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
for &av in a_vec.0.iter() {
a += av;
}
for &bv in b_vec.0.iter() {
b += bv;
}
// iterate over the remaining few bytes in serial
for &byte in remainder.iter() {
a += u32::from(byte);
b += a;
}
self.a = (a % MOD) as u16;
self.b = (b % MOD) as u16;
}
}
impl Default for Adler32 {
#[inline]
fn default() -> Self {
Adler32 { a: 1, b: 0 }
}
}
impl Hasher for Adler32 {
#[inline]
fn finish(&self) -> u64 {
u64::from(self.checksum())
}
fn write(&mut self, bytes: &[u8]) {
self.write_slice(bytes);
}
}
/// Calculates the Adler-32 checksum of a byte slice.
pub fn adler32_slice(data: &[u8]) -> u32 {
let mut h = Adler32::new();
h.write_slice(data);
h.checksum()
}
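/// A poor man's 4-lane u32 vector: element-wise arithmetic on a plain array,
/// which keeps the four running sub-sums independent (and SIMD-friendly).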
#[derive(Copy, Clone)]
struct U32X4([u32; 4]);
impl U32X4 {
fn from(bytes: &[u8]) -> Self {
U32X4([
u32::from(bytes[0]),
u32::from(bytes[1]),
u32::from(bytes[2]),
u32::from(bytes[3]),
])
}
}
impl AddAssign<Self> for U32X4 {
fn add_assign(&mut self, other: Self) {
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s += o;
}
}
}
impl RemAssign<u32> for U32X4 {
fn rem_assign(&mut self, quotient: u32) {
for s in self.0.iter_mut() {
*s %= quotient;
}
}
}
impl MulAssign<u32> for U32X4 {
fn mul_assign(&mut self, rhs: u32) {
for s in self.0.iter_mut() {
*s *= rhs;
}
}
}
/// Calculates the Adler-32 checksum of a `BufRead`'s contents.
///
/// The passed `BufRead` implementor will be read until it reaches EOF.
///
/// If you only have a `Read` implementor, wrap it in `std::io::BufReader`.
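///
/// # Example
///
/// ```
/// # use adler::adler32_reader;
/// // `&[u8]` implements `BufRead`, so a byte slice works directly here;
/// // the expected value matches the `Wikipedia` test vector below.
/// let sum = adler32_reader(&mut &b"Wikipedia"[..]).unwrap();
/// assert_eq!(sum, 0x11E60398);
/// ```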
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn adler32_reader<R: BufRead>(reader: &mut R) -> io::Result<u32> {
let mut h = Adler32::new();
loop {
let len = {
let buf = reader.fill_buf()?;
if buf.is_empty() {
return Ok(h.checksum());
}
h.write_slice(buf);
buf.len()
};
reader.consume(len);
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::BufReader;
#[test]
fn zeroes() {
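// with n zero bytes, a stays 1 and b accumulates n copies of a, so the
// checksum is ((n % 65521) << 16) | 1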
assert_eq!(adler32_slice(&[]), 1);
assert_eq!(adler32_slice(&[0]), 1 | 1 << 16);
assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16);
assert_eq!(adler32_slice(&[0; 100]), 0x00640001);
assert_eq!(adler32_slice(&[0; 1024]), 0x04000001);
assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001);
}
#[test]
fn ones() {
assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e);
assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11);
}
#[test]
fn mixed() {
assert_eq!(adler32_slice(&[1]), 2 | 2 << 16);
assert_eq!(adler32_slice(&[40]), 41 | 41 << 16);
assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398);
}
#[test]
fn resume() {
let mut adler = Adler32::new();
adler.write_slice(&[0xff; 1024]);
let partial = adler.checksum();
assert_eq!(partial, 0x79a6fc2e); // from above
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
// Make sure that we can resume computing from the partial checksum via `from_checksum`.
let mut adler = Adler32::from_checksum(partial);
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
}
#[test]
fn bufread() {
fn test(data: &[u8], checksum: u32) {
// `BufReader` uses an 8 KB buffer, so this will test buffer refilling.
let mut buf = BufReader::new(data);
let real_sum = adler32_reader(&mut buf).unwrap();
assert_eq!(checksum, real_sum);
}
test(&[], 1);
test(&[0; 1024], 0x04000001);
test(&[0; 1024 * 1024], 0x00f00001);
test(&[0xA5; 1024 * 1024], 0xd5009ab1);
}
}
main.rs
extern crate rand;
extern crate getopts;
#[macro_use]
extern crate serde_derive;
extern crate serde;
extern crate serde_json;
extern crate bincode;
extern crate rayon;
pub mod aabb;
pub mod background;
pub mod bvh;
pub mod camera;
pub mod deserialize;
pub mod dielectric;
pub mod disc;
pub mod emitter;
pub mod hitable;
pub mod hitable_list;
pub mod lambertian;
pub mod material;
pub mod metal;
pub mod mixture;
// pub mod phong;
pub mod plane;
pub mod random;
pub mod ray;
pub mod rectangle;
pub mod scene;
pub mod sampling;
pub mod sphere;
pub mod sphere_geometry;
pub mod triangle_mesh;
pub mod vector;
pub mod tests;
pub mod ward;
use aabb::AABB;
use background::*;
use bvh::BVH;
use camera::Camera;
use deserialize::*;
use disc::*;
use getopts::Options;
use hitable::*;
use rand::Rng;
use random::*;
use ray::Ray;
use vector::Vec3;
use std::cmp;
use std::env;
use std::fs::File;
use std::io::BufReader;
use std::io::BufWriter;
use std::io::Write;
use rayon::prelude::*;
//////////////////////////////////////////////////////////////////////////////
fn color(ray: &Ray, world: &Hitable,
background: &Background,
lights: &Vec<AABB>) -> Vec3 where
{
let mut current_ray = *ray;
let mut current_attenuation = Vec3::new(1.0, 1.0, 1.0);
for _depth in 0..50 {
if current_attenuation.length() < 1e-8 {
return Vec3::new(0.0, 0.0, 0.0)
}
match world.hit(&current_ray, 0.00001, 1e20) {
None => {
let unit_direction = vector::unit_vector(&current_ray.direction());
return background.get_background(&unit_direction) * current_attenuation;
},
Some(hr) => {
if !hr.material.wants_importance_sampling() || lights.len() == 0 {
match hr.material.scatter(&current_ray, &hr) {
material::Scatter::Bounce(next_attenuation, scattered) => {
current_attenuation = current_attenuation * next_attenuation;
current_ray = scattered;
},
material::Scatter::Emit(emission) => {
// println!("Hit light!");
return emission * current_attenuation;
},
material::Scatter::Absorb => {
return Vec3::new(0.0, 0.0, 0.0)
}
}
continue;
}
let this_hemi = Disc::new(hr.p, hr.normal, 1.0);
let light = {
let chosen_light = &lights[rand_range(0, lights.len())];
let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p);
// sample from that disc
let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc);
let gx = gx_sample.0;
let sample_direction = gx_sample.1;
if gx == 0.0 {
(0.0, sample_direction)
} else {
(2.0 * std::f64::consts::PI / gx_sample.0, sample_direction)
}
};
let scatter = {
match hr.material.scatter(&current_ray, &hr) {
material::Scatter::Bounce(_attenuation, scattered) => {
(hr.material.bsdf(&current_ray, &scattered, &hr.normal),
scattered.direction())
}
material::Scatter::Emit(_emission) => {
panic!("Whaaaaa emit?!")
},
material::Scatter::Absorb => {
panic!("Whaaaaa absorb?!")
}
}
};
let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 };
let light_d = light.1;
let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 };
let scatter_d = scatter.1;
// Veach's balance heuristic for a one-sample MIS estimator
// gives these weights:
let s = light_p + scatter_p;
let light_w = light_p / s;
let scatter_w = scatter_p / s;
// println!("{} {}", light_p, scatter_p);
// the classic Veach one-sample MIS estimator is
// (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f
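// here each strategy is chosen with probability 1/2, so the chosen term is
// divided by its selection probability (the * 2.0 factors below), which keeps
// the one-sample estimator unbiased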
let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights
((light_w / light_p) * 2.0, Ray::new(hr.p, light_d))
} else if scatter_p > 0.0 {
((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d))
} else {
return Vec3::new(0.0, 0.0, 0.0);
};
let albedo = hr.material.albedo(&current_ray, &next_values.1, &hr.normal);
current_ray = next_values.1;
current_attenuation = current_attenuation * albedo * next_values.0;
}
}
}
current_attenuation
}
//////////////////////////////////////////////////////////////////////////////
// my own bastardized version of a float file format, horrendously inefficient
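// (the format: a text header "nx ny", then one "r g b" line of linear-RGB
// floats per pixel, averaged over the samples accumulated so far)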
fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String)
{
println!("Writing output to {}",
format!("{}.linear_rgb", file_prefix));
let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap());
let ny = image.len()/subsample;
let nx = image[0].len()/subsample;
let ns = samples_so_far as f64;
f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap();
for super_j in (0..ny).rev() {
for super_i in 0..nx {
let mut super_pixel = Vec3::zero();
let top = cmp::min(image.len(), (super_j+1)*subsample);
let right = cmp::min(image[0].len(), (super_i+1)*subsample);
let h = top - super_j*subsample;
let w = right - super_i*subsample;
for j in (super_j*subsample..top).rev() {
for i in super_i*subsample..right {
super_pixel = super_pixel + image[j][i];
}
}
let mut out_col = super_pixel / (ns * (w as f64) * (h as f64));
f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap();
}
}
}
fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>,
camera: &Camera,
bvh_world: &Hitable,
background: &Background,
lights: &Vec<AABB>,
nx: usize,
ny: usize,
rng: &mut rand::ThreadRng) {
for j in (0..ny).rev() {
for i in 0..nx {
let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64);
let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64);
let r = camera.get_ray(u, v);
output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights);
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct ImageSummaries {
w: usize,
h: usize,
s: usize,
data: Vec<Vec<Vec3>>
}
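// Per-thread summaries are additive: pixel sums and sample counts are simply
// added; the final average is taken once, in write_image_to_file.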
fn combine_summaries(summary1: &ImageSummaries,
summary2: &ImageSummaries) -> ImageSummaries {
if summary1.w != summary2.w {
panic!(format!("Need same widths ({} vs {})!",
summary1.w, summary2.w));
}
if summary1.h != summary2.h {
panic!(format!("Need same heights ({} vs {})!",
summary1.h, summary2.h));
}
if summary1.data.len() != summary2.data.len() {
panic!(format!("Inconsistent data lengths ({} {}) - upstream bug?",
summary1.data.len(), summary2.data.len()));
}
let mut result = Vec::new();
for i in 0..summary1.data.len() {
let l1 = summary1.data[i].len();
let l2 = summary2.data[i].len();
if l1 != l2 {
panic!(format!(
"Inconsistent row lengths (row {}: {} {}) - upstream bug?",
i, l1, l2));
}
let row1 = summary1.data[i].iter();
let row2 = summary2.data[i].iter();
result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect())
}
ImageSummaries {
w: summary1.w,
h: summary1.h,
s: summary1.s + summary2.s,
data: result
}
}
fn write_image(args: &Args)
{
let default_output_name = "out".to_string();
let output_name = &args.o.as_ref().unwrap_or(&default_output_name);
let default_input_name = "/dev/stdin".to_string();
let input_name = &args.i.as_ref().unwrap_or(&default_input_name);
let br = BufReader::new(File::open(input_name).unwrap());
let json_value = serde_json::from_reader(br).unwrap();
let scene = deserialize_scene(&json_value).unwrap();
let background = scene.background;
let camera = scene.camera;
let lights: Vec<_> = scene.object_list
.iter()
.map(|h| h.importance_distribution())
.filter(|h| h.is_some())
.map(|h| h.unwrap())
.collect();
let bvh_world = BVH::build(scene.object_list);
let ny = args.h.unwrap_or(200);
let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize);
let n_threads = args.n.unwrap_or(1);
let ns = args.s.unwrap_or(100) / n_threads;
let background_ref = &*background;
let bvh_world_ref = &*bvh_world;
println!("With {} threads", n_threads);
let output_summaries: Vec<_> = (0..n_threads).into_par_iter().map(|i| {
let mut output_image = Vec::<Vec<Vec3>>::new();
for _j in 0..ny {
output_image.push(vec![Vec3::zero(); nx]);
}
let mut rng = rand::thread_rng();
for s in 1..ns+1 {
update_all_pixels(&mut output_image,
&camera, bvh_world_ref, background_ref, &lights,
nx, ny, &mut rng);
if i == 0 {
eprint!("\r \r{} / {} done", s, ns);
}
}
if i == 0 {
eprintln!("\nFinished");
}
ImageSummaries {
w: nx,
h: ny,
s: ns,
data: output_image
}
}).collect();
let mut summary = output_summaries[0].clone();
for new_summary in output_summaries.iter().skip(1) {
summary = combine_summaries(&summary, &new_summary);
}
println!("Using {} samples", summary.s);
write_image_to_file(&summary.data, summary.s, 1, &output_name);
}
//////////////////////////////////////////////////////////////////////////////
struct Args {
pub w: Option<usize>,
pub h: Option<usize>,
pub s: Option<usize>,
pub n: Option<usize>,
pub o: Option<String>,
pub i: Option<String>,
pub parallel: bool
}
fn main() {
random::init_rng();
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optopt("w", "width", "set image width in pixels", "NAME");
opts.optopt("h", "height", "set image height in pixels", "NAME");
opts.optopt("s", "samples", "set number of samples per pixel", "NAME");
opts.optopt("n", "nthreads", "number of threads, default 1", "NAME");
opts.optopt("o", "output", "set output file name", "NAME");
opts.optopt("i", "input", "set input file name", "NAME");
opts.optflag("p", "parallel", "write out pixel statistics, suited for parallel processing");
opts.optflag("?", "help", "print this help menu");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
write_image(&(Args {
w: matches.opt_str("w").and_then(|x| x.parse::<usize>().ok()),
h: matches.opt_str("h").and_then(|x| x.parse::<usize>().ok()),
s: matches.opt_str("s").and_then(|x| x.parse::<usize>().ok()),
n: matches.opt_str("n").and_then(|x| x.parse::<usize>().ok()),
o: matches.opt_str("o"),
i: matches.opt_str("i"),
parallel: matches.opt_present("p")
}));
}
| write_image | identifier_name |
main.rs | extern crate rand;
extern crate getopts;
#[macro_use]
extern crate serde_derive;
extern crate serde;
extern crate serde_json;
extern crate bincode;
extern crate rayon;
pub mod aabb;
pub mod background;
pub mod bvh;
pub mod camera;
pub mod deserialize;
pub mod dielectric;
pub mod disc;
pub mod emitter;
pub mod hitable;
pub mod hitable_list;
pub mod lambertian;
pub mod material;
pub mod metal;
pub mod mixture;
// pub mod phong;
pub mod plane;
pub mod random;
pub mod ray;
pub mod rectangle;
pub mod scene;
pub mod sampling;
pub mod sphere;
pub mod sphere_geometry;
pub mod triangle_mesh;
pub mod vector;
pub mod tests;
pub mod ward;
use aabb::AABB;
use background::*;
use bvh::BVH;
use camera::Camera;
use deserialize::*;
use disc::*;
use getopts::Options;
use hitable::*;
use rand::Rng;
use random::*;
use ray::Ray;
use vector::Vec3;
use std::cmp;
use std::env;
use std::fs::File;
use std::io::BufReader;
use std::io::BufWriter;
use std::io::Write;
use rayon::prelude::*;
//////////////////////////////////////////////////////////////////////////////
fn color(ray: &Ray, world: &Hitable,
background: &Background,
lights: &Vec<AABB>) -> Vec3 where
{
let mut current_ray = *ray;
let mut current_attenuation = Vec3::new(1.0, 1.0, 1.0);
for _depth in 0..50 {
if current_attenuation.length() < 1e-8 {
return Vec3::new(0.0, 0.0, 0.0)
}
match world.hit(¤t_ray, 0.00001, 1e20) {
None => {
let unit_direction = vector::unit_vector(¤t_ray.direction());
return background.get_background(&unit_direction) * current_attenuation;
},
Some(hr) => {
if!hr.material.wants_importance_sampling() || lights.len() == 0 {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(next_attenuation, scattered) => {
current_attenuation = current_attenuation * next_attenuation;
current_ray = scattered;
},
material::Scatter::Emit(emission) => {
// println!("Hit light!");
return emission * current_attenuation;
},
material::Scatter::Absorb => {
return Vec3::new(0.0, 0.0, 0.0)
}
}
continue;
}
let this_hemi = Disc::new(hr.p, hr.normal, 1.0);
let light = {
let chosen_light = &lights[rand_range(0, lights.len())];
let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p);
// sample from that disc
let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc);
let gx = gx_sample.0;
let sample_direction = gx_sample.1;
if gx == 0.0 {
(0.0, sample_direction)
} else {
(2.0 * std::f64::consts::PI / gx_sample.0, sample_direction)
}
};
let scatter = {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(_attenuation, scattered) => {
(hr.material.bsdf(¤t_ray, &scattered, &hr.normal),
scattered.direction())
}
material::Scatter::Emit(_emission) => {
panic!("Whaaaaa emit?!")
},
material::Scatter::Absorb => {
panic!("Whaaaaa absorb?!")
}
}
};
let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 };
let light_d = light.1;
let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 };
let scatter_d = scatter.1;
// Veach's balance heuristic for a one-sample MIS estimator
// gives these weights:
let s = light_p + scatter_p;
let light_w = light_p / s;
let scatter_w = scatter_p / s;
// println!("{} {}", light_p, scatter_p);
// the classic Veach one-sample MIS estimator is
// (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f
let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights
((light_w / light_p) * 2.0, Ray::new(hr.p, light_d))
} else if scatter_p > 0.0 {
((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d))
} else {
return Vec3::new(0.0, 0.0, 0.0);
};
let albedo = hr.material.albedo(¤t_ray, &next_values.1, &hr.normal);
current_ray = next_values.1;
current_attenuation = current_attenuation * albedo * next_values.0;
}
}
}
current_attenuation
} |
//////////////////////////////////////////////////////////////////////////////
// my own bastardized version of a float file format, horrendously inefficient
fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String)
{
println!("Writing output to {}",
format!("{}.linear_rgb", file_prefix));
let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap());
let ny = image.len()/subsample;
let nx = image[0].len()/subsample;
let ns = samples_so_far as f64;
f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap();
for super_j in (0..ny).rev() {
for super_i in 0..nx {
let mut super_pixel = Vec3::zero();
let top = cmp::min(image.len(), (super_j+1)*subsample);
let right = cmp::min(image[0].len(), (super_i+1)*subsample);
let h = top - super_j*subsample;
let w = right - super_i*subsample;
for j in (super_j*subsample..top).rev() {
for i in super_i*subsample..right {
super_pixel = super_pixel + image[j][i];
}
}
let mut out_col = super_pixel / (ns * (w as f64) * (h as f64));
f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap();
}
}
}
fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>,
camera: &Camera,
bvh_world: &Hitable,
background: &Background,
lights: &Vec<AABB>,
nx: usize,
ny: usize,
rng: &mut rand::ThreadRng) {
for j in (0..ny).rev() {
for i in 0..nx {
let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64);
let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64);
let r = camera.get_ray(u, v);
output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights);
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct ImageSummaries {
w: usize,
h: usize,
s: usize,
data: Vec<Vec<Vec3>>
}
fn combine_summaries(summary1: &ImageSummaries,
summary2: &ImageSummaries) -> ImageSummaries {
if summary1.w!= summary2.w {
panic!(format!("Need same widths ({} vs {})!",
summary1.w, summary2.w));
}
if summary1.h!= summary2.h {
panic!(format!("Need same heights ({} vs {})!",
summary1.h, summary2.h));
}
if summary1.data.len()!= summary2.data.len() {
panic!(format!("Inconsistent data lengths ({} {}) - upstream bug?",
summary1.data.len(), summary2.data.len()));
}
let mut result = Vec::new();
for i in 0..summary1.data.len() {
let l1 = summary1.data[i].len();
let l2 = summary2.data[i].len();
if l1!= l2 {
panic!(format!(
"Inconsistent row lengths (row {}: {} {}) - upstream bug?",
i, l1, l2));
}
let row1 = summary1.data[i].iter();
let row2 = summary2.data[i].iter();
result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect())
}
ImageSummaries {
w: summary1.w,
h: summary1.h,
s: summary1.s + summary2.s,
data: result
}
}
fn write_image(args: &Args)
{
let default_output_name = "out".to_string();
let output_name = &args.o.as_ref().unwrap_or(&default_output_name);
let default_input_name = "/dev/stdin".to_string();
let input_name = &args.i.as_ref().unwrap_or(&default_input_name);
let br = BufReader::new(File::open(input_name).unwrap());
let json_value = serde_json::from_reader(br).unwrap();
let scene = deserialize_scene(&json_value).unwrap();
let background = scene.background;
let camera = scene.camera;
let lights: Vec<_> = scene.object_list
.iter()
.map(|h| h.importance_distribution())
.filter(|h| h.is_some())
.map(|h| h.unwrap())
.collect();
let bvh_world = BVH::build(scene.object_list);
let ny = args.h.unwrap_or(200);
let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize);
let n_threads = args.n.unwrap_or(1);
let ns = args.s.unwrap_or(100) / n_threads;
let background_ref = &*background;
let bvh_world_ref = &*bvh_world;
println!("With {} threads", n_threads);
let output_summaries: Vec<_> = (0..n_threads).into_par_iter().map(|i| {
let mut output_image = Vec::<Vec<Vec3>>::new();
for _j in 0..ny {
output_image.push(vec![Vec3::zero(); nx]);
}
let mut rng = rand::thread_rng();
for s in 1..ns+1 {
update_all_pixels(&mut output_image,
&camera, bvh_world_ref, background_ref, &lights,
nx, ny, &mut rng);
if i == 0 {
eprint!("\r \r{} / {} done", s, ns);
}
}
if i == 0 {
eprintln!("\nFinished");
}
ImageSummaries {
w: nx,
h: ny,
s: ns,
data: output_image
}
}).collect();
let mut summary = output_summaries[0].clone();
for new_summary in output_summaries.iter().skip(1) {
summary = combine_summaries(&summary, &new_summary);
}
println!("Using {} samples", summary.s);
write_image_to_file(&summary.data, summary.s, 1, &output_name);
}
//////////////////////////////////////////////////////////////////////////////
struct Args {
pub w: Option<usize>,
pub h: Option<usize>,
pub s: Option<usize>,
pub n: Option<usize>,
pub o: Option<String>,
pub i: Option<String>,
pub parallel: bool
}
fn main() {
random::init_rng();
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optopt("w", "width", "set image width in pixels", "NAME");
opts.optopt("h", "height", "set image height in pixels", "NAME");
opts.optopt("s", "samples", "set number of samples per pixel", "NAME");
opts.optopt("n", "nthreads", "number of threads, default 1", "NAME");
opts.optopt("o", "output", "set output file name", "NAME");
opts.optopt("i", "input", "set input file name", "NAME");
opts.optflag("p", "parallel", "write out pixel statistics, suited for parallel processing");
opts.optflag("?", "help", "print this help menu");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
write_image(&(Args {
w: matches.opt_str("w").and_then(|x| x.parse::<usize>().ok()),
h: matches.opt_str("h").and_then(|x| x.parse::<usize>().ok()),
s: matches.opt_str("s").and_then(|x| x.parse::<usize>().ok()),
n: matches.opt_str("n").and_then(|x| x.parse::<usize>().ok()),
o: matches.opt_str("o"),
i: matches.opt_str("i"),
parallel: matches.opt_present("p")
}));
} | random_line_split |
|
main.rs | extern crate rand;
extern crate getopts;
#[macro_use]
extern crate serde_derive;
extern crate serde;
extern crate serde_json;
extern crate bincode;
extern crate rayon;
pub mod aabb;
pub mod background;
pub mod bvh;
pub mod camera;
pub mod deserialize;
pub mod dielectric;
pub mod disc;
pub mod emitter;
pub mod hitable;
pub mod hitable_list;
pub mod lambertian;
pub mod material;
pub mod metal;
pub mod mixture;
// pub mod phong;
pub mod plane;
pub mod random;
pub mod ray;
pub mod rectangle;
pub mod scene;
pub mod sampling;
pub mod sphere;
pub mod sphere_geometry;
pub mod triangle_mesh;
pub mod vector;
pub mod tests;
pub mod ward;
use aabb::AABB;
use background::*;
use bvh::BVH;
use camera::Camera;
use deserialize::*;
use disc::*;
use getopts::Options;
use hitable::*;
use rand::Rng;
use random::*;
use ray::Ray;
use vector::Vec3;
use std::cmp;
use std::env;
use std::fs::File;
use std::io::BufReader;
use std::io::BufWriter;
use std::io::Write;
use rayon::prelude::*;
//////////////////////////////////////////////////////////////////////////////
fn color(ray: &Ray, world: &Hitable,
background: &Background,
lights: &Vec<AABB>) -> Vec3 where
{
let mut current_ray = *ray;
let mut current_attenuation = Vec3::new(1.0, 1.0, 1.0);
for _depth in 0..50 {
if current_attenuation.length() < 1e-8 {
return Vec3::new(0.0, 0.0, 0.0)
}
match world.hit(¤t_ray, 0.00001, 1e20) {
None => {
let unit_direction = vector::unit_vector(¤t_ray.direction());
return background.get_background(&unit_direction) * current_attenuation;
},
Some(hr) => {
if!hr.material.wants_importance_sampling() || lights.len() == 0 {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(next_attenuation, scattered) => {
current_attenuation = current_attenuation * next_attenuation;
current_ray = scattered;
},
material::Scatter::Emit(emission) => {
// println!("Hit light!");
return emission * current_attenuation;
},
material::Scatter::Absorb => {
return Vec3::new(0.0, 0.0, 0.0)
}
}
continue;
}
let this_hemi = Disc::new(hr.p, hr.normal, 1.0);
let light = {
let chosen_light = &lights[rand_range(0, lights.len())];
let chosen_disc = chosen_light.project_to_disc_on_sphere(&hr.p);
// sample from that disc
let gx_sample = this_hemi.hemi_disc_subtended_angle(&chosen_disc);
let gx = gx_sample.0;
let sample_direction = gx_sample.1;
if gx == 0.0 {
(0.0, sample_direction)
} else {
(2.0 * std::f64::consts::PI / gx_sample.0, sample_direction)
}
};
let scatter = {
match hr.material.scatter(¤t_ray, &hr) {
material::Scatter::Bounce(_attenuation, scattered) => {
(hr.material.bsdf(¤t_ray, &scattered, &hr.normal),
scattered.direction())
}
material::Scatter::Emit(_emission) => {
panic!("Whaaaaa emit?!")
},
material::Scatter::Absorb => {
panic!("Whaaaaa absorb?!")
}
}
};
let light_p = if light.0 < 1e-4 { 0.0 } else { light.0 };
let light_d = light.1;
let scatter_p = if scatter.0 < 1e-4 { 0.0 } else { scatter.0 };
let scatter_d = scatter.1;
// Veach's balance heuristic for a one-sample MIS estimator
// gives these weights:
let s = light_p + scatter_p;
let light_w = light_p / s;
let scatter_w = scatter_p / s;
// println!("{} {}", light_p, scatter_p);
// the classic Veach one-sample MIS estimator is
// (light_w / light_p) * light_f + (scatter_w / scatter_p) * scatter_f
let next_values = if (light_p > 0.0) && rand_double() < 0.5 { // sample from lights
((light_w / light_p) * 2.0, Ray::new(hr.p, light_d))
} else if scatter_p > 0.0 {
((scatter_w / scatter_p) * 2.0, Ray::new(hr.p, scatter_d))
} else | ;
let albedo = hr.material.albedo(¤t_ray, &next_values.1, &hr.normal);
current_ray = next_values.1;
current_attenuation = current_attenuation * albedo * next_values.0;
}
}
}
current_attenuation
}
//////////////////////////////////////////////////////////////////////////////
// my own bastardized version of a float file format, horrendously inefficient
fn write_image_to_file(image: &Vec<Vec<Vec3>>, samples_so_far: usize, subsample: usize, file_prefix: &String)
{
println!("Writing output to {}",
format!("{}.linear_rgb", file_prefix));
let mut f = BufWriter::new(File::create(format!("{}.linear_rgb", file_prefix)).unwrap());
let ny = image.len()/subsample;
let nx = image[0].len()/subsample;
let ns = samples_so_far as f64;
f.write_fmt(format_args!("{} {}\n", nx, ny)).unwrap();
for super_j in (0..ny).rev() {
for super_i in 0..nx {
let mut super_pixel = Vec3::zero();
let top = cmp::min(image.len(), (super_j+1)*subsample);
let right = cmp::min(image[0].len(), (super_i+1)*subsample);
let h = top - super_j*subsample;
let w = right - super_i*subsample;
for j in (super_j*subsample..top).rev() {
for i in super_i*subsample..right {
super_pixel = super_pixel + image[j][i];
}
}
let mut out_col = super_pixel / (ns * (w as f64) * (h as f64));
f.write_fmt(format_args!("{} {} {}\n", out_col[0], out_col[1], out_col[2])).unwrap();
}
}
}
fn update_all_pixels(output_image: &mut Vec<Vec<Vec3>>,
camera: &Camera,
bvh_world: &Hitable,
background: &Background,
lights: &Vec<AABB>,
nx: usize,
ny: usize,
rng: &mut rand::ThreadRng) {
for j in (0..ny).rev() {
for i in 0..nx {
let u = ((i as f64) + rng.gen::<f64>()) / (nx as f64);
let v = ((j as f64) + rng.gen::<f64>()) / (ny as f64);
let r = camera.get_ray(u, v);
output_image[j][i] = output_image[j][i] + color(&r, bvh_world, background, lights);
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct ImageSummaries {
w: usize,
h: usize,
s: usize,
data: Vec<Vec<Vec3>>
}
fn combine_summaries(summary1: &ImageSummaries,
summary2: &ImageSummaries) -> ImageSummaries {
if summary1.w!= summary2.w {
panic!(format!("Need same widths ({} vs {})!",
summary1.w, summary2.w));
}
if summary1.h!= summary2.h {
panic!(format!("Need same heights ({} vs {})!",
summary1.h, summary2.h));
}
if summary1.data.len()!= summary2.data.len() {
panic!(format!("Inconsistent data lengths ({} {}) - upstream bug?",
summary1.data.len(), summary2.data.len()));
}
let mut result = Vec::new();
for i in 0..summary1.data.len() {
let l1 = summary1.data[i].len();
let l2 = summary2.data[i].len();
if l1!= l2 {
panic!(format!(
"Inconsistent row lengths (row {}: {} {}) - upstream bug?",
i, l1, l2));
}
let row1 = summary1.data[i].iter();
let row2 = summary2.data[i].iter();
result.push(row1.zip(row2).map(|(v1, v2)| *v1 + *v2).collect())
}
ImageSummaries {
w: summary1.w,
h: summary1.h,
s: summary1.s + summary2.s,
data: result
}
}
fn write_image(args: &Args)
{
let default_output_name = "out".to_string();
let output_name = &args.o.as_ref().unwrap_or(&default_output_name);
let default_input_name = "/dev/stdin".to_string();
let input_name = &args.i.as_ref().unwrap_or(&default_input_name);
let br = BufReader::new(File::open(input_name).unwrap());
let json_value = serde_json::from_reader(br).unwrap();
let scene = deserialize_scene(&json_value).unwrap();
let background = scene.background;
let camera = scene.camera;
let lights: Vec<_> = scene.object_list
.iter()
.map(|h| h.importance_distribution())
.filter(|h| h.is_some())
.map(|h| h.unwrap())
.collect();
let bvh_world = BVH::build(scene.object_list);
let ny = args.h.unwrap_or(200);
let nx = args.w.unwrap_or_else(|| ((ny as f64) * camera.params.aspect).round() as usize);
let n_threads = args.n.unwrap_or(1);
let ns = args.s.unwrap_or(100) / n_threads;
let background_ref = &*background;
let bvh_world_ref = &*bvh_world;
println!("With {} threads", n_threads);
let output_summaries: Vec<_> = (0..n_threads).into_par_iter().map(|i| {
let mut output_image = Vec::<Vec<Vec3>>::new();
for _j in 0..ny {
output_image.push(vec![Vec3::zero(); nx]);
}
let mut rng = rand::thread_rng();
for s in 1..ns+1 {
update_all_pixels(&mut output_image,
&camera, bvh_world_ref, background_ref, &lights,
nx, ny, &mut rng);
if i == 0 {
eprint!("\r \r{} / {} done", s, ns);
}
}
if i == 0 {
eprintln!("\nFinished");
}
ImageSummaries {
w: nx,
h: ny,
s: ns,
data: output_image
}
}).collect();
let mut summary = output_summaries[0].clone();
for new_summary in output_summaries.iter().skip(1) {
summary = combine_summaries(&summary, &new_summary);
}
println!("Using {} samples", summary.s);
write_image_to_file(&summary.data, summary.s, 1, &output_name);
}
//////////////////////////////////////////////////////////////////////////////
struct Args {
pub w: Option<usize>,
pub h: Option<usize>,
pub s: Option<usize>,
pub n: Option<usize>,
pub o: Option<String>,
pub i: Option<String>,
pub parallel: bool
}
fn main() {
random::init_rng();
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optopt("w", "width", "set image width in pixels", "NAME");
opts.optopt("h", "height", "set image height in pixels", "NAME");
opts.optopt("s", "samples", "set number of samples per pixel", "NAME");
opts.optopt("n", "nthreads", "number of threads, default 1", "NAME");
opts.optopt("o", "output", "set output file name", "NAME");
opts.optopt("i", "input", "set input file name", "NAME");
opts.optflag("p", "parallel", "write out pixel statistics, suited for parallel processing");
opts.optflag("?", "help", "print this help menu");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
write_image(&Args {
w: matches.opt_str("w").and_then(|x| x.parse::<usize>().ok()),
h: matches.opt_str("h").and_then(|x| x.parse::<usize>().ok()),
s: matches.opt_str("s").and_then(|x| x.parse::<usize>().ok()),
n: matches.opt_str("n").and_then(|x| x.parse::<usize>().ok()),
o: matches.opt_str("o"),
i: matches.opt_str("i"),
parallel: matches.opt_present("p")
});
}
// mod.rs
use crate::css::{is_not, CallArgs, CssString, Value};
use crate::error::Error;
use crate::output::{Format, Formatted};
use crate::parser::SourcePos;
use crate::sass::{FormalArgs, Name};
use crate::value::Numeric;
use crate::{sass, Scope, ScopeRef};
use lazy_static::lazy_static;
use std::collections::BTreeMap;
use std::sync::Arc;
use std::{cmp, fmt};
#[macro_use]
mod macros;
mod color;
mod list;
mod map;
mod math;
mod meta;
mod selector;
mod string;
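/// The signature shared by all builtin (rust-implemented) functions.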
type BuiltinFn = dyn Fn(&ScopeRef) -> Result<Value, Error> + Send + Sync;
/// A function that can be called from a sass value.
///
/// The function can be either "builtin" (implemented in rust) or
/// "user defined" (implemented in scss).
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd)]
pub struct Function {
args: FormalArgs,
pos: SourcePos,
body: FuncImpl,
}
#[derive(Clone)]
pub enum FuncImpl {
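/// A builtin function, implemented in rust.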
Builtin(Arc<BuiltinFn>),
/// A user-defined function is really a closure, it has a scope
/// where it is defined and a body of items.
UserDefined(ScopeRef, Vec<sass::Item>),
}
impl PartialOrd for FuncImpl {
fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> {
match (self, rhs) {
(&FuncImpl::Builtin(..), &FuncImpl::Builtin(..)) => None,
(&FuncImpl::Builtin(..), &FuncImpl::UserDefined(..)) => {
Some(cmp::Ordering::Less)
}
(&FuncImpl::UserDefined(..), &FuncImpl::Builtin(..)) => {
Some(cmp::Ordering::Greater)
}
(
&FuncImpl::UserDefined(ref _sa, ref a),
&FuncImpl::UserDefined(ref _sb, ref b),
) => a.partial_cmp(b),
}
}
}
impl cmp::PartialEq for FuncImpl {
fn eq(&self, rhs: &FuncImpl) -> bool {
match (self, rhs) {
(
&FuncImpl::UserDefined(ref sa, ref a),
&FuncImpl::UserDefined(ref sb, ref b),
) => ScopeRef::is_same(sa, sb) && a == b,
(&FuncImpl::Builtin(ref a), &FuncImpl::Builtin(ref b)) => {
// Each builtin function is only created once, so this
// should be ok.
#[allow(clippy::vtable_address_comparisons)]
Arc::ptr_eq(a, b)
}
_ => false,
}
}
}
impl cmp::Eq for FuncImpl {}
impl fmt::Debug for FuncImpl {
fn fmt(&self, out: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
FuncImpl::Builtin(_) => write!(out, "(builtin function)"),
FuncImpl::UserDefined(..) => {
write!(out, "(user-defined function)")
}
}
}
}
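/// Helper trait for registering a builtin function under a name.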
trait Functions {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
);
}
impl Functions for Scope {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) {
let f = Function::builtin(&self.get_name(), &name, args, body);
self.define_function(name, f);
}
}
impl Function {
/// Get a built-in function by name.
pub fn get_builtin(name: &Name) -> Option<&'static Function> {
FUNCTIONS.get(name)
}
/// Create a new `Function` from a rust implementation.
///
/// Note: This does not expose the function in any scope, it just
/// creates it.
pub fn builtin(
module: &str,
name: &Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) -> Self {
let pos = SourcePos::mock_function(name, &args, module);
Function {
args,
pos,
body: FuncImpl::Builtin(body),
}
}
/// Create a new `Function` from a scss implementation.
///
/// The scope is where the function is defined, used to bind any
/// non-parameter names in the body.
pub fn closure(
args: FormalArgs,
pos: SourcePos,
scope: ScopeRef,
body: Vec<sass::Item>,
) -> Self {
Function {
args,
pos,
body: FuncImpl::UserDefined(scope, body),
}
}
/// Call the function from a given scope and with a given set of
/// arguments.
pub fn call(
&self,
callscope: ScopeRef,
args: CallArgs,
) -> Result<Value, Error> {
let cs = "%%CALLING_SCOPE%%";
match self.body {
FuncImpl::Builtin(ref body) => {
let s = self.do_eval_args(
ScopeRef::new_global(callscope.get_format()),
args,
)?;
s.define_module(cs.into(), callscope);
body(&s)
}
FuncImpl::UserDefined(ref defscope, ref body) => {
let s = self.do_eval_args(defscope.clone(), args)?;
s.define_module(cs.into(), callscope);
Ok(s.eval_body(body)?.unwrap_or(Value::Null))
}
}
.map(Value::into_calculated)
}
fn do_eval_args(
&self,
def: ScopeRef,
args: CallArgs,
) -> Result<ScopeRef, Error> {
self.args.eval(def, args).map_err(|e| match e {
sass::ArgsError::Eval(e) => e,
ae => Error::BadArguments(ae.to_string(), self.pos.clone()),
})
}
}
lazy_static! {
static ref MODULES: BTreeMap<&'static str, Scope> = {
let mut modules = BTreeMap::new();
modules.insert("sass:color", color::create_module());
modules.insert("sass:list", list::create_module());
modules.insert("sass:map", map::create_module());
modules.insert("sass:math", math::create_module());
modules.insert("sass:meta", meta::create_module());
modules.insert("sass:selector", selector::create_module());
modules.insert("sass:string", string::create_module());
modules
};
}
/// Get a global module (e.g. `sass:math`) by name.
pub fn get_global_module(name: &str) -> Option<ScopeRef> {
MODULES.get(name).map(ScopeRef::Builtin)
}
type FunctionMap = BTreeMap<Name, Function>;
impl Functions for FunctionMap {
fn builtin_fn(
&mut self,
name: Name,
args: FormalArgs,
body: Arc<BuiltinFn>,
) {
let f = Function::builtin("", &name, args, body);
self.insert(name, f);
}
}
lazy_static! {
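/// Builtin functions available in the global namespace.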
static ref FUNCTIONS: FunctionMap = {
let mut f = BTreeMap::new();
def!(f, if(condition, if_true, if_false), |s| {
if s.get("condition")?.is_true() {
Ok(s.get("if_true")?)
} else {
Ok(s.get("if_false")?)
}
});
color::expose(MODULES.get("sass:color").unwrap(), &mut f);
list::expose(MODULES.get("sass:list").unwrap(), &mut f);
map::expose(MODULES.get("sass:map").unwrap(), &mut f);
math::expose(MODULES.get("sass:math").unwrap(), &mut f);
meta::expose(MODULES.get("sass:meta").unwrap(), &mut f);
selector::expose(MODULES.get("sass:selector").unwrap(), &mut f);
string::expose(MODULES.get("sass:string").unwrap(), &mut f);
f
};
}
// argument helpers for the actual functions
trait CheckedArg<T> {
fn named(self, name: Name) -> Result<T, Error>;
}
impl<T> CheckedArg<T> for Result<T, String> {
fn named(self, name: Name) -> Result<T, Error> {
self.map_err(|e| Error::BadArgument(name, e))
}
}
fn get_checked<T, F>(s: &Scope, name: Name, check: F) -> Result<T, Error>
where
F: Fn(Value) -> Result<T, String>,
{
check(s.get(name.as_ref())?).named(name)
}
fn get_opt_check<T, F>(
s: &Scope,
name: Name,
check: F,
) -> Result<Option<T>, Error>
where
F: Fn(Value) -> Result<T, String>,
{
match s.get(name.as_ref())? {
Value::Null => Ok(None),
v => check(v).named(name).map(Some),
}
}
fn get_numeric(s: &Scope, name: &str) -> Result<Numeric, Error> {
get_checked(s, name.into(), check::numeric)
}
fn get_integer(s: &Scope, name: Name) -> Result<i64, Error> {
get_checked(s, name, check::unitless_int)
}
fn get_string(s: &Scope, name: &'static str) -> Result<CssString, Error> {
get_checked(s, name.into(), check::string)
}
fn get_va_list(s: &Scope, name: Name) -> Result<Vec<Value>, Error> {
get_checked(s, name, check::va_list)
}
fn expected_to<'a, T>(value: &'a T, cond: &str) -> String
where
Formatted<'a, T>: std::fmt::Display,
{
format!(
"Expected {} to {}.",
Formatted {
value,
format: Format::introspect()
},
cond,
)
}
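/// Checks that convert a css `Value` into a more specific type.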
mod check {
use super::{expected_to, is_not};
use crate::css::{CssString, Value};
use crate::value::{ListSeparator, Number, Numeric};
pub fn numeric(v: Value) -> Result<Numeric, String> {
v.numeric_value().map_err(|v| is_not(&v, "a number"))
}
pub fn int(v: Value) -> Result<i64, String> {
numeric(v)?
.value
.into_integer()
.map_err(|v| is_not(&v, "an int"))
}
pub fn unitless(v: Value) -> Result<Number, String> {
let val = numeric(v)?;
if val.is_no_unit() {
Ok(val.value)
} else {
Err(expected_to(&val, "have no units"))
}
}
pub fn unitless_int(v: Value) -> Result<i64, String> {
unitless(v)?
.into_integer()
.map_err(|v| is_not(&v, "an int"))
}
pub fn string(v: Value) -> Result<CssString, String> {
match v {
Value::Literal(s) => Ok(s),
Value::Call(name, args) => {
Ok(format!("{}({})", name, args).into())
}
v => Err(is_not(&v, "a string")),
}
}
pub fn va_list(v: Value) -> Result<Vec<Value>, String> {
match v {
Value::ArgList(args) => {
args.check_no_named().map_err(|e| e.to_string())?;
Ok(args.positional)
}
Value::List(v, Some(ListSeparator::Comma), _) => Ok(v),
single => Ok(vec![single]),
}
}
pub fn va_list_nonempty(v: Value) -> Result<Vec<Value>, String> {
let result = va_list(v)?;
if result.is_empty() {
// TODO: Parameterize "selector"? Or rename fn va_selectors?
Err("At least one selector must be passed.".into())
} else {
Ok(result)
}
}
}
#[test]
fn test_rgb() -> Result<(), Box<dyn std::error::Error>> {
use crate::parser::code_span;
use crate::parser::formalargs::call_args;
use crate::value::Rgba;
let scope = ScopeRef::new_global(Default::default());
assert_eq!(
FUNCTIONS.get(&name!(rgb)).unwrap().call(
scope.clone(),
call_args(code_span(b"(17, 0, 225)"))?.1.evaluate(scope)?
)?,
Value::Color(Rgba::from_rgb(17, 0, 225).into(), None)
);
Ok(())
}
#[test]
fn test_nth() {
assert_eq!("foo", do_evaluate(&[("x", "foo, bar")], b"nth($x, 1);"))
}
#[cfg(test)]
use crate::variablescope::test::do_evaluate;
// main.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use anyhow;
use fuchsia_async;
use futures::future::join_all;
use io::BufWriter;
use json5format::{Json5Format, ParsedDocument};
use std::{
ffi::OsString,
io::{self, Read, Write},
path::{Path, PathBuf},
process::{Command, Stdio},
};
use structopt::StructOpt;
mod reader;
mod traverser;
/// Spawns a `jq` process with the specified filter and pipes `json_string` into its stdin.
/// Returns jq's stdout, or an error if jq reports one. If `jq_path` is given, that binary is
/// invoked directly; otherwise the function falls back to running `fx jq`.
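///
/// A minimal usage sketch (the jq path here is hypothetical, not from the original docs):
///
/// ```ignore
/// // Apply the identity filter to a small JSON document.
/// let out = run_jq(
///     &".".to_string(),
///     r#"{"a": 1}"#.to_string(),
///     &Some(PathBuf::from("/usr/bin/jq")),
/// ).await?;
/// ```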
async fn run_jq(
filter: &String,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let mut cmd_jq = match jq_path {
Some(path) => {
let command_str = path.as_path().to_str().unwrap();
if !Path::exists(Path::new(&command_str)) {
return Err(anyhow::anyhow!(
"Path provided in path-to-jq option did not specify a valid path to a binary."
));
}
Command::new(command_str)
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
None => {
let command_string = OsString::from("fx");
Command::new(&command_string)
.arg("jq")
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
};
let mut cmd_jq_stdin = cmd_jq.stdin.take().unwrap();
let bytestring = json_string.as_bytes();
let mut writer = BufWriter::new(&mut cmd_jq_stdin);
writer.write_all(bytestring)?;
// Close stdin.
writer.flush()?;
drop(writer);
drop(cmd_jq_stdin);
let status = cmd_jq.wait()?;
let mut cmd_jq_stdout = String::new();
let mut cmd_jq_stderr = String::new();
let stdout = cmd_jq.stdout;
let stderr = cmd_jq.stderr;
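// Note: stderr was never configured with Stdio::piped() above, so `stderr`
// is `None` here and the branch below cannot actually observe jq's stderr.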
if let Some(mut err) = stderr {
err.read_to_string(&mut cmd_jq_stderr)?;
Err(anyhow::anyhow!("jq produced the following error message:\n {}", cmd_jq_stderr))
} else if let Some(mut out) = stdout {
out.read_to_string(&mut cmd_jq_stdout)?;
Ok(cmd_jq_stdout)
} else if !status.success() {
Err(anyhow::anyhow!("jq returned with non-zero exit code but no error message"))
} else {
Err(anyhow::anyhow!("jq returned exit code 0 but no output or error message"))
}
}
/// Runs jq on the JSON form of the input, then fills the original comments back in at the matching places.
async fn run_jq5(
filter: &String,
parsed_json5: ParsedDocument,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let jq_out = run_jq(&filter, json_string, jq_path).await?;
let mut parsed_json = ParsedDocument::from_string(jq_out, None)?;
traverser::fill_comments(&parsed_json5.content, &mut parsed_json.content)?;
let format = Json5Format::new()?;
Ok(format.to_string(&parsed_json)?)
}
/// Calls `run_jq5` on the contents of a file and returns the return value of `run_jq5`.
async fn run_jq5_on_file(
filter: &String,
file: &PathBuf,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let (parsed_json5, json_string) = reader::read_json5_fromfile(&file)?;
run_jq5(&filter, parsed_json5, json_string, jq_path).await
}
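/// Applies the filter to every input file concurrently, returning one output
/// per file, or the first error encountered.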
async fn run(
filter: String,
files: Vec<PathBuf>,
jq_path: &Option<PathBuf>,
) -> Result<Vec<String>, anyhow::Error> {
let mut jq5_output_futures = Vec::with_capacity(files.len());
for file in files.iter() {
jq5_output_futures.push(run_jq5_on_file(&filter, file, &jq_path));
}
let jq5_outputs = join_all(jq5_output_futures).await;
let mut trusted_outs = Vec::with_capacity(jq5_outputs.len());
for (i, jq5_output) in jq5_outputs.into_iter().enumerate() {
match jq5_output {
Err(err) => {
return Err(anyhow::anyhow!(
r"jq5 encountered an error processing at least one of the provided json5 objects.
The first error occurred while processing file '{}':
{}",
files[i].as_path().to_str().unwrap(),
err
));
}
Ok(output) => {
trusted_outs.push(output);
}
}
}
Ok(trusted_outs)
}
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
eprintln!("{}", "This tool is a work in progress: use with caution.\n");
let args = Opt::from_args();
if args.files.is_empty() {
let (parsed_json5, json_string) = reader::read_json5_from_input(&mut io::stdin())?;
let out = run_jq5(&args.filter, parsed_json5, json_string, &args.jq_path).await?;
io::stdout().write_all(out.as_bytes())?;
} else {
let outs = run(args.filter, args.files, &args.jq_path).await?;
for out in outs {
io::stdout().write_all(out.as_bytes())?;
}
}
Ok(())
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "jq5",
about = "An extension of jq to work on json5 objects. \nThis tool is a work in progress: use with caution."
)]
struct Opt {
// TODO(72435) Add relevant options from jq
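/// The jq filter to apply.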
filter: String,
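/// Input files; standard input is read when none are given.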
#[structopt(parse(from_os_str))]
files: Vec<PathBuf>,
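/// Optional path to a jq binary; when unset, `fx jq` is used.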
#[structopt(long = "--path-to-jq", parse(from_os_str))]
jq_path: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::OpenOptions;
const JQ_PATH_STR: &str = env!("JQ_PATH");
// Tests that run_jq successfully invokes jq using the identity filter and
// an empty JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_1() {
let filter = String::from(".");
let input = String::from("{}");
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(run_jq(&filter, input, &jq_path).await.unwrap(), "{}\n");
}
// Tests that run_jq successfully invokes jq using the identity filter and a
// simple JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_2() {
let filter = String::from(".");
let input = String::from(r#"{"foo": 1, "bar": 2}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo": 1,
"bar": 2
}
"##
);
}
// Tests a simple filter and simple object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_deconstruct_filter() {
let filter = String::from("{foo2:.foo1, bar2:.bar1}");
let input = String::from(r#"{"foo1": 0, "bar1": 42}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo2": 0,
"bar2": 42
}
"## | let filter = String::from("{foo:.foo, baz:.bar}");
let json5_string = String::from(
r##"{
//Foo
foo: 0,
//Bar
bar: 42
}"##,
);
let format = Json5Format::new().unwrap();
let (parsed_json5, json_string) = reader::read_json5(json5_string).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&filter, parsed_json5, json_string, &jq_path).await.unwrap(),
format
.to_string(
&ParsedDocument::from_str(
r##"{
//Foo
foo: 0,
baz: 42
}"##,
None
)
.unwrap()
)
.unwrap()
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_on_file_w_id_filter() {
let tmp_path = PathBuf::from(r"/tmp/read_from_file_2.json5");
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(tmp_path.as_path())
.unwrap();
let json5_string = String::from(
r##"{
"name": {
"last": "Smith",
"first": "John",
"middle": "Jacob"
},
"children": [
"Buffy",
"Biff",
"Balto"
],
// Consider adding a note field to the `other` contact option
"contact_options": [
{
"home": {
"email": "[email protected]", // This was the original user id.
// Now user id's are hash values.
"phone": "212-555-4321"
},
"other": {
"email": "[email protected]"
},
"work": {
"phone": "212-555-1234",
"email": "[email protected]"
}
}
],
"address": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
/* Update schema to support multiple addresses:
"work": {
"city": "Anytown",
"country": "USA",
"state": "New York",
"street": "101 Main Street"
}
*/
}
}
"##,
);
file.write_all(json5_string.as_bytes()).unwrap();
let (parsed_json5, json_string) = reader::read_json5_fromfile(&tmp_path).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&".".to_string(), parsed_json5, json_string, &jq_path).await.unwrap(),
run_jq5_on_file(&".".to_string(), &tmp_path, &jq_path).await.unwrap()
)
}
}
lib.rs | //! ### Request/Response
//!
//! ```no_run
//! # use std::time::Duration;
//! # fn main() -> std::io::Result<()> {
//! let nc = nats::connect("demo.nats.io")?;
//! let resp = nc.request("foo", "Help me?")?;
//!
//! // With a timeout.
//! let resp = nc.request_timeout("foo", "Help me?", Duration::from_secs(2))?;
//!
//! // With multiple responses.
//! for msg in nc.request_multi("foo", "Help")?.iter() {}
//!
//! // Publish a request manually.
//! let reply = nc.new_inbox();
//! let rsub = nc.subscribe(&reply)?;
//! nc.publish_request("foo", &reply, "Help me!")?;
//! let response = rsub.iter().take(1);
//! # Ok(()) }
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(
feature = "fault_injection",
deny(
future_incompatible,
missing_copy_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused,
unused_qualifications
)
)]
#![cfg_attr(feature = "fault_injection", deny(
// over time, consider enabling the following commented-out lints:
// clippy::else_if_without_else,
// clippy::indexing_slicing,
// clippy::multiple_crate_versions,
// clippy::missing_const_for_fn,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::checked_conversions,
clippy::decimal_literal_representation,
clippy::doc_markdown,
clippy::empty_enum,
clippy::explicit_into_iter_loop,
clippy::explicit_iter_loop,
clippy::expl_impl_clone_on_copy,
clippy::fallible_impl_from,
clippy::filter_map_next,
clippy::float_arithmetic,
clippy::get_unwrap,
clippy::if_not_else,
clippy::inline_always,
clippy::invalid_upcast_comparisons,
clippy::items_after_statements,
clippy::manual_filter_map,
clippy::manual_find_map,
clippy::map_flatten,
clippy::map_unwrap_or,
clippy::match_same_arms,
clippy::maybe_infinite_iter,
clippy::mem_forget,
clippy::needless_borrow,
clippy::needless_continue,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::path_buf_push_overwrite,
clippy::print_stdout,
clippy::single_match_else,
clippy::string_add,
clippy::string_add_assign,
clippy::type_repetition_in_bounds,
clippy::unicode_not_nfc,
clippy::unimplemented,
clippy::unseparated_literal_suffix,
clippy::wildcard_dependencies,
clippy::wildcard_enum_match_arm,
))]
#![allow(
clippy::match_like_matches_macro,
clippy::await_holding_lock,
clippy::shadow_reuse,
clippy::shadow_same,
clippy::shadow_unrelated,
clippy::wildcard_enum_match_arm,
clippy::module_name_repetitions
)]
// As this is a deprecated client, we don't want warnings from new lints to make CI red.
#![allow(clippy::all)]
#![allow(warnings)]
/// Async-enabled NATS client.
pub mod asynk;
mod auth_utils;
mod client;
mod connect;
mod connector;
mod message;
mod options;
mod proto;
mod secure_wipe;
mod subscription;
/// Header constants and types.
pub mod header;
/// `JetStream` stream management and consumers.
pub mod jetstream;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod kv;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod object_store;
#[cfg(feature = "fault_injection")]
mod fault_injection;
#[cfg(feature = "fault_injection")]
use fault_injection::{inject_delay, inject_io_failure};
#[cfg(not(feature = "fault_injection"))]
fn inject_delay() {}
#[cfg(not(feature = "fault_injection"))]
fn inject_io_failure() -> io::Result<()> {
Ok(())
}
// comment out until we reach MSRV 1.54.0
// #[doc = include_str!("../docs/migration-guide-0.17.0.md")]
// #[derive(Copy, Clone)]
// pub struct Migration0170;
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "this has been renamed to `Options`.")]
pub type ConnectionOptions = Options;
#[doc(hidden)]
#[deprecated(since = "0.17.0", note = "this has been moved to `header::HeaderMap`.")]
pub type Headers = HeaderMap;
pub use header::HeaderMap;
use std::{
io::{self, Error, ErrorKind},
sync::Arc,
time::{Duration, Instant},
};
use lazy_static::lazy_static;
use regex::Regex;
pub use connector::{IntoServerList, ServerAddress};
pub use jetstream::JetStreamOptions;
pub use message::Message;
pub use options::Options;
pub use subscription::{Handler, Subscription};
/// A re-export of the `rustls` crate used in this crate,
/// for use in cases where manual client configurations
/// must be provided using `Options::tls_client_config`.
pub use rustls;
#[doc(hidden)]
pub use connect::ConnectInfo;
use client::Client;
use options::AuthStyle;
use secure_wipe::{SecureString, SecureVec};
const VERSION: &str = env!("CARGO_PKG_VERSION");
const LANG: &str = "rust";
const DEFAULT_FLUSH_TIMEOUT: Duration = Duration::from_secs(10);
lazy_static! {
static ref VERSION_RE: Regex = Regex::new(r#"\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?"#).unwrap();
}
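// Editor's sketch (assumed behavior, derived from the pattern above):
// VERSION_RE accepts an optional leading "v" and up to three dot-separated
// numeric components, so "v2.1.6" yields the captures ("2", "1", "6").
#[cfg(test)]
mod version_re_sketch {
    use super::VERSION_RE;
    #[test]
    fn captures_major_minor_patch() {
        let caps = VERSION_RE.captures("v2.1.6").unwrap();
        assert_eq!(caps.get(1).unwrap().as_str(), "2");
        assert_eq!(caps.get(2).unwrap().as_str(), "1");
        assert_eq!(caps.get(3).unwrap().as_str(), "6");
    }
}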
/// Information sent by the server back to this client
/// during initial connection, and possibly again later.
#[allow(unused)]
#[derive(Debug, Default, Clone)]
pub struct ServerInfo {
/// The unique identifier of the NATS server.
pub server_id: String,
/// Generated Server Name.
pub server_name: String,
/// The host specified in the cluster parameter/options.
pub host: String,
/// The port number specified in the cluster parameter/options.
pub port: u16,
/// The version of the NATS server.
pub version: String,
/// If this is set, then the server should try to authenticate upon
/// connect.
pub auth_required: bool,
/// If this is set, then the server must authenticate using TLS.
pub tls_required: bool,
/// Maximum payload size that the server will accept.
pub max_payload: usize,
/// The protocol version in use.
pub proto: i8,
/// The server-assigned client ID. This may change during reconnection.
pub client_id: u64,
/// The version of golang the NATS server was built with.
pub go: String,
/// The nonce used for nkeys.
pub nonce: String,
/// A list of server urls that a client can connect to.
pub connect_urls: Vec<String>,
/// The client IP as known by the server.
pub client_ip: String,
/// Whether the server supports headers.
pub headers: bool,
/// Whether server goes into lame duck mode.
pub lame_duck_mode: bool,
}
impl ServerInfo {
fn parse(s: &str) -> Option<ServerInfo> {
let mut obj = json::parse(s).ok()?;
Some(ServerInfo {
server_id: obj["server_id"].take_string()?,
server_name: obj["server_name"].take_string().unwrap_or_default(),
host: obj["host"].take_string()?,
port: obj["port"].as_u16()?,
version: obj["version"].take_string()?,
auth_required: obj["auth_required"].as_bool().unwrap_or(false),
tls_required: obj["tls_required"].as_bool().unwrap_or(false),
max_payload: obj["max_payload"].as_usize()?,
proto: obj["proto"].as_i8()?,
client_id: obj["client_id"].as_u64()?,
go: obj["go"].take_string()?,
nonce: obj["nonce"].take_string().unwrap_or_default(),
connect_urls: obj["connect_urls"]
.members_mut()
.filter_map(|m| m.take_string())
.collect(),
client_ip: obj["client_ip"].take_string().unwrap_or_default(),
headers: obj["headers"].as_bool().unwrap_or(false),
lame_duck_mode: obj["ldm"].as_bool().unwrap_or(false),
})
}
}
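// Editor's sketch (invented INFO payload, not captured from a real server):
// `parse` requires server_id, host, port, version, max_payload, proto,
// client_id, and go; every other field falls back to a default.
#[cfg(test)]
mod server_info_parse_sketch {
    use super::ServerInfo;
    #[test]
    fn parses_minimal_info() {
        let info = ServerInfo::parse(
            r#"{"server_id":"abc","host":"127.0.0.1","port":4222,
                "version":"2.1.6","max_payload":1048576,"proto":1,
                "client_id":7,"go":"go1.16"}"#,
        )
        .expect("minimal INFO should parse");
        assert_eq!(info.port, 4222);
        assert!(!info.auth_required);
    }
}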
/// A NATS connection.
#[derive(Clone, Debug)]
pub struct Connection(pub(crate) Arc<Inner>);
#[derive(Clone, Debug)]
struct Inner {
client: Client,
}
impl Drop for Inner {
fn drop(&mut self) {
self.client.shutdown();
}
}
/// Connect to one or more NATS servers at the given URLs.
///
/// The [`IntoServerList`] trait allows passing URLs in various formats. Furthermore, if
/// you need more control over the connection's parameters, use [`Options::connect()`].
///
/// **Warning:** Asynchronous errors can occur while a NATS client is operating.
/// To handle them, add a handler via [`Options::error_callback()`].
///
/// # Examples
///
/// If no scheme is provided the `nats://` scheme is assumed. The default port is `4222`.
/// ```no_run
/// let nc = nats::connect("demo.nats.io")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// It is possible to provide several URLs as a comma-separated list.
/// ```no_run
/// let nc = nats::connect("demo.nats.io,tls://demo.nats.io:4443")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Alternatively, an array of strings can be passed.
/// ```no_run
/// # use nats::IntoServerList;
/// let nc = nats::connect(&["demo.nats.io", "tls://demo.nats.io:4443"])?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Instead of using strings, [`ServerAddress`]es can be used directly as well. This is handy for
/// validating user input.
/// ```no_run
/// use nats::ServerAddress;
/// use std::io;
/// use structopt::StructOpt;
///
/// #[derive(Debug, StructOpt)]
/// struct Config {
/// #[structopt(short, long = "server", default_value = "demo.nats.io")]
/// servers: Vec<ServerAddress>,
/// }
///
/// fn main() -> io::Result<()> {
/// let config = Config::from_args();
/// let nc = nats::connect(config.servers)?;
/// Ok(())
/// }
/// ```
pub fn connect<I: IntoServerList>(nats_urls: I) -> io::Result<Connection> {
Options::new().connect(nats_urls)
}
impl Connection {
/// Connects on one or more NATS servers with the given options.
///
/// For more on how to use [`IntoServerList`] trait see [`crate::connect()`].
pub(crate) fn connect_with_options<I>(urls: I, options: Options) -> io::Result<Connection>
where
I: IntoServerList,
{
let urls = urls.into_server_list()?;
let client = Client::connect(urls, options)?;
client.flush(DEFAULT_FLUSH_TIMEOUT)?;
Ok(Connection(Arc::new(Inner { client })))
}
/// Create a subscription for the given NATS connection.
///
/// # Example
/// ```
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo")?;
/// # Ok(())
/// # }
/// ```
pub fn subscribe(&self, subject: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, None)
}
/// Create a queue subscription for the given NATS connection.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.queue_subscribe("foo", "production")?;
/// # Ok(())
/// # }
/// ```
pub fn queue_subscribe(&self, subject: &str, queue: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, Some(queue))
}
/// Publish a message on the given subject.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.publish("foo", "Hello World!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<()> {
self.publish_with_reply_or_headers(subject, None, None, msg)
}
/// Publish a message on the given subject with a reply subject for
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// nc.publish_request("foo", &reply, "Help me!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish_request(
&self,
subject: &str,
reply: &str,
msg: impl AsRef<[u8]>,
) -> io::Result<()> {
self.0
.client
.publish(subject, Some(reply), None, msg.as_ref())
}
/// Create a new globally unique inbox which can be used for replies.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// # Ok(())
/// # }
/// ```
pub fn new_inbox(&self) -> String {
format!("_INBOX.{}", nuid::next())
}
/// Publish a message on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request("foo", "Help me?")?;
/// # Ok(())
/// # }
/// ```
pub fn request(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if no
/// response is received.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request_timeout("foo", "Help me?", std::time::Duration::from_secs(2))?;
/// # Ok(())
/// # }
/// ```
pub fn request_timeout(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
timeout: Duration,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, Some(timeout), msg)
}
/// Publish a message with headers on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers("foo", "Help me?", &headers)?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
headers: &HeaderMap,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, Some(headers), None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if it was set to `Some` and no
/// response is received. It also allows passing headers.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
pub fn | (
&self,
subject: &str,
maybe_headers: Option<&HeaderMap>,
maybe_timeout: Option<Duration>,
msg: impl AsRef<[u8]>,
) -> io::Result<Message> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), maybe_headers, msg)?;
// Wait for the response
let result = if let Some(timeout) = maybe_timeout {
sub.next_timeout(timeout)
} else if let Some(msg) = sub.next() {
Ok(msg)
} else {
Err(ErrorKind::ConnectionReset.into())
};
// Check for no responder status.
if let Ok(msg) = result.as_ref() {
if msg.is_no_responders() {
return Err(Error::new(ErrorKind::NotFound, "no responders"));
}
}
result
}
/// Publish a message on the given subject as a request and allow multiple
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// for msg in nc.request_multi("foo", "Help")?.iter().take(1) {}
/// # Ok(())
/// # }
/// ```
pub fn request_multi(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Subscription> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), None, msg)?;
// Return the subscription.
Ok(sub)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server does not
/// respond within 10 seconds. Will fail with `NotConnected` if the
/// server is not currently connected. Will fail with `BrokenPipe` if
/// the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush()?;
/// # Ok(())
/// # }
/// ```
pub fn flush(&self) -> io::Result<()> {
self.flush_timeout(DEFAULT_FLUSH_TIMEOUT)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server takes
/// longer than this duration to respond. Will fail with `NotConnected`
/// if the server is not currently connected. Will fail with
/// `BrokenPipe` if the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # use std::time::Duration;
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush_timeout(Duration::from_secs(5))?;
/// # Ok(())
/// # }
/// ```
pub fn flush_timeout(&self, duration: Duration) -> io::Result<()> {
self.0.client.flush(duration)
}
/// Close a NATS connection. All clones of
/// this `Connection` will also be closed,
/// as the backing IO threads are shared.
///
/// If the client is currently connected
/// to a server, the outbound write buffer
/// will be flushed in the process of
/// shutting down.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.close();
/// # Ok(())
/// # }
/// ```
pub fn close(self) {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT).ok();
self.0.client.close();
}
/// Calculates the round trip time between this client and the server,
/// if the server is currently connected. Fails with `TimedOut` if
/// the server takes more than 10 seconds to respond.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("server rtt: {:?}", nc.rtt());
/// # Ok(())
/// # }
/// ```
pub fn rtt(&self) -> io::Result<Duration> {
let start = Instant::now();
self.flush()?;
Ok(start.elapsed())
}
/// Returns `true` if the connected server's version is at least the given major, minor, and patch.
pub fn is_server_compatible_version(&self, major: i64, minor: i64, patch: i64) -> bool {
let server_info = self.0.client.server_info();
let server_version_captures = VERSION_RE.captures(&server_info.version).unwrap();
let server_major = server_version_captures
.get(1)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_minor = server_version_captures
.get(2)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_patch = server_version_captures
.get(3)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
if server_major < major
|| (server_major == major && server_minor < minor)
|| (server_major == major && server_minor == minor && server_patch < patch)
{
return false;
}
true
}
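// Editor's note (sketch): the check above is a lexicographic comparison over
// (major, minor, patch), so a 2.2.0 server satisfies a required 2.1.6 while a
// 2.0.9 server does not. A caller might gate features as:
//
//     if nc.is_server_compatible_version(2, 1, 6) {
//         // 2.1.6+ features such as client_ip are safe to use
//     }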
/// Returns the client IP as known by the server.
/// Supported as of server version 2.1.6.
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_ip());
/// # Ok(())
/// # }
/// ```
pub fn client_ip(&self) -> io::Result<std::net::IpAddr> {
let info = self.0.client.server_info();
match info.client_ip.as_str() { | request_with_headers_or_timeout | identifier_name |
lib.rs |
//! ### Request/Response
//!
//! ```no_run
//! # use std::time::Duration;
//! # fn main() -> std::io::Result<()> {
//! let nc = nats::connect("demo.nats.io")?;
//! let resp = nc.request("foo", "Help me?")?;
//!
//! // With a timeout.
//! let resp = nc.request_timeout("foo", "Help me?", Duration::from_secs(2))?;
//!
//! // With multiple responses.
//! for msg in nc.request_multi("foo", "Help")?.iter() {}
//!
//! // Publish a request manually.
//! let reply = nc.new_inbox();
//! let rsub = nc.subscribe(&reply)?;
//! nc.publish_request("foo", &reply, "Help me!")?;
//! let response = rsub.iter().take(1);
//! # Ok(()) }
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(
feature = "fault_injection",
deny(
future_incompatible,
missing_copy_implementations, | unsafe_code,
unused,
unused_qualifications
)
)]
#![cfg_attr(feature = "fault_injection", deny(
// over time, consider enabling the following commented-out lints:
// clippy::else_if_without_else,
// clippy::indexing_slicing,
// clippy::multiple_crate_versions,
// clippy::missing_const_for_fn,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::checked_conversions,
clippy::decimal_literal_representation,
clippy::doc_markdown,
clippy::empty_enum,
clippy::explicit_into_iter_loop,
clippy::explicit_iter_loop,
clippy::expl_impl_clone_on_copy,
clippy::fallible_impl_from,
clippy::filter_map_next,
clippy::float_arithmetic,
clippy::get_unwrap,
clippy::if_not_else,
clippy::inline_always,
clippy::invalid_upcast_comparisons,
clippy::items_after_statements,
clippy::manual_filter_map,
clippy::manual_find_map,
clippy::map_flatten,
clippy::map_unwrap_or,
clippy::match_same_arms,
clippy::maybe_infinite_iter,
clippy::mem_forget,
clippy::needless_borrow,
clippy::needless_continue,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::path_buf_push_overwrite,
clippy::print_stdout,
clippy::single_match_else,
clippy::string_add,
clippy::string_add_assign,
clippy::type_repetition_in_bounds,
clippy::unicode_not_nfc,
clippy::unimplemented,
clippy::unseparated_literal_suffix,
clippy::wildcard_dependencies,
clippy::wildcard_enum_match_arm,
))]
#![allow(
clippy::match_like_matches_macro,
clippy::await_holding_lock,
clippy::shadow_reuse,
clippy::shadow_same,
clippy::shadow_unrelated,
clippy::wildcard_enum_match_arm,
clippy::module_name_repetitions
)]
// As this is a deprecated client, we don't want warnings from new lints to make CI red.
#![allow(clippy::all)]
#![allow(warnings)]
/// Async-enabled NATS client.
pub mod asynk;
mod auth_utils;
mod client;
mod connect;
mod connector;
mod message;
mod options;
mod proto;
mod secure_wipe;
mod subscription;
/// Header constants and types.
pub mod header;
/// `JetStream` stream management and consumers.
pub mod jetstream;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod kv;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod object_store;
#[cfg(feature = "fault_injection")]
mod fault_injection;
#[cfg(feature = "fault_injection")]
use fault_injection::{inject_delay, inject_io_failure};
#[cfg(not(feature = "fault_injection"))]
fn inject_delay() {}
#[cfg(not(feature = "fault_injection"))]
fn inject_io_failure() -> io::Result<()> {
Ok(())
}
// comment out until we reach MSRV 1.54.0
// #[doc = include_str!("../docs/migration-guide-0.17.0.md")]
// #[derive(Copy, Clone)]
// pub struct Migration0170;
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "this has been renamed to `Options`.")]
pub type ConnectionOptions = Options;
#[doc(hidden)]
#[deprecated(since = "0.17.0", note = "this has been moved to `header::HeaderMap`.")]
pub type Headers = HeaderMap;
pub use header::HeaderMap;
use std::{
io::{self, Error, ErrorKind},
sync::Arc,
time::{Duration, Instant},
};
use lazy_static::lazy_static;
use regex::Regex;
pub use connector::{IntoServerList, ServerAddress};
pub use jetstream::JetStreamOptions;
pub use message::Message;
pub use options::Options;
pub use subscription::{Handler, Subscription};
/// A re-export of the `rustls` crate used in this crate,
/// for use in cases where manual client configurations
/// must be provided using `Options::tls_client_config`.
pub use rustls;
#[doc(hidden)]
pub use connect::ConnectInfo;
use client::Client;
use options::AuthStyle;
use secure_wipe::{SecureString, SecureVec};
const VERSION: &str = env!("CARGO_PKG_VERSION");
const LANG: &str = "rust";
const DEFAULT_FLUSH_TIMEOUT: Duration = Duration::from_secs(10);
lazy_static! {
static ref VERSION_RE: Regex = Regex::new(r#"\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?"#).unwrap();
}
/// Information sent by the server back to this client
/// during initial connection, and possibly again later.
#[allow(unused)]
#[derive(Debug, Default, Clone)]
pub struct ServerInfo {
/// The unique identifier of the NATS server.
pub server_id: String,
/// Generated Server Name.
pub server_name: String,
/// The host specified in the cluster parameter/options.
pub host: String,
/// The port number specified in the cluster parameter/options.
pub port: u16,
/// The version of the NATS server.
pub version: String,
/// If this is set, then the server should try to authenticate upon
/// connect.
pub auth_required: bool,
/// If this is set, then the server must authenticate using TLS.
pub tls_required: bool,
/// Maximum payload size that the server will accept.
pub max_payload: usize,
/// The protocol version in use.
pub proto: i8,
/// The server-assigned client ID. This may change during reconnection.
pub client_id: u64,
/// The version of golang the NATS server was built with.
pub go: String,
/// The nonce used for nkeys.
pub nonce: String,
/// A list of server urls that a client can connect to.
pub connect_urls: Vec<String>,
/// The client IP as known by the server.
pub client_ip: String,
/// Whether the server supports headers.
pub headers: bool,
/// Whether server goes into lame duck mode.
pub lame_duck_mode: bool,
}
impl ServerInfo {
fn parse(s: &str) -> Option<ServerInfo> {
let mut obj = json::parse(s).ok()?;
Some(ServerInfo {
server_id: obj["server_id"].take_string()?,
server_name: obj["server_name"].take_string().unwrap_or_default(),
host: obj["host"].take_string()?,
port: obj["port"].as_u16()?,
version: obj["version"].take_string()?,
auth_required: obj["auth_required"].as_bool().unwrap_or(false),
tls_required: obj["tls_required"].as_bool().unwrap_or(false),
max_payload: obj["max_payload"].as_usize()?,
proto: obj["proto"].as_i8()?,
client_id: obj["client_id"].as_u64()?,
go: obj["go"].take_string()?,
nonce: obj["nonce"].take_string().unwrap_or_default(),
connect_urls: obj["connect_urls"]
.members_mut()
.filter_map(|m| m.take_string())
.collect(),
client_ip: obj["client_ip"].take_string().unwrap_or_default(),
headers: obj["headers"].as_bool().unwrap_or(false),
lame_duck_mode: obj["ldm"].as_bool().unwrap_or(false),
})
}
}
/// A NATS connection.
#[derive(Clone, Debug)]
pub struct Connection(pub(crate) Arc<Inner>);
#[derive(Clone, Debug)]
struct Inner {
client: Client,
}
impl Drop for Inner {
fn drop(&mut self) {
self.client.shutdown();
}
}
/// Connect to one or more NATS servers at the given URLs.
///
/// The [`IntoServerList`] trait allows passing URLs in various formats. Furthermore, if
/// you need more control over the connection's parameters, use [`Options::connect()`].
///
/// **Warning:** Asynchronous errors can occur while a NATS client is operating.
/// To handle them, add a handler via [`Options::error_callback()`].
///
/// # Examples
///
/// If no scheme is provided the `nats://` scheme is assumed. The default port is `4222`.
/// ```no_run
/// let nc = nats::connect("demo.nats.io")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// It is possible to provide several URLs as a comma-separated list.
/// ```no_run
/// let nc = nats::connect("demo.nats.io,tls://demo.nats.io:4443")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Alternatively, an array of strings can be passed.
/// ```no_run
/// # use nats::IntoServerList;
/// let nc = nats::connect(&["demo.nats.io", "tls://demo.nats.io:4443"])?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Instead of using strings, [`ServerAddress`]es can be used directly as well. This is handy for
/// validating user input.
/// ```no_run
/// use nats::ServerAddress;
/// use std::io;
/// use structopt::StructOpt;
///
/// #[derive(Debug, StructOpt)]
/// struct Config {
/// #[structopt(short, long = "server", default_value = "demo.nats.io")]
/// servers: Vec<ServerAddress>,
/// }
///
/// fn main() -> io::Result<()> {
/// let config = Config::from_args();
/// let nc = nats::connect(config.servers)?;
/// Ok(())
/// }
/// ```
pub fn connect<I: IntoServerList>(nats_urls: I) -> io::Result<Connection> {
Options::new().connect(nats_urls)
}
impl Connection {
/// Connects on one or more NATS servers with the given options.
///
/// For more on how to use [`IntoServerList`] trait see [`crate::connect()`].
pub(crate) fn connect_with_options<I>(urls: I, options: Options) -> io::Result<Connection>
where
I: IntoServerList,
{
let urls = urls.into_server_list()?;
let client = Client::connect(urls, options)?;
client.flush(DEFAULT_FLUSH_TIMEOUT)?;
Ok(Connection(Arc::new(Inner { client })))
}
/// Create a subscription for the given NATS connection.
///
/// # Example
/// ```
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo")?;
/// # Ok(())
/// # }
/// ```
pub fn subscribe(&self, subject: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, None)
}
/// Create a queue subscription for the given NATS connection.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.queue_subscribe("foo", "production")?;
/// # Ok(())
/// # }
/// ```
pub fn queue_subscribe(&self, subject: &str, queue: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, Some(queue))
}
/// Publish a message on the given subject.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.publish("foo", "Hello World!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<()> {
self.publish_with_reply_or_headers(subject, None, None, msg)
}
/// Publish a message on the given subject with a reply subject for
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// nc.publish_request("foo", &reply, "Help me!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish_request(
&self,
subject: &str,
reply: &str,
msg: impl AsRef<[u8]>,
) -> io::Result<()> {
self.0
.client
.publish(subject, Some(reply), None, msg.as_ref())
}
/// Create a new globally unique inbox which can be used for replies.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// # Ok(())
/// # }
/// ```
pub fn new_inbox(&self) -> String {
format!("_INBOX.{}", nuid::next())
}
/// Publish a message on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request("foo", "Help me?")?;
/// # Ok(())
/// # }
/// ```
pub fn request(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if no
/// response is received.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request_timeout("foo", "Help me?", std::time::Duration::from_secs(2))?;
/// # Ok(())
/// # }
/// ```
pub fn request_timeout(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
timeout: Duration,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, Some(timeout), msg)
}
/// Publish a message with headers on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers("foo", "Help me?", &headers)?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
headers: &HeaderMap,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, Some(headers), None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if it was set to `Some` and no
/// response is received. It also allows passing headers.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers_or_timeout(
&self,
subject: &str,
maybe_headers: Option<&HeaderMap>,
maybe_timeout: Option<Duration>,
msg: impl AsRef<[u8]>,
) -> io::Result<Message> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), maybe_headers, msg)?;
// Wait for the response
let result = if let Some(timeout) = maybe_timeout {
sub.next_timeout(timeout)
} else if let Some(msg) = sub.next() {
Ok(msg)
} else {
Err(ErrorKind::ConnectionReset.into())
};
// Check for no responder status.
if let Ok(msg) = result.as_ref() {
if msg.is_no_responders() {
return Err(Error::new(ErrorKind::NotFound, "no responders"));
}
}
result
}
/// Publish a message on the given subject as a request and allow multiple
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// for msg in nc.request_multi("foo", "Help")?.iter().take(1) {}
/// # Ok(())
/// # }
/// ```
pub fn request_multi(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Subscription> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), None, msg)?;
// Return the subscription.
Ok(sub)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server does not
/// respond within 10 seconds. Will fail with `NotConnected` if the
/// server is not currently connected. Will fail with `BrokenPipe` if
/// the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush()?;
/// # Ok(())
/// # }
/// ```
pub fn flush(&self) -> io::Result<()> {
self.flush_timeout(DEFAULT_FLUSH_TIMEOUT)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server takes
/// longer than this duration to respond. Will fail with `NotConnected`
/// if the server is not currently connected. Will fail with
/// `BrokenPipe` if the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # use std::time::Duration;
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush_timeout(Duration::from_secs(5))?;
/// # Ok(())
/// # }
/// ```
pub fn flush_timeout(&self, duration: Duration) -> io::Result<()> {
self.0.client.flush(duration)
}
/// Close a NATS connection. All clones of
/// this `Connection` will also be closed,
/// as the backing IO threads are shared.
///
/// If the client is currently connected
/// to a server, the outbound write buffer
/// will be flushed in the process of
/// shutting down.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.close();
/// # Ok(())
/// # }
/// ```
pub fn close(self) {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT).ok();
self.0.client.close();
}
/// Calculates the round trip time between this client and the server,
/// if the server is currently connected. Fails with `TimedOut` if
/// the server takes more than 10 seconds to respond.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("server rtt: {:?}", nc.rtt());
/// # Ok(())
/// # }
/// ```
pub fn rtt(&self) -> io::Result<Duration> {
let start = Instant::now();
self.flush()?;
Ok(start.elapsed())
}
/// Returns `true` if the connected server's version is at least the given major, minor, and patch.
pub fn is_server_compatible_version(&self, major: i64, minor: i64, patch: i64) -> bool {
let server_info = self.0.client.server_info();
let server_version_captures = VERSION_RE.captures(&server_info.version).unwrap();
let server_major = server_version_captures
.get(1)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_minor = server_version_captures
.get(2)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_patch = server_version_captures
.get(3)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
if server_major < major
|| (server_major == major && server_minor < minor)
|| (server_major == major && server_minor == minor && server_patch < patch)
{
return false;
}
true
}
/// Returns the client IP as known by the server.
/// Supported as of server version 2.1.6.
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_ip());
/// # Ok(())
/// # }
/// ```
pub fn client_ip(&self) -> io::Result<std::net::IpAddr> {
let info = self.0.client.server_info();
match info.client_ip.as_str() {
| missing_docs,
nonstandard_style,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts, | random_line_split |
lib.rs |
io::{self, Error, ErrorKind},
sync::Arc,
time::{Duration, Instant},
};
use lazy_static::lazy_static;
use regex::Regex;
pub use connector::{IntoServerList, ServerAddress};
pub use jetstream::JetStreamOptions;
pub use message::Message;
pub use options::Options;
pub use subscription::{Handler, Subscription};
/// A re-export of the `rustls` crate used in this crate,
/// for use in cases where manual client configurations
/// must be provided using `Options::tls_client_config`.
pub use rustls;
#[doc(hidden)]
pub use connect::ConnectInfo;
use client::Client;
use options::AuthStyle;
use secure_wipe::{SecureString, SecureVec};
const VERSION: &str = env!("CARGO_PKG_VERSION");
const LANG: &str = "rust";
const DEFAULT_FLUSH_TIMEOUT: Duration = Duration::from_secs(10);
lazy_static! {
static ref VERSION_RE: Regex = Regex::new(r#"\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?"#).unwrap();
}
/// Information sent by the server back to this client
/// during initial connection, and possibly again later.
#[allow(unused)]
#[derive(Debug, Default, Clone)]
pub struct ServerInfo {
/// The unique identifier of the NATS server.
pub server_id: String,
/// Generated Server Name.
pub server_name: String,
/// The host specified in the cluster parameter/options.
pub host: String,
/// The port number specified in the cluster parameter/options.
pub port: u16,
/// The version of the NATS server.
pub version: String,
/// If this is set, then the server should try to authenticate upon
/// connect.
pub auth_required: bool,
/// If this is set, then the server must authenticate using TLS.
pub tls_required: bool,
/// Maximum payload size that the server will accept.
pub max_payload: usize,
/// The protocol version in use.
pub proto: i8,
/// The server-assigned client ID. This may change during reconnection.
pub client_id: u64,
/// The version of golang the NATS server was built with.
pub go: String,
/// The nonce used for nkeys.
pub nonce: String,
/// A list of server urls that a client can connect to.
pub connect_urls: Vec<String>,
/// The client IP as known by the server.
pub client_ip: String,
/// Whether the server supports headers.
pub headers: bool,
/// Whether server goes into lame duck mode.
pub lame_duck_mode: bool,
}
impl ServerInfo {
fn parse(s: &str) -> Option<ServerInfo> {
let mut obj = json::parse(s).ok()?;
Some(ServerInfo {
server_id: obj["server_id"].take_string()?,
server_name: obj["server_name"].take_string().unwrap_or_default(),
host: obj["host"].take_string()?,
port: obj["port"].as_u16()?,
version: obj["version"].take_string()?,
auth_required: obj["auth_required"].as_bool().unwrap_or(false),
tls_required: obj["tls_required"].as_bool().unwrap_or(false),
max_payload: obj["max_payload"].as_usize()?,
proto: obj["proto"].as_i8()?,
client_id: obj["client_id"].as_u64()?,
go: obj["go"].take_string()?,
nonce: obj["nonce"].take_string().unwrap_or_default(),
connect_urls: obj["connect_urls"]
.members_mut()
.filter_map(|m| m.take_string())
.collect(),
client_ip: obj["client_ip"].take_string().unwrap_or_default(),
headers: obj["headers"].as_bool().unwrap_or(false),
lame_duck_mode: obj["ldm"].as_bool().unwrap_or(false),
})
}
}
/// A NATS connection.
#[derive(Clone, Debug)]
pub struct Connection(pub(crate) Arc<Inner>);
#[derive(Clone, Debug)]
struct Inner {
client: Client,
}
impl Drop for Inner {
fn drop(&mut self) {
self.client.shutdown();
}
}
/// Connect to one or more NATS servers at the given URLs.
///
/// The [`IntoServerList`] trait allows passing URLs in various formats. Furthermore, if
/// you need more control over the connection's parameters, use [`Options::connect()`].
///
/// **Warning:** Asynchronous errors can occur while a NATS client is operating.
/// To handle them, add a handler via [`Options::error_callback()`].
///
/// # Examples
///
/// If no scheme is provided the `nats://` scheme is assumed. The default port is `4222`.
/// ```no_run
/// let nc = nats::connect("demo.nats.io")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// It is possible to provide several URLs as a comma-separated list.
/// ```no_run
/// let nc = nats::connect("demo.nats.io,tls://demo.nats.io:4443")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Alternatively, an array of strings can be passed.
/// ```no_run
/// # use nats::IntoServerList;
/// let nc = nats::connect(&["demo.nats.io", "tls://demo.nats.io:4443"])?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Instead of using strings, [`ServerAddress`]es can be used directly as well. This is handy for
/// validating user input.
/// ```no_run
/// use nats::ServerAddress;
/// use std::io;
/// use structopt::StructOpt;
///
/// #[derive(Debug, StructOpt)]
/// struct Config {
/// #[structopt(short, long = "server", default_value = "demo.nats.io")]
/// servers: Vec<ServerAddress>,
/// }
///
/// fn main() -> io::Result<()> {
/// let config = Config::from_args();
/// let nc = nats::connect(config.servers)?;
/// Ok(())
/// }
/// ```
pub fn connect<I: IntoServerList>(nats_urls: I) -> io::Result<Connection> {
Options::new().connect(nats_urls)
}
impl Connection {
/// Connects on one or more NATS servers with the given options.
///
/// For more on how to use [`IntoServerList`] trait see [`crate::connect()`].
pub(crate) fn connect_with_options<I>(urls: I, options: Options) -> io::Result<Connection>
where
I: IntoServerList,
{
let urls = urls.into_server_list()?;
let client = Client::connect(urls, options)?;
client.flush(DEFAULT_FLUSH_TIMEOUT)?;
Ok(Connection(Arc::new(Inner { client })))
}
/// Create a subscription for the given NATS connection.
///
/// # Example
/// ```
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo")?;
/// # Ok(())
/// # }
/// ```
pub fn subscribe(&self, subject: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, None)
}
/// Create a queue subscription for the given NATS connection.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.queue_subscribe("foo", "production")?;
/// # Ok(())
/// # }
/// ```
pub fn queue_subscribe(&self, subject: &str, queue: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, Some(queue))
}
/// Publish a message on the given subject.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.publish("foo", "Hello World!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<()> {
self.publish_with_reply_or_headers(subject, None, None, msg)
}
/// Publish a message on the given subject with a reply subject for
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// nc.publish_request("foo", &reply, "Help me!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish_request(
&self,
subject: &str,
reply: &str,
msg: impl AsRef<[u8]>,
) -> io::Result<()> {
self.0
.client
.publish(subject, Some(reply), None, msg.as_ref())
}
/// Create a new globally unique inbox which can be used for replies.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// # Ok(())
/// # }
/// ```
pub fn new_inbox(&self) -> String {
format!("_INBOX.{}", nuid::next())
}
/// Publish a message on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request("foo", "Help me?")?;
/// # Ok(())
/// # }
/// ```
pub fn request(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if no
/// response is received.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request_timeout("foo", "Help me?", std::time::Duration::from_secs(2))?;
/// # Ok(())
/// # }
/// ```
pub fn request_timeout(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
timeout: Duration,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, Some(timeout), msg)
}
/// Publish a message with headers on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers("foo", "Help me?", &headers)?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
headers: &HeaderMap,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, Some(headers), None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if it was set to `Some` and no
/// response is received. It also allows passing headers.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers_or_timeout(
&self,
subject: &str,
maybe_headers: Option<&HeaderMap>,
maybe_timeout: Option<Duration>,
msg: impl AsRef<[u8]>,
) -> io::Result<Message> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), maybe_headers, msg)?;
// Wait for the response
let result = if let Some(timeout) = maybe_timeout {
sub.next_timeout(timeout)
} else if let Some(msg) = sub.next() {
Ok(msg)
} else {
Err(ErrorKind::ConnectionReset.into())
};
// Check for no responder status.
if let Ok(msg) = result.as_ref() {
if msg.is_no_responders() {
return Err(Error::new(ErrorKind::NotFound, "no responders"));
}
}
result
}
/// Publish a message on the given subject as a request and allow multiple
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// for msg in nc.request_multi("foo", "Help")?.iter().take(1) {}
/// # Ok(())
/// # }
/// ```
pub fn request_multi(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Subscription> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), None, msg)?;
// Return the subscription.
Ok(sub)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server does not
/// respond within 10 seconds. Will fail with `NotConnected` if the
/// server is not currently connected. Will fail with `BrokenPipe` if
/// the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush()?;
/// # Ok(())
/// # }
/// ```
pub fn flush(&self) -> io::Result<()> {
self.flush_timeout(DEFAULT_FLUSH_TIMEOUT)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server takes
/// longer than this duration to respond. Will fail with `NotConnected`
/// if the server is not currently connected. Will fail with
/// `BrokenPipe` if the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # use std::time::Duration;
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush_timeout(Duration::from_secs(5))?;
/// # Ok(())
/// # }
/// ```
pub fn flush_timeout(&self, duration: Duration) -> io::Result<()> {
self.0.client.flush(duration)
}
/// Close a NATS connection. All clones of
/// this `Connection` will also be closed,
/// as the backing IO threads are shared.
///
/// If the client is currently connected
/// to a server, the outbound write buffer
/// will be flushed in the process of
/// shutting down.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.close();
/// # Ok(())
/// # }
/// ```
pub fn close(self) {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT).ok();
self.0.client.close();
}
/// Calculates the round trip time between this client and the server,
/// if the server is currently connected. Fails with `TimedOut` if
/// the server takes more than 10 seconds to respond.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("server rtt: {:?}", nc.rtt());
/// # Ok(())
/// # }
/// ```
pub fn rtt(&self) -> io::Result<Duration> {
let start = Instant::now();
self.flush()?;
Ok(start.elapsed())
}
/// Returns `true` if the connected server's version is at least the given major, minor, and patch.
pub fn is_server_compatible_version(&self, major: i64, minor: i64, patch: i64) -> bool {
let server_info = self.0.client.server_info();
let server_version_captures = VERSION_RE.captures(&server_info.version).unwrap();
let server_major = server_version_captures
.get(1)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_minor = server_version_captures
.get(2)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_patch = server_version_captures
.get(3)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
if server_major < major
|| (server_major == major && server_minor < minor)
|| (server_major == major && server_minor == minor && server_patch < patch)
{
return false;
}
true
}
/// Returns the client IP as known by the server.
/// Supported as of server version 2.1.6.
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_ip());
/// # Ok(())
/// # }
/// ```
pub fn client_ip(&self) -> io::Result<std::net::IpAddr> {
let info = self.0.client.server_info();
match info.client_ip.as_str() {
"" => Err(Error::new(
ErrorKind::Other,
&*format!(
"client_ip was not provided by the server. It is \
supported on servers above version 2.1.6. The server \
version is {}",
info.version
),
)),
ip => match ip.parse() {
Ok(addr) => Ok(addr),
Err(_) => Err(Error::new(
ErrorKind::InvalidData,
&*format!(
"client_ip provided by the server cannot be parsed. \
The server provided IP: {}",
info.client_ip
),
)),
},
}
}
/// Returns the client ID as known by the most recently connected server.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_id());
/// # Ok(())
/// # }
/// ```
pub fn client_id(&self) -> u64 {
self.0.client.server_info().client_id
}
/// Send an unsubscription for all subs then flush the connection, allowing
/// any unprocessed messages to be handled by a handler function if one
/// is configured.
///
/// After the flush returns, we know that a round-trip to the server has
/// happened after it received our unsubscription, so we shut down the
/// subscriber afterwards.
///
/// A similar method exists for the `Subscription` struct which will drain
/// a single `Subscription` without shutting down the entire connection
/// afterward.
///
/// # Example
/// ```no_run
/// # use std::sync::{Arc, atomic::{AtomicBool, Ordering::SeqCst}};
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let received = Arc::new(AtomicBool::new(false));
/// let received_2 = received.clone();
///
/// nc.subscribe("test.drain")?.with_handler(move |m| {
/// received_2.store(true, SeqCst);
/// Ok(())
/// });
///
/// nc.publish("test.drain", "message")?;
/// nc.drain()?;
///
/// # std::thread::sleep(std::time::Duration::from_secs(1));
///
/// assert!(received.load(SeqCst));
///
/// # Ok(())
/// # }
/// ```
pub fn drain(&self) -> io::Result<()> {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT)?;
self.0.client.close();
Ok(())
}
/// Publish a message which may have a reply subject or headers set.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo.headers")?;
/// let headers = [("header1", "value1"), ("header2", "value2")]
/// .iter()
/// .collect();
/// let reply_to = None;
/// nc.publish_with_reply_or_headers("foo.headers", reply_to, Some(&headers), "Hello World!")?;
/// nc.flush()?;
/// let message = sub.next_timeout(std::time::Duration::from_secs(2)).unwrap();
/// assert_eq!(message.headers.unwrap().len(), 2);
/// # Ok(())
/// # }
/// ```
pub fn publish_with_reply_or_headers(
&self,
subject: &str,
reply: Option<&str>,
headers: Option<&HeaderMap>,
msg: impl AsRef<[u8]>,
) -> io::Result<()> | {
self.0.client.publish(subject, reply, headers, msg.as_ref())
} | identifier_body |
|
s_expressions.rs | //! In this tutorial, we will write parser
//! and evaluator of arithmetic S-expressions,
//! which look like this:
//! ```
//! (+ (* 15 2) 62)
//! ```
/// Currently, rowan doesn't have a hook to add your own interner,
/// but `SmolStr` should be a "good enough" type for representing
/// tokens.
/// Additionally, rowan uses `TextUnit` and `TextRange` types to
/// represent utf8 offsets and ranges.
use rowan::SmolStr;
/// Let's start with defining all kinds of tokens and
/// composite nodes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(non_camel_case_types)]
#[repr(u16)]
enum SyntaxKind {
L_PAREN = 0, // '('
R_PAREN, // ')'
WORD, // '+', '15'
WHITESPACE, // whitespace is explicit
ERROR, // as well as errors
// composite nodes
LIST, // `(+ 2 3)`
ATOM, // `+`, `15`, wraps a WORD token
ROOT, // top-level node: a list of s-expressions
}
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNodes exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, a syntax tree can be built even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum | {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, an atom, a closing paren,
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else {
None
}
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
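// Note (added for clarity): `kind` re-runs the same casts that `cast`
// performed, so the trailing `unwrap` cannot fail for a successfully
// constructed `Sexp`.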
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
fn eval(&self) -> Option<i64> {
let op = match self.sexps().nth(0)?.kind() {
SexpKind::Atom(atom) => atom.as_op()?,
_ => return None,
};
let arg1 = self.sexps().nth(1)?.eval()?;
let arg2 = self.sexps().nth(2)?.eval()?;
let res = match op {
Op::Add => arg1 + arg2,
Op::Sub => arg1 - arg2,
Op::Mul => arg1 * arg2,
Op::Div if arg2 == 0 => return None,
Op::Div => arg1 / arg2,
};
Some(res)
}
}
impl Sexp {
fn eval(&self) -> Option<i64> {
match self.kind() {
SexpKind::Atom(atom) => atom.eval(),
SexpKind::List(list) => list.eval(),
}
}
}
/// Let's test the eval!
fn main() {
let sexps = "
92
(+ 62 30)
(/ 92 0)
nan
(+ (* 15 2) 62)
";
let root = parse(sexps);
let res = root.syntax().sexps().map(|it| it.eval()).collect::<Vec<_>>();
eprintln!("{:?}", res);
assert_eq!(res, vec![Some(92), Some(92), None, None, Some(92),])
}
fn lex(text: &str) -> Vec<(SyntaxKind, SmolStr)> {
fn tok(t: SyntaxKind) -> m_lexer::TokenKind {
m_lexer::TokenKind(rowan::SyntaxKind::from(t).0)
}
fn kind(t: m_lexer::TokenKind) -> SyntaxKind {
match t.0 {
0 => L_PAREN,
1 => R_PAREN,
2 => WORD,
3 => WHITESPACE,
4 => ERROR,
_ => unreachable!(),
}
}
let lexer = m_lexer::LexerBuilder::new()
.error_token(tok(ERROR))
.tokens(&[
(tok(L_PAREN), r"\("),
(tok(R_PAREN), r"\)"),
(tok(WORD), r"[^\s()]+"),
(tok(WHITESPACE), r"\s+"),
])
.build();
lexer
.tokenize(text)
.into_iter()
.map(|t| (t.len, kind(t.kind)))
.scan(0usize, |start_offset, (len, kind)| {
let s: SmolStr = text[*start_offset..*start_offset + len].into();
*start_offset += len;
Some((kind, s))
})
.collect()
}
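/// A quick sanity check for the lexer as well (an illustrative addition;
/// it inspects only the token kinds, not their text).
#[test]
fn test_lexer() {
let kinds: Vec<SyntaxKind> = lex("(+ 1)").into_iter().map(|(kind, _)| kind).collect();
assert_eq!(kinds, vec![L_PAREN, WORD, WHITESPACE, WORD, R_PAREN]);
}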
| SexpRes | identifier_name |
s_expressions.rs | //! In this tutorial, we will write parser
//! and evaluator of arithmetic S-expressions,
//! which look like this:
//! ```
//! (+ (* 15 2) 62)
//! ```
/// Currently, rowan doesn't have a hook to add your own interner,
/// but `SmolStr` should be a "good enough" type for representing
/// tokens.
/// Additionally, rowan uses `TextUnit` and `TextRange` types to
/// represent utf8 offsets and ranges.
use rowan::SmolStr;
/// Let's start with defining all kinds of tokens and
/// composite nodes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(non_camel_case_types)]
#[repr(u16)]
enum SyntaxKind {
L_PAREN = 0, // '('
R_PAREN, // ')'
WORD, // '+', '15'
WHITESPACE, // whitespace is explicit
ERROR, // as well as errors
// composite nodes
LIST, // `(+ 2 3)`
ATOM, // `+`, `15`, wraps a WORD token
ROOT, // top-level node: a list of s-expressions
}
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNodes exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, a syntax tree can be built even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum SexpRes {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, an atom, a closing paren,
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
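// For orientation, `ast_node!(Root, ROOT)` below expands to roughly this
// (a sketch of the generated items, not literal compiler output):
//
// #[derive(PartialEq, Eq, Hash)]
// #[repr(transparent)]
// struct Root(SyntaxNode);
// impl Root {
// fn cast(node: SyntaxNode) -> Option<Self> {
// if node.kind() == ROOT { Some(Self(node)) } else { None }
// }
// }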
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else {
None
}
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
fn eval(&self) -> Option<i64> {
let op = match self.sexps().nth(0)?.kind() {
SexpKind::Atom(atom) => atom.as_op()?,
_ => return None,
};
let arg1 = self.sexps().nth(1)?.eval()?;
let arg2 = self.sexps().nth(2)?.eval()?;
let res = match op {
Op::Add => arg1 + arg2,
Op::Sub => arg1 - arg2,
Op::Mul => arg1 * arg2,
Op::Div if arg2 == 0 => return None,
Op::Div => arg1 / arg2,
};
Some(res)
}
}
impl Sexp {
fn eval(&self) -> Option<i64> {
match self.kind() {
SexpKind::Atom(atom) => atom.eval(),
SexpKind::List(list) => list.eval(),
}
}
}
/// Let's test the eval!
fn main() { | (+ (* 15 2) 62)
";
let root = parse(sexps);
let res = root.syntax().sexps().map(|it| it.eval()).collect::<Vec<_>>();
eprintln!("{:?}", res);
assert_eq!(res, vec![Some(92), Some(92), None, None, Some(92),])
}
fn lex(text: &str) -> Vec<(SyntaxKind, SmolStr)> {
fn tok(t: SyntaxKind) -> m_lexer::TokenKind {
m_lexer::TokenKind(rowan::SyntaxKind::from(t).0)
}
fn kind(t: m_lexer::TokenKind) -> SyntaxKind {
match t.0 {
0 => L_PAREN,
1 => R_PAREN,
2 => WORD,
3 => WHITESPACE,
4 => ERROR,
_ => unreachable!(),
}
}
let lexer = m_lexer::LexerBuilder::new()
.error_token(tok(ERROR))
.tokens(&[
(tok(L_PAREN), r"\("),
(tok(R_PAREN), r"\)"),
(tok(WORD), r"[^\s()]+"),
(tok(WHITESPACE), r"\s+"),
])
.build();
lexer
.tokenize(text)
.into_iter()
.map(|t| (t.len, kind(t.kind)))
.scan(0usize, |start_offset, (len, kind)| {
let s: SmolStr = text[*start_offset..*start_offset + len].into();
*start_offset += len;
Some((kind, s))
})
.collect()
} | let sexps = "
92
(+ 62 30)
(/ 92 0)
nan | random_line_split |
s_expressions.rs | //! In this tutorial, we will write parser
//! and evaluator of arithmetic S-expressions,
//! which look like this:
//! ```
//! (+ (* 15 2) 62)
//! ```
/// Currently, rowan doesn't have a hook to add your own interner,
/// but `SmolStr` should be a "good enough" type for representing
/// tokens.
/// Additionally, rowan uses `TextUnit` and `TextRange` types to
/// represent utf8 offsets and ranges.
use rowan::SmolStr;
/// Let's start with defining all kinds of tokens and
/// composite nodes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(non_camel_case_types)]
#[repr(u16)]
enum SyntaxKind {
L_PAREN = 0, // '('
R_PAREN, // ')'
WORD, // '+', '15'
WHITESPACE, // whitespace is explicit
ERROR, // as well as errors
// composite nodes
LIST, // `(+ 2 3)`
ATOM, // `+`, `15`, wraps a WORD token
ROOT, // top-level node: a list of s-expressions
}
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNodes exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, a syntax tree can be built even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum SexpRes {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, an atom, a closing paren,
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() | "R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else {
None
}
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
fn eval(&self) -> Option<i64> {
let op = match self.sexps().nth(0)?.kind() {
SexpKind::Atom(atom) => atom.as_op()?,
_ => return None,
};
let arg1 = self.sexps().nth(1)?.eval()?;
let arg2 = self.sexps().nth(2)?.eval()?;
let res = match op {
Op::Add => arg1 + arg2,
Op::Sub => arg1 - arg2,
Op::Mul => arg1 * arg2,
Op::Div if arg2 == 0 => return None,
Op::Div => arg1 / arg2,
};
Some(res)
}
}
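// Worked example (illustrative): for `(+ (* 15 2) 62)` the first child is
// the `+` atom, `arg1` recursively evaluates `(* 15 2)` to 30, `arg2` is 62,
// and the result is `Some(92)`. A division by zero returns `None` instead
// of panicking.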
impl Sexp {
fn eval(&self) -> Option<i64> {
match self.kind() {
SexpKind::Atom(atom) => atom.eval(),
SexpKind::List(list) => list.eval(),
}
}
}
/// Let's test the eval!
fn main() {
let sexps = "
92
(+ 62 30)
(/ 92 0)
nan
(+ (* 15 2) 62)
";
let root = parse(sexps);
let res = root.syntax().sexps().map(|it| it.eval()).collect::<Vec<_>>();
eprintln!("{:?}", res);
assert_eq!(res, vec![Some(92), Some(92), None, None, Some(92),])
}
fn lex(text: &str) -> Vec<(SyntaxKind, SmolStr)> {
fn tok(t: SyntaxKind) -> m_lexer::TokenKind {
m_lexer::TokenKind(rowan::SyntaxKind::from(t).0)
}
fn kind(t: m_lexer::TokenKind) -> SyntaxKind {
match t.0 {
0 => L_PAREN,
1 => R_PAREN,
2 => WORD,
3 => WHITESPACE,
4 => ERROR,
_ => unreachable!(),
}
}
let lexer = m_lexer::LexerBuilder::new()
.error_token(tok(ERROR))
.tokens(&[
(tok(L_PAREN), r"\("),
(tok(R_PAREN), r"\)"),
(tok(WORD), r"[^\s()]+"),
(tok(WHITESPACE), r"\s+"),
])
.build();
lexer
.tokenize(text)
.into_iter()
.map(|t| (t.len, kind(t.kind)))
.scan(0usize, |start_offset, (len, kind)| {
let s: SmolStr = text[*start_offset..*start_offset + len].into();
*start_offset += len;
Some((kind, s))
})
.collect()
}
| {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(), | identifier_body |
s_expressions.rs | //! In this tutorial, we will write parser
//! and evaluator of arithmetic S-expressions,
//! which look like this:
//! ```
//! (+ (* 15 2) 62)
//! ```
/// Currently, rowan doesn't have a hook to add your own interner,
/// but `SmolStr` should be a "good enough" type for representing
/// tokens.
/// Additionally, rowan uses `TextUnit` and `TextRange` types to
/// represent utf8 offsets and ranges.
use rowan::SmolStr;
/// Let's start with defining all kinds of tokens and
/// composite nodes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(non_camel_case_types)]
#[repr(u16)]
enum SyntaxKind {
L_PAREN = 0, // '('
R_PAREN, // ')'
WORD, // '+', '15'
WHITESPACE, // whitespace is explicit
ERROR, // as well as errors
// composite nodes
LIST, // `(+ 2 3)`
ATOM, // `+`, `15`, wraps a WORD token
ROOT, // top-level node: a list of s-expressions
}
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
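// Round-trip sketch: `Lang::kind_to_raw(WORD)` yields `rowan::SyntaxKind(2)`
// (the `#[repr(u16)]` discriminant), and `Lang::kind_from_raw` maps that raw
// value back to `WORD`; the assert guards against out-of-range raw kinds.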
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNodes exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, a syntax tree can be built even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum SexpRes {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, an atom, a closing paren,
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else |
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
fn eval(&self) -> Option<i64> {
let op = match self.sexps().nth(0)?.kind() {
SexpKind::Atom(atom) => atom.as_op()?,
_ => return None,
};
let arg1 = self.sexps().nth(1)?.eval()?;
let arg2 = self.sexps().nth(2)?.eval()?;
let res = match op {
Op::Add => arg1 + arg2,
Op::Sub => arg1 - arg2,
Op::Mul => arg1 * arg2,
Op::Div if arg2 == 0 => return None,
Op::Div => arg1 / arg2,
};
Some(res)
}
}
impl Sexp {
fn eval(&self) -> Option<i64> {
match self.kind() {
SexpKind::Atom(atom) => atom.eval(),
SexpKind::List(list) => list.eval(),
}
}
}
/// Let's test the eval!
fn main() {
let sexps = "
92
(+ 62 30)
(/ 92 0)
nan
(+ (* 15 2) 62)
";
let root = parse(sexps);
let res = root.syntax().sexps().map(|it| it.eval()).collect::<Vec<_>>();
eprintln!("{:?}", res);
assert_eq!(res, vec![Some(92), Some(92), None, None, Some(92),])
}
fn lex(text: &str) -> Vec<(SyntaxKind, SmolStr)> {
fn tok(t: SyntaxKind) -> m_lexer::TokenKind {
m_lexer::TokenKind(rowan::SyntaxKind::from(t).0)
}
fn kind(t: m_lexer::TokenKind) -> SyntaxKind {
match t.0 {
0 => L_PAREN,
1 => R_PAREN,
2 => WORD,
3 => WHITESPACE,
4 => ERROR,
_ => unreachable!(),
}
}
let lexer = m_lexer::LexerBuilder::new()
.error_token(tok(ERROR))
.tokens(&[
(tok(L_PAREN), r"\("),
(tok(R_PAREN), r"\)"),
(tok(WORD), r"[^\s()]+"),
(tok(WHITESPACE), r"\s+"),
])
.build();
lexer
.tokenize(text)
.into_iter()
.map(|t| (t.len, kind(t.kind)))
.scan(0usize, |start_offset, (len, kind)| {
let s: SmolStr = text[*start_offset..*start_offset + len].into();
*start_offset += len;
Some((kind, s))
})
.collect()
}
| {
None
} | conditional_block |
ifd.rs | //! Function for reading TIFF tags
use std::io::{self, Read, Seek};
use std::collections::{HashMap};
use super::stream::{ByteOrder, SmartReader, EndianReader};
use self::Value::{Unsigned, List};
macro_rules! tags {
{$(
$tag:ident
$val:expr;
)*} => {
/// TIFF tag
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum Tag {
$($tag,)*
Unknown(u16)
}
impl Tag {
pub fn from_u16(n: u16) -> Tag {
$(if n == $val { Tag::$tag } else)* {
Tag::Unknown(n)
}
}
}
}
}
// taken from https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf Appendix A
// TagName Value; // in hex, Tag TYPE, Number of Values
tags! {
NewSubfileType 254; // FE LONG 1
SubfileType 255; // FF SHORT 1
ImageWidth 256; // 100 SHORT or LONG 1
ImageLength 257; // 101 SHORT or LONG 1
BitsPerSample 258; // 102 SHORT SamplesPerPixel
Compression 259; // 103 SHORT 1
PhotometricInterpretation 262; // 106 SHORT
Threshholding 263; // 107 SHORT 1
CellWidth 264; // 108 SHORT 1
CellLength 265; // 109 SHORT 1
FillOrder 266; // 10A SHORT 1
DocumentName 269; // 10D ASCII
ImageDescription 270; // 10E ASCII
Make 271; // 10F ASCII
Model 272; // 110 ASCII
StripOffsets 273; // 111 SHORT or LONG StripsPerImage
Orientation 274; // 112 SHORT 1
SamplesPerPixel 277; // 115 SHORT 1
RowsPerStrip 278; // 116 SHORT or LONG 1
StripByteCounts 279; // 117 LONG or SHORT StripsPerImage
MinSampleValue 280; // 118 SHORT SamplesPerPixel
MaxSampleValue 281; // 119 SHORT SamplesPerPixel
XResolution 282; // 11A RATIONAL 1
YResolution 283; // 11B RATIONAL 1
PlanarConfiguration 284; // 11C SHORT 1
PageName 285; // 11D ASCII
XPosition 286; // 11E RATIONAL
YPosition 287; // 11F RATIONAL
FreeOffsets 288; // 120 LONG
FreeByteCounts 289; // 121 LONG
GrayResponseUnit 290; // 122 SHORT
GrayResponseCurve 291; // 123 SHORT 2**BitsPerSample
T4Options 292; // 124 LONG 1
T6Options 293; // 125 LONG 1
ResolutionUnit 296; // 128 SHORT 1
PageNumber 297; // 129 SHORT 2
TransferFunction 301; // 12D SHORT
Software 305; // 131 ASCII
DateTime 306; // 132 ASCII 20
Artist 315; // 13B ASCII
HostComputer 316; // 13C ASCII
Predictor 317; // 13D SHORT 1
WhitePoint 318; // 13E RATIONAL 2
PrimaryChromaticities 319; // 13F RATIONAL 6
ColorMap 320; // 140 SHORT 3 * (2**BitsPerSample)
HalftoneHints 321; // 141 SHORT 2
TileWidth 322; // 142 SHORT or LONG 1
TileLength 323; // 143 SHORT or LONG 1
TileOffsets 324; // 144 LONG TilesPerImage
TileByteCounts 325; // 145 SHORT or LONG TilesPerImage
InkSet 332; // 14C SHORT 1
InkNames 333; // 14D ASCII total number of chars in all ink names
NumberOfInks 334; // 14E SHORT 1
DotRange 336; // 150 BYTE or SHORT 2, or 2*NumberOfInks
TargetPrinter 337; // 151 ASCII any
ExtraSamples 338; // 152 BYTE number of extra components
SampleFormat 339; // 153 SHORT SamplesPerPixel
SMinSampleValue 340; // 154 Any SamplesPerPixel
SMaxSampleValue 341; // 155 Any SamplesPerPixel
TransferRange 342; // 156 SHORT 6
JPEGProc 512; // 200 SHORT 1
JPEGInterchangeFormat 513; // 201 LONG 1
JPEGInterchangeFormatLngth 514; // 202 LONG 1
JPEGRestartInterval 515; // 203 SHORT 1
JPEGLosslessPredictors 517; // 205 SHORT SamplesPerPixel
JPEGPointTransforms 518; // 206 SHORT SamplesPerPixel
JPEGQTables 519; // 207 LONG SamplesPerPixel
JPEGDCTables 520; // 208 LONG SamplesPerPixel
JPEGACTables 521; // 209 LONG SamplesPerPixel
YCbCrCoefficients 529; // 211 RATIONAL 3
YCbCrSubSampling 530; // 212 SHORT 2
YCbCrPositioning 531; // 213 SHORT 1
ReferenceBlackWhite 532; // 214 LONG 2*SamplesPerPixel
Copyright 33432; // 8298 ASCII Any
}
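// Quick sketch of the generated API: `Tag::from_u16(256)` yields
// `Tag::ImageWidth`, while an unlisted value such as `Tag::from_u16(60000)`
// falls through to `Tag::Unknown(60000)`.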
// Note: These tags appear in the order they are mentioned in the TIFF reference
// https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
// tags!{
// // Baseline tags:
// Artist 315; // TODO add support
// // grayscale images PhotometricInterpretation 1 or 3
// BitsPerSample 258;
// CellLength 265; // TODO add support
// CellWidth 264; // TODO add support
// // palette-color images (PhotometricInterpretation 3)
// ColorMap 320; // TODO add support
// Compression 259; // TODO add support for 2 and 32773
// Copyright 33432; // TODO add support
// DateTime 306; // TODO add support
// ExtraSamples 338; // TODO add support
// FillOrder 266; // TODO add support
// FreeByteCounts 289; // TODO add support
// FreeOffsets 288; // TODO add support
// GrayResponseCurve 291; // TODO add support
// GrayResponseUnit 290; // TODO add support
// HostComputer 316; // TODO add support
// ImageDescription 270; // TODO add support
// ImageLength 257;
// ImageWidth 256;
// Make 271; // TODO add support
// MaxSampleValue 281; // TODO add support
// MinSampleValue 280; // TODO add support
// Model 272; // TODO add support
// NewSubfileType 254; // TODO add support
// Orientation 274; // TODO add support
// PhotometricInterpretation 262;
// PlanarConfiguration 284;
// ResolutionUnit 296; // TODO add support
// RowsPerStrip 278;
// SamplesPerPixel 277;
// Software 305;
// StripByteCounts 279;
// StripOffsets 273;
// SubfileType 255; // TODO add support
// Threshholding 263; // TODO add support
// XResolution 282;
// YResolution 283;
// // Advanced tags
// Predictor 317;
// // TIFF Extensions
// // Section 11 CCITT Bilevel Encodings
// // Compression
// T4Options 292;
// T6Options 293;
// // Section 12 Document Storage and Retrieval
// DocumentName 269;
// PageName 285;
// PageNumber 297;
// XPosition 286;
// YPosition 287;
// // Section 13: LZW Compression
// // Section 14: Differencing Predictor
// // Section 15: Tiled Images -- Do not use both strip-oriented and tile-oriented fields in the same TIFF file
// TileWidth 322;
// TileLength 323;
// TileOffsets 324;
// TileByteCounts 325;
// // Section 16: CMYK Images
// InkSet 332;
// NumberOfInks 334;
// InkNames 333;
// DotRange 336;
// TargetPrinter 337;
// // Section 17: HalftoneHints
// HalftoneHints 321;
// // Section 18: Associated Alpha Handling
// ExtraSamples 338;
// // Section 19: Data Sample Format
// SampleFormat 339;
// SMinSampleValue 340;
// SMaxSampleValue 341;
// // Section 20: RGB Image Colorimetry
// WhitePoint 318;
// PrimaryChromaticities 319;
// TransferFunction 301;
// TransferRange 342;
// ReferenceBlackWhite 532;
// // Section 21: YCbCr Images
// }
enum_from_primitive! {
#[derive(Clone, Copy, Debug)]
pub enum Type {
BYTE = 1,
ASCII = 2,
SHORT = 3,
LONG = 4,
RATIONAL = 5,
SBYTE = 6,
UNDEFINED = 7,
SSHORT = 8,
SLONG = 9,
SRATIONAL = 10,
FLOAT = 11,
DOUBLE = 12,
}
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value {
//Signed(i32),
Unsigned(u32),
List(Vec<Value>)
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value_Type {
Value,
Offset
}
impl Value {
pub fn as_u32(self) -> ::image::ImageResult<u32> {
match self {
Unsigned(val) => Ok(val),
val => Err(::image::ImageError::FormatError(format!(
"Expected unsigned integer, {:?} found.", val
)))
}
}
pub fn as_u32_vec(self) -> ::image::ImageResult<Vec<u32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec.into_iter() {
new_vec.push(try!(v.as_u32()))
}
Ok(new_vec)
},
Unsigned(val) => Ok(vec![val]),
//_ => Err(::image::FormatError("Tag data malformed.".to_string()))
}
}
}
pub struct Entry {
type_: Type,
count: u32,
offset: [u8; 4]
}
impl ::std::fmt::Debug for Entry {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
fmt.write_str(&format!("Entry {{ type_: {:?}, count: {:?}, offset: {:?} }}",
self.type_,
self.count,
&self.offset,
// String::from_utf8_lossy ( &self.offset ),
))
}
}
impl Entry {
pub fn | (type_: Type, count: u32, offset: [u8; 4] ) -> Entry {
Entry {
type_: type_,
count: count,
offset: offset,
}
}
/// Returns a mem_reader for the offset/value field
pub fn r(&self, byte_order: ByteOrder) -> SmartReader<io::Cursor<Vec<u8>>> {
SmartReader::wrap(
io::Cursor::new(self.offset.to_vec()),
byte_order
)
}
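// Usage sketch: `entry.r(byte_order).read_u16()` decodes the first two bytes
// of the inline offset field with the file's endianness, as `val` does below.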
// Refactor this to remove the dependency on decoder,
pub fn val<R: Read + Seek>(&self, decoder: &mut super::TIFFDecoder<R>)
-> ::image::ImageResult<Value> {
let bo = decoder.byte_order();
match (self.type_, self.count) {
// TODO check if this could give wrong results
// at a different endianness of file/computer.
(Type::BYTE, 1) => Ok(Unsigned(self.offset[0] as u32)),
(Type::SHORT, 1) => Ok(Unsigned(try!(self.r(bo).read_u16()) as u32)),
(Type::SHORT, 2) => {
let mut r = self.r(bo);
Ok(List(vec![
Unsigned(try!(r.read_u16()) as u32),
Unsigned(try!(r.read_u16()) as u32)
]))
},
(Type::SHORT, n) => {
let mut v = Vec::with_capacity(n as usize);
try!(decoder.goto_offset(try!(self.r(bo).read_u32())));
for _ in 0.. n {
v.push(Unsigned(try!(decoder.read_short()) as u32))
}
Ok(List(v))
},
(Type::LONG, 1) => Ok(Unsigned(try!(self.r(bo).read_u32()))),
(Type::LONG, n) => {
let mut v = Vec::with_capacity(n as usize);
try!(decoder.goto_offset(try!(self.r(bo).read_u32())));
for _ in 0.. n {
v.push(Unsigned(try!(decoder.read_long())))
}
Ok(List(v))
}
_ => Err(::image::ImageError::UnsupportedError("Unsupported data type.".to_string()))
}
}
}
/// Type representing an Image File Directory
pub type Directory = HashMap<Tag, Entry>;
| new | identifier_name |
ifd.rs | //! Function for reading TIFF tags
use std::io::{self, Read, Seek};
use std::collections::{HashMap};
use super::stream::{ByteOrder, SmartReader, EndianReader};
use self::Value::{Unsigned, List};
macro_rules! tags {
{$(
$tag:ident
$val:expr;
)*} => {
/// TIFF tag
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum Tag {
$($tag,)*
Unknown(u16)
}
impl Tag {
pub fn from_u16(n: u16) -> Tag {
$(if n == $val { Tag::$tag } else)* {
Tag::Unknown(n)
}
}
}
}
}
// taken from https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf Appendix A
// TagName Value; // in hex, Tag TYPE, Number of Values
tags! {
NewSubfileType 254; // FE LONG 1
SubfileType 255; // FF SHORT 1
ImageWidth 256; // 100 SHORT or LONG 1
ImageLength 257; // 101 SHORT or LONG 1
BitsPerSample 258; // 102 SHORT SamplesPerPixel
Compression 259; // 103 SHORT 1
PhotometricInterpretation 262; // 106 SHORT
Threshholding 263; // 107 SHORT 1
CellWidth 264; // 108 SHORT 1
CellLength 265; // 109 SHORT 1
FillOrder 266; // 10A SHORT 1
DocumentName 269; // 10D ASCII
ImageDescription 270; // 10E ASCII
Make 271; // 10F ASCII
Model 272; // 110 ASCII
StripOffsets 273; // 111 SHORT or LONG StripsPerImage
Orientation 274; // 112 SHORT 1
SamplesPerPixel 277; // 115 SHORT 1
RowsPerStrip 278; // 116 SHORT or LONG 1
StripByteCounts 279; // 117 LONG or SHORT StripsPerImage
MinSampleValue 280; // 118 SHORT SamplesPerPixel
MaxSampleValue 281; // 119 SHORT SamplesPerPixel
XResolution 282; // 11A RATIONAL 1
YResolution 283; // 11B RATIONAL 1
PlanarConfiguration 284; // 11C SHORT 1
PageName 285; // 11D ASCII
XPosition 286; // 11E RATIONAL
YPosition 287; // 11F RATIONAL
FreeOffsets 288; // 120 LONG
FreeByteCounts 289; // 121 LONG
GrayResponseUnit 290; // 122 SHORT
GrayResponseCurve 291; // 123 SHORT 2**BitsPerSample
T4Options 292; // 124 LONG 1
T6Options 293; // 125 LONG 1
ResolutionUnit 296; // 128 SHORT 1
PageNumber 297; // 129 SHORT 2
TransferFunction 301; // 12D SHORT
Software 305; // 131 ASCII
DateTime 306; // 132 ASCII 20
Artist 315; // 13B ASCII
HostComputer 316; // 13C ASCII
Predictor 317; // 13D SHORT 1
WhitePoint 318; // 13E RATIONAL 2
PrimaryChromaticities 319; // 13F RATIONAL 6
ColorMap 320; // 140 SHORT 3 * (2**BitsPerSample)
HalftoneHints 321; // 141 SHORT 2
TileWidth 322; // 142 SHORT or LONG 1
TileLength 323; // 143 SHORT or LONG 1
TileOffsets 324; // 144 LONG TilesPerImage
TileByteCounts 325; // 145 SHORT or LONG TilesPerImage
InkSet 332; // 14C SHORT 1
InkNames 333; // 14D ASCII total number of chars in all ink names
NumberOfInks 334; // 14E SHORT 1
DotRange 336; // 150 BYTE or SHORT 2, or 2*NumberOfInks
TargetPrinter 337; // 151 ASCII any
ExtraSamples 338; // 152 BYTE number of extra components
SampleFormat 339; // 153 SHORT SamplesPerPixel
SMinSampleValue 340; // 154 Any SamplesPerPixel
SMaxSampleValue 341; // 155 Any SamplesPerPixel
TransferRange 342; // 156 SHORT 6
JPEGProc 512; // 200 SHORT 1
JPEGInterchangeFormat 513; // 201 LONG 1
JPEGInterchangeFormatLngth 514; // 202 LONG 1
JPEGRestartInterval 515; // 203 SHORT 1
JPEGLosslessPredictors 517; // 205 SHORT SamplesPerPixel
JPEGPointTransforms 518; // 206 SHORT SamplesPerPixel
JPEGQTables 519; // 207 LONG SamplesPerPixel
JPEGDCTables 520; // 208 LONG SamplesPerPixel
JPEGACTables 521; // 209 LONG SamplesPerPixel
YCbCrCoefficients 529; // 211 RATIONAL 3
YCbCrSubSampling 530; // 212 SHORT 2
YCbCrPositioning 531; // 213 SHORT 1
ReferenceBlackWhite 532; // 214 LONG 2*SamplesPerPixel
Copyright 33432; // 8298 ASCII Any
}
| // tags!{
// // Baseline tags:
// Artist 315; // TODO add support
// // grayscale images PhotometricInterpretation 1 or 3
// BitsPerSample 258;
// CellLength 265; // TODO add support
// CellWidth 264; // TODO add support
// // palette-color images (PhotometricInterpretation 3)
// ColorMap 320; // TODO add support
// Compression 259; // TODO add support for 2 and 32773
// Copyright 33432; // TODO add support
// DateTime 306; // TODO add support
// ExtraSamples 338; // TODO add support
// FillOrder 266; // TODO add support
// FreeByteCounts 289; // TODO add support
// FreeOffsets 288; // TODO add support
// GrayResponseCurve 291; // TODO add support
// GrayResponseUnit 290; // TODO add support
// HostComputer 316; // TODO add support
// ImageDescription 270; // TODO add support
// ImageLength 257;
// ImageWidth 256;
// Make 271; // TODO add support
// MaxSampleValue 281; // TODO add support
// MinSampleValue 280; // TODO add support
// Model 272; // TODO add support
// NewSubfileType 254; // TODO add support
// Orientation 274; // TODO add support
// PhotometricInterpretation 262;
// PlanarConfiguration 284;
// ResolutionUnit 296; // TODO add support
// RowsPerStrip 278;
// SamplesPerPixel 277;
// Software 305;
// StripByteCounts 279;
// StripOffsets 273;
// SubfileType 255; // TODO add support
// Threshholding 263; // TODO add support
// XResolution 282;
// YResolution 283;
// // Advanced tags
// Predictor 317;
// // TIFF Extensions
// // Section 11 CCITT Bilevel Encodings
// // Compression
// T4Options 292;
// T6Options 293;
// // Section 12 Document Storage and Retrieval
// DocumentName 269;
// PageName 285;
// PageNumber 297;
// XPosition 286;
// YPosition 287;
// // Section 13: LZW Compression
// // Section 14: Differencing Predictor
// // Section 15: Tiled Images -- Do not use both strip-oriented and tile-oriented fields in the same TIFF file
// TileWidth 322;
// TileLength 323;
// TileOffsets 324;
// TileByteCounts 325;
// // Section 16: CMYK Images
// InkSet 332;
// NumberOfInks 334;
// InkNames 333;
// DotRange 336;
// TargetPrinter 337;
// // Section 17: HalftoneHints
// HalftoneHints 321;
// // Section 18: Associated Alpha Handling
// ExtraSamples 338;
// // Section 19: Data Sample Format
// SampleFormat 339;
// SMinSampleValue 340;
// SMaxSampleValue 341;
// // Section 20: RGB Image Colorimetry
// WhitePoint 318;
// PrimaryChromaticities 319;
// TransferFunction 301;
// TransferRange 342;
// ReferenceBlackWhite 532;
// // Section 21: YCbCr Images
// }
enum_from_primitive! {
#[derive(Clone, Copy, Debug)]
pub enum Type {
BYTE = 1,
ASCII = 2,
SHORT = 3,
LONG = 4,
RATIONAL = 5,
SBYTE = 6,
UNDEFINED = 7,
SSHORT = 8,
SLONG = 9,
SRATIONAL = 10,
FLOAT = 11,
DOUBLE = 12,
}
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value {
//Signed(i32),
Unsigned(u32),
List(Vec<Value>)
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value_Type {
Value,
Offset
}
impl Value {
pub fn as_u32(self) -> ::image::ImageResult<u32> {
match self {
Unsigned(val) => Ok(val),
val => Err(::image::ImageError::FormatError(format!(
"Expected unsigned integer, {:?} found.", val
)))
}
}
pub fn as_u32_vec(self) -> ::image::ImageResult<Vec<u32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec.into_iter() {
new_vec.push(try!(v.as_u32()))
}
Ok(new_vec)
},
Unsigned(val) => Ok(vec![val]),
//_ => Err(::image::FormatError("Tag data malformed.".to_string()))
}
}
}
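// Illustrative: a SHORT entry with count 2 decodes to
// `List(vec![Unsigned(a), Unsigned(b)])`, so `as_u32_vec()` returns
// `Ok(vec![a, b])`, while calling `as_u32()` on a `List` is a `FormatError`.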
pub struct Entry {
type_: Type,
count: u32,
offset: [u8; 4]
}
impl ::std::fmt::Debug for Entry {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
fmt.write_str(&format!("Entry {{ type_: {:?}, count: {:?}, offset: {:?} }}",
self.type_,
self.count,
&self.offset,
// String::from_utf8_lossy ( &self.offset ),
))
}
}
impl Entry {
pub fn new(type_: Type, count: u32, offset: [u8; 4] ) -> Entry {
Entry {
type_: type_,
count: count,
offset: offset,
}
}
/// Returns a mem_reader for the offset/value field
pub fn r(&self, byte_order: ByteOrder) -> SmartReader<io::Cursor<Vec<u8>>> {
SmartReader::wrap(
io::Cursor::new(self.offset.to_vec()),
byte_order
)
}
// Refactor this to remove the dependency on decoder,
pub fn val<R: Read + Seek>(&self, decoder: &mut super::TIFFDecoder<R>)
-> ::image::ImageResult<Value> {
let bo = decoder.byte_order();
match (self.type_, self.count) {
// TODO check if this could give wrong results
// at a different endianness of file/computer.
(Type::BYTE, 1) => Ok(Unsigned(self.offset[0] as u32)),
(Type::SHORT, 1) => Ok(Unsigned(try!(self.r(bo).read_u16()) as u32)),
(Type::SHORT, 2) => {
let mut r = self.r(bo);
Ok(List(vec![
Unsigned(try!(r.read_u16()) as u32),
Unsigned(try!(r.read_u16()) as u32)
]))
},
(Type::SHORT, n) => {
let mut v = Vec::with_capacity(n as usize);
try!(decoder.goto_offset(try!(self.r(bo).read_u32())));
for _ in 0.. n {
v.push(Unsigned(try!(decoder.read_short()) as u32))
}
Ok(List(v))
},
(Type::LONG, 1) => Ok(Unsigned(try!(self.r(bo).read_u32()))),
(Type::LONG, n) => {
let mut v = Vec::with_capacity(n as usize);
try!(decoder.goto_offset(try!(self.r(bo).read_u32())));
for _ in 0.. n {
v.push(Unsigned(try!(decoder.read_long())))
}
Ok(List(v))
}
_ => Err(::image::ImageError::UnsupportedError("Unsupported data type.".to_string()))
}
}
}
/// Type representing an Image File Directory
pub type Directory = HashMap<Tag, Entry>; |
// Note: These tags appear in the order they are mentioned in the TIFF reference
// https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf | random_line_split |
command.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Command Buffer device interface
use std::ops::Deref;
use std::collections::hash_set::{self, HashSet};
use {Resources, IndexType, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integer formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// color views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
/// Generate the mipmap chain of a shader resource view
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
/// Clear a depth and/or stencil target
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
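// Illustrative test (not in the original source): per the table above,
// `[f32; 3]` maps to `Float[0, 1, 2, 0]`, so the missing fourth component is
// filled from index 0 rather than zeroed.
#[cfg(test)]
mod clear_color_from_tests {
    use super::ClearColor;

    #[test]
    fn three_component_arrays_reuse_index_zero() {
        let c: ClearColor = [0.25f32, 0.5, 0.75].into();
        assert_eq!(c, ClearColor::Float([0.25, 0.5, 0.75, 0.25]));
    }
}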
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor {
ClearColor::Uint([v, 0, 0, 0])
}
}
/// Information about what is accessed by a batch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access information
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clears access information
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_reads.insert(buffer.clone());
}
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that the GPU will read from
pub fn mapped_reads(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that the GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Are there any mapped buffer reads?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Are there any mapped buffer writes?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
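// Typical flow (illustrative sketch, not from the original source; the buffer
// handles are hypothetical). Accesses are recorded while encoding and then
// taken exclusively for the duration of a submission:
//
// let mut info = AccessInfo::new();
// info.buffer_read(&staging_upload);   // only mapped buffers are recorded
// info.buffer_write(&staging_readback);
// let guard = info.take_accesses()?;   // Err(AccessOverlap) if contended
// // ... execute the command buffer; the guard's Drop releases every access.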
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that the GPU will read from,
/// with exclusive access to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that the GPU will write to,
/// with exclusive access to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
/// Returns all mapped buffers, reads first and then writes,
/// with exclusive access to their mappings
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads(),
snd: self.inner.mapped_writes(),
}
}
}
impl<'a, R: Resources> Deref for AccessGuard<'a, R> {
type Target = AccessInfo<R>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a, R: Resources> Drop for AccessGuard<'a, R> {
fn drop(&mut self) { | }
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffers<'a, R: Resources> {
buffers: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffers<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.buffers.next().map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffersChain<'a, R: Resources> {
fst: AccessInfoBuffers<'a, R>,
snd: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffersChain<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.fst.next().or_else(|| self.snd.next())
.map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
} | for buffer in self.inner.mapped_reads().chain(self.inner.mapped_writes()) {
unsafe {
buffer.mapping().unwrap().release_access();
} | random_line_split |
command.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Command Buffer device interface
use std::ops::Deref;
use std::collections::hash_set::{self, HashSet};
use {Resources, IndexType, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integer formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// color views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
/// Generate the mipmap chain of a shader resource view
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
/// Clear a depth and/or stencil target
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
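// Encoding sketch (illustrative only; `cmd_buf`, `pso`, `vbs`, `index_buf`
// and the counts are hypothetical values, and `IndexType::U16` is an assumed
// variant name):
//
// cmd_buf.bind_pipeline_state(pso);
// cmd_buf.bind_vertex_buffers(vbs);
// cmd_buf.bind_index(index_buf, IndexType::U16);
// cmd_buf.call_draw_indexed(0, index_count, 0, Some((instance_count, 0)));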
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor {
ClearColor::Uint([v, 0, 0, 0])
}
}
/// Information about what is accessed by a batch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access information
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clears access information
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_reads.insert(buffer.clone());
}
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that the GPU will read from
pub fn | (&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that the GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Are there any mapped buffer reads?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Are there any mapped buffer writes?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that the GPU will read from,
/// with exclusive access to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that the GPU will write to,
/// with exclusive access to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
/// Returns all mapped buffers, reads first and then writes,
/// with exclusive access to their mappings
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads(),
snd: self.inner.mapped_writes(),
}
}
}
impl<'a, R: Resources> Deref for AccessGuard<'a, R> {
type Target = AccessInfo<R>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a, R: Resources> Drop for AccessGuard<'a, R> {
fn drop(&mut self) {
for buffer in self.inner.mapped_reads().chain(self.inner.mapped_writes()) {
unsafe {
buffer.mapping().unwrap().release_access();
}
}
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffers<'a, R: Resources> {
buffers: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffers<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.buffers.next().map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
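// Usage sketch (illustrative, not from the original source): a backend can
// drain pending mapped writes before executing commands; `flush` stands in
// for a hypothetical backend hook.
//
// for (buffer, mapping) in guard.access_mapped_writes() {
//     flush(buffer, mapping);
// }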
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffersChain<'a, R: Resources> {
fst: AccessInfoBuffers<'a, R>,
snd: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffersChain<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.fst.next().or_else(|| self.snd.next())
.map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
| mapped_reads | identifier_name |
command.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Command Buffer device interface
use std::ops::Deref;
use std::collections::hash_set::{self, HashSet};
use {Resources, IndexType, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integer formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// color views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
/// Generate the mipmap chain of a shader resource view
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
/// Clear a depth and/or stencil target
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor {
ClearColor::Uint([v, 0, 0, 0])
}
}
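// Scalar conversions (illustrative note, not from the original source): a
// single value lands in component 0 and the rest are zeroed, unlike the
// array impls above, which reuse index 0 for missing components.
//
// assert_eq!(ClearColor::from(1.0f32), ClearColor::Float([1.0, 0.0, 0.0, 0.0]));
// assert_eq!(ClearColor::from(2u32), ClearColor::Uint([2, 0, 0, 0]));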
/// Information about what is accessed by a batch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access information
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clears access information
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() |
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that the GPU will read from
pub fn mapped_reads(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that the GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Are there any mapped buffer reads?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Are there any mapped buffer writes?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that the GPU will read from,
/// with exclusive access to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that the GPU will write to,
/// with exclusive access to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
/// Returns all mapped buffers, reads first and then writes,
/// with exclusive access to their mappings
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads(),
snd: self.inner.mapped_writes(),
}
}
}
impl<'a, R: Resources> Deref for AccessGuard<'a, R> {
type Target = AccessInfo<R>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a, R: Resources> Drop for AccessGuard<'a, R> {
fn drop(&mut self) {
for buffer in self.inner.mapped_reads().chain(self.inner.mapped_writes()) {
unsafe {
buffer.mapping().unwrap().release_access();
}
}
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffers<'a, R: Resources> {
buffers: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffers<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.buffers.next().map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffersChain<'a, R: Resources> {
fst: AccessInfoBuffers<'a, R>,
snd: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffersChain<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.fst.next().or_else(|| self.snd.next())
.map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
| {
self.mapped_reads.insert(buffer.clone());
} | conditional_block |
command.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Command Buffer device interface
use std::ops::Deref;
use std::collections::hash_set::{self, HashSet};
use {Resources, IndexType, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integer formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// color views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
/// Generate the mipmap chain of a shader resource view
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
/// Clear a depth and/or stencil target
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor |
}
/// Information about what is accessed by a batch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access information
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clears access information
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_reads.insert(buffer.clone());
}
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that the GPU will read from
pub fn mapped_reads(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that the GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Are there any mapped buffer reads?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Are there any mapped buffer writes?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that the GPU will read from,
/// with exclusive access to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that the GPU will write to,
/// with exclusive access to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
/// Returns all mapped buffers, reads first and then writes,
/// with exclusive access to their mappings
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads(),
snd: self.inner.mapped_writes(),
}
}
}
impl<'a, R: Resources> Deref for AccessGuard<'a, R> {
type Target = AccessInfo<R>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a, R: Resources> Drop for AccessGuard<'a, R> {
fn drop(&mut self) {
for buffer in self.inner.mapped_reads().chain(self.inner.mapped_writes()) {
unsafe {
buffer.mapping().unwrap().release_access();
}
}
}
}
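// Scope sketch (illustrative, not from the original source): release is tied
// to the guard's lifetime rather than to an explicit call.
//
// {
//     let _guard = info.take_accesses()?;
//     // ... submit ...
// } // all taken accesses are released here by Drop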
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffers<'a, R: Resources> {
buffers: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffers<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.buffers.next().map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuardBuffersChain<'a, R: Resources> {
fst: AccessInfoBuffers<'a, R>,
snd: AccessInfoBuffers<'a, R>
}
impl<'a, R: Resources> Iterator for AccessGuardBuffersChain<'a, R> {
type Item = (&'a handle::RawBuffer<R>, &'a mut R::Mapping);
fn next(&mut self) -> Option<Self::Item> {
self.fst.next().or_else(|| self.snd.next())
.map(|buffer| unsafe {
(buffer, buffer.mapping().unwrap().use_access())
})
}
}
| {
ClearColor::Uint([v, 0, 0, 0])
} | identifier_body |
converter.rs | use spirv_cross::{
glsl,
spirv,
};
use shaderc;
use std::{
iter,
path::{Path, PathBuf},
fs::File,
io::Read,
collections::HashMap,
};
use GlslVersion;
use Stage;
use ConvertedShader;
use error::Error;
#[derive(Debug, Clone)]
pub struct ConverterOptions {
/// Additional directories to search in when resolving `#include` statements.
///
/// The path to the file being converted is always implicitly used as a search path, taking
/// priority over any paths listed here.
///
/// Next, the paths listed here are tried in order.
pub include_search_paths: Vec<PathBuf>,
/// Macros to `#define` during compilation. Use `None` to define the macro without a value.
pub macros: HashMap<String, Option<String>>,
pub target_version: GlslVersion,
}
impl Default for ConverterOptions {
fn | () -> Self {
ConverterOptions {
include_search_paths: Vec::new(),
macros: HashMap::new(),
target_version: GlslVersion::V1_00Es,
}
}
}
impl ConverterOptions {
pub fn new() -> Self {
Self::default()
}
fn resolve_include(&self,
name: &str,
include_type: shaderc::IncludeType,
from_path: &str,
_depth: usize) -> Result<shaderc::ResolvedInclude, String> {
// Resolve relative includes against the including file's directory first,
// matching the search order documented on `include_search_paths`.
let path = match (include_type, Path::new(from_path).parent()) {
(shaderc::IncludeType::Relative, Some(parent_path)) => {
let search_paths_and_parent: Vec<_> = iter::once(parent_path)
.chain(self.include_search_paths.iter().map(|path_buf_ref| {
path_buf_ref as &Path
}))
.collect();
find_source_file(name, &search_paths_and_parent)?
}
_ => find_source_file(name, &self.include_search_paths)?
};
let mut content = String::new();
File::open(&path)
.and_then(|mut include_file| include_file.read_to_string(&mut content))
.map_err(|err| err.to_string())?;
Ok(shaderc::ResolvedInclude {
resolved_name: path.to_string_lossy().to_string(),
content,
})
}
}
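// Construction sketch (illustrative, not from the original source):
//
// let mut options = ConverterOptions::new();
// options.include_search_paths.push(PathBuf::from("shaders/include"));
// options.macros.insert("USE_FOG".to_string(), None);           // plain #define
// options.macros.insert("LIGHT_COUNT".to_string(), Some("4".to_string()));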
pub struct Converter {
compiler: shaderc::Compiler,
}
impl Converter {
pub fn new() -> Result<Self, Error> {
let compiler = shaderc::Compiler::new()
.ok_or(Error::InitFailed)?;
Ok(Self {
compiler
})
}
/// Convert an HLSL file to GLSL.
///
/// # Arguments
///
/// * `source_path` - Location of HLSL source file.
/// * `stage` - Type of GLSL shader to create.
/// * `entry_point` - Name of function to use as entry point for this stage in the HLSL source.
/// * `options` - Converter configuration.
pub fn convert<P>(
&mut self,
source_path: P,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<ConvertedShader, Error>
where P: Into<PathBuf>
{
let source_path = source_path.into();
let source_filename = source_path.to_string_lossy();
let mut source = String::new();
File::open(&source_path)?.read_to_string(&mut source)?;
let spirv = self.hlsl_to_spirv(&source,
source_filename.as_ref(),
stage,
entry_point,
options)?;
let module = spirv::Module::from_words(&spirv);
let mut ast = spirv::Ast::<glsl::Target>::parse(&module)?;
spirv::Compile::set_compiler_options(&mut ast, &glsl::CompilerOptions {
version: options.target_version,
vertex: glsl::CompilerVertexOptions {
invert_y: false,
transform_clip_space: false,
},
})?;
let shader = ast.compile()?;
let uniforms = find_uniform_mappings(&ast)?;
Ok(ConvertedShader {
shader,
uniforms,
})
}
fn hlsl_to_spirv(&mut self,
source: &str,
source_filename: &str,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<Vec<u32>, Error> {
let mut opts = shaderc::CompileOptions::new().ok_or(Error::InitFailed)?;
opts.set_source_language(shaderc::SourceLanguage::HLSL);
opts.set_target_env(shaderc::TargetEnv::Vulkan, 0);
opts.set_optimization_level(shaderc::OptimizationLevel::Performance);
opts.set_generate_debug_info();
opts.set_include_callback(|name, include_type, from_path, depth| {
options.resolve_include(name, include_type, from_path, depth)
});
for (macro_name, macro_value) in options.macros.iter() {
opts.add_macro_definition(macro_name, macro_value.as_ref().map(|val| val.as_str()));
}
let kind = match stage {
Stage::Fragment => shaderc::ShaderKind::Fragment,
Stage::Vertex => shaderc::ShaderKind::Vertex,
};
let artifact = self.compiler.compile_into_spirv(
&source,
kind,
source_filename,
entry_point,
Some(&opts))?;
if artifact.get_num_warnings() > 0 {
warn!("{}", artifact.get_warning_messages());
}
Ok(artifact.as_binary().to_vec())
}
}
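// End-to-end sketch (illustrative; the path and entry point are hypothetical):
//
// let mut converter = Converter::new()?;
// let converted = converter.convert("shaders/main.hlsl", Stage::Vertex,
//                                   "vs_main", &options)?;
// // `converted.shader` holds the emitted GLSL source and
// // `converted.uniforms` maps compiled names back to the HLSL names.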
fn find_uniform_mappings(ast: &spirv::Ast<glsl::Target>)
-> Result<HashMap<String, String>, Error> {
let shader_resources = ast.get_shader_resources()?;
let mut mappings = HashMap::new();
/* discover property indices from debug names in the uniform buffers */
for uniform_buffer in shader_resources.uniform_buffers {
for member_name in get_member_names_deep(&ast, uniform_buffer.base_type_id)? {
let flat_name = format!("_{}.{}", uniform_buffer.id, member_name);
mappings.insert(flat_name, member_name);
}
}
/* samplers end up in sampled_images, separate_images and separate_samplers - final IDs
are from sampled_images (the combined sampler resource), and names are from separate_images
(the Texture2D) */
for (image_index, sampled_image) in shader_resources.sampled_images.into_iter().enumerate() {
let image = &shader_resources.separate_images[image_index];
let compiled_name = format!("_{}", sampled_image.id);
mappings.insert(compiled_name, image.name.to_string());
}
Ok(mappings)
}
fn get_member_names_deep(ast: &spirv::Ast<glsl::Target>,
struct_type_id: u32)
-> Result<Vec<String>, Error> {
let (member_types, _member_array_sizes) = match ast.get_type(struct_type_id)? {
spirv::Type::Struct { member_types, array } => (member_types, array),
_ => panic!("uniform buffer must be a struct"),
};
let mut names = Vec::new();
for (member_id, member_type) in member_types.into_iter().enumerate() {
let member_id = member_id as u32;
let member_base_name = ast.get_member_name(struct_type_id, member_id)?;
match ast.get_type(member_type)? {
spirv::Type::Struct { ref array, .. } => {
let element_names = array_member_names(&member_base_name, array);
let member_base_type = ast.get_base_type_id(member_type)?;
let child_names = get_member_names_deep(ast, member_base_type)?;
for element_name in element_names {
for child_name in child_names.iter() {
names.push(format!("{}.{}", element_name, child_name.clone()));
}
}
}
spirv::Type::Float { ref array } |
spirv::Type::Double { ref array } |
spirv::Type::Int { ref array } |
spirv::Type::Int64 { ref array } |
spirv::Type::UInt { ref array } |
spirv::Type::UInt64 { ref array } |
spirv::Type::Boolean { ref array } |
spirv::Type::Char { ref array } |
spirv::Type::Half { ref array } => {
names.extend(array_member_names(&member_base_name, array));
}
spirv::Type::Image { .. } |
spirv::Type::SampledImage { .. } |
spirv::Type::Sampler { .. } |
spirv::Type::AtomicCounter { .. } |
spirv::Type::Void |
spirv::Type::Unknown => {
let msg = format!("member of {} had an unsupported type", member_base_name);
return Err(Error::CompilationFailed(msg));
}
}
}
Ok(names)
}
fn array_member_names(base_name: &str, array_dims: &[u32]) -> Vec<String> {
if array_dims.is_empty() {
return vec![base_name.to_string()];
}
let mut array_element_names = Vec::new();
for (rank, dim) in array_dims.iter().enumerate() {
let prev_elements = array_element_names.clone();
array_element_names.clear();
for element in 0..*dim {
if rank == 0 {
array_element_names.push(format!("{}[{}]", base_name, element));
} else {
for prev_element in prev_elements.iter() {
array_element_names.push(format!("{}[{}]", prev_element, element));
}
}
}
}
array_element_names
}
fn find_source_file<P>(name: &str, source_paths: &[P]) -> Result<PathBuf, String>
where P: AsRef<Path>
{
source_paths.iter()
.filter_map(|path| {
let file_path = path.as_ref().join(name);
if file_path.exists() {
Some(file_path)
} else {
None
}
})
.next()
.ok_or_else(|| format!(
"unable to find shader file `{}` in search paths:\n{}",
name,
source_paths.iter()
.map(|path| format!(" * `{}`", path.as_ref().to_string_lossy()))
.collect::<Vec<_>>()
.join("\n"),
))
}
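// Lookup sketch (illustrative, not from the original source): earlier entries
// win, so the includer's directory — prepended by `resolve_include` — shadows
// the configured search paths.
//
// let search = [Path::new("shaders"), Path::new("shared")];
// let path = find_source_file("common.hlsl", &search)?;
// // -> "shaders/common.hlsl" if it exists, else "shared/common.hlsl"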
| default | identifier_name |
converter.rs | use spirv_cross::{
glsl,
spirv,
};
use shaderc;
use std::{
iter,
path::{Path, PathBuf},
fs::File,
io::Read,
collections::HashMap,
};
use GlslVersion;
use Stage;
use ConvertedShader;
use error::Error;
#[derive(Debug, Clone)]
pub struct ConverterOptions {
/// Additional directories to search in when resolving `#include` statements.
///
/// The path to the file being converted is always implicitly used as a search path, taking
/// priority over any paths listed here.
///
/// Next, the paths listed here are tried in order.
pub include_search_paths: Vec<PathBuf>,
/// Macros to `#define` during compilation. Use `None` to define the macro without a value.
pub macros: HashMap<String, Option<String>>,
pub target_version: GlslVersion,
}
impl Default for ConverterOptions {
fn default() -> Self {
ConverterOptions {
include_search_paths: Vec::new(),
macros: HashMap::new(),
target_version: GlslVersion::V1_00Es,
}
}
}
impl ConverterOptions {
pub fn new() -> Self {
Self::default()
}
fn resolve_include(&self,
name: &str,
include_type: shaderc::IncludeType,
from_path: &str,
_depth: usize) -> Result<shaderc::ResolvedInclude, String> {
// Resolve relative includes against the including file's directory first,
// matching the search order documented on `include_search_paths`.
let path = match (include_type, Path::new(from_path).parent()) {
(shaderc::IncludeType::Relative, Some(parent_path)) => {
let search_paths_and_parent: Vec<_> = iter::once(parent_path)
.chain(self.include_search_paths.iter().map(|path_buf_ref| {
path_buf_ref as &Path
}))
.collect();
find_source_file(name, &search_paths_and_parent)?
}
_ => find_source_file(name, &self.include_search_paths)?
};
let mut content = String::new();
File::open(&path)
.and_then(|mut include_file| include_file.read_to_string(&mut content))
.map_err(|err| err.to_string())?;
Ok(shaderc::ResolvedInclude {
resolved_name: path.to_string_lossy().to_string(),
content,
})
}
}
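// Resolution sketch (illustrative; the file names are hypothetical): a
// relative `#include "lighting.hlsl"` seen while compiling `shaders/main.hlsl`
// is tried as `shaders/lighting.hlsl` first, then against each entry of
// `include_search_paths`.
//
// let resolved = options.resolve_include("lighting.hlsl",
//     shaderc::IncludeType::Relative, "shaders/main.hlsl", 1)?;
// assert_eq!(resolved.resolved_name, "shaders/lighting.hlsl");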
pub struct Converter {
compiler: shaderc::Compiler,
}
impl Converter {
pub fn new() -> Result<Self, Error> {
let compiler = shaderc::Compiler::new()
.ok_or(Error::InitFailed)?;
Ok(Self {
compiler
})
}
/// Convert an HLSL file to GLSL.
///
/// # Arguments
///
/// * `source_path` - Location of HLSL source file.
/// * `stage` - Type of GLSL shader to create.
/// * `entry_point` - Name of function to use as entry point for this stage in the HLSL source.
/// * `options` - Converter configuration.
pub fn convert<P>(
&mut self,
source_path: P,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<ConvertedShader, Error>
where P: Into<PathBuf>
| },
})?;
let shader = ast.compile()?;
let uniforms = find_uniform_mappings(&ast)?;
Ok(ConvertedShader {
shader,
uniforms,
})
}
fn hlsl_to_spirv(&mut self,
source: &str,
source_filename: &str,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<Vec<u32>, Error> {
let mut opts = shaderc::CompileOptions::new().ok_or(Error::InitFailed)?;
opts.set_source_language(shaderc::SourceLanguage::HLSL);
opts.set_target_env(shaderc::TargetEnv::Vulkan, 0);
opts.set_optimization_level(shaderc::OptimizationLevel::Performance);
opts.set_generate_debug_info();
opts.set_include_callback(|name, include_type, from_path, depth| {
options.resolve_include(name, include_type, from_path, depth)
});
for (macro_name, macro_value) in options.macros.iter() {
opts.add_macro_definition(macro_name, macro_value.as_ref().map(|val| val.as_str()));
}
let kind = match stage {
Stage::Fragment => shaderc::ShaderKind::Fragment,
Stage::Vertex => shaderc::ShaderKind::Vertex,
};
let artifact = self.compiler.compile_into_spirv(
&source,
kind,
source_filename,
entry_point,
Some(&opts))?;
if artifact.get_num_warnings() > 0 {
warn!("{}", artifact.get_warning_messages());
}
Ok(artifact.as_binary().to_vec())
}
}
fn find_uniform_mappings(ast: &spirv::Ast<glsl::Target>)
-> Result<HashMap<String, String>, Error> {
let shader_resources = ast.get_shader_resources()?;
let mut mappings = HashMap::new();
/* discover property indices from debug names in the uniform buffers */
for uniform_buffer in shader_resources.uniform_buffers {
for member_name in get_member_names_deep(&ast, uniform_buffer.base_type_id)? {
let flat_name = format!("_{}.{}", uniform_buffer.id, member_name);
mappings.insert(flat_name, member_name);
}
}
/* samplers end up in sampled_images, separate_images and separate_samplers - final IDs
are from sampled_images (the combined sampler resource), and names are from separate_images
(the Texture2D) */
for (image_index, sampled_image) in shader_resources.sampled_images.into_iter().enumerate() {
let image = &shader_resources.separate_images[image_index];
let compiled_name = format!("_{}", sampled_image.id);
mappings.insert(compiled_name, image.name.to_string());
}
Ok(mappings)
}
fn get_member_names_deep(ast: &spirv::Ast<glsl::Target>,
struct_type_id: u32)
-> Result<Vec<String>, Error> {
let (member_types, _member_array_sizes) = match ast.get_type(struct_type_id)? {
spirv::Type::Struct { member_types, array } => (member_types, array),
_ => panic!("uniform buffer must be a struct"),
};
let mut names = Vec::new();
for (member_id, member_type) in member_types.into_iter().enumerate() {
let member_id = member_id as u32;
let member_base_name = ast.get_member_name(struct_type_id, member_id)?;
match ast.get_type(member_type)? {
spirv::Type::Struct { ref array, .. } => {
let element_names = array_member_names(&member_base_name, array);
let member_base_type = ast.get_base_type_id(member_type)?;
let child_names = get_member_names_deep(ast, member_base_type)?;
for element_name in element_names {
for child_name in child_names.iter() {
names.push(format!("{}.{}", element_name, child_name.clone()));
}
}
}
spirv::Type::Float { ref array } |
spirv::Type::Double { ref array } |
spirv::Type::Int { ref array } |
spirv::Type::Int64 { ref array } |
spirv::Type::UInt { ref array } |
spirv::Type::UInt64 { ref array } |
spirv::Type::Boolean { ref array } |
spirv::Type::Char { ref array } |
spirv::Type::Half { ref array } => {
names.extend(array_member_names(&member_base_name, array));
}
spirv::Type::Image { .. } |
spirv::Type::SampledImage { .. } |
spirv::Type::Sampler { .. } |
spirv::Type::AtomicCounter { .. } |
spirv::Type::Void |
spirv::Type::Unknown => {
let msg = format!("member of {} had an unsupported type", member_base_name);
return Err(Error::CompilationFailed(msg));
}
}
}
Ok(names)
}
fn array_member_names(base_name: &str, array_dims: &[u32]) -> Vec<String> {
if array_dims.is_empty() {
return vec![base_name.to_string()];
}
let mut array_element_names = Vec::new();
for (rank, dim) in array_dims.iter().enumerate() {
let prev_elements = array_element_names.clone();
array_element_names.clear();
for element in 0..*dim {
if rank == 0 {
array_element_names.push(format!("{}[{}]", base_name, element));
} else {
for prev_element in prev_elements.iter() {
array_element_names.push(format!("{}[{}]", prev_element, element));
}
}
}
}
array_element_names
}
fn find_source_file<P>(name: &str, source_paths: &[P]) -> Result<PathBuf, String>
where P: AsRef<Path>
{
source_paths.iter()
.filter_map(|path| {
let file_path = path.as_ref().join(name);
if file_path.exists() {
Some(file_path)
} else {
None
}
})
.next()
.ok_or_else(|| format!(
"unable to find shader file `{}` in search paths:\n{}",
name,
source_paths.iter()
.map(|path| format!(" * `{}`", path.as_ref().to_string_lossy()))
.collect::<Vec<_>>()
.join("\n"),
))
}
| {
let source_path = source_path.into();
let source_filename = source_path.to_string_lossy();
let mut source = String::new();
File::open(&source_path)?.read_to_string(&mut source)?;
let spirv = self.hlsl_to_spirv(&source,
source_filename.as_ref(),
stage,
entry_point,
options)?;
let module = spirv::Module::from_words(&spirv);
let mut ast = spirv::Ast::<glsl::Target>::parse(&module)?;
spirv::Compile::set_compiler_options(&mut ast, &glsl::CompilerOptions {
version: options.target_version,
vertex: glsl::CompilerVertexOptions {
invert_y: false,
transform_clip_space: false, | identifier_body |
converter.rs | use spirv_cross::{
glsl,
spirv,
};
use shaderc;
use std::{
iter,
path::{Path, PathBuf},
fs::File,
io::Read,
collections::HashMap,
};
use GlslVersion;
use Stage;
use ConvertedShader;
use error::Error;
#[derive(Debug, Clone)]
pub struct ConverterOptions {
/// Additional directories to search in when resolving `#include` statements.
///
/// The path to the file being converted is always implicitly used as a search path, taking
/// priority over any paths listed here.
///
/// Next, the paths listed here are tried in order.
pub include_search_paths: Vec<PathBuf>,
/// Macros to `#define` during compilation. Use `None` to define the macro without a value.
pub macros: HashMap<String, Option<String>>,
pub target_version: GlslVersion,
}
impl Default for ConverterOptions {
fn default() -> Self {
ConverterOptions {
include_search_paths: Vec::new(),
macros: HashMap::new(),
target_version: GlslVersion::V1_00Es,
}
}
}
impl ConverterOptions {
pub fn new() -> Self {
Self::default()
}
fn resolve_include(&self,
name: &str,
include_type: shaderc::IncludeType,
from_path: &str,
_depth: usize) -> Result<shaderc::ResolvedInclude, String> { | }))
.collect();
find_source_file(name, &search_paths_and_parent)?
}
_ => find_source_file(name, &self.include_search_paths)?
};
let mut content = String::new();
File::open(&path)
.and_then(|mut include_file| include_file.read_to_string(&mut content))
.map_err(|err| err.to_string())?;
Ok(shaderc::ResolvedInclude {
resolved_name: path.to_string_lossy().to_string(),
content,
})
}
}
pub struct Converter {
compiler: shaderc::Compiler,
}
impl Converter {
pub fn new() -> Result<Self, Error> {
let compiler = shaderc::Compiler::new()
.ok_or(Error::InitFailed)?;
Ok(Self {
compiler
})
}
/// Convert an HLSL file to GLSL.
///
/// # Arguments
///
/// * `source_path` - Location of HLSL source file.
/// * `stage` - Type of GLSL shader to create.
/// * `entry_point` - Name of function to use as entry point for this stage in the HLSL source.
/// * `options` - Converter configuration.
pub fn convert<P>(
&mut self,
source_path: P,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<ConvertedShader, Error>
where P: Into<PathBuf>
{
let source_path = source_path.into();
let source_filename = source_path.to_string_lossy();
let mut source = String::new();
File::open(&source_path)?.read_to_string(&mut source)?;
let spirv = self.hlsl_to_spirv(&source,
source_filename.as_ref(),
stage,
entry_point,
options)?;
let module = spirv::Module::from_words(&spirv);
let mut ast = spirv::Ast::<glsl::Target>::parse(&module)?;
spirv::Compile::set_compiler_options(&mut ast, &glsl::CompilerOptions {
version: options.target_version,
vertex: glsl::CompilerVertexOptions {
invert_y: false,
transform_clip_space: false,
},
})?;
let shader = ast.compile()?;
let uniforms = find_uniform_mappings(&ast)?;
Ok(ConvertedShader {
shader,
uniforms,
})
}
fn hlsl_to_spirv(&mut self,
source: &str,
source_filename: &str,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<Vec<u32>, Error> {
let mut opts = shaderc::CompileOptions::new().ok_or(Error::InitFailed)?;
opts.set_source_language(shaderc::SourceLanguage::HLSL);
opts.set_target_env(shaderc::TargetEnv::Vulkan, 0);
opts.set_optimization_level(shaderc::OptimizationLevel::Performance);
opts.set_generate_debug_info();
opts.set_include_callback(|name, include_type, from_path, depth| {
options.resolve_include(name, include_type, from_path, depth)
});
for (macro_name, macro_value) in options.macros.iter() {
opts.add_macro_definition(macro_name, macro_value.as_ref().map(|val| val.as_str()));
}
let kind = match stage {
Stage::Fragment => shaderc::ShaderKind::Fragment,
Stage::Vertex => shaderc::ShaderKind::Vertex,
};
let artifact = self.compiler.compile_into_spirv(
&source,
kind,
source_filename,
entry_point,
Some(&opts))?;
if artifact.get_num_warnings() > 0 {
warn!("{}", artifact.get_warning_messages());
}
Ok(artifact.as_binary().to_vec())
}
}
fn find_uniform_mappings(ast: &spirv::Ast<glsl::Target>)
-> Result<HashMap<String, String>, Error> {
let shader_resources = ast.get_shader_resources()?;
let mut mappings = HashMap::new();
/* discover property indices from debug names in the uniform buffers */
for uniform_buffer in shader_resources.uniform_buffers {
for member_name in get_member_names_deep(&ast, uniform_buffer.base_type_id)? {
let flat_name = format!("_{}.{}", uniform_buffer.id, member_name);
mappings.insert(flat_name, member_name);
}
}
/* samplers end up in sampled_images, separate_images and separate_samplers - final IDs
are from sampled_images (the combined sampler resource), and names are from separate_images
(the Texture2D) */
for (image_index, sampled_image) in shader_resources.sampled_images.into_iter().enumerate() {
let image = &shader_resources.separate_images[image_index];
let compiled_name = format!("_{}", sampled_image.id);
mappings.insert(compiled_name, image.name.to_string());
}
Ok(mappings)
}
fn get_member_names_deep(ast: &spirv::Ast<glsl::Target>,
struct_type_id: u32)
-> Result<Vec<String>, Error> {
let (member_types, _member_array_sizes) = match ast.get_type(struct_type_id)? {
spirv::Type::Struct { member_types, array } => (member_types, array),
_ => panic!("uniform buffer must be a struct"),
};
let mut names = Vec::new();
for (member_id, member_type) in member_types.into_iter().enumerate() {
let member_id = member_id as u32;
let member_base_name = ast.get_member_name(struct_type_id, member_id)?;
match ast.get_type(member_type)? {
spirv::Type::Struct { ref array, .. } => {
let element_names = array_member_names(&member_base_name, array);
let member_base_type = ast.get_base_type_id(member_type)?;
let child_names = get_member_names_deep(ast, member_base_type)?;
for element_name in element_names {
for child_name in child_names.iter() {
names.push(format!("{}.{}", element_name, child_name.clone()));
}
}
}
spirv::Type::Float { ref array } |
spirv::Type::Double { ref array } |
spirv::Type::Int { ref array } |
spirv::Type::Int64 { ref array } |
spirv::Type::UInt { ref array } |
spirv::Type::UInt64 { ref array } |
spirv::Type::Boolean { ref array } |
spirv::Type::Char { ref array } |
spirv::Type::Half { ref array } => {
names.extend(array_member_names(&member_base_name, array));
}
spirv::Type::Image { .. } |
spirv::Type::SampledImage { .. } |
spirv::Type::Sampler { .. } |
spirv::Type::AtomicCounter { .. } |
spirv::Type::Void |
spirv::Type::Unknown => {
let msg = format!("member of {} had an unsupported type", member_base_name);
return Err(Error::CompilationFailed(msg));
}
}
}
Ok(names)
}
fn array_member_names(base_name: &str, array_dims: &[u32]) -> Vec<String> {
if array_dims.is_empty() {
return vec![base_name.to_string()];
}
let mut array_element_names = Vec::new();
for (rank, dim) in array_dims.iter().enumerate() {
let prev_elements = array_element_names.clone();
array_element_names.clear();
for element in 0..*dim {
if rank == 0 {
array_element_names.push(format!("{}[{}]", base_name, element));
} else {
for prev_element in prev_elements.iter() {
array_element_names.push(format!("{}[{}]", prev_element, element));
}
}
}
}
array_element_names
}
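// A quick sketch of what `array_member_names` produces; this test module is
// an assumed addition, not part of the original file.
#[cfg(test)]
mod array_member_name_tests {
use super::array_member_names;
#[test]
fn expands_array_dimensions() {
assert_eq!(array_member_names("foo", &[]), vec!["foo"]);
// The index of the later dimension varies slowest because each rank
// rebuilds the list from the previous rank's names.
assert_eq!(
array_member_names("foo", &[2, 2]),
vec!["foo[0][0]", "foo[1][0]", "foo[0][1]", "foo[1][1]"]
);
}
}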
fn find_source_file<P>(name: &str, source_paths: &[P]) -> Result<PathBuf, String>
where P: AsRef<Path>
{
source_paths.iter()
.filter_map(|path| {
let file_path = path.as_ref().join(name);
if file_path.exists() {
Some(file_path)
} else {
None
}
})
.next()
.ok_or_else(|| format!(
"unable to find shader file `{}` in search paths:\n{}",
name,
source_paths.iter()
.map(|path| format!(" * `{}`", path.as_ref().to_string_lossy()))
.collect::<Vec<_>>()
.join("\n"),
))
} | let path = match (include_type, PathBuf::from(name).parent()) {
(shaderc::IncludeType::Relative, Some(parent_path)) => {
let mut search_paths_and_parent: Vec<_> = iter::once(parent_path)
.chain(self.include_search_paths.iter().map(|path_buf_ref| {
path_buf_ref as &Path | random_line_split |
main.rs | #![no_std]
#![no_main]
// pick a panicking behavior
// extern crate panic_halt; // you can put a breakpoint on `rust_begin_unwind` to catch panics
// extern crate panic_abort; // requires nightly
// extern crate panic_itm; // logs messages over ITM; requires ITM support
extern crate panic_semihosting; // logs messages to the host stderr; requires a debugger
#[macro_use(block)]
extern crate nb;
use numtoa::NumToA;
use cortex_m_rt::entry;
use stm32f4xx_hal as hal;
use crate::hal::rcc::Clocks;
use crate::hal::serial::config::Config;
use stm32f4;
use stm32f4xx_hal::gpio::gpiod::{PD0, PD1, PD8, PD9};
use stm32f4xx_hal::stm32::USART3;
use crate::hal::{prelude::*, serial::Serial, stm32, time::Bps};
#[entry]
fn main() -> ! {
if let (Some(dp), Some(cp)) = (
stm32::Peripherals::take(),
cortex_m::peripheral::Peripherals::take(),
) {
let gpiob = dp.GPIOB.split();
let mut led = gpiob.pb7.into_push_pull_output();
let rcc = dp.RCC.constrain();
let clocks = rcc.cfgr.sysclk(100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: these were the settings actually tried earlier; they appeared to run at roughly 1.5 Mbaud (unconfirmed).
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() {
break;
}
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit()); |
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
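// The clock printing in `main` repeats the same buffer/convert/write steps
// and sends the whole 20-byte buffer, including the unused leading zero
// bytes, because the `&str` returned by `numtoa_str` is discarded. A helper
// along these lines folds that into one call; `write_num_to_serial` is a
// hypothetical name, not part of the original firmware.
pub fn write_num_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
label: &str,
value: u32,
) {
let mut buffer = [0u8; 20];
write_string_to_serial(tx, label);
// Write only the digits that numtoa actually produced.
write_string_to_serial(tx, value.numtoa_str(10, &mut buffer));
write_string_to_serial(tx, "\n");
}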
pub fn configure<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
//
// Can has 3 modes: Initialization, Normal, Sleep
// Set INRQ bit in CAN_MCR to enter initialization mode
// Wait for INAK bit to be set in CAN_MSR register
// Setup bit timing (CAN_BTR register) and CAN options (CAN_MCR registers)
// Clear the INRQ bit to enter normal mode (must wait for INAK to be cleared after 11 recessive bits on the bus)
// Silent mode
// Entered by setting SILM bit in the CAN_BTR register
// Does not respond on the bus, perfect for logging
// Loop back mode
// Set LBKM in CAN_BTR register
// Stores transmitted messages in receive mailbox
// Transmit flow
// 1. Find an empty TX mailbox
// 2. Setup identifier, data length code, and data in an empty TX mailbox
// 3. Set TXRQ bit in CAN_TIxR register to request the transmission start
// 4. Transmission success indicated by RQCP and TXOK bits set in CAN_TSR register
// Failure indicated by ALST bit in CAN_TSR for arbitration lost or TERR bit for transmission error
//
// Transmit priority
// Can be set to use identifier priority or FIFO by setting TXFP bit in CAN_MCR register
// Receive Flow
// Received after the message is completed and passed through the identifier filtering
// FMP[1:0] bits in CAN_RFR register indicate messages available in the FIFO
// Interrupts can be generated by setting FFIE bit in CAN_IER register
// Read from FIFO output mailbox, release the mailbox using the RFOM bit in CAN_RFR register
// Bit Timing
// Time is split into three segments
// Synchronization
// Segment 1
// Segment 2
// Baud rate = 1 / NominalBitTime
// NominalBitTime = 1 x t_q + t_bs1 + t_bs2
// t_bs1 = t_q x (TS1[3:0] + 1)
// t_bs2 = t_q x (TS2[3:0] + 1)
// t_q = (BRP[9:0] + 1) x t_pclk
// Need to find this
// t_pclk = time period of the APB clock
// CAN is on APB1 which is 50 MHz
// Baud Rate Prescaler: BRP[9:0] = 24, so the peripheral clock is divided by 25
// t_q = 25 / 50 MHz = 0.5us
// t_bs1 = 9 x t_q (TS1[3:0] = 8)
// t_bs2 = 6 x t_q (TS2[3:0] = 5)
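// Worked check of the numbers above (a sketch assuming APB1 really is
// 50 MHz as stated):
// t_q = (24 + 1) / 50 MHz = 0.5 us
// NominalBitTime = (1 + 9 + 6) x 0.5 us = 8 us
// Baud rate = 1 / 8 us = 125 kbit/s, matching the loopback setup comment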
// Filter setup
// Can be setup while in initialization or normal mode
// Must set FINIT bit in CAN_FMR to modify the filters
// CAN reception is deactivated when FINIT = 1
// Notes:
// 1. Hard to tell if there are three or two can controllers | random_line_split |
|
main.rs | #![no_std]
#![no_main]
// pick a panicking behavior
// extern crate panic_halt; // you can put a breakpoint on `rust_begin_unwind` to catch panics
// extern crate panic_abort; // requires nightly
// extern crate panic_itm; // logs messages over ITM; requires ITM support
extern crate panic_semihosting; // logs messages to the host stderr; requires a debugger
#[macro_use(block)]
extern crate nb;
use numtoa::NumToA;
use cortex_m_rt::entry;
use stm32f4xx_hal as hal;
use crate::hal::rcc::Clocks;
use crate::hal::serial::config::Config;
use stm32f4;
use stm32f4xx_hal::gpio::gpiod::{PD0, PD1, PD8, PD9};
use stm32f4xx_hal::stm32::USART3;
use crate::hal::{prelude::*, serial::Serial, stm32, time::Bps};
#[entry]
fn main() ->! | (clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: these were the settings actually tried earlier; they appeared to run at roughly 1.5 Mbaud (unconfirmed).
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() {
break;
}
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn configure<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
//
// Can has 3 modes: Initialization, Normal, Sleep
// Set INRQ bit in CAN_MCR to enter initialization mode
// Wait for INAK bit to be set in CAN_MSR register
// Setup bit timing (CAN_BTR register) and CAN options (CAN_MCR registers)
// Clear the INRQ bit to enter normal mode (must wait for INAK to be cleared after 11 recessive bits on the bus)
// Silent mode
// Entered by setting SILM bit in the CAN_BTR register
// Does not respond on the bus, perfect for logging
// Loop back mode
// Set LBKM in CAN_BTR register
// Stores transmitted messages in receive mailbox
// Transmit flow
// 1. Find an empty TX mailbox
// 2. Setup identifier, data length code, and data in an empty TX mailbox
// 3. Set TXRQ bit in CAN_TIxR register to request the transmission start
// 4. Transmission success indicated by RQCP and TXOK bits set in CAN_TSR register
// Failure indicated by ALST bit in CAN_TSR for arbitration lost or TERR bit for transmission error
//
// Transmit priority
// Can be set to use identifier priority or FIFO by setting TXFP bit in CAN_MCR register
// Receive Flow
// Received after the message is completed and passed through the identifier filtering
// FMP[1:0] bits in CAN_RFR register indicate messages available in the FIFO
// Interrupts can be generated by setting FFIE bit in CAN_IER register
// Read from FIFO output mailbox, release the mailbox using the RFOM bit in CAN_RFR register
// Bit Timing
// Time is split into three segments
// Synchronization
// Segment 1
// Segment 2
// Baud rate = 1 / NominalBitTime
// NominalBitTime = 1 x t_q + t_bs1 + t_bs2
// t_bs1 = t_q x (TS1[3:0] + 1)
// t_bs2 = t_q x (TS2[3:0] + 1)
// t_q = (BRP[9:0] + 1) x t_pclk
// Need to find this
// t_pclk = time period of the APB clock
// CAN is on APB1 which is 50 MHz
// Baud Rate Prescaler: BRP[9:0] = 24, so the peripheral clock is divided by 25
// t_q = 25 / 50 MHz = 0.5us
// t_bs1 = 9 x t_q (TS1[3:0] = 8)
// t_bs2 = 6 x t_q (TS2[3:0] = 5)
// Filter setup
// Can be setup while in initialization or normal mode
// Must set FINIT bit in CAN_FMR to modify the filters
// CAN reception is deactivated when FINIT = 1
// Notes:
// 1. Hard to tell if there are three or two can controllers
| {
if let (Some(dp), Some(cp)) = (
stm32::Peripherals::take(),
cortex_m::peripheral::Peripherals::take(),
) {
let gpiob = dp.GPIOB.split();
let mut led = gpiob.pb7.into_push_pull_output();
let rcc = dp.RCC.constrain();
let clocks = rcc.cfgr.sysclk(100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: "); | identifier_body |
main.rs | #![no_std]
#![no_main]
// pick a panicking behavior
// extern crate panic_halt; // you can put a breakpoint on `rust_begin_unwind` to catch panics
// extern crate panic_abort; // requires nightly
// extern crate panic_itm; // logs messages over ITM; requires ITM support
extern crate panic_semihosting; // logs messages to the host stderr; requires a debugger
#[macro_use(block)]
extern crate nb;
use numtoa::NumToA;
use cortex_m_rt::entry;
use stm32f4xx_hal as hal;
use crate::hal::rcc::Clocks;
use crate::hal::serial::config::Config;
use stm32f4;
use stm32f4xx_hal::gpio::gpiod::{PD0, PD1, PD8, PD9};
use stm32f4xx_hal::stm32::USART3;
use crate::hal::{prelude::*, serial::Serial, stm32, time::Bps};
#[entry]
fn main() -> ! {
if let (Some(dp), Some(cp)) = (
stm32::Peripherals::take(),
cortex_m::peripheral::Peripherals::take(),
) {
let gpiob = dp.GPIOB.split();
let mut led = gpiob.pb7.into_push_pull_output();
let rcc = dp.RCC.constrain();
let clocks = rcc.cfgr.sysclk(100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: these were the settings actually tried earlier; they appeared to run at roughly 1.5 Mbaud (unconfirmed).
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if!can1.msr.read().inak().bit() |
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn configure<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
//
// Can has 3 modes: Initialization, Normal, Sleep
// Set INRQ bit in CAN_MCR to enter initialization mode
// Wait for INAK bit to be set in CAN_MSR register
// Setup bit timing (CAN_BTR register) and CAN options (CAN_MCR registers)
// Clear the INRQ bit to enter normal mode (must wait for INAK to be cleared after 11 recessive bits on the bus)
// Silent mode
// Entered by setting SILM bit in the CAN_BTR register
// Does not respond on the bus, perfect for logging
// Loop back mode
// Set LBKM in CAN_BTR register
// Stores transmitted messages in receive mailbox
// Transmit flow
// 1. Find an empty TX mailbox
// 2. Setup identifier, data length code, and data in an empty TX mailbox
// 3. Set TXRQ bit in CAN_TIxR register to request the transmission start
// 4. Transmission success indicated by RQCP and TXOK bits set in CAN_TSR register
// Failure indicated by ALST bit in CAN_TSR for arbitration lost or TERR bit for transmission error
//
// Transmit priority
// Can be set to use identifier priority or FIFO by setting TXFP bit in CAN_MCR register
// Receive Flow
// Received after the message is completed and passed through the identifier filtering
// FMP[1:0] bits in CAN_RFR register indicate messages available in the FIFO
// Interrupts can be generated by setting FFIE bit in CAN_IER register
// Read from FIFO output mailbox, release the mailbox using the RFOM bit in CAN_RFR register
// Bit Timing
// Time is split into three segments
// Synchronization
// Segment 1
// Segment 2
// Baud rate = 1 / NominalBitTime
// NominalBitTime = 1 x t_q + t_bs1 + t_bs2
// t_bs1 = t_q x (TS1[3:0] + 1)
// t_bs2 = t_q x (TS2[3:0] + 1)
// t_q = (BRP[9:0] + 1) x t_pclk
// Need to find this
// t_pclk = time period of the APB clock
// CAN is on APB1 which is 50 MHz
// Baud Rate Prescaler: BRP[9:0] = 24, so the peripheral clock is divided by 25
// t_q = 25 / 50 MHz = 0.5us
// t_bs1 = 9 x t_q (TS1[3:0] = 8)
// t_bs2 = 6 x t_q (TS2[3:0] = 5)
// Filter setup
// Can be setup while in initialization or normal mode
// Must set FINIT bit in CAN_FMR to modify the filters
// CAN reception is deactivated when FINIT = 1
// Notes:
// 1. Hard to tell if there are three or two can controllers
| {
break;
} | conditional_block |
main.rs | #![no_std]
#![no_main]
// pick a panicking behavior
// extern crate panic_halt; // you can put a breakpoint on `rust_begin_unwind` to catch panics
// extern crate panic_abort; // requires nightly
// extern crate panic_itm; // logs messages over ITM; requires ITM support
extern crate panic_semihosting; // logs messages to the host stderr; requires a debugger
#[macro_use(block)]
extern crate nb;
use numtoa::NumToA;
use cortex_m_rt::entry;
use stm32f4xx_hal as hal;
use crate::hal::rcc::Clocks;
use crate::hal::serial::config::Config;
use stm32f4;
use stm32f4xx_hal::gpio::gpiod::{PD0, PD1, PD8, PD9};
use stm32f4xx_hal::stm32::USART3;
use crate::hal::{prelude::*, serial::Serial, stm32, time::Bps};
#[entry]
fn main() -> ! {
if let (Some(dp), Some(cp)) = (
stm32::Peripherals::take(),
cortex_m::peripheral::Peripherals::take(),
) {
let gpiob = dp.GPIOB.split();
let mut led = gpiob.pb7.into_push_pull_output();
let rcc = dp.RCC.constrain();
let clocks = rcc.cfgr.sysclk(100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: these were the settings actually tried earlier; they appeared to run at roughly 1.5 Mbaud (unconfirmed).
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() {
break;
}
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn | <X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
//
// Can has 3 modes: Initialization, Normal, Sleep
// Set INRQ bit in CAN_MCR to enter initialization mode
// Wait for INAK bit to be set in CAN_MSR register
// Setup bit timing (CAN_BTR register) and CAN options (CAN_MCR registers)
// Clear the INRQ bit to enter normal mode (must wait for INAK to be cleared after 11 recessive bits on the bus)
// Silent mode
// Entered by setting SILM bit in the CAN_BTR register
// Does not respond on the bus, perfect for logging
// Loop back mode
// Set LBKM in CAN_BTR register
// Stores transmitted messages in receive mailbox
// Transmit flow
// 1. Find an empty TX mailbox
// 2. Setup identifier, data length code, and data in an empty TX mailbox
// 3. Set TXRQ bit in CAN_TIxR register to request the transmission start
// 4. Transmission success indicated by RQCP and TXOK bits set in CAN_TSR register
// Failure indicated by ALST bit in CAN_TSR for arbitration lost or TERR bit for transmission error
//
// Transmit priority
// Can be set to use identifier priority or FIFO by setting TXFP bit in CAN_MCR register
// Receive Flow
// Received after the message is completed and passed through the identifier filtering
// FMP[1:0] bits in CAN_RFR register indicate messages available in the FIFO
// Interrupts can be generated by setting FFIE bit in CAN_IER register
// Read from FIFO output mailbox, release the mailbox using the RFOM bit in CAN_RFR register
// Bit Timing
// Time is split into three segments
// Synchronization
// Segment 1
// Segment 2
// Baud rate = 1 / NominalBitTime
// NominalBitTime = 1 x t_q + t_bs1 + t_bs2
// t_bs1 = t_q x (TS1[3:0] + 1)
// t_bs2 = t_q x (TS2[3:0] + 1)
// t_q = (BRP[9:0] + 1) x t_pclk
// Need to find this
// t_pclk = time period of the APB clock
// CAN is on APB1 which is 50 MHz
// Baud Rate Prescaler: BRP[9:0] = 24, so the peripheral clock is divided by 25
// t_q = 25 / 50 MHz = 0.5us
// t_bs1 = 9 x t_q (TS1[3:0] = 8)
// t_bs2 = 6 x t_q (TS2[3:0] = 5)
// Filter setup
// Can be setup while in initialization or normal mode
// Must set FINIT bit in CAN_FMR to modify the filters
// CAN reception is deactivated when FINIT = 1
// Notes:
// 1. Hard to tell if there are three or two can controllers
| configure | identifier_name |
main.rs | #![feature(array_windows)]
#![feature(format_args_capture)]
#![feature(total_cmp)]
use clap::App;
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use liblinear::util::TrainingInput;
use liblinear::{Builder as LiblinearBuilder, LibLinearModel as _, SolverType};
use regex::Regex;
use serde::{Deserialize, Serialize};
use walkdir::WalkDir;
use std::borrow::Cow;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::error::Error;
use std::path::Path;
use std::{fs, mem};
enum LexerState {
Start,
ContinueIdent,
ContinuePunct,
}
/// Mapping string-based features to integer indices and back.
#[derive(Default)]
struct FeatureMap {
features: Vec<String>,
map: HashMap<String, u32>,
}
/// Linear SVM model produced by training and used during classification.
#[derive(Serialize, Deserialize)]
struct Model {
features: HashMap<String, u32>,
classes: HashMap<String, Vec<(u32, f64)>>,
}
struct ClassifiedTest {
name: String,
class_scores: Vec<(String, f64)>,
}
impl FeatureMap {
fn intern(&mut self, feature: Cow<str>, read_only: bool) -> Option<u32> {
if let Some(index) = self.map.get(&*feature) {
Some(*index)
} else if read_only {
None
} else {
let new_index = u32::try_from(self.features.len()).unwrap();
self.features.push(feature.clone().into_owned());
self.map.insert(feature.into_owned(), new_index);
Some(new_index)
}
}
}
impl ClassifiedTest {
fn max_score(&self) -> f64 {
self.class_scores[0].1
}
}
fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
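// A minimal sketch of the tokenizer's behaviour; this test module is an
// assumed addition, not part of the original tool.
#[cfg(test)]
mod tokenize_tests {
use super::tokenize;
#[test]
fn splits_ident_and_punct_runs() {
// Whitespace separates tokens; ident/punct boundaries also split,
// while consecutive punctuation stays in one token.
assert_eq!(tokenize("let x == 1;"), vec!["let", "x", "==", "1", ";"]);
}
}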
/// Turns all identifiers and digits into a single token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
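// Sketch of `generalize` on a few tokens (assumed test, not in the
// original): identifier-like tokens collapse to "и", digit-leading ones to
// "ц", keywords and punctuation pass through.
#[cfg(test)]
mod generalize_tests {
use super::generalize;
#[test]
fn collapses_idents_and_digits() {
assert_eq!(generalize("my_var"), "и");
assert_eq!(generalize("42"), "ц");
assert_eq!(generalize("fn"), "fn");
assert_eq!(generalize("=="), "==");
}
}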
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
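// For tokens ["let", "x", "="], the emitted string features (before
// interning) are the unigrams "let", "x", "=" plus the generalized "и";
// the bigrams "let x" and "x =" with their generalized forms; the
// skip-gram "let ="; and the trigram "let x =". Values are illustrative.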
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
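// Worked example of the sparse dot product; this test module is an assumed
// addition. Feature 2 has no weight in the model, so only the weights for
// features 1 and 3 contribute: 0.5 + (-0.25) = 0.25.
#[cfg(test)]
mod decision_value_tests {
use super::get_decision_value;
#[test]
fn sums_weights_of_matching_features() {
let m = [(1u32, 0.5), (3u32, -0.25)];
assert_eq!(get_decision_value(&m, &[1, 2, 3]), 0.25);
}
}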
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name()!= "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {
let class_idx = i32::try_from(class_idx).unwrap();
let mut weights = Vec::new();
for feature_index in 1..=i32::try_from(liblinear_model.num_features()).unwrap() {
let weight = liblinear_model.feature_coefficient(feature_index, class_idx);
if weight != 0.0 {
let index = u32::try_from(feature_index).unwrap();
weights.push((index, weight));
used_features.insert(index);
}
}
classes.insert(class_name.clone(), weights);
}
// Throw away features that ended up unused from the table.
let features =
feature_map.map.into_iter().filter(|(_, index)| used_features.contains(index)).collect();
// Write the model into file.
// FIXME: Make the output model file configurable.
let model = Model { features, classes };
let model_str = serde_json::to_string(&model)?;
fs::write("model.json", model_str)?;
Ok(())
}
/// Read classifier from `model.json` and use it to classify tests.
fn classify(root: &Path) -> Result<(), Box<dyn Error>> {
// Read the model from file.
// FIXME: Make the input model file configurable.
let model_str = fs::read_to_string("model.json")?; | let mut feature_map = FeatureMap { map: mem::take(&mut model.features), features: Vec::new() };
// Classify tests that are not yet classified using the model.
let mut files = HashMap::default();
for dir in &[&root.join("issues"), root] {
for entry in fs::read_dir(dir)? {
let entry = entry?;
if!entry.file_type()?.is_dir() && entry.file_name()!= ".gitattributes" {
let path = entry.path();
if let Ok(s) = fs::read_to_string(&path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), true);
files.insert(file_name, RefCell::new(features));
}
}
}
}
let mut classified_tests = Vec::new();
for (name, features) in files_to_tests(files) {
let mut model_scores = Vec::new();
for (model_name, weights) in &model.classes {
let score = get_decision_value(weights, &features);
model_scores.push((model_name, score));
}
// Print three classes with highest decision values.
model_scores.sort_by(|(_, sc1), (_, sc2)| sc1.total_cmp(sc2));
classified_tests.push(ClassifiedTest {
name,
class_scores: model_scores
.into_iter()
.rev()
.take(3)
.map(|(name, score)| (name.clone(), score))
.collect(),
});
}
let re = Regex::new(r"issue-(\d+)").unwrap();
classified_tests.sort_by(|test1, test2| test2.max_score().total_cmp(&test1.max_score()));
for test in classified_tests {
let mut msg = format!(
"- [{}](https://github.com/rust-lang/rust/blob/master/src/test/ui/{}) <sup>",
test.name, test.name
);
let issue = match re.captures(&test.name) {
Some(captures) => {
format!("[issue](https://github.com/rust-lang/rust/issues/{})", &captures[1])
}
None => "unknown".to_string(),
};
msg.push_str(&issue);
msg.push_str("</sup>: ");
for (i, (name, score)) in test.class_scores.iter().enumerate() {
if i != 0 {
msg.push_str(", ");
}
msg.push_str(&format!("{name} ({score:.3})"));
}
println!("{}", msg);
}
Ok(())
}
fn main() -> Result<(), Box<dyn Error>> {
let matches = App::new("UI test classifier")
.args_from_usage(
"--train 'Train the classifier'
--classify 'Classify tests'",
)
.get_matches();
// FIXME: Make it configurable.
let root = Path::new("C:/msys64/home/we/rust/src/test/ui");
if matches.is_present("train") {
train(root)?;
}
if matches.is_present("classify") {
classify(root)?;
}
Ok(())
} | let mut model: Model = serde_json::from_str(&model_str)?; | random_line_split |
main.rs | #![feature(array_windows)]
#![feature(format_args_capture)]
#![feature(total_cmp)]
use clap::App;
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use liblinear::util::TrainingInput;
use liblinear::{Builder as LiblinearBuilder, LibLinearModel as _, SolverType};
use regex::Regex;
use serde::{Deserialize, Serialize};
use walkdir::WalkDir;
use std::borrow::Cow;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::error::Error;
use std::path::Path;
use std::{fs, mem};
enum LexerState {
Start,
ContinueIdent,
ContinuePunct,
}
/// Mapping string-based features to integer indices and back.
#[derive(Default)]
struct FeatureMap {
features: Vec<String>,
map: HashMap<String, u32>,
}
/// Linear SVM model produced by training and used during classification.
#[derive(Serialize, Deserialize)]
struct Model {
features: HashMap<String, u32>,
classes: HashMap<String, Vec<(u32, f64)>>,
}
struct ClassifiedTest {
name: String,
class_scores: Vec<(String, f64)>,
}
impl FeatureMap {
fn intern(&mut self, feature: Cow<str>, read_only: bool) -> Option<u32> |
}
impl ClassifiedTest {
fn max_score(&self) -> f64 {
self.class_scores[0].1
}
}
fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
/// Turns all identifiers and digits into a single token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
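// Illustrative pairing (made-up names, not from a real run): feature
// vectors extracted from "foo/bar.stderr" and "foo/bar.stdout" are merged
// into the entry for "foo/bar.rs" whenever that file exists, so each test
// case contributes a single combined feature vector.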
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name()!= "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {
let class_idx = i32::try_from(class_idx).unwrap();
let mut weights = Vec::new();
for feature_index in 1..=i32::try_from(liblinear_model.num_features()).unwrap() {
let weight = liblinear_model.feature_coefficient(feature_index, class_idx);
if weight != 0.0 {
let index = u32::try_from(feature_index).unwrap();
weights.push((index, weight));
used_features.insert(index);
}
}
classes.insert(class_name.clone(), weights);
}
// Throw away features that ended up unused from the table.
let features =
feature_map.map.into_iter().filter(|(_, index)| used_features.contains(index)).collect();
// Write the model into file.
// FIXME: Make the output model file configurable.
let model = Model { features, classes };
let model_str = serde_json::to_string(&model)?;
fs::write("model.json", model_str)?;
Ok(())
}
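// Shape of the serialized model, hand-written for illustration (serde_json
// encodes each (u32, f64) weight tuple as a two-element array):
// {"features":{"let x":17,...},"classes":{"borrowck":[[17,0.25],...],...}}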
/// Read classifier from `model.json` and use it to classify tests.
fn classify(root: &Path) -> Result<(), Box<dyn Error>> {
// Read the model from file.
// FIXME: Make the input model file configurable.
let model_str = fs::read_to_string("model.json")?;
let mut model: Model = serde_json::from_str(&model_str)?;
let mut feature_map = FeatureMap { map: mem::take(&mut model.features), features: Vec::new() };
// Classify tests that are not yet classified using the model.
let mut files = HashMap::default();
for dir in &[&root.join("issues"), root] {
for entry in fs::read_dir(dir)? {
let entry = entry?;
if!entry.file_type()?.is_dir() && entry.file_name()!= ".gitattributes" {
let path = entry.path();
if let Ok(s) = fs::read_to_string(&path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), true);
files.insert(file_name, RefCell::new(features));
}
}
}
}
let mut classified_tests = Vec::new();
for (name, features) in files_to_tests(files) {
let mut model_scores = Vec::new();
for (model_name, weights) in &model.classes {
let score = get_decision_value(weights, &features);
model_scores.push((model_name, score));
}
// Print three classes with highest decision values.
model_scores.sort_by(|(_, sc1), (_, sc2)| sc1.total_cmp(&sc2));
classified_tests.push(ClassifiedTest {
name,
class_scores: model_scores
.into_iter()
.rev()
.take(3)
.map(|(name, score)| (name.clone(), score))
.collect(),
});
}
let re = Regex::new(r"issue-(\d+)").unwrap();
classified_tests.sort_by(|test1, test2| test2.max_score().total_cmp(&test1.max_score()));
for test in classified_tests {
let mut msg = format!(
"- [{}](https://github.com/rust-lang/rust/blob/master/src/test/ui/{}) <sup>",
test.name, test.name
);
let issue = match re.captures(&test.name) {
Some(captures) => {
format!("[issue](https://github.com/rust-lang/rust/issues/{})", &captures[1])
}
None => "unknown".to_string(),
};
msg.push_str(&issue);
msg.push_str("</sup>: ");
for (i, (name, score)) in test.class_scores.iter().enumerate() {
if i != 0 {
msg.push_str(", ");
}
msg.push_str(&format!("{name} ({score:.3})"));
}
println!("{}", msg);
}
Ok(())
}
fn main() -> Result<(), Box<dyn Error>> {
let matches = App::new("UI test classifier")
.args_from_usage(
"--train 'Train the classifier'
--classify 'Classify tests'",
)
.get_matches();
// FIXME: Make it configurable.
let root = Path::new("C:/msys64/home/we/rust/src/test/ui");
if matches.is_present("train") {
train(root)?;
}
if matches.is_present("classify") {
classify(root)?;
}
Ok(())
}
| {
if let Some(index) = self.map.get(&*feature) {
Some(*index)
} else if read_only {
None
} else {
let new_index = u32::try_from(self.features.len()).unwrap();
self.features.push(feature.clone().into_owned());
self.map.insert(feature.into_owned(), new_index);
Some(new_index)
}
} | identifier_body |
main.rs | #![feature(array_windows)]
#![feature(format_args_capture)]
#![feature(total_cmp)]
use clap::App;
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use liblinear::util::TrainingInput;
use liblinear::{Builder as LiblinearBuilder, LibLinearModel as _, SolverType};
use regex::Regex;
use serde::{Deserialize, Serialize};
use walkdir::WalkDir;
use std::borrow::Cow;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::error::Error;
use std::path::Path;
use std::{fs, mem};
enum LexerState {
Start,
ContinueIdent,
ContinuePunct,
}
/// Mapping string-based features to integer indices and back.
#[derive(Default)]
struct FeatureMap {
features: Vec<String>,
map: HashMap<String, u32>,
}
/// Linear SVM model produced by training and used during classification.
#[derive(Serialize, Deserialize)]
struct Model {
features: HashMap<String, u32>,
classes: HashMap<String, Vec<(u32, f64)>>,
}
struct ClassifiedTest {
name: String,
class_scores: Vec<(String, f64)>,
}
impl FeatureMap {
fn intern(&mut self, feature: Cow<str>, read_only: bool) -> Option<u32> {
if let Some(index) = self.map.get(&*feature) {
Some(*index)
} else if read_only {
None
} else {
let new_index = u32::try_from(self.features.len()).unwrap();
self.features.push(feature.clone().into_owned());
self.map.insert(feature.into_owned(), new_index);
Some(new_index)
}
}
}
impl ClassifiedTest {
fn max_score(&self) -> f64 {
self.class_scores[0].1
}
}
fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
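// Worked example (illustrative; this test module is not in the original source):
// runs of identifier characters and runs of punctuation each form one token.
#[cfg(test)]
mod tokenize_example {
    use super::*;
    #[test]
    fn groups_ident_and_punct_runs() {
        // The adjacent punctuation characters `(`, `)` and `;` collapse into "();".
        assert_eq!(tokenize("fn main();"), ["fn", "main", "();"]);
    }
}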
/// Collapses each identifier or number-like token into a single placeholder token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
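// Sketch of the expansion performed above (illustrative; not in the original source):
// three tokens yield unigrams, generalized unigrams, bigrams, one skip-gram and trigrams.
#[cfg(test)]
mod ngram_example {
    use super::*;
    #[test]
    fn three_tokens_expand_to_ngrams() {
        let mut map = FeatureMap::default();
        map.features.push(String::new()); // keep feature indices 1-based, as in `train`
        let tokens: Vec<String> = ["let", "x", "="].iter().map(|s| s.to_string()).collect();
        let feats = tokens_to_features(&mut map, &tokens, false);
        // Distinct strings: "let", "x", "и", "=", "let x", "let и", "x =", "и =",
        // "let =", "let x =", "let и =" — 11 features after deduplication.
        assert_eq!(feats.len(), 11);
    }
}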
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
| lse if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
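// Numeric example (illustrative; not in the original source). The weight vector
// must stay sorted by feature index for the binary search above to be correct.
#[cfg(test)]
mod decision_value_example {
    use super::*;
    #[test]
    fn sums_weights_of_present_features() {
        let weights = [(1u32, 0.5), (3, -0.2), (9, 1.0)];
        let present = [1u32, 2, 3]; // feature 2 carries no weight
        assert!((get_decision_value(&weights, &present) - 0.3).abs() < 1e-9);
    }
}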
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {
let class_idx = i32::try_from(class_idx).unwrap();
let mut weights = Vec::new();
for feature_index in 1..i32::try_from(liblinear_model.num_features()).unwrap() + 1 {
let weight = liblinear_model.feature_coefficient(feature_index, class_idx);
if weight != 0.0 {
let index = u32::try_from(feature_index).unwrap();
weights.push((index, weight));
used_features.insert(index);
}
}
classes.insert(class_name.clone(), weights);
}
// Throw away features that ended up unused from the table.
let features =
feature_map.map.into_iter().filter(|(_, index)| used_features.contains(index)).collect();
// Write the model into file.
// FIXME: Make the output model file configurable.
let model = Model { features, classes };
let model_str = serde_json::to_string(&model)?;
fs::write("model.json", model_str)?;
Ok(())
}
/// Read classifier from `model.json` and use it to classify tests.
fn classify(root: &Path) -> Result<(), Box<dyn Error>> {
// Read the model from file.
// FIXME: Make the input model file configurable.
let model_str = fs::read_to_string("model.json")?;
let mut model: Model = serde_json::from_str(&model_str)?;
let mut feature_map = FeatureMap { map: mem::take(&mut model.features), features: Vec::new() };
// Classify tests that are not yet classified using the model.
let mut files = HashMap::default();
for dir in &[&root.join("issues"), root] {
for entry in fs::read_dir(dir)? {
let entry = entry?;
if !entry.file_type()?.is_dir() && entry.file_name() != ".gitattributes" {
let path = entry.path();
if let Ok(s) = fs::read_to_string(&path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), true);
files.insert(file_name, RefCell::new(features));
}
}
}
}
let mut classified_tests = Vec::new();
for (name, features) in files_to_tests(files) {
let mut model_scores = Vec::new();
for (model_name, weights) in &model.classes {
let score = get_decision_value(weights, &features);
model_scores.push((model_name, score));
}
// Print three classes with highest decision values.
model_scores.sort_by(|(_, sc1), (_, sc2)| sc1.total_cmp(&sc2));
classified_tests.push(ClassifiedTest {
name,
class_scores: model_scores
.into_iter()
.rev()
.take(3)
.map(|(name, score)| (name.clone(), score))
.collect(),
});
}
let re = Regex::new(r"issue-(\d+)").unwrap();
classified_tests.sort_by(|test1, test2| test2.max_score().total_cmp(&test1.max_score()));
for test in classified_tests {
let mut msg = format!(
"- [{}](https://github.com/rust-lang/rust/blob/master/src/test/ui/{}) <sup>",
test.name, test.name
);
let issue = match re.captures(&test.name) {
Some(captures) => {
format!("[issue](https://github.com/rust-lang/rust/issues/{})", &captures[1])
}
None => "unknown".to_string(),
};
msg.push_str(&issue);
msg.push_str("</sup>: ");
for (i, (name, score)) in test.class_scores.iter().enumerate() {
if i != 0 {
msg.push_str(", ");
}
msg.push_str(&format!("{name} ({score:.3})"));
}
println!("{}", msg);
}
Ok(())
}
fn main() -> Result<(), Box<dyn Error>> {
let matches = App::new("UI test classifier")
.args_from_usage(
"--train 'Train the classifier'
--classify 'Classify tests'",
)
.get_matches();
// FIXME: Make it configurable.
let root = Path::new("C:/msys64/home/we/rust/src/test/ui");
if matches.is_present("train") {
train(root)?;
}
if matches.is_present("classify") {
classify(root)?;
}
Ok(())
}
| prefix
} e | conditional_block |
main.rs | #![feature(array_windows)]
#![feature(format_args_capture)]
#![feature(total_cmp)]
use clap::App;
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use liblinear::util::TrainingInput;
use liblinear::{Builder as LiblinearBuilder, LibLinearModel as _, SolverType};
use regex::Regex;
use serde::{Deserialize, Serialize};
use walkdir::WalkDir;
use std::borrow::Cow;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::error::Error;
use std::path::Path;
use std::{fs, mem};
enum LexerState {
Start,
ContinueIdent,
ContinuePunct,
}
/// Mapping string-based features to integer indices and back.
#[derive(Default)]
struct FeatureMap {
features: Vec<String>,
map: HashMap<String, u32>,
}
/// Linear SVM model produced by training and used during classification.
#[derive(Serialize, Deserialize)]
struct Model {
features: HashMap<String, u32>,
classes: HashMap<String, Vec<(u32, f64)>>,
}
struct ClassifiedTest {
name: String,
class_scores: Vec<(String, f64)>,
}
impl FeatureMap {
fn intern(&mut self, feature: Cow<str>, read_only: bool) -> Option<u32> {
if let Some(index) = self.map.get(&*feature) {
Some(*index)
} else if read_only {
None
} else {
let new_index = u32::try_from(self.features.len()).unwrap();
self.features.push(feature.clone().into_owned());
self.map.insert(feature.into_owned(), new_index);
Some(new_index)
}
}
}
impl ClassifiedTest {
fn max_score(&self) -> f64 {
self.class_scores[0].1
}
}
fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
/// Collapses each identifier or number-like token into a single placeholder token.
fn | (s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
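// Behaviour sketch (illustrative; not in the original source): identifiers collapse
// to "и", number-like tokens to "ц", keywords and punctuation pass through unchanged.
#[cfg(test)]
mod generalize_example {
    use super::*;
    #[test]
    fn collapses_idents_and_numbers() {
        assert_eq!(generalize("my_var"), "и");
        assert_eq!(generalize("42"), "ц");
        assert_eq!(generalize("fn"), "fn");
        assert_eq!(generalize("=>"), "=>");
    }
}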
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
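// Merging sketch (illustrative; not in the original source): `foo.rs` and its
// companion `foo.stderr` collapse into one test entry with a deduplicated vector.
#[cfg(test)]
mod files_to_tests_example {
    use super::*;
    #[test]
    fn merges_companion_files_into_one_test() {
        let mut files = HashMap::default();
        files.insert("foo.rs".to_string(), RefCell::new(vec![1, 2]));
        files.insert("foo.stderr".to_string(), RefCell::new(vec![2, 3]));
        let tests = files_to_tests(files);
        assert_eq!(tests.len(), 1);
        assert_eq!(tests["foo.rs"], vec![1, 2, 3]);
    }
}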
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {
let class_idx = i32::try_from(class_idx).unwrap();
let mut weights = Vec::new();
for feature_index in 1..i32::try_from(liblinear_model.num_features()).unwrap() + 1 {
let weight = liblinear_model.feature_coefficient(feature_index, class_idx);
if weight != 0.0 {
let index = u32::try_from(feature_index).unwrap();
weights.push((index, weight));
used_features.insert(index);
}
}
classes.insert(class_name.clone(), weights);
}
// Throw away features that ended up unused from the table.
let features =
feature_map.map.into_iter().filter(|(_, index)| used_features.contains(index)).collect();
// Write the model into file.
// FIXME: Make the output model file configurable.
let model = Model { features, classes };
let model_str = serde_json::to_string(&model)?;
fs::write("model.json", model_str)?;
Ok(())
}
/// Read classifier from `model.json` and use it to classify tests.
fn classify(root: &Path) -> Result<(), Box<dyn Error>> {
// Read the model from file.
// FIXME: Make the input model file configurable.
let model_str = fs::read_to_string("model.json")?;
let mut model: Model = serde_json::from_str(&model_str)?;
let mut feature_map = FeatureMap { map: mem::take(&mut model.features), features: Vec::new() };
// Classify tests that are not yet classified using the model.
let mut files = HashMap::default();
for dir in &[&root.join("issues"), root] {
for entry in fs::read_dir(dir)? {
let entry = entry?;
if !entry.file_type()?.is_dir() && entry.file_name() != ".gitattributes" {
let path = entry.path();
if let Ok(s) = fs::read_to_string(&path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), true);
files.insert(file_name, RefCell::new(features));
}
}
}
}
let mut classified_tests = Vec::new();
for (name, features) in files_to_tests(files) {
let mut model_scores = Vec::new();
for (model_name, weights) in &model.classes {
let score = get_decision_value(weights, &features);
model_scores.push((model_name, score));
}
// Print three classes with highest decision values.
model_scores.sort_by(|(_, sc1), (_, sc2)| sc1.total_cmp(&sc2));
classified_tests.push(ClassifiedTest {
name,
class_scores: model_scores
.into_iter()
.rev()
.take(3)
.map(|(name, score)| (name.clone(), score))
.collect(),
});
}
let re = Regex::new(r"issue-(\d+)").unwrap();
classified_tests.sort_by(|test1, test2| test2.max_score().total_cmp(&test1.max_score()));
for test in classified_tests {
let mut msg = format!(
"- [{}](https://github.com/rust-lang/rust/blob/master/src/test/ui/{}) <sup>",
test.name, test.name
);
let issue = match re.captures(&test.name) {
Some(captures) => {
format!("[issue](https://github.com/rust-lang/rust/issues/{})", &captures[1])
}
None => "unknown".to_string(),
};
msg.push_str(&issue);
msg.push_str("</sup>: ");
for (i, (name, score)) in test.class_scores.iter().enumerate() {
if i != 0 {
msg.push_str(", ");
}
msg.push_str(&format!("{name} ({score:.3})"));
}
println!("{}", msg);
}
Ok(())
}
fn main() -> Result<(), Box<dyn Error>> {
let matches = App::new("UI test classifier")
.args_from_usage(
"--train 'Train the classifier'
--classify 'Classify tests'",
)
.get_matches();
// FIXME: Make it configurable.
let root = Path::new("C:/msys64/home/we/rust/src/test/ui");
if matches.is_present("train") {
train(root)?;
}
if matches.is_present("classify") {
classify(root)?;
}
Ok(())
}
| generalize | identifier_name |
lib.rs | #![allow(dead_code)]
extern crate libc;
extern crate zmq_ffi;
#[macro_use]
extern crate cfg_if;
mod socket;
mod errno;
pub use socket::*;
pub use errno::*;
use std::ops::{ Deref, DerefMut };
use std::ffi;
use std::vec::Vec;
use std::slice;
use std::mem::transmute;
use libc::{ c_int, c_void, size_t };
pub const ZMQ_VERSION_MAJOR:i32 = 4;
pub const ZMQ_VERSION_MINOR:i32 = 1;
pub const ZMQ_VERSION_PATCH:i32 = 4;
macro_rules! ret_when_null {
($ptr: expr) => {{
if $ptr.is_null() {
return Err(Error::from_last_err());
}
}}
}
#[macro_export]
macro_rules! ZMQ_MAKE_VERSION {
($major: expr, $minor: expr, $patch: expr) => {
{
$major * 10000 + $minor * 100 + $patch
}
}
}
pub const ZMQ_VERSION:i32 = ZMQ_MAKE_VERSION!(
ZMQ_VERSION_MAJOR,
ZMQ_VERSION_MINOR,
ZMQ_VERSION_PATCH
);
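// Quick check (illustrative, not in the original source): the macro packs
// (major, minor, patch) into one integer, so version 4.1.4 becomes 40104.
const _VERSION_EXAMPLE: i32 = ZMQ_MAKE_VERSION!(4, 1, 4);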
fn | () -> c_int {
unsafe {
zmq_ffi::zmq_errno()
}
}
fn strerror(errnum: c_int) -> String {
unsafe {
let s = zmq_ffi::zmq_strerror(errnum);
ffi::CStr::from_ptr(s).to_str().unwrap().to_string()
}
}
/// Report 0MQ library version
///
/// Binding of `void zmq_version (int *major, int *minor, int *patch)`
///
/// The function will return tuple of major, minor and patch of the ØMQ library version.
pub fn version() -> (i32, i32, i32) {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
unsafe {
zmq_ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
(major as i32, minor as i32, patch as i32)
}
#[derive(Clone)]
pub struct Error {
err_num: c_int,
err_str: String,
}
impl Error {
fn from_last_err() -> Error {
let err_num = errno();
let err_str = strerror(err_num);
Error {
err_num: err_num,
err_str: err_str,
}
}
pub fn get_errno(&self) -> Errno {
self.err_num as Errno
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} (code {})", self.err_str, self.err_num)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.err_str
}
}
type ContextOption = c_int;
const IO_THREADS: ContextOption = 1; // get / set
const MAX_SOCKETS: ContextOption = 2; // get / set
const SOCKET_LIMIT: ContextOption = 3; // get /
const THREAD_PRIORITY: ContextOption = 3; // / set
const THREAD_SCHED_POLICY: ContextOption = 4;// / set
const IPV6: ContextOption = 42; // get / set
macro_rules! getctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&self) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
};
($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
/// The function will shutdown the ØMQ context context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
/// In order to establish a message flow a socket must first be connected to at least one endpoint with Socket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { },
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// Initialise empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capacity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
unsafe {
let mut msg = try!(Message::with_capacity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
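// Usage sketch (illustrative; not part of the original source): `from_vec` hands
// its buffer to 0MQ without copying — `zmq_free_fn` later rebuilds the Box from the
// raw pointer plus the length smuggled through `hint` — while `from_slice` copies.
#[cfg(test)]
mod message_example {
    use super::*;
    #[test]
    fn builds_messages_from_bytes() {
        let zero_copy = Message::from_vec(vec![1u8, 2, 3]).unwrap();
        let copied = Message::from_slice(&[1u8, 2, 3]).unwrap();
        assert_eq!(&zero_copy[..], &copied[..]);
    }
}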
/// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
/// This method is identical to `get_property()` with an argument of `MORE`.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used for now
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
None
} else {
unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
}
}
}
impl Deref for Message {
type Target = [u8];
fn deref<'a>(&'a self) -> &'a [u8] {
unsafe {
let ptr = self.get_const_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts(transmute(ptr), len)
}
}
}
impl DerefMut for Message {
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
let ptr = self.get_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts_mut(transmute(ptr), len)
}
}
}
impl Drop for Message {
fn drop(&mut self) {
loop {
let rc = unsafe { zmq_ffi::zmq_msg_close(&mut self.msg) };
if rc != 0 {
let e = Error::from_last_err();
if e.get_errno() == EINTR {
continue;
} else {
panic!(e);
}
} else {
break;
}
}
}
}
pub type SocketType = c_int;
pub const PAIR: SocketType = 0;
pub const PUB: SocketType = 1;
pub const SUB: SocketType = 2;
pub const REQ: SocketType = 3;
pub const REP: SocketType = 4;
pub const DEALER: SocketType = 5;
pub const ROUTER: SocketType = 6;
pub const PULL: SocketType = 7;
pub const PUSH: SocketType = 8;
pub const XPUB: SocketType = 9;
pub const XSUB: SocketType = 10;
pub const STREAM: SocketType = 11;
pub type MessageProperty = c_int;
pub const MORE: MessageProperty = 1;
pub const SRCFD: MessageProperty = 2;
pub const SHARED: MessageProperty = 3;
pub type SecurityMechanism = c_int;
pub const ZMQ_NULL: SecurityMechanism = 0;
pub const ZMQ_PLAIN: SecurityMechanism = 1;
pub const ZMQ_CURVE: SecurityMechanism = 2;
pub const ZMQ_GSSAPI: SecurityMechanism = 3;
/// Check a ZMQ capability
///
/// Binding of `int zmq_has (const char *capability);`
///
/// The function shall report whether a specified capability is available in the library
pub fn has_capability(capability: &str) -> bool {
let capability_cstr = ffi::CString::new(capability).unwrap();
let rc = unsafe { zmq_ffi::zmq_has(capability_cstr.as_ptr()) };
rc == 1
}
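// Hedged usage sketch (not in the original source): capability names follow
// libzmq's zmq_has(3) — e.g. "ipc", "pgm", "curve" — and availability depends on
// how the linked libzmq was built, so the result is reported rather than asserted.
fn _capability_example() {
    if has_capability("curve") {
        println!("libzmq was built with CURVE security support");
    }
}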
// Encryption functions
/* Encode data with Z85 encoding. Returns encoded data */
//ZMQ_EXPORT char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);
/// Encode a binary key as Z85 printable text
///
/// Binding of `char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);`
///
/// The function will encode the binary block specified by data and size into a string in dest.
/// The size of the binary block must be divisible by 4.
pub fn z85_encode(data: &[u8]) -> Result<String, Error> {
let len = data.len() as i32 * 5 / 4 + 1;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_encode(transmute(dest.as_mut_ptr()), data.as_ptr(), data.len()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
let cstr = ffi::CStr::from_ptr(transmute(dest.as_ptr()));
Ok(String::from_utf8(cstr.to_bytes().to_vec()).unwrap())
}
}
}
/// Decode a binary key from Z85 printable text
///
/// Binding of `uint8_t *zmq_z85_decode (uint8_t *dest, const char *string);`
///
/// The function will decode string into dest. The length of string in bytes shall be divisible by 5
pub fn z85_decode(encoded: &str) -> Result<Vec<u8>, Error> {
let encoded_cstr = ffi::CString::new(encoded).unwrap();
let len = (encoded_cstr.as_bytes().len() as i32 * 4 / 5) as i32;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_decode(dest.as_mut_ptr(), encoded_cstr.as_ptr()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
}
Ok(dest)
}
}
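// Roundtrip sketch (illustrative; not part of the original source): the input length
// must be divisible by 4 for encoding, and the Z85 text length by 5 for decoding.
#[cfg(test)]
mod z85_example {
    use super::*;
    #[test]
    fn encode_decode_roundtrip() {
        let key = [0x86u8, 0x4F, 0xD2, 0x6F, 0xB5, 0x59, 0xF7, 0x5B]; // 8 bytes
        let text = z85_encode(&key).unwrap(); // 10 Z85 characters
        assert_eq!(z85_decode(&text).unwrap(), key);
    }
}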
/// Generate z85-encoded public and private keypair with libsodium.
///
/// Binding of `int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);`
///
/// The function will return a newly generated random keypair consisting of a public key and a secret key.
/// The keys are encoded using z85_encode().
pub fn gen_curve_keypair() -> Result<(String, String), Error> {
let mut public_key: Vec<u8> = Vec::with_capacity(41);
let mut secret_key: Vec<u8> = Vec::with_capacity(41);
let rc = unsafe {
zmq_ffi::zmq_curve_keypair(
transmute(public_key.as_mut_ptr()),
transmute(secret_key.as_mut_ptr())
)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
unsafe {
public_key.set_len(40);
secret_key.set_len(40);
}
Ok((String::from_utf8(public_key).unwrap(), String::from_utf8(secret_key).unwrap()))
}
}
} | errno | identifier_name
lib.rs | #![allow(dead_code)]
extern crate libc;
extern crate zmq_ffi;
#[macro_use]
extern crate cfg_if;
mod socket;
mod errno;
pub use socket::*;
pub use errno::*;
use std::ops::{ Deref, DerefMut };
use std::ffi;
use std::vec::Vec;
use std::slice;
use std::mem::transmute;
use libc::{ c_int, c_void, size_t };
pub const ZMQ_VERSION_MAJOR:i32 = 4;
pub const ZMQ_VERSION_MINOR:i32 = 1;
pub const ZMQ_VERSION_PATCH:i32 = 4;
macro_rules! ret_when_null {
($ptr: expr) => {{
if $ptr.is_null() {
return Err(Error::from_last_err());
}
}}
}
#[macro_export]
macro_rules! ZMQ_MAKE_VERSION {
($major: expr, $minor: expr, $patch: expr) => {
{
$major * 10000 + $minor * 100 + $patch
}
}
}
pub const ZMQ_VERSION:i32 = ZMQ_MAKE_VERSION!(
ZMQ_VERSION_MAJOR,
ZMQ_VERSION_MINOR,
ZMQ_VERSION_PATCH
);
fn errno() -> c_int {
unsafe {
zmq_ffi::zmq_errno()
}
}
fn strerror(errnum: c_int) -> String {
unsafe {
let s = zmq_ffi::zmq_strerror(errnum);
ffi::CStr::from_ptr(s).to_str().unwrap().to_string()
}
}
/// Report 0MQ library version
///
/// Binding of `void zmq_version (int *major, int *minor, int *patch)`
///
/// The function will return tuple of major, minor and patch of the ØMQ library version.
pub fn version() -> (i32, i32, i32) {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
unsafe {
zmq_ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
(major as i32, minor as i32, patch as i32)
}
#[derive(Clone)]
pub struct Error {
err_num: c_int,
err_str: String,
}
impl Error {
fn from_last_err() -> Error {
let err_num = errno();
let err_str = strerror(err_num);
Error {
err_num: err_num,
err_str: err_str,
}
}
pub fn get_errno(&self) -> Errno {
self.err_num as Errno
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} (code {})", self.err_str, self.err_num)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.err_str
}
}
type ContextOption = c_int;
const IO_THREADS: ContextOption = 1; // get / set
const MAX_SOCKETS: ContextOption = 2; // get / set
const SOCKET_LIMIT: ContextOption = 3; // get /
const THREAD_PRIORITY: ContextOption = 3; // / set
const THREAD_SCHED_POLICY: ContextOption = 4;// / set
const IPV6: ContextOption = 42; // get / set
macro_rules! getctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&self) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
};
($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
/// The function will shutdown the ØMQ context context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
/// In order to establish a message flow a socket must first be connected to at least one endpoint with Socket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { },
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// Initialise empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capacity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
unsafe {
let mut msg = try!(Message::with_capacity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
/// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
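// Semantics sketch (illustrative; not part of the original source): `msg_move`
// leaves the source message empty, while `msg_copy` leaves it intact.
#[cfg(test)]
mod move_copy_example {
    use super::*;
    #[test]
    fn move_empties_source_but_copy_does_not() {
        let mut src = Message::from_slice(b"abc").unwrap();
        let mut dest = Message::new().unwrap();
        Message::msg_copy(&mut dest, &src).unwrap();
        assert_eq!(src.len(), 3); // source untouched by copy
        Message::msg_move(&mut dest, &mut src).unwrap();
        assert_eq!(src.len(), 0); // source emptied by move
        assert_eq!(&dest[..], &b"abc"[..]);
    }
}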
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
/// This method is identical to `get_property()` with an argument of `MORE`.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used for now
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
None
} else {
unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
}
}
}
impl Deref for Message {
type Target = [u8];
fn deref<'a>(&'a self) -> &'a [u8] {
unsafe {
let ptr = self.get_const_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts(transmute(ptr), len)
}
}
}
impl DerefMut for Message {
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
let ptr = self.get_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts_mut(transmute(ptr), len)
}
}
}
impl Drop for Message {
fn drop(&mut self) {
loop {
let rc = unsafe { zmq_ffi::zmq_msg_close(&mut self.msg) };
if rc != 0 {
let e = Error::from_last_err();
if e.get_errno() == EINTR {
continue;
} else {
panic!(e);
}
} else {
break;
}
}
}
}
pub type SocketType = c_int;
pub const PAIR: SocketType = 0;
pub const PUB: SocketType = 1;
pub const SUB: SocketType = 2;
pub const REQ: SocketType = 3;
pub const REP: SocketType = 4;
pub const DEALER: SocketType = 5;
pub const ROUTER: SocketType = 6;
pub const PULL: SocketType = 7;
pub const PUSH: SocketType = 8;
pub const XPUB: SocketType = 9;
pub const XSUB: SocketType = 10;
pub const STREAM: SocketType = 11;
pub type MessageProperty = c_int;
pub const MORE: MessageProperty = 1;
pub const SRCFD: MessageProperty = 2;
pub const SHARED: MessageProperty = 3;
pub type SecurityMechanism = c_int;
pub const ZMQ_NULL: SecurityMechanism = 0;
pub const ZMQ_PLAIN: SecurityMechanism = 1;
pub const ZMQ_CURVE: SecurityMechanism = 2;
pub const ZMQ_GSSAPI: SecurityMechanism = 3;
/// Check a ZMQ capability
/// | pub fn has_capability(capability: &str) -> bool {
let capability_cstr = ffi::CString::new(capability).unwrap();
let rc = unsafe { zmq_ffi::zmq_has(capability_cstr.as_ptr()) };
rc == 1
}
// Encryption functions
/* Encode data with Z85 encoding. Returns encoded data */
//ZMQ_EXPORT char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);
/// Encode a binary key as Z85 printable text
///
/// Binding of `char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);`
///
/// The function will encode the binary block specified by data and size into a string in dest.
/// The size of the binary block must be divisible by 4.
pub fn z85_encode(data: &[u8]) -> Result<String, Error> {
let len = data.len() as i32 * 5 / 4 + 1;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_encode(transmute(dest.as_mut_ptr()), data.as_ptr(), data.len()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
let cstr = ffi::CStr::from_ptr(transmute(dest.as_ptr()));
Ok(String::from_utf8(cstr.to_bytes().to_vec()).unwrap())
}
}
}
/// Decode a binary key from Z85 printable text
///
/// Binding of `uint8_t *zmq_z85_decode (uint8_t *dest, const char *string);`
///
/// The function will decode string into dest. The length of string in bytes shall be divisible by 5
pub fn z85_decode(encoded: &str) -> Result<Vec<u8>, Error> {
let encoded_cstr = ffi::CString::new(encoded).unwrap();
let len = (encoded_cstr.as_bytes().len() as i32 * 4 / 5) as i32;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_decode(dest.as_mut_ptr(), encoded_cstr.as_ptr()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
}
Ok(dest)
}
}
/// Generate z85-encoded public and private keypair with libsodium.
///
/// Binding of `int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);`
///
/// The function will return a newly generated random keypair consisting of a public key and a secret key.
/// The keys are encoded using z85_encode().
pub fn gen_curve_keypair() -> Result<(String, String), Error> {
let mut public_key: Vec<u8> = Vec::with_capacity(41);
let mut secret_key: Vec<u8> = Vec::with_capacity(41);
let rc = unsafe {
zmq_ffi::zmq_curve_keypair(
transmute(public_key.as_mut_ptr()),
transmute(secret_key.as_mut_ptr())
)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
unsafe {
public_key.set_len(40);
secret_key.set_len(40);
}
Ok((String::from_utf8(public_key).unwrap(), String::from_utf8(secret_key).unwrap()))
}
} | /// Binding of `int zmq_has (const char *capability);`
///
/// The function shall report whether a specified capability is available in the library | random_line_split |
lib.rs | #![allow(dead_code)]
extern crate libc;
extern crate zmq_ffi;
#[macro_use]
extern crate cfg_if;
mod socket;
mod errno;
pub use socket::*;
pub use errno::*;
use std::ops::{ Deref, DerefMut };
use std::ffi;
use std::vec::Vec;
use std::slice;
use std::mem::transmute;
use libc::{ c_int, c_void, size_t };
pub const ZMQ_VERSION_MAJOR:i32 = 4;
pub const ZMQ_VERSION_MINOR:i32 = 1;
pub const ZMQ_VERSION_PATCH:i32 = 4;
macro_rules! ret_when_null {
($ptr: expr) => {{
if $ptr.is_null() {
return Err(Error::from_last_err());
}
}}
}
#[macro_export]
macro_rules! ZMQ_MAKE_VERSION {
($major: expr, $minor: expr, $patch: expr) => {
{
$major * 10000 + $minor * 100 + $patch
}
}
}
pub const ZMQ_VERSION:i32 = ZMQ_MAKE_VERSION!(
ZMQ_VERSION_MAJOR,
ZMQ_VERSION_MINOR,
ZMQ_VERSION_PATCH
);
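// Illustrative sketch (not part of the original source): because
// ZMQ_MAKE_VERSION! encodes a version as major*10000 + minor*100 + patch,
// plain integer comparison orders versions correctly, both for the
// compile-time constant above and for values built at run time.
fn example_require_version() {
    let required = ZMQ_MAKE_VERSION!(4, 1, 0);
    assert!(ZMQ_VERSION >= required, "libzmq 4.1.0 or newer required");
}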
fn errno() -> c_int {
unsafe {
zmq_ffi::zmq_errno()
}
}
fn strerror(errnum: c_int) -> String {
unsafe {
let s = zmq_ffi::zmq_strerror(errnum);
ffi::CStr::from_ptr(s).to_str().unwrap().to_string()
}
}
/// Report 0MQ library version
///
/// Binding of `void zmq_version (int *major, int *minor, int *patch)`
///
/// The function will return a tuple of the major, minor and patch components of the ØMQ library version.
pub fn version() -> (i32, i32, i32) {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
unsafe {
zmq_ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
(major as i32, minor as i32, patch as i32)
}
#[derive(Clone)]
pub struct Error {
err_num: c_int,
err_str: String,
}
impl Error {
fn from_last_err() -> Error {
let err_num = errno();
let err_str = strerror(err_num);
Error {
err_num: err_num,
err_str: err_str,
}
}
pub fn get_errno(&self) -> Errno {
self.err_num as Errno
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} (code {})", self.err_str, self.err_num)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.err_str
}
}
type ContextOption = c_int;
const IO_THREADS: ContextOption = 1; // get / set
const MAX_SOCKETS: ContextOption = 2; // get / set
const SOCKET_LIMIT: ContextOption = 3; // get /
const THREAD_PRIORITY: ContextOption = 3; // / set
const THREAD_SCHED_POLICY: ContextOption = 4;// / set
const IPV6: ContextOption = 42; // get / set
macro_rules! getctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&self) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
};
($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
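// For reference (not part of the original source): an invocation such as
// `getctxopt_template!(get_io_threads, IO_THREADS);` expands to a thin
// checked wrapper around zmq_ctx_get(), roughly:
//
// pub fn get_io_threads(&self) -> Result<i32, Error> {
//     let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, IO_THREADS as c_int) };
//     if rc == -1 { Err(Error::from_last_err()) } else { Ok(rc) }
// }
//
// The setter template is analogous, wrapping zmq_ctx_set().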
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
/// The function will shut down the ØMQ context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
/// In order to establish a message flow a socket must first be connected to at least one endpoint with Socket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
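// Illustrative usage sketch (not part of the original source). Error handling
// is collapsed to unwrap() for brevity; the Socket API itself (bind, connect,
// send, recv) lives in the `socket` module and is not shown here.
fn example_context_setup() {
    let mut ctx = Context::new().unwrap();
    ctx.set_io_threads(2).unwrap(); // generated by setctxopt_template!
    assert!(ctx.get_io_threads().unwrap() >= 1); // generated by getctxopt_template!
    let _responder = ctx.socket(REP).unwrap();
    // ... bind/connect the socket and exchange messages ...
}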
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { break; }, // term() succeeded; exit the retry loop
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
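// Standalone sketch (not part of the original source) of the ownership
// hand-off performed by from_vec() and zmq_free_fn above: the boxed slice is
// leaked to a raw pointer for the lifetime of the message, and the free
// callback later rebuilds the Box from that pointer plus the length smuggled
// through `hint`, so the allocation is dropped exactly once.
fn example_box_round_trip() {
    let data: Box<[u8]> = vec![1u8, 2, 3].into_boxed_slice();
    let len = data.len();
    let raw = Box::into_raw(data) as *mut c_void; // ownership leaves Rust here
    // ... libzmq would own `raw` until the message is closed ...
    unsafe { zmq_free_fn(raw, len as *mut c_void) }; // ownership returns, Box dropped
}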
impl Message {
/// Initialise an empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capacity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
| /// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
/// This method is identical to `get_property()` with an argument of `MORE`.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used at the moment
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
None
} else {
unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
}
}
}
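// Illustrative sketch (not part of the original source): building a message
// without copying and inspecting it. get_meta() is only meaningful on
// messages received over a connection, so it is omitted here.
fn example_message_inspection() -> Result<(), Error> {
    let msg = Message::from_vec(b"hello".to_vec())?;
    assert_eq!(msg.len(), 5);
    assert!(!msg.has_more()); // a freshly built message is single-part
    assert_eq!(&msg[..], b"hello"); // byte access via the Deref impl below
    Ok(())
}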
impl Deref for Message {
type Target = [u8];
fn deref<'a>(&'a self) -> &'a [u8] {
unsafe {
let ptr = self.get_const_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts(transmute(ptr), len)
}
}
}
impl DerefMut for Message {
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
let ptr = self.get_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts_mut(transmute(ptr), len)
}
}
}
impl Drop for Message {
fn drop(&mut self) {
loop {
let rc = unsafe { zmq_ffi::zmq_msg_close(&mut self.msg) };
if rc != 0 {
let e = Error::from_last_err();
if e.get_errno() == EINTR {
continue;
} else {
panic!("{}", e);
}
} else {
break;
}
}
}
}
pub type SocketType = c_int;
pub const PAIR: SocketType = 0;
pub const PUB: SocketType = 1;
pub const SUB: SocketType = 2;
pub const REQ: SocketType = 3;
pub const REP: SocketType = 4;
pub const DEALER: SocketType = 5;
pub const ROUTER: SocketType = 6;
pub const PULL: SocketType = 7;
pub const PUSH: SocketType = 8;
pub const XPUB: SocketType = 9;
pub const XSUB: SocketType = 10;
pub const STREAM: SocketType = 11;
pub type MessageProperty = c_int;
pub const MORE: MessageProperty = 1;
pub const SRCFD: MessageProperty = 2;
pub const SHARED: MessageProperty = 3;
pub type SecurityMechanism = c_int;
pub const ZMQ_NULL: SecurityMechanism = 0;
pub const ZMQ_PLAIN: SecurityMechanism = 1;
pub const ZMQ_CURVE: SecurityMechanism = 2;
pub const ZMQ_GSSAPI: SecurityMechanism = 3;
/// Check a ZMQ capability
///
/// Binding of `int zmq_has (const char *capability);`
///
/// The function shall report whether a specified capability is available in the library
pub fn has_capability(capability: &str) -> bool {
let capability_cstr = ffi::CString::new(capability).unwrap();
let rc = unsafe { zmq_ffi::zmq_has(capability_cstr.as_ptr()) };
rc == 1
}
// Encryption functions
/* Encode data with Z85 encoding. Returns encoded data */
//ZMQ_EXPORT char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);
/// Encode a binary key as Z85 printable text
///
/// Binding of `char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);`
///
/// The function will encode the binary block specified by data and size into a string in dest.
/// The size of the binary block must be divisible by 4.
pub fn z85_encode(data: &[u8]) -> Result<String, Error> {
let len = data.len() as i32 * 5 / 4 + 1;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_encode(transmute(dest.as_mut_ptr()), data.as_ptr(), data.len()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
let cstr = ffi::CStr::from_ptr(transmute(dest.as_ptr()));
Ok(String::from_utf8(cstr.to_bytes().to_vec()).unwrap())
}
}
}
/// Decode a binary key from Z85 printable text
///
/// Binding of `uint8_t *zmq_z85_decode (uint8_t *dest, const char *string);`
///
/// The function will decode string into dest. The length of string in bytes shall be divisible by 5
pub fn z85_decode(encoded: &str) -> Result<Vec<u8>, Error> {
let encoded_cstr = ffi::CString::new(encoded).unwrap();
let len = (encoded_cstr.as_bytes().len() as i32 * 4 / 5) as i32;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_decode(dest.as_mut_ptr(), encoded_cstr.as_ptr()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
}
Ok(dest)
}
}
/// Generate z85-encoded public and private keypair with libsodium.
///
/// Binding of `int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);`
///
/// The function will return a newly generated random keypair consisting of a public key and a secret key.
/// The keys are encoded using z85_encode().
pub fn gen_curve_keypair() -> Result<(String, String), Error> {
let mut public_key: Vec<u8> = Vec::with_capacity(41);
let mut secret_key: Vec<u8> = Vec::with_capacity(41);
let rc = unsafe {
zmq_ffi::zmq_curve_keypair(
transmute(public_key.as_mut_ptr()),
transmute(secret_key.as_mut_ptr())
)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
unsafe {
public_key.set_len(40);
secret_key.set_len(40);
}
Ok((String::from_utf8(public_key).unwrap(), String::from_utf8(secret_key).unwrap()))
}
}
| unsafe {
let mut msg = try!(Message::with_capacity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
| identifier_body |
lib.rs | #![allow(dead_code)]
extern crate libc;
extern crate zmq_ffi;
#[macro_use]
extern crate cfg_if;
mod socket;
mod errno;
pub use socket::*;
pub use errno::*;
use std::ops::{ Deref, DerefMut };
use std::ffi;
use std::vec::Vec;
use std::slice;
use std::mem::transmute;
use libc::{ c_int, c_void, size_t };
pub const ZMQ_VERSION_MAJOR:i32 = 4;
pub const ZMQ_VERSION_MINOR:i32 = 1;
pub const ZMQ_VERSION_PATCH:i32 = 4;
macro_rules! ret_when_null {
($ptr: expr) => {{
if $ptr.is_null() {
return Err(Error::from_last_err());
}
}}
}
#[macro_export]
macro_rules! ZMQ_MAKE_VERSION {
($major: expr, $minor: expr, $patch: expr) => {
{
$major * 10000 + $minor * 100 + $patch
}
}
}
pub const ZMQ_VERSION:i32 = ZMQ_MAKE_VERSION!(
ZMQ_VERSION_MAJOR,
ZMQ_VERSION_MINOR,
ZMQ_VERSION_PATCH
);
fn errno() -> c_int {
unsafe {
zmq_ffi::zmq_errno()
}
}
fn strerror(errnum: c_int) -> String {
unsafe {
let s = zmq_ffi::zmq_strerror(errnum);
ffi::CStr::from_ptr(s).to_str().unwrap().to_string()
}
}
/// Report 0MQ library version
///
/// Binding of `void zmq_version (int *major, int *minor, int *patch)`
///
/// The function will return a tuple of the major, minor and patch components of the ØMQ library version.
pub fn version() -> (i32, i32, i32) {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
unsafe {
zmq_ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
(major as i32, minor as i32, patch as i32)
}
#[derive(Clone)]
pub struct Error {
err_num: c_int,
err_str: String,
}
impl Error {
fn from_last_err() -> Error {
let err_num = errno();
let err_str = strerror(err_num);
Error {
err_num: err_num,
err_str: err_str,
}
}
pub fn get_errno(&self) -> Errno {
self.err_num as Errno
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} (code {})", self.err_str, self.err_num)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.err_str
}
}
type ContextOption = c_int;
const IO_THREADS: ContextOption = 1; // get / set
const MAX_SOCKETS: ContextOption = 2; // get / set
const SOCKET_LIMIT: ContextOption = 3; // get /
const THREAD_PRIORITY: ContextOption = 3; // / set
const THREAD_SCHED_POLICY: ContextOption = 4;// / set
const IPV6: ContextOption = 42; // get / set
macro_rules! getctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&self) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
};
($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
/// The function will shut down the ØMQ context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
/// In order to establish a message flow a socket must first be connected to at least one endpoint with Socket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { break; }, // term() succeeded; exit the retry loop
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
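// Illustrative sketch (not part of the original source): zmq_ctx_term()
// blocks until all sockets in the context are closed and can be interrupted
// by a signal (EINTR), which is why Drop above retries in a loop. shutdown()
// can be called first so that blocking socket calls on other threads fail
// with ETERM instead of holding up termination.
fn example_shutdown_then_drop() {
    let mut ctx = Context::new().unwrap();
    ctx.shutdown().unwrap(); // wake any blocked socket operations
    drop(ctx); // term() is retried on EINTR inside Drop
}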
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// Initialise an empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capacity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
unsafe {
let mut msg = try!(Message::with_capacity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
/// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
/// This method is identical to `get_property()` with an argument of `MORE`.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used at the moment
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
| e {
unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
}
}
}
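// Illustrative sketch (not part of the original source) of the two transfer
// primitives above: msg_copy() leaves the source intact (the content may be
// shared rather than duplicated), while msg_move() leaves the source empty.
fn example_move_and_copy() -> Result<(), Error> {
    let mut src = Message::from_vec(b"payload".to_vec())?;
    let mut copy = Message::new()?;
    Message::msg_copy(&mut copy, &src)?;
    assert_eq!(&copy[..], b"payload");
    let mut dest = Message::new()?;
    Message::msg_move(&mut dest, &mut src)?;
    assert_eq!(src.len(), 0); // src is now an empty message
    Ok(())
}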
impl Deref for Message {
type Target = [u8];
fn deref<'a>(&'a self) -> &'a [u8] {
unsafe {
let ptr = self.get_const_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts(transmute(ptr), len)
}
}
}
impl DerefMut for Message {
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
let ptr = self.get_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts_mut(transmute(ptr), len)
}
}
}
impl Drop for Message {
fn drop(&mut self) {
loop {
let rc = unsafe { zmq_ffi::zmq_msg_close(&mut self.msg) };
if rc != 0 {
let e = Error::from_last_err();
if e.get_errno() == EINTR {
continue;
} else {
panic!("{}", e);
}
} else {
break;
}
}
}
}
pub type SocketType = c_int;
pub const PAIR: SocketType = 0;
pub const PUB: SocketType = 1;
pub const SUB: SocketType = 2;
pub const REQ: SocketType = 3;
pub const REP: SocketType = 4;
pub const DEALER: SocketType = 5;
pub const ROUTER: SocketType = 6;
pub const PULL: SocketType = 7;
pub const PUSH: SocketType = 8;
pub const XPUB: SocketType = 9;
pub const XSUB: SocketType = 10;
pub const STREAM: SocketType = 11;
pub type MessageProperty = c_int;
pub const MORE: MessageProperty = 1;
pub const SRCFD: MessageProperty = 2;
pub const SHARED: MessageProperty = 3;
pub type SecurityMechanism = c_int;
pub const ZMQ_NULL: SecurityMechanism = 0;
pub const ZMQ_PLAIN: SecurityMechanism = 1;
pub const ZMQ_CURVE: SecurityMechanism = 2;
pub const ZMQ_GSSAPI: SecurityMechanism = 3;
/// Check a ZMQ capability
///
/// Binding of `int zmq_has (const char *capability);`
///
/// The function shall report whether a specified capability is available in the library
pub fn has_capability(capability: &str) -> bool {
let capability_cstr = ffi::CString::new(capability).unwrap();
let rc = unsafe { zmq_ffi::zmq_has(capability_cstr.as_ptr()) };
rc == 1
}
// Encryption functions
/* Encode data with Z85 encoding. Returns encoded data */
//ZMQ_EXPORT char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);
/// Encode a binary key as Z85 printable text
///
/// Binding of `char *zmq_z85_encode (char *dest, const uint8_t *data, size_t size);`
///
/// The function will encode the binary block specified by data and size into a string in dest.
/// The size of the binary block must be divisible by 4.
pub fn z85_encode(data: &[u8]) -> Result<String, Error> {
let len = data.len() as i32 * 5 / 4 + 1;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_encode(transmute(dest.as_mut_ptr()), data.as_ptr(), data.len()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
let cstr = ffi::CStr::from_ptr(transmute(dest.as_ptr()));
Ok(String::from_utf8(cstr.to_bytes().to_vec()).unwrap())
}
}
}
/// Decode a binary key from Z85 printable text
///
/// Binding of `uint8_t *zmq_z85_decode (uint8_t *dest, const char *string);`
///
/// The function will decode string into dest. The length of string in bytes shall be divisible by 5
pub fn z85_decode(encoded: &str) -> Result<Vec<u8>, Error> {
let encoded_cstr = ffi::CString::new(encoded).unwrap();
let len = (encoded_cstr.as_bytes().len() as i32 * 4 / 5) as i32;
let mut dest: Vec<u8> = Vec::with_capacity(len as usize);
let rc = unsafe { zmq_ffi::zmq_z85_decode(dest.as_mut_ptr(), encoded_cstr.as_ptr()) };
if rc.is_null() {
Err(Error::from_last_err())
} else {
unsafe {
dest.set_len(len as usize);
}
Ok(dest)
}
}
/// Generate z85-encoded public and private keypair with libsodium.
///
/// Binding of `int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);`
///
/// The function will return a newly generated random keypair consisting of a public key and a secret key.
/// The keys are encoded using z85_encode().
pub fn gen_curve_keypair() -> Result<(String, String), Error> {
let mut public_key: Vec<u8> = Vec::with_capacity(41);
let mut secret_key: Vec<u8> = Vec::with_capacity(41);
let rc = unsafe {
zmq_ffi::zmq_curve_keypair(
transmute(public_key.as_mut_ptr()),
transmute(secret_key.as_mut_ptr())
)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
unsafe {
public_key.set_len(40);
secret_key.set_len(40);
}
Ok((String::from_utf8(public_key).unwrap(), String::from_utf8(secret_key).unwrap()))
}
}
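// Illustrative sketch (not part of the original source): CURVE key generation
// requires libzmq to be built with libsodium, so the capability is checked
// first. The returned strings are 40-character Z85 encodings of 32-byte keys.
fn example_curve_keys() -> Result<(), Error> {
    if has_capability("curve") {
        let (public_key, secret_key) = gen_curve_keypair()?;
        assert_eq!(public_key.len(), 40);
        assert_eq!(secret_key.len(), 40);
    }
    Ok(())
}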
| None
} els | conditional_block |
blk_device.rs | //!
//! This module implements the list_block_devices() gRPC method
//! for listing available disk devices on the current host.
//!
//! The relevant information is obtained via udev.
//! The method works by iterating through udev records and selecting block
//! (ie. SUBSYSTEM=block) devices that represent either disks or disk
//! partitions. For each such device, it is then determined as to whether the
//! device is available for use.
//!
//! A device is currently deemed to be "available" if it satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
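// Illustrative sketch (not part of the original source): the From impls above
// let a raw udev property be pulled into whatever type a field requires, with
// missing or unparsable values collapsing to None, 0 or "".
fn example_property_conversions(device: &Device) {
    let devname: String = Property(device.property_value("DEVNAME")).into();
    let partn: Option<u32> = Property(device.property_value("PARTN")).into();
    let size: u64 = Property(device.attribute_value("size")).into();
    println!("{} partition={:?} size={}", devname, partn, size);
}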
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do not return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
{
return None;
}
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
});
}
None
}
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
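// Illustrative sketch (not part of the original source): looking up whether a
// device node currently appears in the mount table, exactly as new_device()
// does via mounts.get(devname). The device path is an arbitrary example.
fn example_mount_lookup() -> Result<(), Error> {
    let mounts = get_mounts()?;
    if let Some(info) = mounts.get(OsStr::new("/dev/sda1")) {
        println!("/dev/sda1 is mounted at {:?}", info.dest);
    }
    Ok(())
}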
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all || device.available {
list.push(device);
}
}
}
}
Ok(list)
}
// Iterate through udev to generate a list of all (block) devices
// associated with parent device <disk>
fn | (
parent: Option<&str>,
disk: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_parent(disk)?;
enumerator.match_property("DEVTYPE", "partition")?;
for entry in enumerator.scan_devices()? {
if let Some(device) = new_device(parent, true, &entry, mounts) {
list.push(device);
}
}
Ok(list)
}
/// Return a list of block devices on the current host.
/// The <all> parameter controls whether to return a list containing
/// all matching devices, or just those deemed to be available.
pub async fn list_block_devices(all: bool) -> Result<Vec<BlockDevice>, Error> {
let mounts = get_mounts()?;
get_disks(all, &mounts)
}
| get_partitions | identifier_name |
blk_device.rs | //!
//! This module implements the list_block_devices() gRPC method
//! for listing available disk devices on the current host.
//!
//! The relevant information is obtained via udev.
//! The method works by iterating through udev records and selecting block
//! (ie. SUBSYSTEM=block) devices that represent either disks or disk
//! partitions. For each such device, it is then determined as to whether the
//! device is available for use.
//!
//! A device is currently deemed to be "available" if it satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do not return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
|
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
});
}
None
}
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all || device.available {
list.push(device);
}
}
}
}
Ok(list)
}
// Iterate through udev to generate a list of all (block) devices
// associated with parent device <disk>
fn get_partitions(
parent: Option<&str>,
disk: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_parent(disk)?;
enumerator.match_property("DEVTYPE", "partition")?;
for entry in enumerator.scan_devices()? {
if let Some(device) = new_device(parent, true, &entry, mounts) {
list.push(device);
}
}
Ok(list)
}
/// Return a list of block devices on the current host.
/// The <all> parameter controls whether to return a list containing
/// all matching devices, or just those deemed to be available.
pub async fn list_block_devices(all: bool) -> Result<Vec<BlockDevice>, Error> {
let mounts = get_mounts()?;
get_disks(all, &mounts)
}
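// Illustrative caller sketch (not part of the original source): listing only
// the devices deemed available. The async runtime (e.g. tokio) is an
// assumption of the example, not of this module.
async fn example_list_available_devices() -> Result<(), Error> {
    for dev in list_block_devices(false).await? {
        println!("{} ({} bytes) available={}", dev.devname, dev.size, dev.available);
    }
    Ok(())
}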
| {
return None;
} | conditional_block |
blk_device.rs | //!
//! This module implements the list_block_devices() gRPC method
//! for listing available disk devices on the current host.
//!
//! The relevant information is obtained via udev.
//! The method works by iterating through udev records and selecting block
//! (ie. SUBSYSTEM=block) devices that represent either disks or disk
//! partitions. For each such device, it is then determined as to whether the
//! device is available for use.
//!
//! A device is currently deemed to be "available" if it satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do not return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
{
return None;
}
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
});
}
None
}
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> |
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all || device.available {
list.push(device);
}
}
}
}
Ok(list)
}
// Iterate through udev to generate a list of all (block) devices
// associated with parent device <disk>
fn get_partitions(
parent: Option<&str>,
disk: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_parent(disk)?;
enumerator.match_property("DEVTYPE", "partition")?;
for entry in enumerator.scan_devices()? {
if let Some(device) = new_device(parent, true, &entry, mounts) {
list.push(device);
}
}
Ok(list)
}
/// Return a list of block devices on the current host.
/// The <all> parameter controls whether to return a list containing
/// all matching devices, or just those deemed to be available.
pub async fn list_block_devices(all: bool) -> Result<Vec<BlockDevice>, Error> {
let mounts = get_mounts()?;
get_disks(all, &mounts)
}
| {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
} | identifier_body |
blk_device.rs | //!
//! This module implements the list_block_devices() gRPC method
//! for listing available disk devices on the current host.
//!
//! The relevant information is obtained via udev.
//! The method works by iterating through udev records and selecting block
//! (i.e. SUBSYSTEM=block) devices that represent either disks or disk
//! partitions. For each such device, it is then determined as to whether the
//! device is available for use.
//!
//! A device is currently deemed to be "available" if it satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ensure that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
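// Illustration (added example, not part of the original source): the `From`
// impls above turn a missing or unparsable udev property into a default
// value, so callers never deal with `Option` unless they want to.
#[cfg(test)]
mod property_example {
    use super::*;
    use std::ffi::OsStr;

    #[test]
    fn missing_or_bad_properties_default_to_zero() {
        let missing: u32 = Property(None).into();
        assert_eq!(missing, 0);
        let bad: u32 = Property(Some(OsStr::new("not-a-number"))).into();
        assert_eq!(bad, 0);
        let major: u32 = Property(Some(OsStr::new("259"))).into();
        assert_eq!(major, 259);
    }
}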
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
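// Illustration (added example, not part of the original source): a GPT
// partition with the Linux filesystem type GUID passes the check above,
// while e.g. an EFI system partition does not. The `Partition` field types
// are assumed from how `new_partition` below fills them in.
#[cfg(test)]
mod usable_partition_example {
    use super::*;

    fn gpt_partition(typeid: &str) -> Option<Partition> {
        Some(Partition {
            parent: String::from("/dev/sda"),
            number: 1,
            name: String::new(),
            scheme: String::from("gpt"),
            typeid: String::from(typeid),
            uuid: String::new(),
        })
    }

    #[test]
    fn linux_gpt_partition_is_usable() {
        assert!(usable_partition(&gpt_partition(
            "0fc63daf-8483-4772-8e79-3d69d8477de4" // Linux
        )));
        assert!(!usable_partition(&gpt_partition(
            "c12a7328-f81f-11d2-ba4b-00a0c93ec93b" // EFI system partition
        )));
    }
}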
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do not return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
{
return None;
}
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available, |
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
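// Illustration (hypothetical helper, not part of the original source):
// looking up the mount point for one device path in the table built above.
#[allow(dead_code)]
fn example_mountpoint_for(devname: &str) -> Option<String> {
    let mounts = get_mounts().ok()?;
    mounts
        .get(&OsString::from(devname))
        .map(|m| m.dest.to_string_lossy().into_owned())
}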
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all || device.available {
list.push(device);
}
}
}
}
Ok(list)
}
// Iterate through udev to generate a list of all (block) devices
// associated with parent device <disk>
fn get_partitions(
parent: Option<&str>,
disk: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_parent(disk)?;
enumerator.match_property("DEVTYPE", "partition")?;
for entry in enumerator.scan_devices()? {
if let Some(device) = new_device(parent, true, &entry, mounts) {
list.push(device);
}
}
Ok(list)
}
/// Return a list of block devices on the current host.
/// The <all> parameter controls whether to return a list containing
/// all matching devices, or just those deemed to be available.
pub async fn list_block_devices(all: bool) -> Result<Vec<BlockDevice>, Error> {
let mounts = get_mounts()?;
get_disks(all, &mounts)
} | });
}
None
} | random_line_split |
tpm.rs | <u8>)> {
// Set encryption algorithm
let alg = match alg {
Some(a) => a,
None => {
match config_get(
"cloud_agent",
"tpm_encryption_alg",
)?
.as_str()
{
"rsa" => AsymmetricAlgorithm::Rsa,
"ecc" => AsymmetricAlgorithm::Ecc,
_ => return Err(KeylimeError::Configuration(String::from("Encryption algorithm provided in keylime.conf is not supported")))
}
}
};
// Retrieve EK handle, EK pub cert, and TPM pub object
let handle = ek::create_ek_object(context, alg, DefaultKey)?;
let cert = match ek::retrieve_ek_pubcert(context, alg) {
Ok(v) => Some(v),
Err(_) => {
warn!("No EK certificate found in TPM NVRAM");
None
}
};
let (tpm_pub, _, _) = context.read_public(handle)?;
let tpm_pub_vec = pub_to_vec(tpm_pub);
Ok((handle, cert, tpm_pub_vec))
}
fn unmarshal_tpms_attest(val: &[u8]) -> Result<TPMS_ATTEST> {
let mut resp = TPMS_ATTEST::default();
let mut offset = 0u64;
unsafe {
let res = Tss2_MU_TPMS_ATTEST_Unmarshal(
val[..].as_ptr(),
val.len() as u64,
&mut offset,
&mut resp,
);
if res != 0 {
panic!("Error converting"); //#[allow_ci]
}
}
Ok(resp)
}
// Multiple TPM objects need to be marshaled for Quoting. This macro will
// create the appropriate functions when called below. The macro is intended
// to help with any future similar marshaling functions.
macro_rules! create_marshal_fn {
($func:ident, $tpmobj:ty, $marshal:ident) => {
fn $func(t: $tpmobj) -> Vec<u8> {
let mut offset = 0u64;
let size = std::mem::size_of::<$tpmobj>();
let mut tpm_vec = Vec::with_capacity(size);
unsafe {
let res = $marshal(
&t,
tpm_vec.as_mut_ptr(),
tpm_vec.capacity() as u64,
&mut offset,
);
if res != 0 {
panic!("out of memory or invalid data from TPM"); //#[allow_ci]
}
// offset is a buffer, so after marshaling function is called it holds the
// number of bytes written to the vector
tpm_vec.set_len(offset as usize);
}
tpm_vec
}
};
}
// These marshaling functions use the macro above and are based on this format:
// https://github.com/fedora-iot/clevis-pin-tpm2/blob/main/src/tpm_objects.rs#L64
//... and on these marshaling functions:
// https://github.com/parallaxsecond/rust-tss-esapi/blob/main/tss-esapi-sys/src/ \
// bindings/x86_64-unknown-linux-gnu.rs#L16010
//
// Functions can be created using the following form:
// create_marshal_fn!(name_of_function_to_create, struct_to_be_marshaled, marshaling_function);
//
create_marshal_fn!(pub_to_vec, TPM2B_PUBLIC, Tss2_MU_TPM2B_PUBLIC_Marshal);
create_marshal_fn!(
sig_to_vec,
TPMT_SIGNATURE,
Tss2_MU_TPMT_SIGNATURE_Marshal
);
// Recreate how tpm2-tools creates the PCR out file. Roughly, this is a
// TPML_PCR_SELECTION + the number of TPML_DIGESTs + the TPML_DIGESTs.
// Reference:
// https://github.com/tpm2-software/tpm2-tools/blob/master/tools/tpm2_quote.c#L47-L91
//
// Note: tpm2-tools does not use its own documented marshaling functions for this output,
// so the below code recreates the idiosyncratic format tpm2-tools expects. The lengths
// of the vectors were determined by introspection into running tpm2-tools code. This is
// not ideal, and we should aim to move away from it if possible.
pub(crate) fn pcrdata_to_vec(
selection_list: PcrSelectionList,
pcrdata: PcrData,
) -> Vec<u8> {
let pcrsel: TPML_PCR_SELECTION = selection_list.into();
let pcrsel_vec: [u8; 132] = unsafe { std::mem::transmute(pcrsel) };
let digest: TPML_DIGEST = pcrdata.into();
let digest_vec: [u8; 532] = unsafe { std::mem::transmute(digest) };
let mut data_vec =
Vec::with_capacity(pcrsel_vec.len() + 4 + digest_vec.len());
data_vec.extend(&pcrsel_vec);
data_vec.extend(&1u32.to_le_bytes());
data_vec.extend(&digest_vec);
data_vec
}
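// Illustration (assumption, not from the original source): the blob built
// above can be split back into its three sections at fixed offsets, which
// makes the layout explicit. The sizes mirror the transmutes above.
#[allow(dead_code)]
fn example_split_pcrdata(data: &[u8]) -> (&[u8], u32, &[u8]) {
    let (pcrsel, rest) = data.split_at(132); // TPML_PCR_SELECTION bytes
    let (count, digests) = rest.split_at(4); // number of TPML_DIGESTs (LE)
    let count = u32::from_le_bytes(count.try_into().unwrap()); //#[allow_ci]
    (pcrsel, count, digests)
}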
/* Converts a hex value in the form of a string (ex. from keylime.conf's
* ek_handle) to a key handle.
*
* Input: &str
* Return: Key handle
*
* Example call:
* let ek_handle = tpm::ek_from_hex_str("0x81000000");
*/
pub(crate) fn ek_from_hex_str(val: &str) -> Result<KeyHandle> {
let val = val.trim_start_matches("0x");
Ok(KeyHandle::from(u32::from_str_radix(val, 16)?))
}
/* Creates AK and returns a tuple of its handle, name, and tpm2b_public as a vector.
*
* Input: Connection context, parent key's KeyHandle.
* Return: (Key handle, key name, TPM public object as a vector)
* Example call:
* let (key, name, tpm_pub) = tpm::create_ak(context, ek_handle)
*/
pub(crate) fn create_ak(
ctx: &mut Context,
handle: KeyHandle,
) -> Result<(KeyHandle, Name, Vec<u8>)> {
let ak = ak::create_ak(
ctx,
handle,
HashingAlgorithm::Sha256,
SignatureScheme::RsaSsa,
None,
DefaultKey,
)?;
let ak_tpm2b_pub = ak.out_public;
let tpm2_pub_vec = pub_to_vec(ak_tpm2b_pub);
let ak_handle =
ak::load_ak(ctx, handle, None, ak.out_private, ak.out_public)?;
let (_, name, _) = ctx.read_public(ak_handle)?;
Ok((ak_handle, name, tpm2_pub_vec))
}
const TSS_MAGIC: u32 = 3135029470;
fn parse_cred_and_secret(
keyblob: Vec<u8>,
) -> Result<(IDObject, EncryptedSecret)> {
let magic = u32::from_be_bytes(keyblob[0..4].try_into().unwrap()); //#[allow_ci]
let version = u32::from_be_bytes(keyblob[4..8].try_into().unwrap()); //#[allow_ci]
if magic != TSS_MAGIC {
return Err(KeylimeError::Other(format!("Error parsing cred and secret; TSS_MAGIC number {} does not match expected value {}", magic, TSS_MAGIC)));
}
if version != 1 {
return Err(KeylimeError::Other(format!(
"Error parsing cred and secret; version {} is not 1",
version
)));
}
let credsize = u16::from_be_bytes(keyblob[8..10].try_into().unwrap()); //#[allow_ci]
let secretsize = u16::from_be_bytes(
keyblob[(10 + credsize as usize)..(12 + credsize as usize)]
.try_into()
.unwrap(), //#[allow_ci]
);
let credential = &keyblob[10..(10 + credsize as usize)];
let secret = &keyblob[(12 + credsize as usize)..];
let credential = IDObject::try_from(credential)?;
let secret = EncryptedSecret::try_from(secret)?;
Ok((credential, secret))
}
fn create_empty_session(
ctx: &mut Context,
ses_type: SessionType,
) -> Result<AuthSession> {
let session = ctx.start_auth_session(
None,
None,
None,
ses_type,
Cipher::aes_128_cfb().try_into()?,
HashingAlgorithm::Sha256,
)?;
let (ses_attrs, ses_attrs_mask) = SessionAttributesBuilder::new()
.with_encrypt(true)
.with_decrypt(true)
.build();
ctx.tr_sess_set_attributes(session.unwrap(), ses_attrs, ses_attrs_mask)?; //#[allow_ci]
Ok(session.unwrap()) //#[allow_ci]
}
pub(crate) fn activate_credential(
ctx: &mut Context,
keyblob: Vec<u8>,
ak: KeyHandle,
ek: KeyHandle,
) -> Result<Digest> {
let (credential, secret) = parse_cred_and_secret(keyblob)?;
let ek_auth = create_empty_session(ctx, SessionType::Policy)?;
// We authorize ses2 with PolicySecret(ENDORSEMENT) as per PolicyA
let _ = ctx.execute_with_nullauth_session(|context| {
context.policy_secret(
ek_auth.try_into()?,
AuthHandle::Endorsement,
Default::default(),
Default::default(),
Default::default(),
None,
)
})?;
let resp = ctx
.execute_with_sessions(
(Some(AuthSession::Password), Some(ek_auth), None),
|context| context.activate_credential(ak, ek, credential, secret),
)
.map_err(KeylimeError::from);
ctx.flush_context(ek.into())?;
resp
}
// Returns TSS struct corresponding to an algorithm specified as a string, ex.
// the string from the keylime.conf file.
pub(crate) fn get_hash_alg(alg: String) -> Result<HashingAlgorithm> {
match alg.as_str() {
"sha256" => Ok(HashingAlgorithm::Sha256),
"sha1" => Ok(HashingAlgorithm::Sha1),
other => {
Err(KeylimeError::Other(format!("{:?} not implemented", other)))
}
}
}
#[derive(Debug)]
pub(crate) enum TpmSigScheme {
AlgNull,
}
impl Default for TpmSigScheme {
fn default() -> Self {
TpmSigScheme::AlgNull
}
}
// Returns TSS struct corresponding to a signature scheme.
pub(crate) fn get_sig_scheme(
scheme: TpmSigScheme,
) -> Result<TPMT_SIG_SCHEME> {
match scheme {
// The TPM2_ALG_NULL sig scheme can be filled out with placeholder data
// in the details field.
TpmSigScheme::AlgNull => Ok(TPMT_SIG_SCHEME {
scheme: TPM2_ALG_NULL,
details: TPMU_SIG_SCHEME {
any: TPMS_SCHEME_HASH {
hashAlg: TPM2_ALG_NULL,
},
},
}),
_ => Err(KeylimeError::Other(format!(
"The signature scheme {:?} is not implemented",
scheme
))),
}
}
// Takes a public PKey and returns a DigestValue of it.
// Note: Currently, this creates a DigestValue including both SHA256 and
// SHA1 because these banks are checked by Keylime on the Python side.
pub(crate) fn pubkey_to_tpm_digest(
pubkey: &PKeyRef<Public>,
) -> Result<DigestValues> {
let mut keydigest = DigestValues::new();
let keybytes = match pubkey.id() {
Id::RSA => pubkey.rsa()?.public_key_to_pem()?,
other_id => {
return Err(KeylimeError::Other(format!(
"Converting to digest value for key type {:?} is not yet implemented",
other_id
)));
}
};
// SHA256
let mut hasher = openssl::sha::Sha256::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha256, Digest::try_from(hashvec)?);
// SHA1
let mut hasher = openssl::sha::Sha1::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha1, Digest::try_from(hashvec)?);
Ok(keydigest)
}
// Reads a mask in the form of some hex value, ex. "0x408000",
// translating bits that are set to pcrs to include in the list.
//
// The masks are sent from the tenant and cloud verifier to indicate
// the PCRs to include in a Quote. The LSB in the mask corresponds to
// PCR0. For example, keylime.conf specifies PCRs 15 and 22 under
// [tenant][tpm_policy]. As a bit mask, this would be represented as
// 0b010000001000000000000000, which translates to 0x408000.
//
// The mask is a string because it is sent as a string from the tenant
// and verifier. The output from this function can be used to call a
// Quote from the TSS ESAPI.
//
pub(crate) fn read_mask(mask: &str) -> Result<Vec<PcrSlot>> {
let mut pcrs = Vec::new();
let num = u32::from_str_radix(mask.trim_start_matches("0x"), 16)?;
// check which bits are set
for i in 0..32 {
if num & (1 << i) != 0 {
pcrs.push(
match i {
0 => PcrSlot::Slot0,
1 => PcrSlot::Slot1,
2 => PcrSlot::Slot2,
3 => PcrSlot::Slot3,
4 => PcrSlot::Slot4,
5 => PcrSlot::Slot5,
6 => PcrSlot::Slot6,
7 => PcrSlot::Slot7,
8 => PcrSlot::Slot8,
9 => PcrSlot::Slot9,
10 => PcrSlot::Slot10,
11 => PcrSlot::Slot11,
12 => PcrSlot::Slot12,
13 => PcrSlot::Slot13,
14 => PcrSlot::Slot14,
15 => PcrSlot::Slot15,
16 => PcrSlot::Slot16,
17 => PcrSlot::Slot17,
18 => PcrSlot::Slot18,
19 => PcrSlot::Slot19, | 23 => PcrSlot::Slot23,
bit => return Err(KeylimeError::Other(format!("malformed mask in integrity quote: only pcrs 0-23 can be included, but mask included pcr {:?}", bit))),
},
)
}
}
Ok(pcrs)
}
// This encodes a quote string as input to Python Keylime's quote checking functionality.
// The quote, signature, and pcr blob are concatenated with ':' separators. To match the
// expected format, the quote, signature, and pcr blob must be individually compressed
// with zlib at the default compression level and then base64 encoded before concatenation.
//
// Reference:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6 \
// /keylime/tpm/tpm_main.py#L964-L975
pub(crate) fn encode_quote_string(
att: TPM2B_ATTEST,
sig: Signature,
pcrs_read: PcrSelectionList,
pcr_data: PcrData,
) -> Result<String> {
// marshal structs to vec in expected formats. these formats are
// dictated by tpm2_tools.
let att_vec = &att.attestationData[0..att.size as usize];
let sig_vec = sig_to_vec(sig.try_into()?);
let pcr_vec = pcrdata_to_vec(pcrs_read, pcr_data);
// zlib compression
let mut att_comp = ZlibEncoder::new(Vec::new(), Compression::default());
att_comp.write_all(att_vec)?;
let att_comp_finished = att_comp.finish()?;
let mut sig_comp = ZlibEncoder::new(Vec::new(), Compression::default());
sig_comp.write_all(&sig_vec)?;
let sig_comp_finished = sig_comp.finish()?;
let mut pcr_comp = ZlibEncoder::new(Vec::new(), Compression::default());
pcr_comp.write_all(&pcr_vec)?;
let pcr_comp_finished = pcr_comp.finish()?;
// base64 encoding
let att_str = base64::encode(att_comp_finished);
let sig_str = base64::encode(sig_comp_finished);
let pcr_str = base64::encode(pcr_comp_finished);
// create concatenated string
let mut quote = String::new();
quote.push_str(&att_str);
quote.push(':');
quote.push_str(&sig_str);
quote.push(':');
quote.push_str(&pcr_str);
Ok(quote)
}
// This function extends Pcr16 with the digest, then creates a PcrList
// from the given mask and pcr16.
// Note: Currently, this will build the list for both SHA256 and SHA1 as
// necessary for the Python components of Keylime.
pub(crate) fn build_pcr_list(
context: &mut Context,
digest: DigestValues,
mask: Option<&str>,
) -> Result<PcrSelectionList> {
// extend digest into pcr16
context.execute_with_nullauth_session(|ctx| {
ctx.pcr_reset(PcrHandle::Pcr16)?;
ctx.pcr_extend(PcrHandle::Pcr16, digest.to_owned())
})?;
// translate mask to vec of pcrs
let mut pcrs = match mask {
Some(m) => read_mask(m)?,
None => Vec::new(),
};
// add pcr16 if it isn't in the vec already
if !pcrs.iter().any(|&pcr| pcr == PcrSlot::Slot16) {
let mut slot16 = vec![PcrSlot::Slot16];
pcrs.append(&mut slot16);
}
let ima_pcr_index = pcrs.iter().position(|&pcr| pcr == PcrSlot::Slot10);
let mut pcrlist = PcrSelectionListBuilder::new();
// remove IMA pcr before selecting for sha256 bank
if let Some(ima_pcr_index) = ima_pcr_index {
let _ = pcrs.remove(ima_pcr_index);
// add only IMA pcr for sha1 bank
let mut sha1_pcrs = vec![PcrSlot::Slot10];
pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha1, &sha1_pcrs);
}
pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha256, &pcrs);
let pcrlist = pcrlist.build();
Ok(pcrlist)
}
// The pcr blob corresponds to the pcr out file that records the list of PCR values,
// specified by tpm2-tools, ex. 'tpm2_quote... -o <pcrfilename>'. Read more here:
// https://github.com/tpm2-software/tpm2-tools/blob/master/man/tpm2_quote.1.md
//
// It is required by Python Keylime's check_quote functionality. For how the quote is
// checked, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L990
//
// For how the quote is created, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L965
//
pub(crate) fn make_pcr_blob(
context: &mut Context,
pcrlist: PcrSelectionList,
) -> Result<(PcrSelectionList, PcrData)> {
let (_, pcrs_read, pcr_data) =
context.execute_without_session(|ctx| ctx.pcr_read(&pcrlist))?;
if pcrs_read != pcrlist {
return Err(KeylimeError::Other(format!(
"could not read all pcrs; requested: {:?}, read: {:?}",
pcrlist, pcrs_read
)));
}
Ok((pcrs_read, pcr_data))
}
// Takes a TSS ESAPI HashingAlgorithm and returns the corresponding OpenSSL
// MessageDigest.
fn hash_alg_to_message_digest(
hash_alg: HashingAlgorithm,
) -> Result<MessageDigest> {
match hash_alg {
HashingAlgorithm::Sha256 => Ok(MessageDigest::sha256()),
HashingAlgorithm::Sha1 => Ok(MessageDigest::sha1()),
other => Err(KeylimeError::Other(format!(
"Unsupported hashing algorithm: {:?}",
other
))),
}
}
fn check_if_pcr_data_and_attestation_match(
hash_algo: HashingAlgorithm,
pcr_data: &PcrData,
attestation: &TPM2B_ATTEST,
) -> Result<bool> {
let pcr_data = TPML_DIGEST::try_from(pcr_data.clone())?;
let attestation = unmarshal_tpms_attest(&attestation.attestationData)?;
if attestation.type_ != TPM2_ST_ATTEST_QUOTE {
return Err(KeylimeError::Other(format!(
"Expected attestation type TPM2_ST_ATTEST_QUOTE, got {:?}",
attestation.type_
)));
}
let quote = unsafe { attestation.attested.quote };
let attested_pcr = quote.pcrDigest;
let attested_pcr = &attested_pcr.buffer[..attested_pcr.size as usize];
let mut hasher = Hasher::new(hash_alg_to_message_digest(hash_algo)?)?;
for i in 0..pcr_data.count {
let pcr = pcr_data.digests[i as usize];
hasher.update(&pcr.buffer[..pcr.size as usize])?;
}
let pcr_digest = hasher.finish()?;
log::trace!(
"Attested to PCR digest: {:?}, read PCR digest: {:?}",
attested_pcr,
pcr_digest,
);
Ok(memcmp::eq(attested_pcr, &pcr_digest))
}
const NUM_ATTESTATION_ATTEMPTS: i32 = 5;
fn perform_quote_and_pcr_read(
mut context: &mut Context,
ak_handle: KeyHandle,
nonce: &[u8],
pcrlist: PcrSelectionList,
) -> Result<(TPM2B_ATTEST, Signature, PcrSelectionList, PcrData)> {
let sig_scheme = get_sig_scheme(TpmSigScheme::default())?;
let nonce = nonce.try_into()?;
for attempt in 0..NUM_ATTESTATION_ATTEMPTS { | 20 => PcrSlot::Slot20,
21 => PcrSlot::Slot21,
22 => PcrSlot::Slot22, | random_line_split |
tpm.rs | u8>)> {
// Set encryption algorithm
let alg = match alg {
Some(a) => a,
None => {
match config_get(
"cloud_agent",
"tpm_encryption_alg",
)?
.as_str()
{
"rsa" => AsymmetricAlgorithm::Rsa,
"ecc" => AsymmetricAlgorithm::Ecc,
_ => return Err(KeylimeError::Configuration(String::from("Encryption algorithm provided in keylime.conf is not supported")))
}
}
};
// Retrieve EK handle, EK pub cert, and TPM pub object
let handle = ek::create_ek_object(context, alg, DefaultKey)?;
let cert = match ek::retrieve_ek_pubcert(context, alg) {
Ok(v) => Some(v),
Err(_) => {
warn!("No EK certificate found in TPM NVRAM");
None
}
};
let (tpm_pub, _, _) = context.read_public(handle)?;
let tpm_pub_vec = pub_to_vec(tpm_pub);
Ok((handle, cert, tpm_pub_vec))
}
fn unmarshal_tpms_attest(val: &[u8]) -> Result<TPMS_ATTEST> {
let mut resp = TPMS_ATTEST::default();
let mut offset = 0u64;
unsafe {
let res = Tss2_MU_TPMS_ATTEST_Unmarshal(
val[..].as_ptr(),
val.len() as u64,
&mut offset,
&mut resp,
);
if res != 0 {
panic!("Error converting"); //#[allow_ci]
}
}
Ok(resp)
}
// Multiple TPM objects need to be marshaled for Quoting. This macro will
// create the appropriate functions when called below. The macro is intended
// to help with any future similar marshaling functions.
macro_rules! create_marshal_fn {
($func:ident, $tpmobj:ty, $marshal:ident) => {
fn $func(t: $tpmobj) -> Vec<u8> {
let mut offset = 0u64;
let size = std::mem::size_of::<$tpmobj>();
let mut tpm_vec = Vec::with_capacity(size);
unsafe {
let res = $marshal(
&t,
tpm_vec.as_mut_ptr(),
tpm_vec.capacity() as u64,
&mut offset,
);
if res != 0 {
panic!("out of memory or invalid data from TPM"); //#[allow_ci]
}
// offset is a buffer, so after marshaling function is called it holds the
// number of bytes written to the vector
tpm_vec.set_len(offset as usize);
}
tpm_vec
}
};
}
// These marshaling functions use the macro above and are based on this format:
// https://github.com/fedora-iot/clevis-pin-tpm2/blob/main/src/tpm_objects.rs#L64
//... and on these marshaling functions:
// https://github.com/parallaxsecond/rust-tss-esapi/blob/main/tss-esapi-sys/src/ \
// bindings/x86_64-unknown-linux-gnu.rs#L16010
//
// Functions can be created using the following form:
// create_marshal_fn!(name_of_function_to_create, struct_to_be_marshaled, marshaling_function);
//
create_marshal_fn!(pub_to_vec, TPM2B_PUBLIC, Tss2_MU_TPM2B_PUBLIC_Marshal);
create_marshal_fn!(
sig_to_vec,
TPMT_SIGNATURE,
Tss2_MU_TPMT_SIGNATURE_Marshal
);
// Recreate how tpm2-tools creates the PCR out file. Roughly, this is a
// TPML_PCR_SELECTION + the number of TPML_DIGESTs + the TPML_DIGESTs.
// Reference:
// https://github.com/tpm2-software/tpm2-tools/blob/master/tools/tpm2_quote.c#L47-L91
//
// Note: tpm2-tools does not use its own documented marshaling functions for this output,
// so the below code recreates the idiosyncratic format tpm2-tools expects. The lengths
// of the vectors were determined by introspection into running tpm2-tools code. This is
// not ideal, and we should aim to move away from it if possible.
pub(crate) fn pcrdata_to_vec(
selection_list: PcrSelectionList,
pcrdata: PcrData,
) -> Vec<u8> {
let pcrsel: TPML_PCR_SELECTION = selection_list.into();
let pcrsel_vec: [u8; 132] = unsafe { std::mem::transmute(pcrsel) };
let digest: TPML_DIGEST = pcrdata.into();
let digest_vec: [u8; 532] = unsafe { std::mem::transmute(digest) };
let mut data_vec =
Vec::with_capacity(pcrsel_vec.len() + 4 + digest_vec.len());
data_vec.extend(&pcrsel_vec);
data_vec.extend(&1u32.to_le_bytes());
data_vec.extend(&digest_vec);
data_vec
}
/* Converts a hex value in the form of a string (ex. from keylime.conf's
* ek_handle) to a key handle.
*
* Input: &str
* Return: Key handle
*
* Example call:
* let ek_handle = tpm::ek_from_hex_str("0x81000000");
*/
pub(crate) fn ek_from_hex_str(val: &str) -> Result<KeyHandle> {
let val = val.trim_start_matches("0x");
Ok(KeyHandle::from(u32::from_str_radix(val, 16)?))
}
/* Creates AK and returns a tuple of its handle, name, and tpm2b_public as a vector.
*
* Input: Connection context, parent key's KeyHandle.
* Return: (Key handle, key name, TPM public object as a vector)
* Example call:
* let (key, name, tpm_pub) = tpm::create_ak(context, ek_handle)
*/
pub(crate) fn create_ak(
ctx: &mut Context,
handle: KeyHandle,
) -> Result<(KeyHandle, Name, Vec<u8>)> {
let ak = ak::create_ak(
ctx,
handle,
HashingAlgorithm::Sha256,
SignatureScheme::RsaSsa,
None,
DefaultKey,
)?;
let ak_tpm2b_pub = ak.out_public;
let tpm2_pub_vec = pub_to_vec(ak_tpm2b_pub);
let ak_handle =
ak::load_ak(ctx, handle, None, ak.out_private, ak.out_public)?;
let (_, name, _) = ctx.read_public(ak_handle)?;
Ok((ak_handle, name, tpm2_pub_vec))
}
const TSS_MAGIC: u32 = 3135029470;
fn parse_cred_and_secret(
keyblob: Vec<u8>,
) -> Result<(IDObject, EncryptedSecret)> {
let magic = u32::from_be_bytes(keyblob[0..4].try_into().unwrap()); //#[allow_ci]
let version = u32::from_be_bytes(keyblob[4..8].try_into().unwrap()); //#[allow_ci]
if magic != TSS_MAGIC {
return Err(KeylimeError::Other(format!("Error parsing cred and secret; TSS_MAGIC number {} does not match expected value {}", magic, TSS_MAGIC)));
}
if version != 1 {
return Err(KeylimeError::Other(format!(
"Error parsing cred and secret; version {} is not 1",
version
)));
}
let credsize = u16::from_be_bytes(keyblob[8..10].try_into().unwrap()); //#[allow_ci]
let secretsize = u16::from_be_bytes(
keyblob[(10 + credsize as usize)..(12 + credsize as usize)]
.try_into()
.unwrap(), //#[allow_ci]
);
let credential = &keyblob[10..(10 + credsize as usize)];
let secret = &keyblob[(12 + credsize as usize)..];
let credential = IDObject::try_from(credential)?;
let secret = EncryptedSecret::try_from(secret)?;
Ok((credential, secret))
}
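// Illustration (hypothetical counterpart, not part of the original source):
// the layout parsed above is, in order and big-endian,
// MAGIC(4) | version(4) | credsize(2) | credential | secretsize(2) | secret.
// Building such a blob for a test might look like this:
#[allow(dead_code)]
fn example_build_keyblob(credential: &[u8], secret: &[u8]) -> Vec<u8> {
    let mut blob = Vec::new();
    blob.extend(&TSS_MAGIC.to_be_bytes());
    blob.extend(&1u32.to_be_bytes()); // version must be 1
    blob.extend(&(credential.len() as u16).to_be_bytes());
    blob.extend(credential);
    blob.extend(&(secret.len() as u16).to_be_bytes());
    blob.extend(secret);
    blob
}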
fn create_empty_session(
ctx: &mut Context,
ses_type: SessionType,
) -> Result<AuthSession> {
let session = ctx.start_auth_session(
None,
None,
None,
ses_type,
Cipher::aes_128_cfb().try_into()?,
HashingAlgorithm::Sha256,
)?;
let (ses_attrs, ses_attrs_mask) = SessionAttributesBuilder::new()
.with_encrypt(true)
.with_decrypt(true)
.build();
ctx.tr_sess_set_attributes(session.unwrap(), ses_attrs, ses_attrs_mask)?; //#[allow_ci]
Ok(session.unwrap()) //#[allow_ci]
}
pub(crate) fn activate_credential(
ctx: &mut Context,
keyblob: Vec<u8>,
ak: KeyHandle,
ek: KeyHandle,
) -> Result<Digest> {
let (credential, secret) = parse_cred_and_secret(keyblob)?;
let ek_auth = create_empty_session(ctx, SessionType::Policy)?;
// We authorize ses2 with PolicySecret(ENDORSEMENT) as per PolicyA
let _ = ctx.execute_with_nullauth_session(|context| {
context.policy_secret(
ek_auth.try_into()?,
AuthHandle::Endorsement,
Default::default(),
Default::default(),
Default::default(),
None,
)
})?;
let resp = ctx
.execute_with_sessions(
(Some(AuthSession::Password), Some(ek_auth), None),
|context| context.activate_credential(ak, ek, credential, secret),
)
.map_err(KeylimeError::from);
ctx.flush_context(ek.into())?;
resp
}
// Returns TSS struct corresponding to an algorithm specified as a string, ex.
// the string from the keylime.conf file.
pub(crate) fn get_hash_alg(alg: String) -> Result<HashingAlgorithm> {
match alg.as_str() {
"sha256" => Ok(HashingAlgorithm::Sha256),
"sha1" => Ok(HashingAlgorithm::Sha1),
other => {
Err(KeylimeError::Other(format!("{:?} not implemented", other)))
}
}
}
#[derive(Debug)]
pub(crate) enum TpmSigScheme {
AlgNull,
}
impl Default for TpmSigScheme {
fn default() -> Self {
TpmSigScheme::AlgNull
}
}
// Returns TSS struct corresponding to a signature scheme.
pub(crate) fn get_sig_scheme(
scheme: TpmSigScheme,
) -> Result<TPMT_SIG_SCHEME> {
match scheme {
// The TPM2_ALG_NULL sig scheme can be filled out with placeholder data
// in the details field.
TpmSigScheme::AlgNull => Ok(TPMT_SIG_SCHEME {
scheme: TPM2_ALG_NULL,
details: TPMU_SIG_SCHEME {
any: TPMS_SCHEME_HASH {
hashAlg: TPM2_ALG_NULL,
},
},
}),
_ => Err(KeylimeError::Other(format!(
"The signature scheme {:?} is not implemented",
scheme
))),
}
}
// Takes a public PKey and returns a DigestValue of it.
// Note: Currently, this creates a DigestValue including both SHA256 and
// SHA1 because these banks are checked by Keylime on the Python side.
pub(crate) fn pubkey_to_tpm_digest(
pubkey: &PKeyRef<Public>,
) -> Result<DigestValues> | hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha1, Digest::try_from(hashvec)?);
Ok(keydigest)
}
// Reads a mask in the form of some hex value, ex. "0x408000",
// translating bits that are set to pcrs to include in the list.
//
// The masks are sent from the tenant and cloud verifier to indicate
// the PCRs to include in a Quote. The LSB in the mask corresponds to
// PCR0. For example, keylime.conf specifies PCRs 15 and 22 under
// [tenant][tpm_policy]. As a bit mask, this would be represented as
// 0b010000001000000000000000, which translates to 0x408000.
//
// The mask is a string because it is sent as a string from the tenant
// and verifier. The output from this function can be used to call a
// Quote from the TSS ESAPI.
//
pub(crate) fn read_mask(mask: &str) -> Result<Vec<PcrSlot>> {
let mut pcrs = Vec::new();
let num = u32::from_str_radix(mask.trim_start_matches("0x"), 16)?;
// check which bits are set
for i in 0..32 {
if num & (1 << i) != 0 {
pcrs.push(
match i {
0 => PcrSlot::Slot0,
1 => PcrSlot::Slot1,
2 => PcrSlot::Slot2,
3 => PcrSlot::Slot3,
4 => PcrSlot::Slot4,
5 => PcrSlot::Slot5,
6 => PcrSlot::Slot6,
7 => PcrSlot::Slot7,
8 => PcrSlot::Slot8,
9 => PcrSlot::Slot9,
10 => PcrSlot::Slot10,
11 => PcrSlot::Slot11,
12 => PcrSlot::Slot12,
13 => PcrSlot::Slot13,
14 => PcrSlot::Slot14,
15 => PcrSlot::Slot15,
16 => PcrSlot::Slot16,
17 => PcrSlot::Slot17,
18 => PcrSlot::Slot18,
19 => PcrSlot::Slot19,
20 => PcrSlot::Slot20,
21 => PcrSlot::Slot21,
22 => PcrSlot::Slot22,
23 => PcrSlot::Slot23,
bit => return Err(KeylimeError::Other(format!("malformed mask in integrity quote: only pcrs 0-23 can be included, but mask included pcr {:?}", bit))),
},
)
}
}
Ok(pcrs)
}
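// Illustration (added test-style example, not part of the original source):
// the mask from the comment above expands exactly as documented.
#[cfg(test)]
mod read_mask_example {
    use super::*;

    #[test]
    fn expands_documented_mask() {
        // 0x408000 == 0b0100_0000_1000_0000_0000_0000 selects PCRs 15 and 22.
        let pcrs = read_mask("0x408000").unwrap(); //#[allow_ci]
        assert_eq!(pcrs, vec![PcrSlot::Slot15, PcrSlot::Slot22]);
    }
}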
// This encodes a quote string as input to Python Keylime's quote checking functionality.
// The quote, signature, and pcr blob are concatenated with ':' separators. To match the
// expected format, the quote, signature, and pcr blob must be individually compressed
// with zlib at the default compression level and then base64 encoded before concatenation.
//
// Reference:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6 \
// /keylime/tpm/tpm_main.py#L964-L975
pub(crate) fn encode_quote_string(
att: TPM2B_ATTEST,
sig: Signature,
pcrs_read: PcrSelectionList,
pcr_data: PcrData,
) -> Result<String> {
// marshal structs to vec in expected formats. these formats are
// dictated by tpm2_tools.
let att_vec = &att.attestationData[0..att.size as usize];
let sig_vec = sig_to_vec(sig.try_into()?);
let pcr_vec = pcrdata_to_vec(pcrs_read, pcr_data);
// zlib compression
let mut att_comp = ZlibEncoder::new(Vec::new(), Compression::default());
att_comp.write_all(att_vec)?;
let att_comp_finished = att_comp.finish()?;
let mut sig_comp = ZlibEncoder::new(Vec::new(), Compression::default());
sig_comp.write_all(&sig_vec)?;
let sig_comp_finished = sig_comp.finish()?;
let mut pcr_comp = ZlibEncoder::new(Vec::new(), Compression::default());
pcr_comp.write_all(&pcr_vec)?;
let pcr_comp_finished = pcr_comp.finish()?;
// base64 encoding
let att_str = base64::encode(att_comp_finished);
let sig_str = base64::encode(sig_comp_finished);
let pcr_str = base64::encode(pcr_comp_finished);
// create concatenated string
let mut quote = String::new();
quote.push_str(&att_str);
quote.push(':');
quote.push_str(&sig_str);
quote.push(':');
quote.push_str(&pcr_str);
Ok(quote)
}
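// Illustration (assumption, not part of the original source): each
// ':'-separated segment produced above can be reversed with a base64 decode
// followed by zlib decompression. A sketch using the same flate2 and base64
// crates this module already relies on:
#[allow(dead_code)]
fn example_decode_segment(segment: &str) -> std::io::Result<Vec<u8>> {
    use flate2::read::ZlibDecoder;
    use std::io::Read;
    let compressed = base64::decode(segment).expect("valid base64"); //#[allow_ci]
    let mut raw = Vec::new();
    ZlibDecoder::new(&compressed[..]).read_to_end(&mut raw)?;
    Ok(raw)
}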
// This function extends Pcr16 with the digest, then creates a PcrList
// from the given mask and pcr16.
// Note: Currently, this will build the list for both SHA256 and SHA1 as
// necessary for the Python components of Keylime.
pub(crate) fn build_pcr_list(
context: &mut Context,
digest: DigestValues,
mask: Option<&str>,
) -> Result<PcrSelectionList> {
// extend digest into pcr16
context.execute_with_nullauth_session(|ctx| {
ctx.pcr_reset(PcrHandle::Pcr16)?;
ctx.pcr_extend(PcrHandle::Pcr16, digest.to_owned())
})?;
// translate mask to vec of pcrs
let mut pcrs = match mask {
Some(m) => read_mask(m)?,
None => Vec::new(),
};
// add pcr16 if it isn't in the vec already
if !pcrs.iter().any(|&pcr| pcr == PcrSlot::Slot16) {
let mut slot16 = vec![PcrSlot::Slot16];
pcrs.append(&mut slot16);
}
let ima_pcr_index = pcrs.iter().position(|&pcr| pcr == PcrSlot::Slot10);
let mut pcrlist = PcrSelectionListBuilder::new();
// remove IMA pcr before selecting for sha256 bank
if let Some(ima_pcr_index) = ima_pcr_index {
let _ = pcrs.remove(ima_pcr_index);
// add only IMA pcr for sha1 bank
let mut sha1_pcrs = vec![PcrSlot::Slot10];
pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha1, &sha1_pcrs);
}
pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha256, &pcrs);
let pcrlist = pcrlist.build();
Ok(pcrlist)
}
// The pcr blob corresponds to the pcr out file that records the list of PCR values,
// specified by tpm2-tools, ex. 'tpm2_quote... -o <pcrfilename>'. Read more here:
// https://github.com/tpm2-software/tpm2-tools/blob/master/man/tpm2_quote.1.md
//
// It is required by Python Keylime's check_quote functionality. For how the quote is
// checked, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L990
//
// For how the quote is created, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L965
//
pub(crate) fn make_pcr_blob(
context: &mut Context,
pcrlist: PcrSelectionList,
) -> Result<(PcrSelectionList, PcrData)> {
let (_, pcrs_read, pcr_data) =
context.execute_without_session(|ctx| ctx.pcr_read(&pcrlist))?;
if pcrs_read != pcrlist {
return Err(KeylimeError::Other(format!(
"could not read all pcrs; requested: {:?}, read: {:?}",
pcrlist, pcrs_read
)));
}
Ok((pcrs_read, pcr_data))
}
// Takes a TSS ESAPI HashingAlgorithm and returns the corresponding OpenSSL
// MessageDigest.
fn hash_alg_to_message_digest(
hash_alg: HashingAlgorithm,
) -> Result<MessageDigest> {
match hash_alg {
HashingAlgorithm::Sha256 => Ok(MessageDigest::sha256()),
HashingAlgorithm::Sha1 => Ok(MessageDigest::sha1()),
other => Err(KeylimeError::Other(format!(
"Unsupported hashing algorithm: {:?}",
other
))),
}
}
fn check_if_pcr_data_and_attestation_match(
hash_algo: HashingAlgorithm,
pcr_data: &PcrData,
attestation: &TPM2B_ATTEST,
) -> Result<bool> {
let pcr_data = TPML_DIGEST::try_from(pcr_data.clone())?;
let attestation = unmarshal_tpms_attest(&attestation.attestationData)?;
if attestation.type_ != TPM2_ST_ATTEST_QUOTE {
return Err(KeylimeError::Other(format!(
"Expected attestation type TPM2_ST_ATTEST_QUOTE, got {:?}",
attestation.type_
)));
}
let quote = unsafe { attestation.attested.quote };
let attested_pcr = quote.pcrDigest;
let attested_pcr = &attested_pcr.buffer[..attested_pcr.size as usize];
let mut hasher = Hasher::new(hash_alg_to_message_digest(hash_algo)?)?;
for i in 0..pcr_data.count {
let pcr = pcr_data.digests[i as usize];
hasher.update(&pcr.buffer[..pcr.size as usize])?;
}
let pcr_digest = hasher.finish()?;
log::trace!(
"Attested to PCR digest: {:?}, read PCR digest: {:?}",
attested_pcr,
pcr_digest,
);
Ok(memcmp::eq(attested_pcr, &pcr_digest))
}
const NUM_ATTESTATION_ATTEMPTS: i32 = 5;
fn perform_quote_and_pcr_read(
mut context: &mut Context,
ak_handle: KeyHandle,
nonce: &[u8],
pcrlist: PcrSelectionList,
) -> Result<(TPM2B_ATTEST, Signature, PcrSelectionList, PcrData)> {
let sig_scheme = get_sig_scheme(TpmSigScheme::default())?;
let nonce = nonce.try_into()?;
for attempt in 0..NUM_ATTESTATION_ATTEMP | {
let mut keydigest = DigestValues::new();
let keybytes = match pubkey.id() {
Id::RSA => pubkey.rsa()?.public_key_to_pem()?,
other_id => {
return Err(KeylimeError::Other(format!(
"Converting to digest value for key type {:?} is not yet implemented",
other_id
)));
}
};
// SHA256
let mut hasher = openssl::sha::Sha256::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha256, Digest::try_from(hashvec)?);
// SHA1
let mut hasher = openssl::sha::Sha1::new(); | identifier_body |
tpm.rs | u8>)> {
// Set encryption algorithm
let alg = match alg {
Some(a) => a,
None => {
match config_get(
"cloud_agent",
"tpm_encryption_alg",
)?
.as_str()
{
"rsa" => AsymmetricAlgorithm::Rsa,
"ecc" => AsymmetricAlgorithm::Ecc,
_ => return Err(KeylimeError::Configuration(String::from("Encryption algorithm provided in keylime.conf is not supported")))
}
}
};
// Retrieve EK handle, EK pub cert, and TPM pub object
let handle = ek::create_ek_object(context, alg, DefaultKey)?;
let cert = match ek::retrieve_ek_pubcert(context, alg) {
Ok(v) => Some(v),
Err(_) => {
warn!("No EK certificate found in TPM NVRAM");
None
}
};
let (tpm_pub, _, _) = context.read_public(handle)?;
let tpm_pub_vec = pub_to_vec(tpm_pub);
Ok((handle, cert, tpm_pub_vec))
}
fn unmarshal_tpms_attest(val: &[u8]) -> Result<TPMS_ATTEST> {
let mut resp = TPMS_ATTEST::default();
let mut offset = 0u64;
unsafe {
let res = Tss2_MU_TPMS_ATTEST_Unmarshal(
val[..].as_ptr(),
val.len() as u64,
&mut offset,
&mut resp,
);
if res != 0 {
panic!("Error converting"); //#[allow_ci]
}
}
Ok(resp)
}
// Multiple TPM objects need to be marshaled for Quoting. This macro will
// create the appropriate functions when called below. The macro is intended
// to help with any future similar marshaling functions.
macro_rules! create_marshal_fn {
($func:ident, $tpmobj:ty, $marshal:ident) => {
fn $func(t: $tpmobj) -> Vec<u8> {
let mut offset = 0u64;
let size = std::mem::size_of::<$tpmobj>();
let mut tpm_vec = Vec::with_capacity(size);
unsafe {
let res = $marshal(
&t,
tpm_vec.as_mut_ptr(),
tpm_vec.capacity() as u64,
&mut offset,
);
if res != 0 {
panic!("out of memory or invalid data from TPM"); //#[allow_ci]
}
// offset is a buffer, so after marshaling function is called it holds the
// number of bytes written to the vector
tpm_vec.set_len(offset as usize);
}
tpm_vec
}
};
}
// These marshaling functions use the macro above and are based on this format:
// https://github.com/fedora-iot/clevis-pin-tpm2/blob/main/src/tpm_objects.rs#L64
//... and on these marshaling functions:
// https://github.com/parallaxsecond/rust-tss-esapi/blob/main/tss-esapi-sys/src/ \
// bindings/x86_64-unknown-linux-gnu.rs#L16010
//
// Functions can be created using the following form:
// create_marshal_fn!(name_of_function_to_create, struct_to_be_marshaled, marshaling_function);
//
create_marshal_fn!(pub_to_vec, TPM2B_PUBLIC, Tss2_MU_TPM2B_PUBLIC_Marshal);
create_marshal_fn!(
sig_to_vec,
TPMT_SIGNATURE,
Tss2_MU_TPMT_SIGNATURE_Marshal
);
// Recreate how tpm2-tools creates the PCR out file. Roughly, this is a
// TPML_PCR_SELECTION + the number of TPML_DIGESTs + the TPML_DIGESTs.
// Reference:
// https://github.com/tpm2-software/tpm2-tools/blob/master/tools/tpm2_quote.c#L47-L91
//
// Note: tpm2-tools does not use its own documented marshaling functions for this output,
// so the below code recreates the idiosyncratic format tpm2-tools expects. The lengths
// of the vectors were determined by introspection into running tpm2-tools code. This is
// not ideal, and we should aim to move away from it if possible.
pub(crate) fn pcrdata_to_vec(
selection_list: PcrSelectionList,
pcrdata: PcrData,
) -> Vec<u8> {
let pcrsel: TPML_PCR_SELECTION = selection_list.into();
let pcrsel_vec: [u8; 132] = unsafe { std::mem::transmute(pcrsel) };
let digest: TPML_DIGEST = pcrdata.into();
let digest_vec: [u8; 532] = unsafe { std::mem::transmute(digest) };
let mut data_vec =
Vec::with_capacity(pcrsel_vec.len() + 4 + digest_vec.len());
data_vec.extend(&pcrsel_vec);
data_vec.extend(&1u32.to_le_bytes());
data_vec.extend(&digest_vec);
data_vec
}
/* Converts a hex value in the form of a string (ex. from keylime.conf's
* ek_handle) to a key handle.
*
* Input: &str
* Return: Key handle
*
* Example call:
* let ek_handle = tpm::ek_from_hex_str("0x81000000");
*/
pub(crate) fn ek_from_hex_str(val: &str) -> Result<KeyHandle> {
let val = val.trim_start_matches("0x");
Ok(KeyHandle::from(u32::from_str_radix(val, 16)?))
}
/* Creates AK and returns a tuple of its handle, name, and tpm2b_public as a vector.
*
* Input: Connection context, parent key's KeyHandle.
* Return: (Key handle, key name, TPM public object as a vector)
* Example call:
* let (key, name, tpm_pub) = tpm::create_ak(context, ek_handle)
*/
pub(crate) fn create_ak(
ctx: &mut Context,
handle: KeyHandle,
) -> Result<(KeyHandle, Name, Vec<u8>)> {
let ak = ak::create_ak(
ctx,
handle,
HashingAlgorithm::Sha256,
SignatureScheme::RsaSsa,
None,
DefaultKey,
)?;
let ak_tpm2b_pub = ak.out_public;
let tpm2_pub_vec = pub_to_vec(ak_tpm2b_pub);
let ak_handle =
ak::load_ak(ctx, handle, None, ak.out_private, ak.out_public)?;
let (_, name, _) = ctx.read_public(ak_handle)?;
Ok((ak_handle, name, tpm2_pub_vec))
}
const TSS_MAGIC: u32 = 3135029470;
fn parse_cred_and_secret(
keyblob: Vec<u8>,
) -> Result<(IDObject, EncryptedSecret)> {
let magic = u32::from_be_bytes(keyblob[0..4].try_into().unwrap()); //#[allow_ci]
let version = u32::from_be_bytes(keyblob[4..8].try_into().unwrap()); //#[allow_ci]
if magic != TSS_MAGIC {
return Err(KeylimeError::Other(format!("Error parsing cred and secret; TSS_MAGIC number {} does not match expected value {}", magic, TSS_MAGIC)));
}
if version != 1 {
return Err(KeylimeError::Other(format!(
"Error parsing cred and secret; version {} is not 1",
version
)));
}
let credsize = u16::from_be_bytes(keyblob[8..10].try_into().unwrap()); //#[allow_ci]
let secretsize = u16::from_be_bytes(
keyblob[(10 + credsize as usize)..(12 + credsize as usize)]
.try_into()
.unwrap(), //#[allow_ci]
);
let credential = &keyblob[10..(10 + credsize as usize)];
let secret = &keyblob[(12 + credsize as usize)..];
let credential = IDObject::try_from(credential)?;
let secret = EncryptedSecret::try_from(secret)?;
Ok((credential, secret))
}
fn create_empty_session(
ctx: &mut Context,
ses_type: SessionType,
) -> Result<AuthSession> {
let session = ctx.start_auth_session(
None,
None,
None,
ses_type,
Cipher::aes_128_cfb().try_into()?,
HashingAlgorithm::Sha256,
)?;
let (ses_attrs, ses_attrs_mask) = SessionAttributesBuilder::new()
.with_encrypt(true)
.with_decrypt(true)
.build();
ctx.tr_sess_set_attributes(session.unwrap(), ses_attrs, ses_attrs_mask)?; //#[allow_ci]
Ok(session.unwrap()) //#[allow_ci]
}
pub(crate) fn activate_credential(
ctx: &mut Context,
keyblob: Vec<u8>,
ak: KeyHandle,
ek: KeyHandle,
) -> Result<Digest> {
let (credential, secret) = parse_cred_and_secret(keyblob)?;
let ek_auth = create_empty_session(ctx, SessionType::Policy)?;
// We authorize ses2 with PolicySecret(ENDORSEMENT) as per PolicyA
let _ = ctx.execute_with_nullauth_session(|context| {
context.policy_secret(
ek_auth.try_into()?,
AuthHandle::Endorsement,
Default::default(),
Default::default(),
Default::default(),
None,
)
})?;
let resp = ctx
.execute_with_sessions(
(Some(AuthSession::Password), Some(ek_auth), None),
|context| context.activate_credential(ak, ek, credential, secret),
)
.map_err(KeylimeError::from);
ctx.flush_context(ek.into())?;
resp
}
// Returns TSS struct corresponding to an algorithm specified as a string, ex.
// the string from the keylime.conf file.
pub(crate) fn get_hash_alg(alg: String) -> Result<HashingAlgorithm> {
match alg.as_str() {
"sha256" => Ok(HashingAlgorithm::Sha256),
"sha1" => Ok(HashingAlgorithm::Sha1),
other => {
Err(KeylimeError::Other(format!("{:?} not implemented", other)))
}
}
}
#[derive(Debug)]
pub(crate) enum TpmSigScheme {
AlgNull,
}
impl Default for TpmSigScheme {
fn | () -> Self {
TpmSigScheme::AlgNull
}
}
// Returns TSS struct corresponding to a signature scheme.
pub(crate) fn get_sig_scheme(
scheme: TpmSigScheme,
) -> Result<TPMT_SIG_SCHEME> {
match scheme {
// The TPM2_ALG_NULL sig scheme can be filled out with placeholder data
// in the details field.
TpmSigScheme::AlgNull => Ok(TPMT_SIG_SCHEME {
scheme: TPM2_ALG_NULL,
details: TPMU_SIG_SCHEME {
any: TPMS_SCHEME_HASH {
hashAlg: TPM2_ALG_NULL,
},
},
}),
_ => Err(KeylimeError::Other(format!(
"The signature scheme {:?} is not implemented",
scheme
))),
}
}
// Takes a public PKey and returns a DigestValue of it.
// Note: Currently, this creates a DigestValue including both SHA256 and
// SHA1 because these banks are checked by Keylime on the Python side.
pub(crate) fn pubkey_to_tpm_digest(
pubkey: &PKeyRef<Public>,
) -> Result<DigestValues> {
let mut keydigest = DigestValues::new();
let keybytes = match pubkey.id() {
Id::RSA => pubkey.rsa()?.public_key_to_pem()?,
other_id => {
return Err(KeylimeError::Other(format!(
"Converting to digest value for key type {:?} is not yet implemented",
other_id
)));
}
};
// SHA256
let mut hasher = openssl::sha::Sha256::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha256, Digest::try_from(hashvec)?);
// SHA1
let mut hasher = openssl::sha::Sha1::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha1, Digest::try_from(hashvec)?);
Ok(keydigest)
}
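// Illustration (hypothetical usage, not part of the original source):
// generating a throwaway RSA key with openssl and running its public half
// through the conversion above. Error conversion from openssl's ErrorStack
// is assumed to exist, since the function above already uses `?` on it.
#[allow(dead_code)]
fn example_digest_for_fresh_key() -> Result<DigestValues> {
    use openssl::{pkey::PKey, rsa::Rsa};
    let pem = Rsa::generate(2048)?.public_key_to_pem()?;
    let pubkey = PKey::public_key_from_pem(&pem)?;
    pubkey_to_tpm_digest(&pubkey)
}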
// Reads a mask in the form of some hex value, ex. "0x408000",
// translating bits that are set to pcrs to include in the list.
//
// The masks are sent from the tenant and cloud verifier to indicate
// the PCRs to include in a Quote. The LSB in the mask corresponds to
// PCR0. For example, keylime.conf specifies PCRs 15 and 22 under
// [tenant][tpm_policy]. As a bit mask, this would be represented as
// 0b010000001000000000000000, which translates to 0x408000.
//
// The mask is a string because it is sent as a string from the tenant
// and verifier. The output from this function can be used to call a
// Quote from the TSS ESAPI.
//
pub(crate) fn read_mask(mask: &str) -> Result<Vec<PcrSlot>> {
let mut pcrs = Vec::new();
let num = u32::from_str_radix(mask.trim_start_matches("0x"), 16)?;
// check which bits are set
for i in 0..32 {
if num & (1 << i) != 0 {
pcrs.push(
match i {
0 => PcrSlot::Slot0,
1 => PcrSlot::Slot1,
2 => PcrSlot::Slot2,
3 => PcrSlot::Slot3,
4 => PcrSlot::Slot4,
5 => PcrSlot::Slot5,
6 => PcrSlot::Slot6,
7 => PcrSlot::Slot7,
8 => PcrSlot::Slot8,
9 => PcrSlot::Slot9,
10 => PcrSlot::Slot10,
11 => PcrSlot::Slot11,
12 => PcrSlot::Slot12,
13 => PcrSlot::Slot13,
14 => PcrSlot::Slot14,
15 => PcrSlot::Slot15,
16 => PcrSlot::Slot16,
17 => PcrSlot::Slot17,
18 => PcrSlot::Slot18,
19 => PcrSlot::Slot19,
20 => PcrSlot::Slot20,
21 => PcrSlot::Slot21,
22 => PcrSlot::Slot22,
23 => PcrSlot::Slot23,
bit => return Err(KeylimeError::Other(format!("malformed mask in integrity quote: only pcrs 0-23 can be included, but mask included pcr {:?}", bit))),
},
)
}
}
Ok(pcrs)
}
// This encodes a quote string as input to Python Keylime's quote checking functionality.
// The quote, signature, and pcr blob are concatenated with ':' separators. To match the
// expected format, the quote, signature, and pcr blob must be individually compressed
// with zlib at the default compression level and then base64 encoded before concatenation.
//
// Reference:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6 \
// /keylime/tpm/tpm_main.py#L964-L975
pub(crate) fn encode_quote_string(
att: TPM2B_ATTEST,
sig: Signature,
pcrs_read: PcrSelectionList,
pcr_data: PcrData,
) -> Result<String> {
// marshal structs to vec in expected formats. these formats are
// dictated by tpm2_tools.
let att_vec = &att.attestationData[0..att.size as usize];
let sig_vec = sig_to_vec(sig.try_into()?);
let pcr_vec = pcrdata_to_vec(pcrs_read, pcr_data);
// zlib compression
let mut att_comp = ZlibEncoder::new(Vec::new(), Compression::default());
att_comp.write_all(att_vec)?;
let att_comp_finished = att_comp.finish()?;
let mut sig_comp = ZlibEncoder::new(Vec::new(), Compression::default());
sig_comp.write_all(&sig_vec)?;
let sig_comp_finished = sig_comp.finish()?;
let mut pcr_comp = ZlibEncoder::new(Vec::new(), Compression::default());
pcr_comp.write_all(&pcr_vec)?;
let pcr_comp_finished = pcr_comp.finish()?;
// base64 encoding
let att_str = base64::encode(att_comp_finished);
let sig_str = base64::encode(sig_comp_finished);
let pcr_str = base64::encode(pcr_comp_finished);
// create concatenated string
let mut quote = String::new();
quote.push_str(&att_str);
quote.push(':');
quote.push_str(&sig_str);
quote.push(':');
quote.push_str(&pcr_str);
Ok(quote)
}
// This function extends Pcr16 with the digest, then creates a PcrList
// from the given mask and pcr16.
// Note: Currently, this will build the list for both SHA256 and SHA1 as
// necessary for the Python components of Keylime.
pub(crate) fn build_pcr_list(
context: &mut Context,
digest: DigestValues,
mask: Option<&str>,
) -> Result<PcrSelectionList> {
// extend digest into pcr16
context.execute_with_nullauth_session(|ctx| {
ctx.pcr_reset(PcrHandle::Pcr16)?;
ctx.pcr_extend(PcrHandle::Pcr16, digest.to_owned())
})?;
// translate mask to vec of pcrs
let mut pcrs = match mask {
Some(m) => read_mask(m)?,
None => Vec::new(),
};
// add pcr16 if it isn't in the vec already
if !pcrs.iter().any(|&pcr| pcr == PcrSlot::Slot16) {
pcrs.push(PcrSlot::Slot16);
}
let ima_pcr_index = pcrs.iter().position(|&pcr| pcr == PcrSlot::Slot10);
let mut pcrlist = PcrSelectionListBuilder::new();
// remove IMA pcr before selecting for sha256 bank
if let Some(ima_pcr_index) = ima_pcr_index {
let _ = pcrs.remove(ima_pcr_index);
// add only IMA pcr for sha1 bank
let sha1_pcrs = vec![PcrSlot::Slot10];
pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha1, &sha1_pcrs);
}
pcrlist = pcrlist.with_selection(HashingAlgorithm::Sha256, &pcrs);
let pcrlist = pcrlist.build();
Ok(pcrlist)
}
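// As a sketch of the combined behavior: build_pcr_list with mask "0x408000"
// selects PCRs {15, 16, 22} in the SHA-256 bank (PCR16 is always added), and
// since PCR10 is not in that mask, no SHA-1 selection is added.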
// The pcr blob corresponds to the pcr out file that records the list of PCR values,
// specified by tpm2tools, ex. 'tpm2_quote... -o <pcrfilename>'. Read more here:
// https://github.com/tpm2-software/tpm2-tools/blob/master/man/tpm2_quote.1.md
//
// It is required by Python Keylime's check_quote functionality. For how the quote is
// checked, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L990
//
// For how the quote is created, see:
// https://github.com/keylime/keylime/blob/2dd9e5c968f33bf77110092af9268d13db1806c6/ \
// keylime/tpm/tpm_main.py#L965
//
pub(crate) fn make_pcr_blob(
context: &mut Context,
pcrlist: PcrSelectionList,
) -> Result<(PcrSelectionList, PcrData)> {
let (_, pcrs_read, pcr_data) =
context.execute_without_session(|ctx| ctx.pcr_read(&pcrlist))?;
if pcrs_read != pcrlist {
return Err(KeylimeError::Other(format!(
"could not read all pcrs; requested: {:?}, read: {:?}",
pcrlist, pcrs_read
)));
}
Ok((pcrs_read, pcr_data))
}
// Takes a TSS ESAPI HashingAlgorithm and returns the corresponding OpenSSL
// MessageDigest.
fn hash_alg_to_message_digest(
hash_alg: HashingAlgorithm,
) -> Result<MessageDigest> {
match hash_alg {
HashingAlgorithm::Sha256 => Ok(MessageDigest::sha256()),
HashingAlgorithm::Sha1 => Ok(MessageDigest::sha1()),
other => Err(KeylimeError::Other(format!(
"Unsupported hashing algorithm: {:?}",
other
))),
}
}
fn check_if_pcr_data_and_attestation_match(
hash_algo: HashingAlgorithm,
pcr_data: &PcrData,
attestation: &TPM2B_ATTEST,
) -> Result<bool> {
let pcr_data = TPML_DIGEST::try_from(pcr_data.clone())?;
let attestation = unmarshal_tpms_attest(&attestation.attestationData)?;
if attestation.type_ != TPM2_ST_ATTEST_QUOTE {
return Err(KeylimeError::Other(format!(
"Expected attestation type TPM2_ST_ATTEST_QUOTE, got {:?}",
attestation.type_
)));
}
let quote = unsafe { attestation.attested.quote };
let attested_pcr = quote.pcrDigest;
let attested_pcr = &attested_pcr.buffer[..attested_pcr.size as usize];
let mut hasher = Hasher::new(hash_alg_to_message_digest(hash_algo)?)?;
for i in 0..pcr_data.count {
let pcr = pcr_data.digests[i as usize];
hasher.update(&pcr.buffer[..pcr.size as usize])?;
}
let pcr_digest = hasher.finish()?;
log::trace!(
"Attested to PCR digest: {:?}, read PCR digest: {:?}",
attested_pcr,
pcr_digest,
);
Ok(memcmp::eq(attested_pcr, &pcr_digest))
}
const NUM_ATTESTATION_ATTEMPTS: i32 = 5;
fn perform_quote_and_pcr_read(
mut context: &mut Context,
ak_handle: KeyHandle,
nonce: &[u8],
pcrlist: PcrSelectionList,
) -> Result<(TPM2B_ATTEST, Signature, PcrSelectionList, PcrData)> {
let sig_scheme = get_sig_scheme(TpmSigScheme::default())?;
let nonce = nonce.try_into()?;
for attempt in 0..NUM_ATTESTATION_ATTEMP | default | identifier_name |
foreign.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic implementation of foreign API functions
use crate::api_impl::owner::check_ttl;
use crate::api_impl::owner_swap;
use crate::grin_core::core::amount_to_hr_string;
use crate::grin_keychain::Keychain;
use crate::grin_util::secp::key::SecretKey;
use crate::grin_util::Mutex;
use crate::internal::selection;
use crate::internal::{tx, updater};
use crate::proof::crypto::Hex;
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProofAddressType;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate_versions::SlateVersion;
use crate::Context;
use crate::{
BlockFees, CbData, Error, ErrorKind, NodeClient, Slate, SlatePurpose, TxLogEntryType,
VersionInfo, VersionedSlate, WalletBackend, WalletInst, WalletLCProvider,
};
use ed25519_dalek::PublicKey as DalekPublicKey;
use grin_wallet_util::OnionV3Address;
use std::sync::Arc;
use std::sync::RwLock;
use strum::IntoEnumIterator;
const FOREIGN_API_VERSION: u16 = 2;
const USER_MESSAGE_MAX_LEN: usize = 256;
lazy_static! {
/// Receive account can be specified separately and must apply to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
/// Set the account used for all receive operations
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn check_version() -> Result<VersionInfo, Error> {
// Proof address will be the onion address (Dalek Public Key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
})
}
/// Build a coinbase transaction
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
|
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
// That means it's not MQS, so we need to print it
if slate_message.is_some() {
println!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
);
} else {
println!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive a tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// Slate can be 'compact' - that means some of the data may be missing
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message received from TOR
pub fn receive_swap_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
owner_swap::swap_income_message(wallet_inst, keychain_mask, &message, None).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(())
}
/// Process swap marketplace message. Please note, the wallet plays only a minor role here;
/// the marketplace workflow is managed by the QT wallet.
pub fn marketplace_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<String, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let response =
owner_swap::marketplace_message(wallet_inst, keychain_mask, &message).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(response)
}
/// Utility method to decrypt the slate pack for receive operation.
/// Returns: slate, content, sender PK, recipient PK
pub fn decrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
encrypted_slate: VersionedSlate,
address_index: Option<u32>,
) -> Result<
(
Slate,
SlatePurpose,
Option<DalekPublicKey>,
Option<DalekPublicKey>,
),
Error,
>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let sec_key = proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)
.map_err(|e| {
ErrorKind::SlatepackDecodeError(format!("Unable to build key to decrypt, {}", e))
})?;
let sp = encrypted_slate.into_slatepack(&sec_key)?;
let sender = sp.get_sender();
let recipient = sp.get_recipient();
let content = sp.get_content();
let slate = sp.to_result_slate();
Ok((slate, content, sender, recipient))
}
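// A typical call site for the helper above might look like this (sketch; the
// `w`, `mask` and `vslate` bindings are assumed to exist at the call site):
//
//   let (slate, purpose, sender, recipient) = decrypt_slate(&mut *w, mask, vslate, None)?;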
/// Utility method to convert a Slate into a Versioned Slate.
pub fn encrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
version: Option<SlateVersion>,
content: SlatePurpose,
slatepack_recipient: Option<DalekPublicKey>,
address_index: Option<u32>,
use_test_rng: bool,
) -> Result<VersionedSlate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let slatepack_format = slatepack_recipient.is_some() || version == Some(SlateVersion::SP);
if slatepack_format {
// Can be not encrypted slate binary if slatepack_recipient is_none
let (slatepack_secret, slatepack_pk) = {
let keychain = w.keychain(keychain_mask)?;
let slatepack_secret =
proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)?;
let slatepack_pk = DalekPublicKey::from(&slatepack_secret);
(slatepack_secret, slatepack_pk)
};
Ok(VersionedSlate::into_version(
slate.clone(),
version.unwrap_or(SlateVersion::SP),
content,
slatepack_pk,
slatepack_recipient,
&slatepack_secret,
use_test_rng,
)?)
} else {
// Plain slate format
let version = version.unwrap_or(slate.lowest_version());
Ok(
VersionedSlate::into_version_plain(slate.clone(), version).map_err(|e| {
ErrorKind::SlatepackEncodeError(format!("Unable to build a slate, {}", e))
})?,
)
}
}
| {
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
} | identifier_body |
foreign.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic implementation of foreign API functions
use crate::api_impl::owner::check_ttl;
use crate::api_impl::owner_swap;
use crate::grin_core::core::amount_to_hr_string;
use crate::grin_keychain::Keychain;
use crate::grin_util::secp::key::SecretKey;
use crate::grin_util::Mutex;
use crate::internal::selection;
use crate::internal::{tx, updater};
use crate::proof::crypto::Hex;
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProofAddressType;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate_versions::SlateVersion;
use crate::Context;
use crate::{
BlockFees, CbData, Error, ErrorKind, NodeClient, Slate, SlatePurpose, TxLogEntryType,
VersionInfo, VersionedSlate, WalletBackend, WalletInst, WalletLCProvider,
};
use ed25519_dalek::PublicKey as DalekPublicKey;
use grin_wallet_util::OnionV3Address;
use std::sync::Arc;
use std::sync::RwLock;
use strum::IntoEnumIterator;
const FOREIGN_API_VERSION: u16 = 2;
const USER_MESSAGE_MAX_LEN: usize = 256;
lazy_static! {
/// Receive account can be specified separately and must apply to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
/// Set the account used for all receive operations
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn check_version() -> Result<VersionInfo, Error> {
// Proof address will be the onion address (Dalek Public Key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(), | pub fn build_coinbase<'a, T:?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
// That means it's not MQS, so we need to print it
if slate_message.is_some() {
println!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
);
} else {
println!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive a tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// Slate can be 'compact' - that means some of the data may be missing
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message received from TOR
pub fn receive_swap_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
owner_swap::swap_income_message(wallet_inst, keychain_mask, &message, None).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(())
}
/// Process swap marketplace message. Please note, the wallet plays only a minor role here;
/// the marketplace workflow is managed by the QT wallet.
pub fn marketplace_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<String, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let response =
owner_swap::marketplace_message(wallet_inst, keychain_mask, &message).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(response)
}
/// Utility method to decrypt the slate pack for receive operation.
/// Returns: slate, content, sender PK, recipient PK
pub fn decrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
encrypted_slate: VersionedSlate,
address_index: Option<u32>,
) -> Result<
(
Slate,
SlatePurpose,
Option<DalekPublicKey>,
Option<DalekPublicKey>,
),
Error,
>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let sec_key = proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)
.map_err(|e| {
ErrorKind::SlatepackDecodeError(format!("Unable to build key to decrypt, {}", e))
})?;
let sp = encrypted_slate.into_slatepack(&sec_key)?;
let sender = sp.get_sender();
let recipient = sp.get_recipient();
let content = sp.get_content();
let slate = sp.to_result_slate();
Ok((slate, content, sender, recipient))
}
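// A typical call site for the helper above might look like this (sketch; the
// `w`, `mask` and `vslate` bindings are assumed to exist at the call site):
//
//   let (slate, purpose, sender, recipient) = decrypt_slate(&mut *w, mask, vslate, None)?;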
/// Utility method to convert a Slate into a Versioned Slate.
pub fn encrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
version: Option<SlateVersion>,
content: SlatePurpose,
slatepack_recipient: Option<DalekPublicKey>,
address_index: Option<u32>,
use_test_rng: bool,
) -> Result<VersionedSlate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let slatepack_format = slatepack_recipient.is_some() || version == Some(SlateVersion::SP);
if slatepack_format {
// Can be not encrypted slate binary if slatepack_recipient is_none
let (slatepack_secret, slatepack_pk) = {
let keychain = w.keychain(keychain_mask)?;
let slatepack_secret =
proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)?;
let slatepack_pk = DalekPublicKey::from(&slatepack_secret);
(slatepack_secret, slatepack_pk)
};
Ok(VersionedSlate::into_version(
slate.clone(),
version.unwrap_or(SlateVersion::SP),
content,
slatepack_pk,
slatepack_recipient,
&slatepack_secret,
use_test_rng,
)?)
} else {
// Plain slate format
let version = version.unwrap_or(slate.lowest_version());
Ok(
VersionedSlate::into_version_plain(slate.clone(), version).map_err(|e| {
ErrorKind::SlatepackEncodeError(format!("Unable to build a slate, {}", e))
})?,
)
}
} | })
}
/// Build a coinbase transaction | random_line_split |
foreign.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic implementation of foreign API functions
use crate::api_impl::owner::check_ttl;
use crate::api_impl::owner_swap;
use crate::grin_core::core::amount_to_hr_string;
use crate::grin_keychain::Keychain;
use crate::grin_util::secp::key::SecretKey;
use crate::grin_util::Mutex;
use crate::internal::selection;
use crate::internal::{tx, updater};
use crate::proof::crypto::Hex;
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProofAddressType;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate_versions::SlateVersion;
use crate::Context;
use crate::{
BlockFees, CbData, Error, ErrorKind, NodeClient, Slate, SlatePurpose, TxLogEntryType,
VersionInfo, VersionedSlate, WalletBackend, WalletInst, WalletLCProvider,
};
use ed25519_dalek::PublicKey as DalekPublicKey;
use grin_wallet_util::OnionV3Address;
use std::sync::Arc;
use std::sync::RwLock;
use strum::IntoEnumIterator;
const FOREIGN_API_VERSION: u16 = 2;
const USER_MESSAGE_MAX_LEN: usize = 256;
lazy_static! {
/// Receive account can be specified separately and must apply to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
/// Set the account used for all receive operations
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn | () -> Result<VersionInfo, Error> {
// Proof address will be the onion address (Dalek Public Key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
})
}
/// Build a coinbase transaction
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
// That means it's not MQS, so we need to print it
if slate_message.is_some() {
println!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
);
} else {
println!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive a tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// Slate can be 'compact' - that means some of the data may be missing
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message received from TOR
pub fn receive_swap_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
owner_swap::swap_income_message(wallet_inst, keychain_mask, &message, None).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(())
}
/// Process swap marketplace message. Please note, the wallet plays only a minor role here;
/// the marketplace workflow is managed by the QT wallet.
pub fn marketplace_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<String, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let response =
owner_swap::marketplace_message(wallet_inst, keychain_mask, &message).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(response)
}
/// Utility method to decrypt the slate pack for receive operation.
/// Returns: slate, content, sender PK, recipient PK
pub fn decrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
encrypted_slate: VersionedSlate,
address_index: Option<u32>,
) -> Result<
(
Slate,
SlatePurpose,
Option<DalekPublicKey>,
Option<DalekPublicKey>,
),
Error,
>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let sec_key = proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)
.map_err(|e| {
ErrorKind::SlatepackDecodeError(format!("Unable to build key to decrypt, {}", e))
})?;
let sp = encrypted_slate.into_slatepack(&sec_key)?;
let sender = sp.get_sender();
let recipient = sp.get_recipient();
let content = sp.get_content();
let slate = sp.to_result_slate();
Ok((slate, content, sender, recipient))
}
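// A typical call site for the helper above might look like this (sketch; the
// `w`, `mask` and `vslate` bindings are assumed to exist at the call site):
//
//   let (slate, purpose, sender, recipient) = decrypt_slate(&mut *w, mask, vslate, None)?;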
/// Utility method to convert a Slate into a Versioned Slate.
pub fn encrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
version: Option<SlateVersion>,
content: SlatePurpose,
slatepack_recipient: Option<DalekPublicKey>,
address_index: Option<u32>,
use_test_rng: bool,
) -> Result<VersionedSlate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let slatepack_format = slatepack_recipient.is_some() || version == Some(SlateVersion::SP);
if slatepack_format {
// Can be not encrypted slate binary if slatepack_recipient is_none
let (slatepack_secret, slatepack_pk) = {
let keychain = w.keychain(keychain_mask)?;
let slatepack_secret =
proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)?;
let slatepack_pk = DalekPublicKey::from(&slatepack_secret);
(slatepack_secret, slatepack_pk)
};
Ok(VersionedSlate::into_version(
slate.clone(),
version.unwrap_or(SlateVersion::SP),
content,
slatepack_pk,
slatepack_recipient,
&slatepack_secret,
use_test_rng,
)?)
} else {
// Plain slate format
let version = version.unwrap_or(slate.lowest_version());
Ok(
VersionedSlate::into_version_plain(slate.clone(), version).map_err(|e| {
ErrorKind::SlatepackEncodeError(format!("Unable to build a slate, {}", e))
})?,
)
}
}
| check_version | identifier_name |
foreign.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic implementation of foreign API functions
use crate::api_impl::owner::check_ttl;
use crate::api_impl::owner_swap;
use crate::grin_core::core::amount_to_hr_string;
use crate::grin_keychain::Keychain;
use crate::grin_util::secp::key::SecretKey;
use crate::grin_util::Mutex;
use crate::internal::selection;
use crate::internal::{tx, updater};
use crate::proof::crypto::Hex;
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProofAddressType;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate_versions::SlateVersion;
use crate::Context;
use crate::{
BlockFees, CbData, Error, ErrorKind, NodeClient, Slate, SlatePurpose, TxLogEntryType,
VersionInfo, VersionedSlate, WalletBackend, WalletInst, WalletLCProvider,
};
use ed25519_dalek::PublicKey as DalekPublicKey;
use grin_wallet_util::OnionV3Address;
use std::sync::Arc;
use std::sync::RwLock;
use strum::IntoEnumIterator;
const FOREIGN_API_VERSION: u16 = 2;
const USER_MESSAGE_MAX_LEN: usize = 256;
lazy_static! {
/// Receive account can be specified separately and must apply to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
/// Set the account used for all receive operations
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn check_version() -> Result<VersionInfo, Error> {
// Proof address will be the onion address (Dalek Public Key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
})
}
/// Build a coinbase transaction
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
// That means it's not MQS, so we need to print it
if slate_message.is_some() {
println!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
);
} else {
println!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate |
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive a tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// Slate can be 'compact' - that means some of the data may be missing
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message received from TOR
pub fn receive_swap_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
owner_swap::swap_income_message(wallet_inst, keychain_mask, &message, None).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(())
}
/// Process swap marketplace message. Please note, the wallet plays only a minor role here;
/// the marketplace workflow is managed by the QT wallet.
pub fn marketplace_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<String, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let response =
owner_swap::marketplace_message(wallet_inst, keychain_mask, &message).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(response)
}
/// Utility method to decrypt the slate pack for receive operation.
/// Returns: slate, content, sender PK, recipient PK
pub fn decrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
encrypted_slate: VersionedSlate,
address_index: Option<u32>,
) -> Result<
(
Slate,
SlatePurpose,
Option<DalekPublicKey>,
Option<DalekPublicKey>,
),
Error,
>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let sec_key = proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)
.map_err(|e| {
ErrorKind::SlatepackDecodeError(format!("Unable to build key to decrypt, {}", e))
})?;
let sp = encrypted_slate.into_slatepack(&sec_key)?;
let sender = sp.get_sender();
let recipient = sp.get_recipient();
let content = sp.get_content();
let slate = sp.to_result_slate();
Ok((slate, content, sender, recipient))
}
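// A typical call site for the helper above might look like this (sketch; the
// `w`, `mask` and `vslate` bindings are assumed to exist at the call site):
//
//   let (slate, purpose, sender, recipient) = decrypt_slate(&mut *w, mask, vslate, None)?;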
/// Utility method to convert a Slate into a Versioned Slate.
pub fn encrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
version: Option<SlateVersion>,
content: SlatePurpose,
slatepack_recipient: Option<DalekPublicKey>,
address_index: Option<u32>,
use_test_rng: bool,
) -> Result<VersionedSlate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let slatepack_format = slatepack_recipient.is_some() || version == Some(SlateVersion::SP);
if slatepack_format {
// Can be not encrypted slate binary if slatepack_recipient is_none
let (slatepack_secret, slatepack_pk) = {
let keychain = w.keychain(keychain_mask)?;
let slatepack_secret =
proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)?;
let slatepack_pk = DalekPublicKey::from(&slatepack_secret);
(slatepack_secret, slatepack_pk)
};
Ok(VersionedSlate::into_version(
slate.clone(),
version.unwrap_or(SlateVersion::SP),
content,
slatepack_pk,
slatepack_recipient,
&slatepack_secret,
use_test_rng,
)?)
} else {
// Plain slate format
let version = version.unwrap_or(slate.lowest_version());
Ok(
VersionedSlate::into_version_plain(slate.clone(), version).map_err(|e| {
ErrorKind::SlatepackEncodeError(format!("Unable to build a slate, {}", e))
})?,
)
}
}
| {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
} | conditional_block |
renderer.rs | use crate::fft::*;
use crate::Opt;
use anyhow::Result;
use rustfft::num_traits::Zero;
use std::{fs::File, io::Read, path::PathBuf, slice};
use wgpu::util::DeviceExt;
use winit::{event::*, window::Window};
#[repr(transparent)]
#[derive(Copy, Clone)]
struct PodComplex(FftSample);
unsafe impl bytemuck::Zeroable for PodComplex {}
/// Safety: Complex<f32> is a repr(C) struct of two f32, and has alignment 4.
unsafe impl bytemuck::Pod for PodComplex {}
// PodComplex is casted to vec2 and requires alignment 8 when sent to the GPU.
// This is not a problem as long as the start position within the Buffer is aligned.
type PodVec = Vec<PodComplex>;
type PodSlice = [PodComplex];
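// Safety note for the cast below: PodComplex is repr(transparent) over
// FftSample, so a &[FftSample] can be reinterpreted as a &[PodComplex] with
// the same length, size and alignment.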
fn fft_as_pod(my_slice: &FftSlice) -> &PodSlice {
unsafe { std::slice::from_raw_parts(my_slice.as_ptr() as *const _, my_slice.len()) }
}
/// Sent to GPU. Controls FFT layout and options.
#[repr(C)]
#[derive(Copy, Clone)]
struct GpuRenderParameters {
/// Screen size.
screen_wx: u32,
screen_hy: u32,
/// Samples per second.
sample_rate: u32,
/// Number of FFT bins between 0 and Nyquist inclusive.
/// Equals nsamp/2 + 1.
fft_out_size: u32,
}
unsafe impl bytemuck::Zeroable for GpuRenderParameters {}
unsafe impl bytemuck::Pod for GpuRenderParameters {}
/// The longest allowed FFT is ???.
/// The real FFT produces `fft_input_size / 2 + 1` complex bins.
fn fft_out_size(fft_input_size: usize) -> usize {
fft_input_size / 2 + 1
}
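// Sanity check of the bin count (e.g. a 1024-sample real FFT yields 513 bins):
//
//   assert_eq!(fft_out_size(1024), 513);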
// Docs: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-swapchain/
// Code: https://github.com/sotrh/learn-wgpu/blob/master/code/beginner/tutorial2-swapchain/src/main.rs
// - https://github.com/sotrh/learn-wgpu/blob/3a46a215/code/beginner/tutorial2-swapchain/src/main.rs
pub struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
render_parameters: GpuRenderParameters,
fft_vec: PodVec,
render_parameters_buffer: wgpu::Buffer,
fft_vec_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
fn load_from_file(fname: &str) -> Result<String> {
let mut buf: Vec<u8> = vec![];
File::open(PathBuf::from(fname))?.read_to_end(&mut buf)?;
Ok(String::from_utf8(buf)?)
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window, opt: &Opt, sample_rate: u32) -> anyhow::Result<State> {
let size = window.inner_size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // TODO change to Mailbox?
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let vs_src = load_from_file("shaders/shader.vert")?;
let fs_src = load_from_file("shaders/shader.frag")?;
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(
&vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)?;
let fs_spirv = compiler.compile_into_spirv(
&fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)?;
let vs_module =
device.create_shader_module(wgpu::util::make_spirv(&vs_spirv.as_binary_u8()));
let fs_module =
device.create_shader_module(wgpu::util::make_spirv(&fs_spirv.as_binary_u8()));
// # FFT SSBO
let fft_out_size = fft_out_size(opt.fft_size);
let render_parameters = GpuRenderParameters {
screen_wx: size.width,
screen_hy: size.height,
fft_out_size: fft_out_size as u32,
sample_rate,
};
let fft_vec: PodVec = vec![PodComplex(FftSample::zero()); fft_out_size];
let render_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT layout (size)"),
contents: bytemuck::cast_slice(slice::from_ref(&render_parameters)),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let fft_vec_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT data"),
contents: bytemuck::cast_slice(&fft_vec),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
],
label: Some("bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(render_param_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(fft_vec_buffer.slice(..)),
},
],
label: Some("bind_group"),
});
// # Shader pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main", // 1.
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
// 2.
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList, // 1.
depth_stencil_state: None, // 2.
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16, // 3.
vertex_buffers: &[], // 4.
},
sample_count: 1, // 5.
sample_mask: !0, // 6.
alpha_to_coverage_enabled: false, // 7.
});
Ok(State {
surface, | sc_desc,
swap_chain,
size,
render_pipeline,
render_parameters,
fft_vec,
render_parameters_buffer: render_param_buffer,
fft_vec_buffer,
bind_group,
})
}
pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
pub fn input(&mut self, event: &WindowEvent) -> bool {
false
}
pub fn update(&mut self, spectrum: &FftSlice) {
self.render_parameters = GpuRenderParameters {
screen_wx: self.size.width,
screen_hy: self.size.height,
..self.render_parameters
};
self.queue.write_buffer(
&self.render_parameters_buffer,
0,
bytemuck::cast_slice(slice::from_ref(&self.render_parameters)),
);
self.fft_vec.copy_from_slice(fft_as_pod(spectrum));
self.queue
.write_buffer(&self.fft_vec_buffer, 0, bytemuck::cast_slice(&self.fft_vec));
}
pub fn render(&mut self) {
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline); // 2.
render_pass.set_bind_group(0, &self.bind_group, &[]);
render_pass.draw(0..6, 0..1); // 3.
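// No vertex buffers are bound, so drawing 6 vertices presumably has the vertex
// shader synthesize a full-screen quad (two triangles) from gl_VertexIndex.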
}
// submit will accept anything that implements IntoIter
self.queue.submit(std::iter::once(encoder.finish()));
}
} | device,
queue, | random_line_split |
renderer.rs | use crate::fft::*;
use crate::Opt;
use anyhow::Result;
use rustfft::num_traits::Zero;
use std::{fs::File, io::Read, path::PathBuf, slice};
use wgpu::util::DeviceExt;
use winit::{event::*, window::Window};
#[repr(transparent)]
#[derive(Copy, Clone)]
struct PodComplex(FftSample);
unsafe impl bytemuck::Zeroable for PodComplex {}
/// Safety: Complex<f32> is a repr(C) struct of two f32, and has alignment 4.
unsafe impl bytemuck::Pod for PodComplex {}
// PodComplex is cast to vec2 and requires alignment 8 when sent to the GPU.
// This is not a problem as long as the start position within the Buffer is aligned.
type PodVec = Vec<PodComplex>;
type PodSlice = [PodComplex];
fn fft_as_pod(my_slice: &FftSlice) -> &PodSlice {
unsafe { std::slice::from_raw_parts(my_slice.as_ptr() as *const _, my_slice.len()) }
}
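// Soundness sketch: PodComplex is #[repr(transparent)] over FftSample, so both
// slices share layout and element size; the pointer cast with an unchanged
// length therefore describes exactly the same memory.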
/// Sent to GPU. Controls FFT layout and options.
#[repr(C)]
#[derive(Copy, Clone)]
struct GpuRenderParameters {
/// Screen size.
screen_wx: u32,
screen_hy: u32,
/// Samples per second.
sample_rate: u32,
/// Number of FFT bins between 0 and Nyquist inclusive.
/// Equals nsamp/2 + 1.
fft_out_size: u32,
}
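// Layout note (assumption): four u32 fields occupy 16 bytes at offsets 0/4/8/12,
// which lines up with a std140 uniform block of four uints in the same order.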
unsafe impl bytemuck::Zeroable for GpuRenderParameters {}
unsafe impl bytemuck::Pod for GpuRenderParameters {}
/// The longest allowed FFT is ???.
/// The real FFT produces ??? complex bins.
fn fft_out_size(fft_input_size: usize) -> usize {
fft_input_size / 2 + 1
}
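// e.g. a 1024-point real FFT yields 1024 / 2 + 1 = 513 bins (DC through Nyquist).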
// Docs: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-swapchain/
// Code: https://github.com/sotrh/learn-wgpu/blob/master/code/beginner/tutorial2-swapchain/src/main.rs
// - https://github.com/sotrh/learn-wgpu/blob/3a46a215/code/beginner/tutorial2-swapchain/src/main.rs
pub struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
render_parameters: GpuRenderParameters,
fft_vec: PodVec,
render_parameters_buffer: wgpu::Buffer,
fft_vec_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
fn load_from_file(fname: &str) -> Result<String> {
let mut buf: Vec<u8> = vec![];
File::open(PathBuf::from(fname))?.read_to_end(&mut buf)?;
Ok(String::from_utf8(buf)?)
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window, opt: &Opt, sample_rate: u32) -> anyhow::Result<State> {
let size = window.inner_size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // TODO change to Mailbox?
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let vs_src = load_from_file("shaders/shader.vert")?;
let fs_src = load_from_file("shaders/shader.frag")?;
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(
&vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)?;
let fs_spirv = compiler.compile_into_spirv(
&fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)?;
let vs_module =
device.create_shader_module(wgpu::util::make_spirv(&vs_spirv.as_binary_u8()));
let fs_module =
device.create_shader_module(wgpu::util::make_spirv(&fs_spirv.as_binary_u8()));
// # FFT SSBO
let fft_out_size = fft_out_size(opt.fft_size);
let render_parameters = GpuRenderParameters {
screen_wx: size.width,
screen_hy: size.height,
fft_out_size: fft_out_size as u32,
sample_rate,
};
let fft_vec: PodVec = vec![PodComplex(FftSample::zero()); fft_out_size];
let render_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT layout (size)"),
contents: bytemuck::cast_slice(slice::from_ref(&render_parameters)),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let fft_vec_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT data"),
contents: bytemuck::cast_slice(&fft_vec),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
],
label: Some("bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(render_param_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(fft_vec_buffer.slice(..)),
},
],
label: Some("bind_group"),
});
// # Shader pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main", // 1.
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
// 2.
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList, // 1.
depth_stencil_state: None, // 2.
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16, // 3.
vertex_buffers: &[], // 4.
},
sample_count: 1, // 5.
sample_mask: !0, // 6.
alpha_to_coverage_enabled: false, // 7.
});
Ok(State {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
render_pipeline,
render_parameters,
fft_vec,
render_parameters_buffer: render_param_buffer,
fft_vec_buffer,
bind_group,
})
}
pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
pub fn input(&mut self, event: &WindowEvent) -> bool {
false
}
pub fn update(&mut self, spectrum: &FftSlice) |
pub fn render(&mut self) {
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline); // 2.
render_pass.set_bind_group(0, &self.bind_group, &[]);
render_pass.draw(0..6, 0..1); // 3.
}
// submit will accept anything that implements IntoIter
self.queue.submit(std::iter::once(encoder.finish()));
}
}
| {
self.render_parameters = GpuRenderParameters {
screen_wx: self.size.width,
screen_hy: self.size.height,
..self.render_parameters
};
self.queue.write_buffer(
&self.render_parameters_buffer,
0,
bytemuck::cast_slice(slice::from_ref(&self.render_parameters)),
);
self.fft_vec.copy_from_slice(fft_as_pod(spectrum));
self.queue
.write_buffer(&self.fft_vec_buffer, 0, bytemuck::cast_slice(&self.fft_vec));
} | identifier_body |
renderer.rs | use crate::fft::*;
use crate::Opt;
use anyhow::Result;
use rustfft::num_traits::Zero;
use std::{fs::File, io::Read, path::PathBuf, slice};
use wgpu::util::DeviceExt;
use winit::{event::*, window::Window};
#[repr(transparent)]
#[derive(Copy, Clone)]
struct PodComplex(FftSample);
unsafe impl bytemuck::Zeroable for PodComplex {}
/// Safety: Complex<f32> is a repr(C) struct of two f32, and has alignment 4.
unsafe impl bytemuck::Pod for PodComplex {}
// PodComplex is cast to vec2 and requires alignment 8 when sent to the GPU.
// This is not a problem as long as the start position within the Buffer is aligned.
type PodVec = Vec<PodComplex>;
type PodSlice = [PodComplex];
fn fft_as_pod(my_slice: &FftSlice) -> &PodSlice {
unsafe { std::slice::from_raw_parts(my_slice.as_ptr() as *const _, my_slice.len()) }
}
/// Sent to GPU. Controls FFT layout and options.
#[repr(C)]
#[derive(Copy, Clone)]
struct GpuRenderParameters {
/// Screen size.
screen_wx: u32,
screen_hy: u32,
/// Samples per second.
sample_rate: u32,
/// Number of FFT bins between 0 and Nyquist inclusive.
/// Equals nsamp/2 + 1.
fft_out_size: u32,
}
unsafe impl bytemuck::Zeroable for GpuRenderParameters {}
unsafe impl bytemuck::Pod for GpuRenderParameters {}
/// The longest allowed FFT is ???.
/// The real FFT produces ??? complex bins.
fn fft_out_size(fft_input_size: usize) -> usize {
fft_input_size / 2 + 1
}
// Docs: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-swapchain/
// Code: https://github.com/sotrh/learn-wgpu/blob/master/code/beginner/tutorial2-swapchain/src/main.rs
// - https://github.com/sotrh/learn-wgpu/blob/3a46a215/code/beginner/tutorial2-swapchain/src/main.rs
pub struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
render_parameters: GpuRenderParameters,
fft_vec: PodVec,
render_parameters_buffer: wgpu::Buffer,
fft_vec_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
fn load_from_file(fname: &str) -> Result<String> {
let mut buf: Vec<u8> = vec![];
File::open(PathBuf::from(fname))?.read_to_end(&mut buf)?;
Ok(String::from_utf8(buf)?)
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window, opt: &Opt, sample_rate: u32) -> anyhow::Result<State> {
let size = window.inner_size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // TODO change to Mailbox?
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let vs_src = load_from_file("shaders/shader.vert")?;
let fs_src = load_from_file("shaders/shader.frag")?;
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(
&vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)?;
let fs_spirv = compiler.compile_into_spirv(
&fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)?;
let vs_module =
device.create_shader_module(wgpu::util::make_spirv(&vs_spirv.as_binary_u8()));
let fs_module =
device.create_shader_module(wgpu::util::make_spirv(&fs_spirv.as_binary_u8()));
// # FFT SSBO
let fft_out_size = fft_out_size(opt.fft_size);
let render_parameters = GpuRenderParameters {
screen_wx: size.width,
screen_hy: size.height,
fft_out_size: fft_out_size as u32,
sample_rate,
};
let fft_vec: PodVec = vec![PodComplex(FftSample::zero()); fft_out_size];
let render_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT layout (size)"),
contents: bytemuck::cast_slice(slice::from_ref(&render_parameters)),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let fft_vec_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT data"),
contents: bytemuck::cast_slice(&fft_vec),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
],
label: Some("bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(render_param_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(fft_vec_buffer.slice(..)),
},
],
label: Some("bind_group"),
});
// # Shader pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main", // 1.
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
// 2.
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList, // 1.
depth_stencil_state: None, // 2.
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16, // 3.
vertex_buffers: &[], // 4.
},
sample_count: 1, // 5.
sample_mask: !0, // 6.
alpha_to_coverage_enabled: false, // 7.
});
Ok(State {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
render_pipeline,
render_parameters,
fft_vec,
render_parameters_buffer: render_param_buffer,
fft_vec_buffer,
bind_group,
})
}
pub fn | (&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
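// The swap chain is created against fixed surface dimensions, so it has to be
// rebuilt here; otherwise frames would keep presenting at the stale size.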
pub fn input(&mut self, event: &WindowEvent) -> bool {
false
}
pub fn update(&mut self, spectrum: &FftSlice) {
self.render_parameters = GpuRenderParameters {
screen_wx: self.size.width,
screen_hy: self.size.height,
..self.render_parameters
};
self.queue.write_buffer(
&self.render_parameters_buffer,
0,
bytemuck::cast_slice(slice::from_ref(&self.render_parameters)),
);
self.fft_vec.copy_from_slice(fft_as_pod(spectrum));
self.queue
.write_buffer(&self.fft_vec_buffer, 0, bytemuck::cast_slice(&self.fft_vec));
}
pub fn render(&mut self) {
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline); // 2.
render_pass.set_bind_group(0, &self.bind_group, &[]);
render_pass.draw(0..6, 0..1); // 3.
}
// submit will accept anything that implements IntoIter
self.queue.submit(std::iter::once(encoder.finish()));
}
}
| resize | identifier_name |
error.rs | //! Error and Result types.
use crate::database::Database;
use crate::types::Type;
use std::any::type_name;
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display};
use std::io;
#[allow(unused_macros)]
macro_rules! decode_err {
($s:literal, $($args:tt)*) => {
crate::Error::Decode(format!($s, $($args)*).into())
};
($expr:expr) => {
crate::Error::decode($expr)
};
}
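// Hypothetical call sites, one per macro arm:
//   decode_err!("invalid length {}", n) // formats into Error::Decode
//   decode_err!(source_error) // wraps an existing StdError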
/// A specialized `Result` type for rbatis_core.
pub type Result<T> = std::result::Result<T, Error>;
/// A generic error that represents all the ways a method can fail inside of rbatis_core.
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
/// Default Error
E(String),
/// Error communicating with the database.
Io(io::Error),
/// Connection URL was malformed.
UrlParse(url::ParseError),
/// An error was returned by the database.
Database(Box<dyn DatabaseError>),
/// No row was returned during [`query::Map::fetch_one`] or `QueryAs::fetch_one`.
///
/// [`query::Map::fetch_one`]: crate::query::Map::fetch_one
RowNotFound,
/// Column was not found by name in a Row (during [`Row::get`]).
///
/// [`Row::get`]: crate::row::Row::get
ColumnNotFound(Box<str>),
/// Column index was out of bounds (e.g., asking for column 4 in a 2-column row).
ColumnIndexOutOfBounds { index: usize, len: usize },
/// Unexpected or invalid data was encountered. This would indicate that we received
/// data that we were not expecting or it was in a format we did not understand. This
/// generally means either there is a programming error in a rbatis_core driver or
/// something with the connection or the database itself is corrupted.
///
/// Context is provided by the included error message.
Protocol(Box<str>),
/// A [`Pool::acquire`] timed out due to connections not becoming available or
/// because another task encountered too many errors while trying to open a new connection.
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
PoolTimedOut(Option<Box<dyn StdError + Send + Sync>>),
/// [`Pool::close`] was called while we were waiting in [`Pool::acquire`].
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
/// [`Pool::close`]: crate::pool::Pool::close
PoolClosed,
/// An error occurred while attempting to setup TLS.
/// This should only be returned from an explicit ask for TLS.
Tls(Box<dyn StdError + Send + Sync>),
/// An error occurred decoding data received from the database.
Decode(Box<dyn StdError + Send + Sync>),
}
impl Error {
#[allow(dead_code)]
pub(crate) fn decode<E>(err: E) -> Self
where
E: StdError + Send + Sync + 'static,
{
Error::Decode(err.into())
}
#[allow(dead_code)]
pub(crate) fn mismatched_types<DB: Database, T>(expected: DB::TypeInfo) -> Self
where
T: Type<DB>,
{
let ty_name = type_name::<T>();
return decode_err!(
"mismatched types; Rust type `{}` (as SQL type {}) is not compatible with SQL type {}",
ty_name,
T::type_info(),
expected
);
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::Io(error) => Some(error),
Error::UrlParse(error) => Some(error),
Error::PoolTimedOut(Some(error)) => Some(&**error),
Error::Decode(error) => Some(&**error),
Error::Tls(error) => Some(&**error),
Error::Database(error) => Some(error.as_ref_err()),
_ => None,
}
}
}
impl Display for Error {
// IntellijRust does not understand that [non_exhaustive] applies only for downstream crates
// noinspection RsMatchCheck
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::E(error) => write!(f, "{}", error),
Error::Io(error) => write!(f, "{}", error),
Error::UrlParse(error) => write!(f, "{}", error),
Error::Decode(error) => write!(f, "{}", error),
Error::Database(error) => Display::fmt(error, f),
Error::RowNotFound => f.write_str("found no row when we expected at least one"),
Error::ColumnNotFound(ref name) => {
write!(f, "no column found with the name {:?}", name)
}
Error::ColumnIndexOutOfBounds { index, len } => write!(
f,
"column index out of bounds: there are {} columns but the index is {}",
len, index
),
Error::Protocol(ref err) => f.write_str(err),
Error::PoolTimedOut(Some(ref err)) => {
write!(f, "timed out while waiting for an open connection: {}", err)
}
Error::PoolTimedOut(None) => {
write!(f, "timed out while waiting for an open connection")
}
Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"),
Error::Tls(ref err) => write!(f, "error during TLS upgrade: {}", err),
}
}
}
impl From<io::Error> for Error {
#[inline]
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<io::ErrorKind> for Error {
#[inline]
fn from(err: io::ErrorKind) -> Self {
Error::Io(err.into())
}
}
impl From<url::ParseError> for Error {
#[inline]
fn from(err: url::ParseError) -> Self {
Error::UrlParse(err)
}
}
impl From<ProtocolError<'_>> for Error {
#[inline]
fn from(err: ProtocolError) -> Self {
Error::Protocol(err.args.to_string().into_boxed_str())
}
}
impl From<UnexpectedNullError> for Error {
#[inline]
fn from(err: UnexpectedNullError) -> Self {
Error::Decode(err.into())
}
}
#[cfg(feature = "tls")]
#[cfg_attr(docsrs, doc(cfg(feature = "tls")))]
impl From<async_native_tls::Error> for Error {
#[inline]
fn from(err: async_native_tls::Error) -> Self {
Error::Tls(err.into())
}
}
impl From<TlsError<'_>> for Error {
#[inline]
fn from(err: TlsError<'_>) -> Self {
Error::Tls(err.args.to_string().into())
}
}
impl From<&str> for Error {
fn from(arg: &str) -> Self {
return Error::E(arg.to_string());
}
}
impl From<std::string::String> for Error {
fn from(arg: String) -> Self {
return Error::E(arg);
}
}
/// An error that was returned by the database.
pub trait DatabaseError: StdError + Send + Sync + 'static {
/// The primary, human-readable error message.
fn message(&self) -> &str;
/// The (SQLSTATE) code for the error.
fn code(&self) -> Option<&str> {
None
}
fn details(&self) -> Option<&str> {
None
}
fn hint(&self) -> Option<&str> {
None
}
fn table_name(&self) -> Option<&str> {
None
}
fn column_name(&self) -> Option<&str> {
None
}
fn constraint_name(&self) -> Option<&str> {
None
}
#[doc(hidden)]
fn as_ref_err(&self) -> &(dyn StdError + Send + Sync + 'static);
| #[doc(hidden)]
fn into_box_err(self: Box<Self>) -> Box<dyn StdError + Send + Sync + 'static>;
}
impl dyn DatabaseError {
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [StdError::downcast_ref]
/// which returns `Option`. This was a deliberate design decision in favor of brevity as in
/// almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast_ref] instead.
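///
/// ### Example
///
/// ```ignore
/// // A sketch assuming the `postgres` feature and an error raised by Postgres:
/// let pg_err = err.downcast_ref::<PgError>();
/// println!("{}", pg_err.message());
/// ```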
pub fn downcast_ref<T: DatabaseError>(&self) -> &T {
self.try_downcast_ref::<T>().unwrap_or_else(|| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
})
}
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `None` if the downcast fails (the types do not match)
pub fn try_downcast_ref<T: DatabaseError>(&self) -> Option<&T> {
self.as_ref_err().downcast_ref()
}
/// Only meant for internal use so no `try_` variant is currently provided
#[allow(dead_code)]
pub(crate) fn downcast_mut<T: DatabaseError>(&mut self) -> &mut T {
// tried to express this as the following:
//
// if let Some(e) = self.as_mut_err().downcast_mut() { return e; }
//
// however it didn't like using `self` again in the panic format
if self.as_ref_err().is::<T>() {
return self.as_mut_err().downcast_mut().unwrap();
}
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [std::error::Error::downcast]
/// which returns `Result`. This was a deliberate design decision in favor of
/// brevity as in almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast] instead.
pub fn downcast<T: DatabaseError>(self: Box<Self>) -> Box<T> {
self.try_downcast().unwrap_or_else(|e| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
e
)
})
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `Err(self)` if the downcast fails (the types do not match).
pub fn try_downcast<T: DatabaseError>(
self: Box<Self>,
) -> std::result::Result<Box<T>, Box<Self>> {
if self.as_ref_err().is::<T>() {
Ok(self
.into_box_err()
.downcast()
.expect("type mismatch between DatabaseError::as_ref_err() and into_box_err()"))
} else {
Err(self)
}
}
}
/// Used by the `protocol_err!()` macro for a lazily evaluated conversion to
/// `crate::Error::Protocol` so we can use the macro with `.ok_or()` without Clippy complaining.
pub(crate) struct ProtocolError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! protocol_err (
($($args:tt)*) => {
$crate::error::ProtocolError { args: format_args!($($args)*) }
}
);
pub(crate) struct TlsError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! tls_err {
($($args:tt)*) => { crate::error::TlsError { args: format_args!($($args)*)} };
}
/// An unexpected `NULL` was encountered during decoding.
///
/// Returned from `Row::get` if the value from the database is `NULL`
/// and you are not decoding into an `Option`.
#[derive(Debug, Clone, Copy)]
pub struct UnexpectedNullError;
impl Display for UnexpectedNullError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("unexpected null; try decoding as an `Option`")
}
}
impl StdError for UnexpectedNullError {}
impl Clone for Error {
fn clone(&self) -> Self {
Error::from(self.to_string())
}
fn clone_from(&mut self, source: &Self) {
*self = Self::from(source.to_string());
}
}
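// Note: cloning goes through Display, so every clone collapses to Error::E and
// drops the original variant and source chain.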
use serde::ser::{Serialize, Serializer};
use serde::{Deserialize, Deserializer};
use serde::de::{Visitor};
// This is what #[derive(Serialize)] would generate.
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_str())
}
}
struct ErrorVisitor;
impl<'de> Visitor<'de> for ErrorVisitor {
type Value = String;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a string")
}
fn visit_string<E>(self, v: String) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v)
}
fn visit_str<E>(self, v: &str) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v.to_string())
}
}
impl<'de> Deserialize<'de> for Error {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let r = deserializer.deserialize_string(ErrorVisitor)?;
return Ok(Error::from(r));
}
}
#[test]
fn test_json_error() {
let e = Error::from("fuck");
let s = serde_json::to_string(&e).unwrap();
println!("{}", s.as_str());
let new_e: Error = serde_json::from_str(s.as_str()).unwrap();
assert_eq!(new_e.to_string(), e.to_string());
} | #[doc(hidden)]
fn as_mut_err(&mut self) -> &mut (dyn StdError + Send + Sync + 'static);
| random_line_split |
error.rs | //! Error and Result types.
use crate::database::Database;
use crate::types::Type;
use std::any::type_name;
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display};
use std::io;
#[allow(unused_macros)]
macro_rules! decode_err {
($s:literal, $($args:tt)*) => {
crate::Error::Decode(format!($s, $($args)*).into())
};
($expr:expr) => {
crate::Error::decode($expr)
};
}
/// A specialized `Result` type for rbatis_core.
pub type Result<T> = std::result::Result<T, Error>;
/// A generic error that represents all the ways a method can fail inside of rbatis_core.
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
/// Default Error
E(String),
/// Error communicating with the database.
Io(io::Error),
/// Connection URL was malformed.
UrlParse(url::ParseError),
/// An error was returned by the database.
Database(Box<dyn DatabaseError>),
/// No row was returned during [`query::Map::fetch_one`] or `QueryAs::fetch_one`.
///
/// [`query::Map::fetch_one`]: crate::query::Map::fetch_one
RowNotFound,
/// Column was not found by name in a Row (during [`Row::get`]).
///
/// [`Row::get`]: crate::row::Row::get
ColumnNotFound(Box<str>),
/// Column index was out of bounds (e.g., asking for column 4 in a 2-column row).
ColumnIndexOutOfBounds { index: usize, len: usize },
/// Unexpected or invalid data was encountered. This would indicate that we received
/// data that we were not expecting or it was in a format we did not understand. This
/// generally means either there is a programming error in a rbatis_core driver or
/// something with the connection or the database itself is corrupted.
///
/// Context is provided by the included error message.
Protocol(Box<str>),
/// A [`Pool::acquire`] timed out due to connections not becoming available or
/// because another task encountered too many errors while trying to open a new connection.
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
PoolTimedOut(Option<Box<dyn StdError + Send + Sync>>),
/// [`Pool::close`] was called while we were waiting in [`Pool::acquire`].
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
/// [`Pool::close`]: crate::pool::Pool::close
PoolClosed,
/// An error occurred while attempting to setup TLS.
/// This should only be returned from an explicit ask for TLS.
Tls(Box<dyn StdError + Send + Sync>),
/// An error occurred decoding data received from the database.
Decode(Box<dyn StdError + Send + Sync>),
}
impl Error {
#[allow(dead_code)]
pub(crate) fn decode<E>(err: E) -> Self
where
E: StdError + Send + Sync + 'static,
{
Error::Decode(err.into())
}
#[allow(dead_code)]
pub(crate) fn mismatched_types<DB: Database, T>(expected: DB::TypeInfo) -> Self
where
T: Type<DB>,
{
let ty_name = type_name::<T>();
return decode_err!(
"mismatched types; Rust type `{}` (as SQL type {}) is not compatible with SQL type {}",
ty_name,
T::type_info(),
expected
);
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::Io(error) => Some(error),
Error::UrlParse(error) => Some(error),
Error::PoolTimedOut(Some(error)) => Some(&**error),
Error::Decode(error) => Some(&**error),
Error::Tls(error) => Some(&**error),
Error::Database(error) => Some(error.as_ref_err()),
_ => None,
}
}
}
impl Display for Error {
// IntellijRust does not understand that [non_exhaustive] applies only for downstream crates
// noinspection RsMatchCheck
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::E(error) => write!(f, "{}", error),
Error::Io(error) => write!(f, "{}", error),
Error::UrlParse(error) => write!(f, "{}", error),
Error::Decode(error) => write!(f, "{}", error),
Error::Database(error) => Display::fmt(error, f),
Error::RowNotFound => f.write_str("found no row when we expected at least one"),
Error::ColumnNotFound(ref name) => {
write!(f, "no column found with the name {:?}", name)
}
Error::ColumnIndexOutOfBounds { index, len } => write!(
f,
"column index out of bounds: there are {} columns but the index is {}",
len, index
),
Error::Protocol(ref err) => f.write_str(err),
Error::PoolTimedOut(Some(ref err)) => {
write!(f, "timed out while waiting for an open connection: {}", err)
}
Error::PoolTimedOut(None) => {
write!(f, "timed out while waiting for an open connection")
}
Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"),
Error::Tls(ref err) => write!(f, "error during TLS upgrade: {}", err),
}
}
}
impl From<io::Error> for Error {
#[inline]
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<io::ErrorKind> for Error {
#[inline]
fn from(err: io::ErrorKind) -> Self {
Error::Io(err.into())
}
}
impl From<url::ParseError> for Error {
#[inline]
fn from(err: url::ParseError) -> Self {
Error::UrlParse(err)
}
}
impl From<ProtocolError<'_>> for Error {
#[inline]
fn from(err: ProtocolError) -> Self {
Error::Protocol(err.args.to_string().into_boxed_str())
}
}
impl From<UnexpectedNullError> for Error {
#[inline]
fn from(err: UnexpectedNullError) -> Self {
Error::Decode(err.into())
}
}
#[cfg(feature = "tls")]
#[cfg_attr(docsrs, doc(cfg(feature = "tls")))]
impl From<async_native_tls::Error> for Error {
#[inline]
fn from(err: async_native_tls::Error) -> Self {
Error::Tls(err.into())
}
}
impl From<TlsError<'_>> for Error {
#[inline]
fn from(err: TlsError<'_>) -> Self {
Error::Tls(err.args.to_string().into())
}
}
impl From<&str> for Error {
fn from(arg: &str) -> Self {
return Error::E(arg.to_string());
}
}
impl From<std::string::String> for Error {
fn from(arg: String) -> Self {
return Error::E(arg);
}
}
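// These impls let plain strings flow through `?` and `.into()`, e.g. (sketch):
//   fn check(flag: bool) -> Result<()> {
//       if !flag { return Err("flag must be set".into()); }
//       Ok(())
//   }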
/// An error that was returned by the database.
pub trait DatabaseError: StdError + Send + Sync + 'static {
/// The primary, human-readable error message.
fn message(&self) -> &str;
/// The (SQLSTATE) code for the error.
fn code(&self) -> Option<&str> {
None
}
fn details(&self) -> Option<&str> {
None
}
fn hint(&self) -> Option<&str> {
None
}
fn table_name(&self) -> Option<&str> {
None
}
fn column_name(&self) -> Option<&str> {
None
}
fn constraint_name(&self) -> Option<&str> {
None
}
#[doc(hidden)]
fn as_ref_err(&self) -> &(dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn as_mut_err(&mut self) -> &mut (dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn into_box_err(self: Box<Self>) -> Box<dyn StdError + Send + Sync + 'static>;
}
impl dyn DatabaseError {
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [StdError::downcast_ref]
/// which returns `Option`. This was a deliberate design decision in favor of brevity as in
/// almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast_ref] instead.
pub fn downcast_ref<T: DatabaseError>(&self) -> &T {
self.try_downcast_ref::<T>().unwrap_or_else(|| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
})
}
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `None` if the downcast fails (the types do not match)
pub fn try_downcast_ref<T: DatabaseError>(&self) -> Option<&T> {
self.as_ref_err().downcast_ref()
}
/// Only meant for internal use so no `try_` variant is currently provided
#[allow(dead_code)]
pub(crate) fn downcast_mut<T: DatabaseError>(&mut self) -> &mut T {
// tried to express this as the following:
//
// if let Some(e) = self.as_mut_err().downcast_mut() { return e; }
//
// however it didn't like using `self` again in the panic format
if self.as_ref_err().is::<T>() {
return self.as_mut_err().downcast_mut().unwrap();
}
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [std::error::Error::downcast]
/// which returns `Result`. This was a deliberate design decision in favor of
/// brevity as in almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast] instead.
pub fn downcast<T: DatabaseError>(self: Box<Self>) -> Box<T> {
self.try_downcast().unwrap_or_else(|e| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
e
)
})
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `Err(self)` if the downcast fails (the types do not match).
pub fn try_downcast<T: DatabaseError>(
self: Box<Self>,
) -> std::result::Result<Box<T>, Box<Self>> {
if self.as_ref_err().is::<T>() {
Ok(self
.into_box_err()
.downcast()
.expect("type mismatch between DatabaseError::as_ref_err() and into_box_err()"))
} else {
Err(self)
}
}
}
/// Used by the `protocol_err!()` macro for a lazily evaluated conversion to
/// `crate::Error::Protocol` so we can use the macro with `.ok_or()` without Clippy complaining.
pub(crate) struct ProtocolError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! protocol_err (
($($args:tt)*) => {
$crate::error::ProtocolError { args: format_args!($($args)*) }
}
);
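// Lazily-evaluated usage sketch, matching the `.ok_or()` pattern noted above:
//   let first = bytes.get(0).ok_or(protocol_err!("unexpected empty packet"))?;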
pub(crate) struct TlsError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! tls_err {
($($args:tt)*) => { crate::error::TlsError { args: format_args!($($args)*)} };
}
/// An unexpected `NULL` was encountered during decoding.
///
/// Returned from `Row::get` if the value from the database is `NULL`
/// and you are not decoding into an `Option`.
#[derive(Debug, Clone, Copy)]
pub struct UnexpectedNullError;
impl Display for UnexpectedNullError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("unexpected null; try decoding as an `Option`")
}
}
impl StdError for UnexpectedNullError {}
impl Clone for Error {
fn clone(&self) -> Self {
Error::from(self.to_string())
}
fn clone_from(&mut self, source: &Self) {
*self = Self::from(source.to_string());
}
}
use serde::ser::{Serialize, Serializer};
use serde::{Deserialize, Deserializer};
use serde::de::{Visitor};
// This is what #[derive(Serialize)] would generate.
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_str())
}
}
struct ErrorVisitor;
impl<'de> Visitor<'de> for ErrorVisitor {
type Value = String;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a string")
}
fn visit_string<E>(self, v: String) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v)
}
fn visit_str<E>(self, v: &str) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
|
}
impl<'de> Deserialize<'de> for Error {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let r = deserializer.deserialize_string(ErrorVisitor)?;
return Ok(Error::from(r));
}
}
#[test]
fn test_json_error() {
let e = Error::from("fuck");
let s = serde_json::to_string(&e).unwrap();
println!("{}", s.as_str());
let new_e: Error = serde_json::from_str(s.as_str()).unwrap();
assert_eq!(new_e.to_string(), e.to_string());
} | {
Ok(v.to_string())
} | identifier_body |
error.rs | //! Error and Result types.
use crate::database::Database;
use crate::types::Type;
use std::any::type_name;
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display};
use std::io;
#[allow(unused_macros)]
macro_rules! decode_err {
($s:literal, $($args:tt)*) => {
crate::Error::Decode(format!($s, $($args)*).into())
};
($expr:expr) => {
crate::Error::decode($expr)
};
}
/// A specialized `Result` type for rbatis_core.
pub type Result<T> = std::result::Result<T, Error>;
/// A generic error that represents all the ways a method can fail inside of rbatis_core.
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
/// Default Error
E(String),
/// Error communicating with the database.
Io(io::Error),
/// Connection URL was malformed.
UrlParse(url::ParseError),
/// An error was returned by the database.
Database(Box<dyn DatabaseError>),
/// No row was returned during [`query::Map::fetch_one`] or `QueryAs::fetch_one`.
///
/// [`query::Map::fetch_one`]: crate::query::Map::fetch_one
RowNotFound,
/// Column was not found by name in a Row (during [`Row::get`]).
///
/// [`Row::get`]: crate::row::Row::get
ColumnNotFound(Box<str>),
/// Column index was out of bounds (e.g., asking for column 4 in a 2-column row).
ColumnIndexOutOfBounds { index: usize, len: usize },
/// Unexpected or invalid data was encountered. This would indicate that we received
/// data that we were not expecting or it was in a format we did not understand. This
/// generally means either there is a programming error in a rbatis_core driver or
/// something with the connection or the database itself is corrupted.
///
/// Context is provided by the included error message.
Protocol(Box<str>),
/// A [`Pool::acquire`] timed out due to connections not becoming available or
/// because another task encountered too many errors while trying to open a new connection.
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
PoolTimedOut(Option<Box<dyn StdError + Send + Sync>>),
/// [`Pool::close`] was called while we were waiting in [`Pool::acquire`].
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
/// [`Pool::close`]: crate::pool::Pool::close
PoolClosed,
/// An error occurred while attempting to setup TLS.
/// This should only be returned from an explicit ask for TLS.
Tls(Box<dyn StdError + Send + Sync>),
/// An error occurred decoding data received from the database.
Decode(Box<dyn StdError + Send + Sync>),
}
impl Error {
#[allow(dead_code)]
pub(crate) fn decode<E>(err: E) -> Self
where
E: StdError + Send + Sync + 'static,
{
Error::Decode(err.into())
}
#[allow(dead_code)]
pub(crate) fn | <DB: Database, T>(expected: DB::TypeInfo) -> Self
where
T: Type<DB>,
{
let ty_name = type_name::<T>();
return decode_err!(
"mismatched types; Rust type `{}` (as SQL type {}) is not compatible with SQL type {}",
ty_name,
T::type_info(),
expected
);
}
}
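// Typical call shape (a sketch; the database type and `i64` stand in for
// whatever a Decode impl is checking):
//   return Err(Error::mismatched_types::<MyDatabase, i64>(actual_type_info));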
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::Io(error) => Some(error),
Error::UrlParse(error) => Some(error),
Error::PoolTimedOut(Some(error)) => Some(&**error),
Error::Decode(error) => Some(&**error),
Error::Tls(error) => Some(&**error),
Error::Database(error) => Some(error.as_ref_err()),
_ => None,
}
}
}
impl Display for Error {
// IntellijRust does not understand that [non_exhaustive] applies only for downstream crates
// noinspection RsMatchCheck
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::E(error) => write!(f, "{}", error),
Error::Io(error) => write!(f, "{}", error),
Error::UrlParse(error) => write!(f, "{}", error),
Error::Decode(error) => write!(f, "{}", error),
Error::Database(error) => Display::fmt(error, f),
Error::RowNotFound => f.write_str("found no row when we expected at least one"),
Error::ColumnNotFound(ref name) => {
write!(f, "no column found with the name {:?}", name)
}
Error::ColumnIndexOutOfBounds { index, len } => write!(
f,
"column index out of bounds: there are {} columns but the index is {}",
len, index
),
Error::Protocol(ref err) => f.write_str(err),
Error::PoolTimedOut(Some(ref err)) => {
write!(f, "timed out while waiting for an open connection: {}", err)
}
Error::PoolTimedOut(None) => {
write!(f, "timed out while waiting for an open connection")
}
Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"),
Error::Tls(ref err) => write!(f, "error during TLS upgrade: {}", err),
}
}
}
impl From<io::Error> for Error {
#[inline]
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<io::ErrorKind> for Error {
#[inline]
fn from(err: io::ErrorKind) -> Self {
Error::Io(err.into())
}
}
impl From<url::ParseError> for Error {
#[inline]
fn from(err: url::ParseError) -> Self {
Error::UrlParse(err)
}
}
impl From<ProtocolError<'_>> for Error {
#[inline]
fn from(err: ProtocolError) -> Self {
Error::Protocol(err.args.to_string().into_boxed_str())
}
}
impl From<UnexpectedNullError> for Error {
#[inline]
fn from(err: UnexpectedNullError) -> Self {
Error::Decode(err.into())
}
}
#[cfg(feature = "tls")]
#[cfg_attr(docsrs, doc(cfg(feature = "tls")))]
impl From<async_native_tls::Error> for Error {
#[inline]
fn from(err: async_native_tls::Error) -> Self {
Error::Tls(err.into())
}
}
impl From<TlsError<'_>> for Error {
#[inline]
fn from(err: TlsError<'_>) -> Self {
Error::Tls(err.args.to_string().into())
}
}
impl From<&str> for Error {
fn from(arg: &str) -> Self {
return Error::E(arg.to_string());
}
}
impl From<std::string::String> for Error {
fn from(arg: String) -> Self {
return Error::E(arg);
}
}
/// An error that was returned by the database.
pub trait DatabaseError: StdError + Send + Sync + 'static {
/// The primary, human-readable error message.
fn message(&self) -> &str;
/// The (SQLSTATE) code for the error.
fn code(&self) -> Option<&str> {
None
}
fn details(&self) -> Option<&str> {
None
}
fn hint(&self) -> Option<&str> {
None
}
fn table_name(&self) -> Option<&str> {
None
}
fn column_name(&self) -> Option<&str> {
None
}
fn constraint_name(&self) -> Option<&str> {
None
}
#[doc(hidden)]
fn as_ref_err(&self) -> &(dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn as_mut_err(&mut self) -> &mut (dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn into_box_err(self: Box<Self>) -> Box<dyn StdError + Send + Sync + 'static>;
}
impl dyn DatabaseError {
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [StdError::downcast_ref]
/// which returns `Option`. This was a deliberate design decision in favor of brevity as in
/// almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast_ref] instead.
pub fn downcast_ref<T: DatabaseError>(&self) -> &T {
self.try_downcast_ref::<T>().unwrap_or_else(|| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
})
}
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `None` if the downcast fails (the types do not match)
pub fn try_downcast_ref<T: DatabaseError>(&self) -> Option<&T> {
self.as_ref_err().downcast_ref()
}
/// Only meant for internal use so no `try_` variant is currently provided
#[allow(dead_code)]
pub(crate) fn downcast_mut<T: DatabaseError>(&mut self) -> &mut T {
// tried to express this as the following:
//
// if let Some(e) = self.as_mut_err().downcast_mut() { return e; }
//
// however it didn't like using `self` again in the panic format
if self.as_ref_err().is::<T>() {
return self.as_mut_err().downcast_mut().unwrap();
}
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [std::error::Error::downcast]
/// which returns `Result`. This was a deliberate design decision in favor of
/// brevity as in almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast] instead.
pub fn downcast<T: DatabaseError>(self: Box<Self>) -> Box<T> {
self.try_downcast().unwrap_or_else(|e| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
e
)
})
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `Err(self)` if the downcast fails (the types do not match).
pub fn try_downcast<T: DatabaseError>(
self: Box<Self>,
) -> std::result::Result<Box<T>, Box<Self>> {
if self.as_ref_err().is::<T>() {
Ok(self
.into_box_err()
.downcast()
.expect("type mismatch between DatabaseError::as_ref_err() and into_box_err()"))
} else {
Err(self)
}
}
}
/// Used by the `protocol_err!()` macro for a lazily evaluated conversion to
/// `crate::Error::Protocol` so we can use the macro with `.ok_or()` without Clippy complaining.
pub(crate) struct ProtocolError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! protocol_err (
($($args:tt)*) => {
$crate::error::ProtocolError { args: format_args!($($args)*) }
}
);
pub(crate) struct TlsError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! tls_err {
($($args:tt)*) => { crate::error::TlsError { args: format_args!($($args)*)} };
}
/// An unexpected `NULL` was encountered during decoding.
///
/// Returned from `Row::get` if the value from the database is `NULL`
/// and you are not decoding into an `Option`.
#[derive(Debug, Clone, Copy)]
pub struct UnexpectedNullError;
impl Display for UnexpectedNullError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("unexpected null; try decoding as an `Option`")
}
}
impl StdError for UnexpectedNullError {}
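// Presumably the Option<T> decode path intercepts this error and turns a NULL
// into Ok(None), which is why the message suggests decoding as an `Option`.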
impl Clone for Error {
fn clone(&self) -> Self {
Error::from(self.to_string())
}
fn clone_from(&mut self, source: &Self) {
*self = Self::from(source.to_string());
}
}
use serde::ser::{Serialize, Serializer};
use serde::{Deserialize, Deserializer};
use serde::de::{Visitor};
// This is what #[derive(Serialize)] would generate.
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_str())
}
}
struct ErrorVisitor;
impl<'de> Visitor<'de> for ErrorVisitor {
type Value = String;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a string")
}
fn visit_string<E>(self, v: String) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v)
}
fn visit_str<E>(self, v: &str) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v.to_string())
}
}
impl<'de> Deserialize<'de> for Error {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let message = deserializer.deserialize_string(ErrorVisitor)?;
Ok(Error::from(message))
}
}
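// Consequence of the stringly (de)serialization above: a round-tripped `Error`
// retains only its rendered message; the original variant and source chain are
// not reconstructed.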
#[test]
fn test_json_error() {
let e = Error::from("example failure");
let s = serde_json::to_string(&e).unwrap();
println!("{}", s);
let new_e: Error = serde_json::from_str(&s).unwrap();
println!("{}", new_e);
}
// emitter.rs
extern crate xml;
use polyfill;
use std::str::FromStr;
use std::collections::BTreeMap;
/// Parse a float with appropriate panic message on failure.
macro_rules! parse_float {
($s:expr) => (f64::from_str($s).expect("Failed to parse float"))
}
pub struct Color {
r: u8, g: u8, b: u8
}
impl Color {
pub fn black() -> Color { Color { r: 0, g: 0, b: 0 } }
// pub fn white() -> Color { Color { r: u8::max_value(), g: u8::max_value(), b: u8::max_value() } }
pub fn parse(hexcode: &str) -> Color {
let mut chars = hexcode.chars();
// Skip over the leading '#'
{
let hash = chars.next().expect("Empty string passed as color hexcode");
assert_eq!(hash, '#');
}
// Grab 2 chars, parse those as hexadecimal
let mut next_component = || {
let a = chars.next().expect("Ran out of chars while parsing color hexcode");
let b = chars.next().expect("Ran out of chars while parsing color hexcode");
let ab = String::from_utf8(vec![a as u8, b as u8]).expect("What");
u8::from_str_radix(&ab, 16).expect("Invalid hex number in color string")
};
let r = next_component();
let g = next_component();
let b = next_component();
Color { r, g, b }
}
pub fn emit(&self) -> String {
format!(
"cocos2d::Color4F(cocos2d::Color3B({},{},{}))",
self.r, self.g, self.b
)
}
}
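// Illustrative round trip for `Color::parse` / `Color::emit`; the hex value is
// arbitrary and simply mirrors the `#rrggbb` format expected above.
#[cfg(test)]
mod color_tests {
    use super::Color;

    #[test]
    fn parse_and_emit_roundtrip() {
        let c = Color::parse("#ff8000");
        assert_eq!(c.emit(), "cocos2d::Color4F(cocos2d::Color3B(255,128,0))");
    }
}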
trait Shape {
/// Should generate code to draw the shape on a cocos2dx DrawNode
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>);
/// Should generate code for edge segments, and an encompassing shape
///
/// TODO probably also make those generated shapes have certain categories
/// and the encompassing should be a sensor.
fn emit_physics(&self, id: &str, physicsbody: &str);
}
pub struct Polygon {
verts: Vec<[f64; 2]>,
triangles: Vec<[usize; 3]>,
color: Color
}
impl Shape for Polygon {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Triangles for {}", id);
for ref t in &self.triangles {
println!(
"{}->drawTriangle(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
Emitter::varname(id, drawnode),
self.verts[t[0]][0], self.verts[t[0]][1],
self.verts[t[1]][0], self.verts[t[1]][1],
self.verts[t[2]][0], self.verts[t[2]][1],
color.unwrap_or(&self.color.emit())
);
}
}
fn emit_physics(&self, id: &str, physicsbody: &str) {
println!("// Physics for {}", id);
if self.verts.len() <= 1 {
println!("// {} does not have enough vertices for a polygon", id);
return;
}
let mut verts = self.verts.iter();
let mut vert_a = verts.next();
let mut vert_b = verts.next();
let first_point = vert_a.unwrap();
let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
while let (Some(a), Some(b)) = (vert_a, vert_b) {
emit_shape(&a, &b);
vert_a = vert_b;
vert_b = verts.next();
}
if self.verts.len() > 2 {
emit_shape(&vert_a.unwrap(), &first_point);
}
}
}
pub struct Circle {
cx: f64,
cy: f64,
r: f64,
color: Color
}
impl Shape for Circle {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Circle for {}", id);
// arguments: center, radius, angle, segments, color
println!(
"{}->drawSolidCircle(Vec2({:.10}f,{:.10}f), {:.10}f, 0.0f, 20, {});",
Emitter::varname(id, drawnode),
self.cx, self.cy, self.r,
color.unwrap_or(&self.color.emit())
);
}
fn emit_physics(&self, _id: &str, _physicsbody: &str) {
panic!("Can't do physics for circle yet (no use case)");
}
}
pub struct Rect {
x: f64,
y: f64,
w: f64,
h: f64,
color: Color
}
impl Shape for Rect {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Rect for {}", id);
// arguments: origin, destination, color
println!(
"{}->drawSolidRect(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
Emitter::varname(id, drawnode),
self.x, self.y,
self.x+self.w, self.y+self.h,
color.unwrap_or(&self.color.emit())
);
}
fn emit_physics(&self, id: &str, physicsbody: &str) {
println!("// Physics for {}", id);
let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
// bottom-left to bottom-right
emit_shape(&[self.x, self.y], &[self.x+self.w, self.y]);
// bottom-right to top-right
emit_shape(&[self.x+self.w, self.y], &[self.x+self.w, self.y+self.h]);
// top-right to top-left
emit_shape(&[self.x+self.w, self.y+self.h], &[self.x, self.y+self.h]);
// top-left to bottom-left
emit_shape(&[self.x, self.y+self.h], &[self.x, self.y]);
}
}
/// The emitter itself. Holds onto shapes relative to their id.
pub struct Emitter {
origin: Option<[f64; 2]>,
shapes: BTreeMap<String, Box<dyn Shape>>
}
impl Emitter {
pub fn new() -> Emitter {
Emitter {
origin: None,
shapes: BTreeMap::new()
}
}
///
/// Emit a single shape to stdout.
/// Returns true if a shape under `id` was found and emitted.
/// Returns false if there was no shape under `id`.
///
pub fn emit(&self, id: &str, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) -> bool {
match self.shapes.get(id) {
Some(shape) =>{
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
true
}
None => false
}
}
///
/// Emit all shapes to stdout.
///
pub fn emit_all(&self, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) {
for (ref id, ref shape) in &self.shapes {
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
}
}
///
/// How many shapes we've added.
///
pub fn len(&self) -> usize {
self.shapes.len()
}
///
/// Add a shape from xml attributes.
///
pub fn add_shape(&mut self, id: &str, tag: &str, attributes: &Vec<xml::attribute::OwnedAttribute>) {
let new_shape: Box<dyn Shape> = match tag {
"path" => Box::new(self.parse_polygon(attributes)),
"circle" => Box::new(self.parse_circle(attributes)),
"rect" => Box::new(self.parse_rect(attributes)),
_ => return
};
self.shapes.insert(id.to_owned(), new_shape);
}
///
/// Parse a polygon with vertex positions based on `self.origin`.
///
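/// Only a small subset of SVG path data is handled: one leading relative move
/// (`m x,y`), relative line pairs (`dx,dy`), and a closing `z` — e.g.
/// `d="m 10,10 5,0 0,5 z"`. Anything else panics.
///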
pub fn parse_polygon(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Polygon {
let ref d = attributes.iter().find(|ref a| a.name.local_name == "d")
.expect("Path lacking 'd' attribute")
.value;
// Fill a vec with all verts
let mut current_pos = [0.0, 0.0];
let mut verts = Vec::with_capacity(d.len() / 5);
{
let mut n = 0;
let mut exprs = d.split(' ');
while let Some(expr) = exprs.next() {
if expr == "m" {
if n == 0 {
// Starting point is origin
let expr = exprs.next().unwrap();
let mut xy = expr.split(',');
let sx = xy.next().expect("Invalid pair");
let sy = xy.next().expect("Invalid pair");
self.assign_position_from_origin(&mut current_pos, sx, sy);
verts.push(current_pos);
continue;
}
else { panic!("unexpected second 'm' command in path data") }
}
else if expr == "z" {
break
}
else if !expr.contains(',') {
panic!("Unsupported expression: {}", expr);
}
let mut xy = expr.split(',');
let x = xy.next().expect("Invalid pair");
let y = xy.next().expect("Invalid pair");
current_pos[0] += parse_float!(x);
current_pos[1] -= parse_float!(y);
verts.push(current_pos);
n += 1;
}
}
let ref style = attributes.iter().find(|ref a| a.name.local_name == "style")
.expect("Path lacking'style' attribute")
.value;
Polygon {
triangles: polyfill::triangle_indices(&verts, polyfill::TANGENTIAL),
verts: verts,
color: Emitter::parse_color_from_style(style).unwrap_or(Color::black())
}
}
///
/// Parse a circle with center point based on `self.origin`.
///
pub fn parse_circle(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Circle {
// cx cy r color
let mut params = (None, None, None, None);
for ref attr in attributes {
let name: &str = &attr.name.local_name;
match name {
"cx" => params.0 = Some(attr.value.clone()),
"cy" => params.1 = Some(attr.value.clone()),
"r" => params.2 = Some(attr.value.clone()),
"style" => params.3 = Some(attr.value.clone()),
_ => {}
}
if let (Some(cx), Some(cy), Some(r), Some(style)) = params {
let mut cxy = [0.0, 0.0];
self.assign_position_from_origin(&mut cxy, &cx, &cy);
return Circle {
cx: cxy[0], cy: cxy[1], r: parse_float!(&r),
color: Emitter::parse_color_from_style(&style).unwrap_or(Color::black())
};
}
}
panic!("Invalid circle");
}
///
/// Parse a rect, converting the SVG top-left origin into a bottom-left origin
/// in the flipped (y-up) coordinate space used by the emitter.
///
pub fn parse_rect(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Rect {
// x, y, w, h, style
let mut params = (None, None, None, None, None);
for ref attr in attributes {
let name: &str = &attr.name.local_name;
match name {
"x" => params.0 = Some(attr.value.clone()),
"y" => params.1 = Some(attr.value.clone()),
"width" => params.2 = Some(attr.value.clone()),
"height" => params.3 = Some(attr.value.clone()),
"style" => params.4 = Some(attr.value.clone()),
_ => {}
}
if let (Some(x), Some(y), Some(w), Some(h), Some(style)) = params {
let mut xy = [0.0, 0.0];
self.assign_position_from_origin(&mut xy, &x, &y);
let fw = parse_float!(&w);
let fh = parse_float!(&h);
return Rect {
x: xy[0], y: xy[1] - fh,
w: fw, h: fh,
color: Emitter::parse_color_from_style(&style).unwrap_or(Color::black())
}
}
}
panic!("Invalid rect");
}
// =========== Private helper functions ===========
fn parse_color_from_style(style: &str) -> Option<Color> {
let exprs = style.split(';');
for ref expr in exprs {
let mut keyval = expr.split(':');
let key = keyval.next().expect("Invalid style entry");
if key == "fill" {
let val = keyval.next().expect("Invalid style entry");
return Some(Color::parse(val));
}
}
None
}
fn varname(id: &str, drawnode: &str) -> String {
if drawnode.contains("{id}") {
drawnode.to_owned().replace("{id}", id)
}
else {
drawnode.to_owned()
}
}
fn assign_position_from_origin(&mut self, current_pos: &mut [f64; 2], sx: &str, sy: &str) {
let x = parse_float!(sx);
let y = -parse_float!(sy);
// Set this point to origin if we have none already,
// otherwise offset by this starting point.
match self.origin {
Some(origin) => *current_pos = [x - origin[0], y - origin[1]],
None => self.origin = Some([x, y])
}
}
}
// block.rs
use crate::error::{ViuError, ViuResult};
use crate::printer::Printer;
use crate::Config;
use ansi_colours::ansi256_from_rgb;
use image::{DynamicImage, GenericImageView, Rgba};
use std::io::Write;
use termcolor::{Buffer, BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
use crossterm::cursor::{MoveRight, MoveTo, MoveToPreviousLine};
use crossterm::execute;
const UPPER_HALF_BLOCK: &str = "\u{2580}";
const LOWER_HALF_BLOCK: &str = "\u{2584}";
const CHECKERBOARD_BACKGROUND_LIGHT: (u8, u8, u8) = (153, 153, 153);
const CHECKERBOARD_BACKGROUND_DARK: (u8, u8, u8) = (102, 102, 102);
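// How the half-block rendering works: each terminal cell displays two
// vertically stacked pixels by printing "▀" (or "▄") with the top pixel on one
// color channel (fore- or background) and the bottom pixel on the other.
// A minimal standalone illustration with raw ANSI escapes, assuming a
// truecolor-capable terminal (the code below goes through termcolor instead):
//
// print!("\x1b[38;2;255;0;0m\x1b[48;2;0;0;255m\u{2580}\x1b[0m");
// // one cell: red top half-pixel over blue bottom half-pixel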
pub struct BlockPrinter {}
impl Printer for BlockPrinter {
fn print(&self, img: &DynamicImage, config: &Config) -> ViuResult<(u32, u32)> {
// there are two kinds of buffers in this function:
// - out_buffer: a termcolor Buffer, used to collect all writing
// required to print a single image or frame
// - row_buffer: Vec<ColorSpec>, which stores back- and foreground colors for a
// row of terminal cells. When flushed, its output goes into out_buffer.
// Both are flushed on every terminal line (i.e. 2 pixel rows)
let stdout = BufferWriter::stdout(ColorChoice::Always);
let mut out_buffer = stdout.buffer();
// adjust y offset
if config.absolute_offset {
if config.y >= 0 {
// If absolute_offset, move to (0,y).
execute!(out_buffer, MoveTo(0, config.y as u16))?;
} else {
//Negative values do not make sense.
return Err(ViuError::InvalidConfiguration(
"absolute_offset is true but y offset is negative".to_owned(),
));
}
} else if config.y < 0 {
// MoveUp if negative
execute!(out_buffer, MoveToPreviousLine(-config.y as u16))?;
} else {
// Move down y lines
for _ in 0..config.y {
// writeln! is used instead of MoveDown to force scrolldown
// observed when config.y > 0 and cursor is on the last terminal line
writeln!(out_buffer)?;
}
}
// resize the image so that it fits in the constraints, if any
let resized_img;
let img = if config.resize {
resized_img = super::resize(&img, config.width, config.height);
&resized_img
} else {
img
};
let (width, _) = img.dimensions();
// TODO: position information is contained in the pixel
let mut curr_col_px = 0;
let mut curr_row_px = 0;
let mut row_buffer: Vec<ColorSpec> = Vec::with_capacity(width as usize);
// row_buffer building mode. At first the top colors are calculated and then the bottom
// Once the bottom row is ready, row_buffer is flushed
let mut mode = Mode::Top;
// iterate pixels and fill row_buffer
for pixel in img.pixels() {
// if the alpha of the pixel is 0, print a predefined pixel based on the position in order
// to mimic the checkerboard background. If the transparent option was given, move right instead
let color = if is_pixel_transparent(pixel) {
if config.transparent {
None
} else {
Some(get_transparency_color(
curr_row_px,
curr_col_px,
config.truecolor,
))
}
} else {
Some(get_color_from_pixel(pixel, config.truecolor))
};
if mode == Mode::Top {
// add a new ColorSpec to row_buffer
let mut c = ColorSpec::new();
c.set_bg(color);
row_buffer.push(c);
} else {
// upgrade an already existing ColorSpec
let colorspec_to_upg = &mut row_buffer[curr_col_px as usize];
colorspec_to_upg.set_fg(color);
}
curr_col_px += 1;
// if the buffer is full start adding the second row of pixels
if row_buffer.len() == width as usize {
if mode == Mode::Top {
mode = Mode::Bottom;
curr_col_px = 0;
curr_row_px += 1;
}
// only if the second row is completed, flush the buffer and start again
else if curr_col_px == width {
curr_col_px = 0;
curr_row_px += 1;
// move right if x offset is specified
if config.x > 0 {
execute!(out_buffer, MoveRight(config.x))?;
}
// flush the row_buffer into out_buffer
fill_out_buffer(&mut row_buffer, &mut out_buffer, false)?;
// write the line to stdout
print_buffer(&stdout, &mut out_buffer)?;
mode = Mode::Top;
} else {
// in the middle of the second row, more iterations are required
}
}
}
// buffer will be flushed if the image has an odd height
if !row_buffer.is_empty() {
fill_out_buffer(&mut row_buffer, &mut out_buffer, true)?;
}
// do a final write to stdout to print last row if length is odd, and reset cursor position
print_buffer(&stdout, &mut out_buffer)?;
// TODO: might be +1/2?
Ok((width, curr_row_px / 2))
}
}
// Send out_buffer to stdout. Empties it when it's done
fn print_buffer(stdout: &BufferWriter, out_buffer: &mut Buffer) -> ViuResult {
match stdout.print(out_buffer) {
Ok(_) => {
out_buffer.clear();
Ok(())
}
Err(e) => match e.kind() {
// Ignore broken pipe errors. They arise when piping output to `head`, for example,
// and panic is not desired.
std::io::ErrorKind::BrokenPipe => Ok(()),
_ => Err(ViuError::IO(e)),
},
}
}
// Translates the row_buffer, containing colors, into the out_buffer which will be flushed to the terminal
fn fill_out_buffer(
row_buffer: &mut Vec<ColorSpec>,
out_buffer: &mut Buffer,
is_last_row: bool,
) -> ViuResult {
let mut out_color;
let mut out_char;
let mut new_color;
for c in row_buffer.iter() {
// If a flush is needed it means that only one row with UPPER_HALF_BLOCK must be printed
// because it is the last row, hence it contains only 1 pixel
if is_last_row {
new_color = ColorSpec::new();
if let Some(bg) = c.bg() {
new_color.set_fg(Some(*bg));
out_char = UPPER_HALF_BLOCK;
} else {
execute!(out_buffer, MoveRight(1))?;
continue;
}
out_color = &new_color;
} else {
match (c.fg(), c.bg()) {
(None, None) => {
// completely transparent
execute!(out_buffer, MoveRight(1))?;
continue;
}
(Some(bottom), None) => {
// only top transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*bottom));
out_color = &new_color;
out_char = LOWER_HALF_BLOCK;
}
(None, Some(top)) => {
// only bottom transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*top));
out_color = &new_color;
out_char = UPPER_HALF_BLOCK;
}
(Some(_top), Some(_bottom)) => {
// both parts have a color
out_color = c;
out_char = LOWER_HALF_BLOCK;
}
}
}
out_buffer.set_color(out_color)?;
write!(out_buffer, "{}", out_char)?;
}
out_buffer.reset()?;
writeln!(out_buffer)?;
row_buffer.clear();
Ok(())
}
fn is_pixel_transparent(pixel: (u32, u32, Rgba<u8>)) -> bool {
let (_x, _y, data) = pixel;
data[3] == 0
}
fn get_transparency_color(row: u32, col: u32, truecolor: bool) -> Color {
//imitate the transparent chess board pattern
let rgb = if row % 2 == col % 2 {
CHECKERBOARD_BACKGROUND_DARK
} else {
CHECKERBOARD_BACKGROUND_LIGHT
};
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
fn get_color_from_pixel(pixel: (u32, u32, Rgba<u8>), truecolor: bool) -> Color {
let (_x, _y, data) = pixel;
let rgb = (data[0], data[1], data[2]);
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
// enum used to keep track where the current line of pixels processed should be displayed - as
// background or foreground color
#[derive(PartialEq)]
enum Mode {
Top,
Bottom,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_block_printer_small() {
let img = DynamicImage::ImageRgba8(image::RgbaImage::new(20, 6));
let config = Config {
width: Some(40),
height: None,
absolute_offset: false,
transparent: true,
..Default::default()
};
let (w, h) = BlockPrinter {}.print(&img, &config).unwrap();
assert_eq!(w, 20);
assert_eq!(h, 3);
}
// TODO: failing on Windows. Why?
#[test]
fn test_block_printer_large() {
let img = DynamicImage::ImageRgba8(image::RgbaImage::new(2000, 1000));
let config = Config {
width: Some(160),
height: None,
absolute_offset: false,
transparent: true,
..Default::default()
};
let (w, h) = BlockPrinter {}.print(&img, &config).unwrap();
assert_eq!(w, 160);
assert_eq!(h, 40);
}
}
// immutable.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::fmt::Debug;
use std::iter::FromIterator;
use std::ptr::NonNull;
use std::sync::Arc;
use std::{convert::AsRef, usize};
use crate::util::bit_chunk_iterator::BitChunks;
use crate::{
bytes::{Bytes, Deallocation},
datatypes::ArrowNativeType,
ffi,
};
use super::ops::bitwise_unary_op_helper;
use super::MutableBuffer;
/// Buffer represents a contiguous memory region that can be shared with other buffers and across
/// thread boundaries.
#[derive(Clone, PartialEq, Debug)]
pub struct Buffer {
/// the internal byte buffer.
data: Arc<Bytes>,
/// The offset into the buffer.
offset: usize,
}
impl Buffer {
/// Auxiliary method to create a new Buffer
#[inline]
pub fn from_bytes(bytes: Bytes) -> Self {
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Initializes a [Buffer] from a slice of items.
pub fn from_slice_ref<U: ArrowNativeType, T: AsRef<[U]>>(items: &T) -> Self {
let slice = items.as_ref();
let capacity = slice.len() * std::mem::size_of::<U>();
let mut buffer = MutableBuffer::with_capacity(capacity);
buffer.extend_from_slice(slice);
buffer.into()
}
/// Creates a buffer from an existing memory region (must already be byte-aligned), this
/// `Buffer` will free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `capacity` - Total allocated memory for the pointer `ptr`, in **bytes**
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes. If the `ptr` and `capacity` come from a `Buffer`, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: NonNull<u8>, len: usize, capacity: usize) -> Self {
assert!(len <= capacity);
Buffer::build_with_arguments(ptr, len, Deallocation::Native(capacity))
}
/// Creates a buffer from an existing memory region (must already be byte-aligned), this
/// `Buffer` **does not** free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `data` - An [ffi::FFI_ArrowArray] with the data
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes and that the foreign deallocator frees the region.
pub unsafe fn from_unowned(
ptr: NonNull<u8>,
len: usize,
data: Arc<ffi::FFI_ArrowArray>,
) -> Self {
Buffer::build_with_arguments(ptr, len, Deallocation::Foreign(data))
}
/// Auxiliary method to create a new Buffer
unsafe fn build_with_arguments(
ptr: NonNull<u8>,
len: usize,
deallocation: Deallocation,
) -> Self {
let bytes = Bytes::new(ptr, len, deallocation);
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Returns the number of bytes in the buffer
pub fn len(&self) -> usize {
self.data.len() - self.offset
}
/// Returns the capacity of this buffer.
/// For externally owned buffers, this returns zero
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Returns whether the buffer is empty.
pub fn is_empty(&self) -> bool {
self.data.len() - self.offset == 0
}
/// Returns the byte slice stored in this buffer
pub fn as_slice(&self) -> &[u8] {
&self.data[self.offset..]
}
/// Returns a new [Buffer] that is a slice of this buffer starting at `offset`.
/// Doing so allows the same memory region to be shared between buffers.
/// # Panics
/// Panics iff `offset` is larger than `len`.
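/// # Example
///
/// A minimal sketch of zero-copy slicing (hedged: it mirrors `test_slice` in the tests below):
///
/// ```
/// # use arrow::buffer::Buffer;
/// let buf = Buffer::from(&[2u8, 4, 6, 8, 10]);
/// let tail = buf.slice(2); // shares the same allocation
/// assert_eq!(tail.as_slice(), &[6, 8, 10]);
/// ```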
pub fn slice(&self, offset: usize) -> Self {
assert!(
offset <= self.len(),
"the offset of the new Buffer cannot exceed the existing length"
);
Self {
data: self.data.clone(),
offset: self.offset + offset,
}
}
/// Returns a pointer to the start of this buffer.
///
/// Note that this should be used cautiously, and the returned pointer should not be
/// stored anywhere, to avoid dangling pointers.
pub fn as_ptr(&self) -> *const u8 {
unsafe { self.data.ptr().as_ptr().add(self.offset) }
}
/// View buffer as typed slice.
///
/// # Safety
///
/// `ArrowNativeType` is public so that it can be used as a trait bound for other public
/// components, such as the `ToByteSlice` trait. However, this means that it can be
/// implemented by user defined types, which it is not intended for.
pub unsafe fn typed_data<T: ArrowNativeType + num::Num>(&self) -> &[T] {
// JUSTIFICATION
// Benefit
// Many of the buffers represent specific types, and consumers of `Buffer` often need to re-interpret them.
// Soundness
// * The pointer is non-null by construction
// * alignment asserted below.
let (prefix, offsets, suffix) = self.as_slice().align_to::<T>();
assert!(prefix.is_empty() && suffix.is_empty());
offsets
}
/// Returns a slice of this buffer starting at a certain bit offset.
/// If the offset is byte-aligned the returned buffer is a shallow clone,
/// otherwise a new buffer is allocated and filled with a copy of the bits in the range.
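/// # Example
///
/// A hedged sketch: a byte-aligned bit `offset` takes the cheap shallow-clone path.
///
/// ```
/// # use arrow::buffer::Buffer;
/// let buf = Buffer::from(&[0b11110000u8, 0b00001111]);
/// let aligned = buf.bit_slice(8, 8); // bit offset 8 == byte offset 1
/// assert_eq!(aligned.as_slice(), &[0b00001111]);
/// ```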
pub fn bit_slice(&self, offset: usize, len: usize) -> Self {
if offset % 8 == 0 {
return self.slice(offset / 8);
}
bitwise_unary_op_helper(self, offset, len, |a| a)
}
/// Returns a `BitChunks` instance which can be used to iterate over this buffer's bits
/// in larger chunks and starting at arbitrary bit offsets.
/// Note that both `offset` and `length` are measured in bits.
pub fn bit_chunks(&self, offset: usize, len: usize) -> BitChunks {
BitChunks::new(self.as_slice(), offset, len)
}
/// Returns the number of 1-bits in this buffer.
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}
/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
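/// # Example
///
/// A hedged sketch (it mirrors `test_count_bits_offset_slice` in the tests below); bits are
/// counted starting from the least significant bit of the first byte:
///
/// ```
/// # use arrow::buffer::Buffer;
/// let buf = Buffer::from(&[0b01010101u8]);
/// assert_eq!(buf.count_set_bits_offset(0, 3), 2);
/// ```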
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
let chunks = self.bit_chunks(offset, len);
let mut count = chunks.iter().map(|c| c.count_ones() as usize).sum();
count += chunks.remainder_bits().count_ones() as usize;
count
}
}
/// Creating a `Buffer` instance by copying the memory from an `AsRef<[u8]>` into a newly
/// allocated memory region.
impl<T: AsRef<[u8]>> From<T> for Buffer {
fn from(p: T) -> Self {
// allocate aligned memory buffer
let slice = p.as_ref();
let len = slice.len();
let mut buffer = MutableBuffer::new(len);
buffer.extend_from_slice(slice);
buffer.into()
}
}
/// Creating a `Buffer` instance by storing the boolean values into the buffer
impl std::iter::FromIterator<bool> for Buffer {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = bool>,
{
MutableBuffer::from_iter(iter).into()
}
}
impl std::ops::Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
}
}
unsafe impl Sync for Buffer {}
unsafe impl Send for Buffer {}
impl From<MutableBuffer> for Buffer {
#[inline]
fn from(buffer: MutableBuffer) -> Self {
buffer.into_buffer()
}
}
impl Buffer {
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Example
/// ```
/// # use arrow::buffer::Buffer;
/// let v = vec![1u32];
/// let iter = v.iter().map(|x| x * 2);
/// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
/// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
/// ```
/// # Safety
/// This method assumes that the iterator's size is correct; it is undefined behavior
/// to use it on an iterator that reports an incorrect length.
// This implementation is required for two reasons:
// 1. there is no trait `TrustedLen` in stable rust and therefore
// we can't specialize `extend` for `TrustedLen` like `Vec` does.
// 2. `from_trusted_len_iter` is faster.
#[inline]
pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
iterator: I,
) -> Self {
MutableBuffer::from_trusted_len_iter(iterator).into()
}
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length, or returns an
/// error if any item of the iterator is an error.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Safety
/// This method assumes that the iterator's size is correct; it is undefined behavior
/// to use it on an iterator that reports an incorrect length.
#[inline]
pub unsafe fn try_from_trusted_len_iter<
E,
T: ArrowNativeType,
I: Iterator<Item = std::result::Result<T, E>>,
>(
iterator: I,
) -> std::result::Result<Self, E> {
Ok(MutableBuffer::try_from_trusted_len_iter(iterator)?.into())
}
}
impl<T: ArrowNativeType> FromIterator<T> for Buffer {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut iterator = iter.into_iter();
let size = std::mem::size_of::<T>();
// first iteration, which will likely reserve sufficient space for the buffer.
let mut buffer = match iterator.next() {
None => MutableBuffer::new(0),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut buffer = MutableBuffer::new(lower.saturating_add(1) * size);
unsafe {
std::ptr::write(buffer.as_mut_ptr() as *mut T, element);
buffer.set_len(size);
}
buffer
}
};
buffer.extend_from_iter(iterator);
buffer.into()
}
}
#[cfg(test)]
mod tests {
use std::thread;
use super::*;
#[test]
fn test_buffer_data_equality() {
let buf1 = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(buf1, buf2);
// slice with same offset should still preserve equality
let buf3 = buf1.slice(2);
assert_ne!(buf1, buf3);
let buf4 = buf2.slice(2);
assert_eq!(buf3, buf4);
// Different capacities should still preserve equality
let mut buf2 = MutableBuffer::new(65);
buf2.extend_from_slice(&[0u8, 1, 2, 3, 4]);
let buf2 = buf2.into();
assert_eq!(buf1, buf2);
// unequal because of different elements
let buf2 = Buffer::from(&[0, 0, 2, 3, 4]);
assert_ne!(buf1, buf2);
// unequal because of different length
let buf2 = Buffer::from(&[0, 1, 2, 3]);
assert_ne!(buf1, buf2);
}
#[test]
fn test_from_raw_parts() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_from_vec() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_copy() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = buf;
assert_eq!(5, buf2.len());
assert_eq!(64, buf2.capacity());
assert!(!buf2.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf2.as_slice());
}
#[test]
fn test_slice() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
let buf2 = buf.slice(2);
assert_eq!([6, 8, 10], buf2.as_slice());
assert_eq!(3, buf2.len());
assert_eq!(unsafe { buf.as_ptr().offset(2) }, buf2.as_ptr());
let buf3 = buf2.slice(1);
assert_eq!([8, 10], buf3.as_slice());
assert_eq!(2, buf3.len());
assert_eq!(unsafe { buf.as_ptr().offset(3) }, buf3.as_ptr());
let buf4 = buf.slice(5);
let empty_slice: [u8; 0] = [];
assert_eq!(empty_slice, buf4.as_slice());
assert_eq!(0, buf4.len());
assert!(buf4.is_empty());
assert_eq!(buf2.slice(2).as_slice(), &[10]);
}
#[test]
#[should_panic(
expected = "the offset of the new Buffer cannot exceed the existing length"
)]
fn test_slice_offset_out_of_bound() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
buf.slice(6);
}
#[test]
fn test_access_concurrently() {
let buffer = Buffer::from(vec![1, 2, 3, 4, 5]);
let buffer2 = buffer.clone();
assert_eq!([1, 2, 3, 4, 5], buffer.as_slice());
let buffer_copy = thread::spawn(move || {
// access buffer in another thread.
buffer
})
.join();
assert!(buffer_copy.is_ok());
assert_eq!(buffer2, buffer_copy.ok().unwrap());
}
macro_rules! check_as_typed_data {
($input: expr, $native_t: ty) => {{
let buffer = Buffer::from_slice_ref($input);
let slice: &[$native_t] = unsafe { buffer.typed_data::<$native_t>() };
assert_eq!($input, slice);
}};
}
#[test]
#[allow(clippy::float_cmp)]
fn test_as_typed_data() {
check_as_typed_data!(&[1i8, 3i8, 6i8], i8);
check_as_typed_data!(&[1u8, 3u8, 6u8], u8);
check_as_typed_data!(&[1i16, 3i16, 6i16], i16);
check_as_typed_data!(&[1i32, 3i32, 6i32], i32);
check_as_typed_data!(&[1i64, 3i64, 6i64], i64);
check_as_typed_data!(&[1u16, 3u16, 6u16], u16);
check_as_typed_data!(&[1u32, 3u32, 6u32], u32);
check_as_typed_data!(&[1u64, 3u64, 6u64], u64);
check_as_typed_data!(&[1f32, 3f32, 6f32], f32);
check_as_typed_data!(&[1f64, 3f64, 6f64], f64);
}
#[test]
fn test_count_bits() {
assert_eq!(0, Buffer::from(&[0b00000000]).count_set_bits());
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits());
assert_eq!(3, Buffer::from(&[0b00001101]).count_set_bits());
assert_eq!(6, Buffer::from(&[0b01001001, 0b01010010]).count_set_bits());
assert_eq!(16, Buffer::from(&[0b11111111, 0b11111111]).count_set_bits());
}
#[test]
fn test_count_bits_slice() {
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b00000000])
.slice(1)
.count_set_bits()
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111])
.slice(1)
.count_set_bits()
);
assert_eq!(
3,
Buffer::from(&[0b11111111, 0b11111111, 0b00001101])
.slice(2)
.count_set_bits()
);
assert_eq!(
6,
Buffer::from(&[0b11111111, 0b01001001, 0b01010010])
.slice(1)
.count_set_bits()
);
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111, 0b11111111, 0b11111111])
.slice(2)
.count_set_bits()
);
}
#[test]
fn test_count_bits_offset_slice() {
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 8));
assert_eq!(3, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 3));
assert_eq!(5, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 5));
assert_eq!(1, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 1));
assert_eq!(0, Buffer::from(&[0b11111111]).count_set_bits_offset(8, 0));
assert_eq!(2, Buffer::from(&[0b01010101]).count_set_bits_offset(0, 3));
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 16)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 10)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(3, 10)
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(8, 8)
);
assert_eq!(
5,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(11, 5)
);
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(16, 0)
);
assert_eq!(
2,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 5)
);
assert_eq!(
4,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 9)
);
}
}
| from_slice_ref | identifier_name |
immutable.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::fmt::Debug;
use std::iter::FromIterator;
use std::ptr::NonNull;
use std::sync::Arc;
use std::{convert::AsRef, usize};
use crate::util::bit_chunk_iterator::BitChunks;
use crate::{
bytes::{Bytes, Deallocation},
datatypes::ArrowNativeType,
ffi,
};
use super::ops::bitwise_unary_op_helper;
use super::MutableBuffer;
/// Buffer represents a contiguous memory region that can be shared with other buffers and across
/// thread boundaries.
#[derive(Clone, PartialEq, Debug)]
pub struct Buffer {
/// the internal byte buffer.
data: Arc<Bytes>,
/// The offset into the buffer.
offset: usize,
}
impl Buffer {
/// Auxiliary method to create a new Buffer
#[inline]
pub fn from_bytes(bytes: Bytes) -> Self {
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Initializes a [Buffer] from a slice of items.
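/// # Example
///
/// A minimal sketch: the resulting byte length is `items.len() * size_of::<U>()`.
///
/// ```
/// # use arrow::buffer::Buffer;
/// let buffer = Buffer::from_slice_ref(&[1u32, 2, 3]);
/// assert_eq!(buffer.len(), 12); // 3 items * 4 bytes each
/// ```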
pub fn from_slice_ref<U: ArrowNativeType, T: AsRef<[U]>>(items: &T) -> Self {
let slice = items.as_ref();
let capacity = slice.len() * std::mem::size_of::<U>();
let mut buffer = MutableBuffer::with_capacity(capacity);
buffer.extend_from_slice(slice);
buffer.into()
}
/// Creates a buffer from an existing memory region (must already be byte-aligned); this
/// `Buffer` will free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `capacity` - Total allocated memory for the pointer `ptr`, in **bytes**
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes. If the `ptr` and `capacity` come from a `Buffer`, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: NonNull<u8>, len: usize, capacity: usize) -> Self {
assert!(len <= capacity);
Buffer::build_with_arguments(ptr, len, Deallocation::Native(capacity))
}
/// Creates a buffer from an existing memory region (must already be byte-aligned); this
/// `Buffer` **does not** free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `data` - An [ffi::FFI_ArrowArray] with the data
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes and that the foreign deallocator frees the region.
pub unsafe fn from_unowned(
ptr: NonNull<u8>,
len: usize,
data: Arc<ffi::FFI_ArrowArray>,
) -> Self {
Buffer::build_with_arguments(ptr, len, Deallocation::Foreign(data))
}
/// Auxiliary method to create a new Buffer
unsafe fn build_with_arguments(
ptr: NonNull<u8>,
len: usize,
deallocation: Deallocation,
) -> Self {
let bytes = Bytes::new(ptr, len, deallocation);
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Returns the number of bytes in the buffer
pub fn len(&self) -> usize {
self.data.len() - self.offset
}
/// Returns the capacity of this buffer.
/// For externally owned buffers, this returns zero
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Returns whether the buffer is empty.
pub fn is_empty(&self) -> bool {
self.data.len() - self.offset == 0
}
/// Returns the byte slice stored in this buffer
pub fn as_slice(&self) -> &[u8] {
&self.data[self.offset..]
}
/// Returns a new [Buffer] that is a slice of this buffer starting at `offset`.
/// Doing so allows the same memory region to be shared between buffers.
/// # Panics
/// Panics iff `offset` is larger than `len`.
pub fn slice(&self, offset: usize) -> Self {
assert!(
offset <= self.len(),
"the offset of the new Buffer cannot exceed the existing length"
);
Self {
data: self.data.clone(),
offset: self.offset + offset,
}
}
/// Returns a pointer to the start of this buffer.
///
/// Note that this should be used cautiously, and the returned pointer should not be
/// stored anywhere, to avoid dangling pointers.
pub fn as_ptr(&self) -> *const u8 {
unsafe { self.data.ptr().as_ptr().add(self.offset) }
}
/// View buffer as typed slice.
///
/// # Safety
///
/// `ArrowNativeType` is public so that it can be used as a trait bound for other public
/// components, such as the `ToByteSlice` trait. However, this means that it can be
/// implemented by user defined types, which it is not intended for.
pub unsafe fn typed_data<T: ArrowNativeType + num::Num>(&self) -> &[T] |
/// Returns a slice of this buffer starting at a certain bit offset.
/// If the offset is byte-aligned the returned buffer is a shallow clone,
/// otherwise a new buffer is allocated and filled with a copy of the bits in the range.
pub fn bit_slice(&self, offset: usize, len: usize) -> Self {
if offset % 8 == 0 {
return self.slice(offset / 8);
}
bitwise_unary_op_helper(self, offset, len, |a| a)
}
/// Returns a `BitChunks` instance which can be used to iterate over this buffer's bits
/// in larger chunks and starting at arbitrary bit offsets.
/// Note that both `offset` and `length` are measured in bits.
pub fn bit_chunks(&self, offset: usize, len: usize) -> BitChunks {
BitChunks::new(self.as_slice(), offset, len)
}
/// Returns the number of 1-bits in this buffer.
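/// # Example
///
/// A hedged sketch (it mirrors `test_count_bits` in the tests below):
///
/// ```
/// # use arrow::buffer::Buffer;
/// assert_eq!(Buffer::from(&[0b00001101u8]).count_set_bits(), 3);
/// ```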
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}
/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
let chunks = self.bit_chunks(offset, len);
let mut count = chunks.iter().map(|c| c.count_ones() as usize).sum();
count += chunks.remainder_bits().count_ones() as usize;
count
}
}
/// Creating a `Buffer` instance by copying the memory from an `AsRef<[u8]>` into a newly
/// allocated memory region.
impl<T: AsRef<[u8]>> From<T> for Buffer {
fn from(p: T) -> Self {
// allocate aligned memory buffer
let slice = p.as_ref();
let len = slice.len();
let mut buffer = MutableBuffer::new(len);
buffer.extend_from_slice(slice);
buffer.into()
}
}
/// Creating a `Buffer` instance by storing the boolean values into the buffer
impl std::iter::FromIterator<bool> for Buffer {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = bool>,
{
MutableBuffer::from_iter(iter).into()
}
}
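// Hedged usage sketch (hypothetical caller code, not part of the original file): the impl
// above delegates to `MutableBuffer::from_iter`, which in this crate packs booleans one bit
// per value, so eight `bool`s fit in a single byte:
//
// let packed: Buffer = std::iter::repeat(true).take(8).collect();
// assert_eq!(packed.len(), 1);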
impl std::ops::Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
}
}
unsafe impl Sync for Buffer {}
unsafe impl Send for Buffer {}
impl From<MutableBuffer> for Buffer {
#[inline]
fn from(buffer: MutableBuffer) -> Self {
buffer.into_buffer()
}
}
impl Buffer {
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Example
/// ```
/// # use arrow::buffer::Buffer;
/// let v = vec![1u32];
/// let iter = v.iter().map(|x| x * 2);
/// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
/// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
/// ```
/// # Safety
/// This method assumes that the iterator's size is correct; it is undefined behavior
/// to use it on an iterator that reports an incorrect length.
// This implementation is required for two reasons:
// 1. there is no trait `TrustedLen` in stable rust and therefore
// we can't specialize `extend` for `TrustedLen` like `Vec` does.
// 2. `from_trusted_len_iter` is faster.
#[inline]
pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
iterator: I,
) -> Self {
MutableBuffer::from_trusted_len_iter(iterator).into()
}
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length, or returns an
/// error if any item of the iterator is an error.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Safety
/// This method assumes that the iterator's size is correct; it is undefined behavior
/// to use it on an iterator that reports an incorrect length.
#[inline]
pub unsafe fn try_from_trusted_len_iter<
E,
T: ArrowNativeType,
I: Iterator<Item = std::result::Result<T, E>>,
>(
iterator: I,
) -> std::result::Result<Self, E> {
Ok(MutableBuffer::try_from_trusted_len_iter(iterator)?.into())
}
}
impl<T: ArrowNativeType> FromIterator<T> for Buffer {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut iterator = iter.into_iter();
let size = std::mem::size_of::<T>();
// first iteration, which will likely reserve sufficient space for the buffer.
let mut buffer = match iterator.next() {
None => MutableBuffer::new(0),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut buffer = MutableBuffer::new(lower.saturating_add(1) * size);
unsafe {
std::ptr::write(buffer.as_mut_ptr() as *mut T, element);
buffer.set_len(size);
}
buffer
}
};
buffer.extend_from_iter(iterator);
buffer.into()
}
}
#[cfg(test)]
mod tests {
use std::thread;
use super::*;
#[test]
fn test_buffer_data_equality() {
let buf1 = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(buf1, buf2);
// slice with same offset should still preserve equality
let buf3 = buf1.slice(2);
assert_ne!(buf1, buf3);
let buf4 = buf2.slice(2);
assert_eq!(buf3, buf4);
// Different capacities should still preserve equality
let mut buf2 = MutableBuffer::new(65);
buf2.extend_from_slice(&[0u8, 1, 2, 3, 4]);
let buf2 = buf2.into();
assert_eq!(buf1, buf2);
// unequal because of different elements
let buf2 = Buffer::from(&[0, 0, 2, 3, 4]);
assert_ne!(buf1, buf2);
// unequal because of different length
let buf2 = Buffer::from(&[0, 1, 2, 3]);
assert_ne!(buf1, buf2);
}
#[test]
fn test_from_raw_parts() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_from_vec() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_copy() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = buf;
assert_eq!(5, buf2.len());
assert_eq!(64, buf2.capacity());
assert!(!buf2.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf2.as_slice());
}
#[test]
fn test_slice() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
let buf2 = buf.slice(2);
assert_eq!([6, 8, 10], buf2.as_slice());
assert_eq!(3, buf2.len());
assert_eq!(unsafe { buf.as_ptr().offset(2) }, buf2.as_ptr());
let buf3 = buf2.slice(1);
assert_eq!([8, 10], buf3.as_slice());
assert_eq!(2, buf3.len());
assert_eq!(unsafe { buf.as_ptr().offset(3) }, buf3.as_ptr());
let buf4 = buf.slice(5);
let empty_slice: [u8; 0] = [];
assert_eq!(empty_slice, buf4.as_slice());
assert_eq!(0, buf4.len());
assert!(buf4.is_empty());
assert_eq!(buf2.slice(2).as_slice(), &[10]);
}
#[test]
#[should_panic(
expected = "the offset of the new Buffer cannot exceed the existing length"
)]
fn test_slice_offset_out_of_bound() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
buf.slice(6);
}
#[test]
fn test_access_concurrently() {
let buffer = Buffer::from(vec![1, 2, 3, 4, 5]);
let buffer2 = buffer.clone();
assert_eq!([1, 2, 3, 4, 5], buffer.as_slice());
let buffer_copy = thread::spawn(move || {
// access buffer in another thread.
buffer
})
.join();
assert!(buffer_copy.is_ok());
assert_eq!(buffer2, buffer_copy.ok().unwrap());
}
macro_rules! check_as_typed_data {
($input: expr, $native_t: ty) => {{
let buffer = Buffer::from_slice_ref($input);
let slice: &[$native_t] = unsafe { buffer.typed_data::<$native_t>() };
assert_eq!($input, slice);
}};
}
#[test]
#[allow(clippy::float_cmp)]
fn test_as_typed_data() {
check_as_typed_data!(&[1i8, 3i8, 6i8], i8);
check_as_typed_data!(&[1u8, 3u8, 6u8], u8);
check_as_typed_data!(&[1i16, 3i16, 6i16], i16);
check_as_typed_data!(&[1i32, 3i32, 6i32], i32);
check_as_typed_data!(&[1i64, 3i64, 6i64], i64);
check_as_typed_data!(&[1u16, 3u16, 6u16], u16);
check_as_typed_data!(&[1u32, 3u32, 6u32], u32);
check_as_typed_data!(&[1u64, 3u64, 6u64], u64);
check_as_typed_data!(&[1f32, 3f32, 6f32], f32);
check_as_typed_data!(&[1f64, 3f64, 6f64], f64);
}
#[test]
fn test_count_bits() {
assert_eq!(0, Buffer::from(&[0b00000000]).count_set_bits());
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits());
assert_eq!(3, Buffer::from(&[0b00001101]).count_set_bits());
assert_eq!(6, Buffer::from(&[0b01001001, 0b01010010]).count_set_bits());
assert_eq!(16, Buffer::from(&[0b11111111, 0b11111111]).count_set_bits());
}
#[test]
fn test_count_bits_slice() {
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b00000000])
.slice(1)
.count_set_bits()
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111])
.slice(1)
.count_set_bits()
);
assert_eq!(
3,
Buffer::from(&[0b11111111, 0b11111111, 0b00001101])
.slice(2)
.count_set_bits()
);
assert_eq!(
6,
Buffer::from(&[0b11111111, 0b01001001, 0b01010010])
.slice(1)
.count_set_bits()
);
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111, 0b11111111, 0b11111111])
.slice(2)
.count_set_bits()
);
}
#[test]
fn test_count_bits_offset_slice() {
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 8));
assert_eq!(3, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 3));
assert_eq!(5, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 5));
assert_eq!(1, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 1));
assert_eq!(0, Buffer::from(&[0b11111111]).count_set_bits_offset(8, 0));
assert_eq!(2, Buffer::from(&[0b01010101]).count_set_bits_offset(0, 3));
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 16)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 10)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(3, 10)
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(8, 8)
);
assert_eq!(
5,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(11, 5)
);
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(16, 0)
);
assert_eq!(
2,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 5)
);
assert_eq!(
4,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 9)
);
}
}
| {
// JUSTIFICATION
// Benefit
// Many of the buffers represent specific types, and consumers of `Buffer` often need to re-interpret them.
// Soundness
// * The pointer is non-null by construction
// * alignment asserted below.
let (prefix, offsets, suffix) = self.as_slice().align_to::<T>();
assert!(prefix.is_empty() && suffix.is_empty());
offsets
} | identifier_body |
immutable.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::fmt::Debug;
use std::iter::FromIterator;
use std::ptr::NonNull;
use std::sync::Arc;
use std::{convert::AsRef, usize};
use crate::util::bit_chunk_iterator::BitChunks;
use crate::{
bytes::{Bytes, Deallocation},
datatypes::ArrowNativeType,
ffi,
};
use super::ops::bitwise_unary_op_helper;
use super::MutableBuffer;
/// Buffer represents a contiguous memory region that can be shared with other buffers and across
/// thread boundaries.
#[derive(Clone, PartialEq, Debug)]
pub struct Buffer {
/// the internal byte buffer.
data: Arc<Bytes>,
/// The offset into the buffer.
offset: usize,
}
impl Buffer {
/// Auxiliary method to create a new Buffer
#[inline]
pub fn from_bytes(bytes: Bytes) -> Self {
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Initializes a [Buffer] from a slice of items.
pub fn from_slice_ref<U: ArrowNativeType, T: AsRef<[U]>>(items: &T) -> Self {
let slice = items.as_ref();
let capacity = slice.len() * std::mem::size_of::<U>();
let mut buffer = MutableBuffer::with_capacity(capacity);
buffer.extend_from_slice(slice);
buffer.into()
}
/// Creates a buffer from an existing memory region (must already be byte-aligned); this
/// `Buffer` will free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `capacity` - Total allocated memory for the pointer `ptr`, in **bytes**
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes. If the `ptr` and `capacity` come from a `Buffer`, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: NonNull<u8>, len: usize, capacity: usize) -> Self {
assert!(len <= capacity);
Buffer::build_with_arguments(ptr, len, Deallocation::Native(capacity))
}
/// Creates a buffer from an existing memory region (must already be byte-aligned); this
/// `Buffer` **does not** free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `data` - An [ffi::FFI_ArrowArray] with the data
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes and that the foreign deallocator frees the region.
pub unsafe fn from_unowned(
ptr: NonNull<u8>,
len: usize,
data: Arc<ffi::FFI_ArrowArray>,
) -> Self {
Buffer::build_with_arguments(ptr, len, Deallocation::Foreign(data))
}
/// Auxiliary method to create a new Buffer
unsafe fn build_with_arguments(
ptr: NonNull<u8>,
len: usize,
deallocation: Deallocation,
) -> Self {
let bytes = Bytes::new(ptr, len, deallocation);
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Returns the number of bytes in the buffer
pub fn len(&self) -> usize {
self.data.len() - self.offset
}
/// Returns the capacity of this buffer.
/// For externally owned buffers, this returns zero
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Returns whether the buffer is empty.
pub fn is_empty(&self) -> bool {
self.data.len() - self.offset == 0
}
/// Returns the byte slice stored in this buffer
pub fn as_slice(&self) -> &[u8] {
&self.data[self.offset..]
}
/// Returns a new [Buffer] that is a slice of this buffer starting at `offset`.
/// Doing so allows the same memory region to be shared between buffers.
/// # Panics
/// Panics iff `offset` is larger than `len`.
pub fn slice(&self, offset: usize) -> Self {
assert!(
offset <= self.len(),
"the offset of the new Buffer cannot exceed the existing length"
);
Self {
data: self.data.clone(),
offset: self.offset + offset,
}
}
/// Returns a pointer to the start of this buffer.
///
/// Note that this should be used cautiously, and the returned pointer should not be
/// stored anywhere, to avoid dangling pointers.
pub fn as_ptr(&self) -> *const u8 {
unsafe { self.data.ptr().as_ptr().add(self.offset) }
}
/// View buffer as typed slice.
///
/// # Safety
///
/// `ArrowNativeType` is public so that it can be used as a trait bound for other public
/// components, such as the `ToByteSlice` trait. However, this means that it can be
/// implemented by user defined types, which it is not intended for.
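/// # Example
///
/// A hedged sketch of the intended use (it mirrors `check_as_typed_data!` in the tests below):
///
/// ```
/// # use arrow::buffer::Buffer;
/// let buffer = Buffer::from_slice_ref(&[1i32, 2, 3]);
/// let ints: &[i32] = unsafe { buffer.typed_data::<i32>() };
/// assert_eq!(&[1, 2, 3], ints);
/// ```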
pub unsafe fn typed_data<T: ArrowNativeType + num::Num>(&self) -> &[T] {
// JUSTIFICATION
// Benefit
// Many of the buffers represent specific types, and consumers of `Buffer` often need to re-interpret them.
// Soundness
// * The pointer is non-null by construction
// * alignment asserted below.
let (prefix, offsets, suffix) = self.as_slice().align_to::<T>();
assert!(prefix.is_empty() && suffix.is_empty());
offsets
}
/// Returns a slice of this buffer starting at a certain bit offset.
/// If the offset is byte-aligned the returned buffer is a shallow clone,
/// otherwise a new buffer is allocated and filled with a copy of the bits in the range.
pub fn bit_slice(&self, offset: usize, len: usize) -> Self {
if offset % 8 == 0 {
return self.slice(offset / 8);
}
bitwise_unary_op_helper(self, offset, len, |a| a)
}
/// Returns a `BitChunks` instance which can be used to iterate over this buffer's bits
/// in larger chunks and starting at arbitrary bit offsets.
/// Note that both `offset` and `length` are measured in bits.
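/// # Example
///
/// A hedged sketch using the same chunk-plus-remainder pattern as `count_set_bits_offset`
/// below:
///
/// ```
/// # use arrow::buffer::Buffer;
/// let buf = Buffer::from(&[0b11111111u8, 0b00000001]);
/// let chunks = buf.bit_chunks(0, 16);
/// let ones = chunks.iter().map(|c| c.count_ones() as usize).sum::<usize>()
///     + chunks.remainder_bits().count_ones() as usize;
/// assert_eq!(ones, 9);
/// ```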
pub fn bit_chunks(&self, offset: usize, len: usize) -> BitChunks {
BitChunks::new(self.as_slice(), offset, len)
}
/// Returns the number of 1-bits in this buffer.
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}
/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
let chunks = self.bit_chunks(offset, len);
let mut count = chunks.iter().map(|c| c.count_ones() as usize).sum();
count += chunks.remainder_bits().count_ones() as usize;
count
}
}
/// Creating a `Buffer` instance by copying the memory from an `AsRef<[u8]>` into a newly
/// allocated memory region.
impl<T: AsRef<[u8]>> From<T> for Buffer {
fn from(p: T) -> Self {
// allocate aligned memory buffer
let slice = p.as_ref();
let len = slice.len();
let mut buffer = MutableBuffer::new(len);
buffer.extend_from_slice(slice);
buffer.into()
}
}
/// Creating a `Buffer` instance by storing the boolean values into the buffer
impl std::iter::FromIterator<bool> for Buffer {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = bool>,
{
MutableBuffer::from_iter(iter).into()
}
}
impl std::ops::Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
}
}
unsafe impl Sync for Buffer {}
unsafe impl Send for Buffer {}
impl From<MutableBuffer> for Buffer {
#[inline]
fn from(buffer: MutableBuffer) -> Self {
buffer.into_buffer()
}
}
impl Buffer {
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Example
/// ```
/// # use arrow::buffer::Buffer;
/// let v = vec![1u32];
/// let iter = v.iter().map(|x| x * 2);
/// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
/// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
/// ```
/// # Safety
/// This method assumes that the iterator's size is correct; it is undefined behavior
/// to use it on an iterator that reports an incorrect length.
// This implementation is required for two reasons:
// 1. there is no trait `TrustedLen` in stable rust and therefore
// we can't specialize `extend` for `TrustedLen` like `Vec` does.
// 2. `from_trusted_len_iter` is faster.
#[inline]
pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
iterator: I,
) -> Self {
MutableBuffer::from_trusted_len_iter(iterator).into()
}
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length, or returns an
/// error if any item of the iterator is an error.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Safety
/// This method assumes that the iterator's size is correct; it is undefined behavior
/// to use it on an iterator that reports an incorrect length.
#[inline]
pub unsafe fn try_from_trusted_len_iter<
E,
T: ArrowNativeType,
I: Iterator<Item = std::result::Result<T, E>>,
>(
iterator: I,
) -> std::result::Result<Self, E> {
Ok(MutableBuffer::try_from_trusted_len_iter(iterator)?.into())
}
}
impl<T: ArrowNativeType> FromIterator<T> for Buffer {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut iterator = iter.into_iter();
let size = std::mem::size_of::<T>();
// first iteration, which will likely reserve sufficient space for the buffer.
let mut buffer = match iterator.next() {
None => MutableBuffer::new(0),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut buffer = MutableBuffer::new(lower.saturating_add(1) * size);
unsafe {
std::ptr::write(buffer.as_mut_ptr() as *mut T, element);
buffer.set_len(size);
}
buffer
}
};
buffer.extend_from_iter(iterator);
buffer.into()
}
}
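// Hedged usage sketch (hypothetical caller code, not part of the original file): collecting
// native values writes their raw bytes, so the byte length is element count * element size.
//
// let buffer: Buffer = vec![1u16, 2, 3].into_iter().collect();
// assert_eq!(buffer.len(), 6); // 3 elements * 2 bytes each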
#[cfg(test)]
mod tests {
use std::thread;
use super::*;
#[test]
fn test_buffer_data_equality() {
let buf1 = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(buf1, buf2);
// slice with same offset should still preserve equality
let buf3 = buf1.slice(2);
assert_ne!(buf1, buf3);
let buf4 = buf2.slice(2);
assert_eq!(buf3, buf4);
// Different capacities should still preserve equality
let mut buf2 = MutableBuffer::new(65);
buf2.extend_from_slice(&[0u8, 1, 2, 3, 4]);
let buf2 = buf2.into();
assert_eq!(buf1, buf2);
// unequal because of different elements
let buf2 = Buffer::from(&[0, 0, 2, 3, 4]);
assert_ne!(buf1, buf2);
// unequal because of different length
let buf2 = Buffer::from(&[0, 1, 2, 3]);
assert_ne!(buf1, buf2);
}
#[test]
fn test_from_raw_parts() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_from_vec() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_copy() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = buf;
assert_eq!(5, buf2.len());
assert_eq!(64, buf2.capacity());
assert!(!buf2.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf2.as_slice());
}
#[test]
fn test_slice() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
let buf2 = buf.slice(2);
assert_eq!([6, 8, 10], buf2.as_slice());
assert_eq!(3, buf2.len());
assert_eq!(unsafe { buf.as_ptr().offset(2) }, buf2.as_ptr());
let buf3 = buf2.slice(1);
assert_eq!([8, 10], buf3.as_slice());
assert_eq!(2, buf3.len());
assert_eq!(unsafe { buf.as_ptr().offset(3) }, buf3.as_ptr());
let buf4 = buf.slice(5);
let empty_slice: [u8; 0] = [];
assert_eq!(empty_slice, buf4.as_slice());
assert_eq!(0, buf4.len());
assert!(buf4.is_empty());
assert_eq!(buf2.slice(2).as_slice(), &[10]);
}
#[test]
#[should_panic(
expected = "the offset of the new Buffer cannot exceed the existing length"
)]
fn test_slice_offset_out_of_bound() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
buf.slice(6);
}
#[test]
fn test_access_concurrently() {
let buffer = Buffer::from(vec![1, 2, 3, 4, 5]);
let buffer2 = buffer.clone();
assert_eq!([1, 2, 3, 4, 5], buffer.as_slice());
let buffer_copy = thread::spawn(move || {
// access buffer in another thread.
buffer
})
.join();
assert!(buffer_copy.is_ok()); | macro_rules! check_as_typed_data {
($input: expr, $native_t: ty) => {{
let buffer = Buffer::from_slice_ref($input);
let slice: &[$native_t] = unsafe { buffer.typed_data::<$native_t>() };
assert_eq!($input, slice);
}};
}
#[test]
#[allow(clippy::float_cmp)]
fn test_as_typed_data() {
check_as_typed_data!(&[1i8, 3i8, 6i8], i8);
check_as_typed_data!(&[1u8, 3u8, 6u8], u8);
check_as_typed_data!(&[1i16, 3i16, 6i16], i16);
check_as_typed_data!(&[1i32, 3i32, 6i32], i32);
check_as_typed_data!(&[1i64, 3i64, 6i64], i64);
check_as_typed_data!(&[1u16, 3u16, 6u16], u16);
check_as_typed_data!(&[1u32, 3u32, 6u32], u32);
check_as_typed_data!(&[1u64, 3u64, 6u64], u64);
check_as_typed_data!(&[1f32, 3f32, 6f32], f32);
check_as_typed_data!(&[1f64, 3f64, 6f64], f64);
}
#[test]
fn test_count_bits() {
assert_eq!(0, Buffer::from(&[0b00000000]).count_set_bits());
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits());
assert_eq!(3, Buffer::from(&[0b00001101]).count_set_bits());
assert_eq!(6, Buffer::from(&[0b01001001, 0b01010010]).count_set_bits());
assert_eq!(16, Buffer::from(&[0b11111111, 0b11111111]).count_set_bits());
}
#[test]
fn test_count_bits_slice() {
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b00000000])
.slice(1)
.count_set_bits()
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111])
.slice(1)
.count_set_bits()
);
assert_eq!(
3,
Buffer::from(&[0b11111111, 0b11111111, 0b00001101])
.slice(2)
.count_set_bits()
);
assert_eq!(
6,
Buffer::from(&[0b11111111, 0b01001001, 0b01010010])
.slice(1)
.count_set_bits()
);
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111, 0b11111111, 0b11111111])
.slice(2)
.count_set_bits()
);
}
#[test]
fn test_count_bits_offset_slice() {
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 8));
assert_eq!(3, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 3));
assert_eq!(5, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 5));
assert_eq!(1, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 1));
assert_eq!(0, Buffer::from(&[0b11111111]).count_set_bits_offset(8, 0));
assert_eq!(2, Buffer::from(&[0b01010101]).count_set_bits_offset(0, 3));
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 16)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 10)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(3, 10)
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(8, 8)
);
assert_eq!(
5,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(11, 5)
);
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(16, 0)
);
assert_eq!(
2,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 5)
);
assert_eq!(
4,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 9)
);
}
} | assert_eq!(buffer2, buffer_copy.ok().unwrap());
}
| random_line_split |
surface.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of cross-process surfaces for Linux. This uses X pixmaps.
#![allow(non_snake_case)]
use texturegl::Texture;
use geom::size::Size2D;
use libc::{c_char, c_int, c_uint, c_void};
use glx;
use gleam::gl;
use skia::{SkiaSkNativeSharedGLContextRef, SkiaSkNativeSharedGLContextStealSurface};
use std::ascii::OwnedAsciiExt;
use std::ffi::{CString, CStr};
use std::mem;
use std::ptr;
use std::str;
use xlib::{Display, Pixmap, XCreateGC, XCreateImage, XCreatePixmap, XDefaultScreen};
use xlib::{XDisplayString, XFree, XFreePixmap, XGetGeometry, XOpenDisplay, XPutImage, XRootWindow};
use xlib::{XVisualInfo, ZPixmap};
/// The display and visual info. This is needed in order to upload on the painting side. This
/// holds a weak reference to the display and will not close it when done.
///
/// FIXME(pcwalton): Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use a 24-bit
// XVisual, which cannot represent an alpha channel in Pixmap form,
// so we look for a configuration with a full 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
/// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24-bit depth. But creating a 32-bit pixmap does work, so
// hard-code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
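// Hedged lifecycle sketch (hypothetical caller code, not part of the original file;
// `paint_ctx`, `composite_ctx`, `texture`, and `pixels` are assumed to come from elsewhere):
//
// let mut surface = PixmapNativeSurface::new(&paint_ctx, Size2D::new(512, 512), 512 * 4);
// surface.upload(&paint_ctx, &pixels); // painting side: copy RGBA bytes into the pixmap
// surface.bind_to_texture(&composite_ctx, &texture, Size2D::new(512, 512)); // compositor side
// surface.destroy(&paint_ctx); // required; otherwise drop() panics via `will_leak`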
/// This may only be called on the compositor side.
pub fn | (&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception safety?
unsafe {
let pixmap_attributes = [
glx::TEXTURE_TARGET_EXT as i32, glx::TEXTURE_2D_EXT as i32,
glx::TEXTURE_FORMAT_EXT as i32, glx::TEXTURE_FORMAT_RGBA_EXT as i32,
0
];
let glx_display = mem::transmute(native_context.display);
let glx_pixmap = glx::CreatePixmap(glx_display,
native_context.framebuffer_configuration.expect(
"GLX 1.3 should have a framebuffer_configuration"),
self.pixmap,
pixmap_attributes.as_ptr());
let glXBindTexImageEXT: extern "C" fn(*mut Display, glx::types::GLXDrawable, c_int, *mut c_int) =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXBindTexImageEXT\x00".as_bytes()[0])));
assert!(glXBindTexImageEXT as *mut c_void != ptr::null_mut());
let _bound = texture.bind();
glXBindTexImageEXT(native_context.display,
mem::transmute(glx_pixmap),
glx::FRONT_EXT as i32,
ptr::null_mut());
assert_eq!(gl::GetError(), gl::NO_ERROR);
// FIXME(pcwalton): Recycle these for speed?
glx::DestroyPixmap(glx_display, glx_pixmap);
}
}
/// This may only be called on the painting side.
pub fn upload(&mut self, graphics_context: &NativePaintingGraphicsContext, data: &[u8]) {
unsafe {
// Ensure that we're running on the render task. Take the display.
let pixmap = self.pixmap;
// Figure out the width, height, and depth of the pixmap.
let mut root_window = 0;
let mut x = 0;
let mut y = 0;
let mut width = 0;
let mut height = 0;
let mut border_width = 0;
let mut depth = 0;
let _ = XGetGeometry(graphics_context.display,
mem::transmute(pixmap),
&mut root_window,
&mut x,
&mut y,
&mut width,
&mut height,
&mut border_width,
&mut depth);
// Create the image.
let image = XCreateImage(graphics_context.display,
(*graphics_context.visual_info).visual,
depth,
ZPixmap,
0,
mem::transmute(&data[0]),
width as c_uint,
height as c_uint,
32,
0);
// Create the X graphics context.
let gc = XCreateGC(graphics_context.display, pixmap, 0, ptr::null_mut());
// Draw the image.
let _ = XPutImage(graphics_context.display,
pixmap,
gc,
image,
0,
0,
0,
0,
width,
height);
}
}
pub fn get_id(&self) -> isize {
self.pixmap as isize
}
pub fn destroy(&mut self, graphics_context: &NativePaintingGraphicsContext) {
unsafe {
assert!(self.pixmap != 0);
XFreePixmap(graphics_context.display, self.pixmap);
self.mark_wont_leak()
}
}
pub fn mark_will_leak(&mut self) {
self.will_leak = true;
}
pub fn mark_wont_leak(&mut self) {
self.will_leak = false;
}
}
| bind_to_texture | identifier_name |
surface.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of cross-process surfaces for Linux. This uses X pixmaps.
#![allow(non_snake_case)]
use texturegl::Texture;
use geom::size::Size2D;
use libc::{c_char, c_int, c_uint, c_void};
use glx;
use gleam::gl;
use skia::{SkiaSkNativeSharedGLContextRef, SkiaSkNativeSharedGLContextStealSurface};
use std::ascii::OwnedAsciiExt;
use std::ffi::{CString, CStr};
use std::mem;
use std::ptr;
use std::str;
use xlib::{Display, Pixmap, XCreateGC, XCreateImage, XCreatePixmap, XDefaultScreen};
use xlib::{XDisplayString, XFree, XFreePixmap, XGetGeometry, XOpenDisplay, XPutImage, XRootWindow};
use xlib::{XVisualInfo, ZPixmap};
/// The display and visual info. This is needed in order to upload on the painting side. This
/// holds a weak reference to the display and will not close it when done.
///
/// FIXME(pcwalton): Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
            if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use 24-bit
// XVisual, not capable of representing an alpha-channel in Pixmap form,
// so we look for the configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
            assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
} | /// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24 bit depth. But creating a 32 bit pixmap does work, so
// hard code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
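    // Hedged lifecycle sketch (illustration only; `painter`, `compositor`,
    // `texture`, `pixels`, and `size` are hypothetical values, not defined in
    // this file). It shows how the surface methods below fit together:
    //
    //     let mut surface = PixmapNativeSurface::new(&painter, Size2D::new(512, 512), 0);
    //     surface.upload(&painter, &pixels);                     // painting side
    //     surface.bind_to_texture(&compositor, &texture, size);  // compositor side
    //     surface.destroy(&painter);                             // else Drop panics via `will_leak`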
/// This may only be called on the compositor side.
pub fn bind_to_texture(&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception safety?
unsafe {
let pixmap_attributes = [
glx::TEXTURE_TARGET_EXT as i32, glx::TEXTURE_2D_EXT as i32,
glx::TEXTURE_FORMAT_EXT as i32, glx::TEXTURE_FORMAT_RGBA_EXT as i32,
0
];
let glx_display = mem::transmute(native_context.display);
let glx_pixmap = glx::CreatePixmap(glx_display,
native_context.framebuffer_configuration.expect(
"GLX 1.3 should have a framebuffer_configuration"),
self.pixmap,
pixmap_attributes.as_ptr());
let glXBindTexImageEXT: extern "C" fn(*mut Display, glx::types::GLXDrawable, c_int, *mut c_int) =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXBindTexImageEXT\x00".as_bytes()[0])));
            assert!(glXBindTexImageEXT as *mut c_void != ptr::null_mut());
let _bound = texture.bind();
glXBindTexImageEXT(native_context.display,
mem::transmute(glx_pixmap),
glx::FRONT_EXT as i32,
ptr::null_mut());
assert_eq!(gl::GetError(), gl::NO_ERROR);
// FIXME(pcwalton): Recycle these for speed?
glx::DestroyPixmap(glx_display, glx_pixmap);
}
}
/// This may only be called on the painting side.
pub fn upload(&mut self, graphics_context: &NativePaintingGraphicsContext, data: &[u8]) {
unsafe {
// Ensure that we're running on the render task. Take the display.
let pixmap = self.pixmap;
// Figure out the width, height, and depth of the pixmap.
let mut root_window = 0;
let mut x = 0;
let mut y = 0;
let mut width = 0;
let mut height = 0;
let mut border_width = 0;
let mut depth = 0;
let _ = XGetGeometry(graphics_context.display,
mem::transmute(pixmap),
&mut root_window,
&mut x,
&mut y,
&mut width,
&mut height,
&mut border_width,
&mut depth);
// Create the image.
let image = XCreateImage(graphics_context.display,
(*graphics_context.visual_info).visual,
depth,
ZPixmap,
0,
mem::transmute(&data[0]),
width as c_uint,
height as c_uint,
32,
0);
// Create the X graphics context.
let gc = XCreateGC(graphics_context.display, pixmap, 0, ptr::null_mut());
// Draw the image.
let _ = XPutImage(graphics_context.display,
pixmap,
gc,
image,
0,
0,
0,
0,
width,
height);
}
}
pub fn get_id(&self) -> isize {
self.pixmap as isize
}
pub fn destroy(&mut self, graphics_context: &NativePaintingGraphicsContext) {
unsafe {
            assert!(self.pixmap != 0);
XFreePixmap(graphics_context.display, self.pixmap);
self.mark_wont_leak()
}
}
pub fn mark_will_leak(&mut self) {
self.will_leak = true;
}
pub fn mark_wont_leak(&mut self) {
self.will_leak = false;
}
} | unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata { | random_line_split |
surface.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of cross-process surfaces for Linux. This uses X pixmaps.
#![allow(non_snake_case)]
use texturegl::Texture;
use geom::size::Size2D;
use libc::{c_char, c_int, c_uint, c_void};
use glx;
use gleam::gl;
use skia::{SkiaSkNativeSharedGLContextRef, SkiaSkNativeSharedGLContextStealSurface};
use std::ascii::OwnedAsciiExt;
use std::ffi::{CString, CStr};
use std::mem;
use std::ptr;
use std::str;
use xlib::{Display, Pixmap, XCreateGC, XCreateImage, XCreatePixmap, XDefaultScreen};
use xlib::{XDisplayString, XFree, XFreePixmap, XGetGeometry, XOpenDisplay, XPutImage, XRootWindow};
use xlib::{XVisualInfo, ZPixmap};
/// The display and visual info. This is needed in order to upload on the painting side. This
/// holds a weak reference to the display and will not close it when done.
///
/// FIXME(pcwalton): Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
            if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use 24-bit
// XVisual, not capable of representing an alpha-channel in Pixmap form,
// so we look for the configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
            assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
/// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24 bit depth. But creating a 32 bit pixmap does work, so
// hard code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
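    // Hedged lifecycle sketch (illustration only; `painter`, `compositor`,
    // `texture`, `pixels`, and `size` are hypothetical values, not defined in
    // this file). It shows how the surface methods below fit together:
    //
    //     let mut surface = PixmapNativeSurface::new(&painter, Size2D::new(512, 512), 0);
    //     surface.upload(&painter, &pixels);                     // painting side
    //     surface.bind_to_texture(&compositor, &texture, size);  // compositor side
    //     surface.destroy(&painter);                             // else Drop panics via `will_leak`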
/// This may only be called on the compositor side.
pub fn bind_to_texture(&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception safety?
unsafe {
let pixmap_attributes = [
glx::TEXTURE_TARGET_EXT as i32, glx::TEXTURE_2D_EXT as i32,
glx::TEXTURE_FORMAT_EXT as i32, glx::TEXTURE_FORMAT_RGBA_EXT as i32,
0
];
let glx_display = mem::transmute(native_context.display);
let glx_pixmap = glx::CreatePixmap(glx_display,
native_context.framebuffer_configuration.expect(
"GLX 1.3 should have a framebuffer_configuration"),
self.pixmap,
pixmap_attributes.as_ptr());
let glXBindTexImageEXT: extern "C" fn(*mut Display, glx::types::GLXDrawable, c_int, *mut c_int) =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXBindTexImageEXT\x00".as_bytes()[0])));
            assert!(glXBindTexImageEXT as *mut c_void != ptr::null_mut());
let _bound = texture.bind();
glXBindTexImageEXT(native_context.display,
mem::transmute(glx_pixmap),
glx::FRONT_EXT as i32,
ptr::null_mut());
assert_eq!(gl::GetError(), gl::NO_ERROR);
// FIXME(pcwalton): Recycle these for speed?
glx::DestroyPixmap(glx_display, glx_pixmap);
}
}
/// This may only be called on the painting side.
pub fn upload(&mut self, graphics_context: &NativePaintingGraphicsContext, data: &[u8]) {
unsafe {
// Ensure that we're running on the render task. Take the display.
let pixmap = self.pixmap;
// Figure out the width, height, and depth of the pixmap.
let mut root_window = 0;
let mut x = 0;
let mut y = 0;
let mut width = 0;
let mut height = 0;
let mut border_width = 0;
let mut depth = 0;
let _ = XGetGeometry(graphics_context.display,
mem::transmute(pixmap),
&mut root_window,
&mut x,
&mut y,
&mut width,
&mut height,
&mut border_width,
&mut depth);
// Create the image.
let image = XCreateImage(graphics_context.display,
(*graphics_context.visual_info).visual,
depth,
ZPixmap,
0,
mem::transmute(&data[0]),
width as c_uint,
height as c_uint,
32,
0);
// Create the X graphics context.
let gc = XCreateGC(graphics_context.display, pixmap, 0, ptr::null_mut());
// Draw the image.
let _ = XPutImage(graphics_context.display,
pixmap,
gc,
image,
0,
0,
0,
0,
width,
height);
}
}
pub fn get_id(&self) -> isize {
self.pixmap as isize
}
pub fn destroy(&mut self, graphics_context: &NativePaintingGraphicsContext) {
unsafe {
            assert!(self.pixmap != 0);
XFreePixmap(graphics_context.display, self.pixmap);
self.mark_wont_leak()
}
}
pub fn mark_will_leak(&mut self) |
pub fn mark_wont_leak(&mut self) {
self.will_leak = false;
}
}
| {
self.will_leak = true;
} | identifier_body |
surface.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of cross-process surfaces for Linux. This uses X pixmaps.
#![allow(non_snake_case)]
use texturegl::Texture;
use geom::size::Size2D;
use libc::{c_char, c_int, c_uint, c_void};
use glx;
use gleam::gl;
use skia::{SkiaSkNativeSharedGLContextRef, SkiaSkNativeSharedGLContextStealSurface};
use std::ascii::OwnedAsciiExt;
use std::ffi::{CString, CStr};
use std::mem;
use std::ptr;
use std::str;
use xlib::{Display, Pixmap, XCreateGC, XCreateImage, XCreatePixmap, XDefaultScreen};
use xlib::{XDisplayString, XFree, XFreePixmap, XGetGeometry, XOpenDisplay, XPutImage, XRootWindow};
use xlib::{XVisualInfo, ZPixmap};
/// The display and visual info. This is needed in order to upload on the painting side. This
/// holds a weak reference to the display and will not close it when done.
///
/// FIXME(pcwalton): Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
            if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use 24-bit
// XVisual, not capable of representing an alpha-channel in Pixmap form,
// so we look for the configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
            assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
/// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak |
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24 bit depth. But creating a 32 bit pixmap does work, so
// hard code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
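    // Hedged lifecycle sketch (illustration only; `painter`, `compositor`,
    // `texture`, `pixels`, and `size` are hypothetical values, not defined in
    // this file). It shows how the surface methods below fit together:
    //
    //     let mut surface = PixmapNativeSurface::new(&painter, Size2D::new(512, 512), 0);
    //     surface.upload(&painter, &pixels);                     // painting side
    //     surface.bind_to_texture(&compositor, &texture, size);  // compositor side
    //     surface.destroy(&painter);                             // else Drop panics via `will_leak`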
/// This may only be called on the compositor side.
pub fn bind_to_texture(&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception safety?
unsafe {
let pixmap_attributes = [
glx::TEXTURE_TARGET_EXT as i32, glx::TEXTURE_2D_EXT as i32,
glx::TEXTURE_FORMAT_EXT as i32, glx::TEXTURE_FORMAT_RGBA_EXT as i32,
0
];
let glx_display = mem::transmute(native_context.display);
let glx_pixmap = glx::CreatePixmap(glx_display,
native_context.framebuffer_configuration.expect(
"GLX 1.3 should have a framebuffer_configuration"),
self.pixmap,
pixmap_attributes.as_ptr());
let glXBindTexImageEXT: extern "C" fn(*mut Display, glx::types::GLXDrawable, c_int, *mut c_int) =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXBindTexImageEXT\x00".as_bytes()[0])));
            assert!(glXBindTexImageEXT as *mut c_void != ptr::null_mut());
let _bound = texture.bind();
glXBindTexImageEXT(native_context.display,
mem::transmute(glx_pixmap),
glx::FRONT_EXT as i32,
ptr::null_mut());
assert_eq!(gl::GetError(), gl::NO_ERROR);
// FIXME(pcwalton): Recycle these for speed?
glx::DestroyPixmap(glx_display, glx_pixmap);
}
}
/// This may only be called on the painting side.
pub fn upload(&mut self, graphics_context: &NativePaintingGraphicsContext, data: &[u8]) {
unsafe {
// Ensure that we're running on the render task. Take the display.
let pixmap = self.pixmap;
// Figure out the width, height, and depth of the pixmap.
let mut root_window = 0;
let mut x = 0;
let mut y = 0;
let mut width = 0;
let mut height = 0;
let mut border_width = 0;
let mut depth = 0;
let _ = XGetGeometry(graphics_context.display,
mem::transmute(pixmap),
&mut root_window,
&mut x,
&mut y,
&mut width,
&mut height,
&mut border_width,
&mut depth);
// Create the image.
let image = XCreateImage(graphics_context.display,
(*graphics_context.visual_info).visual,
depth,
ZPixmap,
0,
mem::transmute(&data[0]),
width as c_uint,
height as c_uint,
32,
0);
// Create the X graphics context.
let gc = XCreateGC(graphics_context.display, pixmap, 0, ptr::null_mut());
// Draw the image.
let _ = XPutImage(graphics_context.display,
pixmap,
gc,
image,
0,
0,
0,
0,
width,
height);
}
}
pub fn get_id(&self) -> isize {
self.pixmap as isize
}
pub fn destroy(&mut self, graphics_context: &NativePaintingGraphicsContext) {
unsafe {
assert!(self.pixmap!= 0);
XFreePixmap(graphics_context.display, self.pixmap);
self.mark_wont_leak()
}
}
pub fn mark_will_leak(&mut self) {
self.will_leak = true;
}
pub fn mark_wont_leak(&mut self) {
self.will_leak = false;
}
}
| {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
} | conditional_block |
backend.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Substrate blockchain trait
use log::warn;
use parking_lot::RwLock;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating},
Justifications,
};
use std::collections::btree_set::BTreeSet;
use crate::header_metadata::HeaderMetadata;
use crate::error::{Error, Result};
/// Blockchain database header backend. Does not perform any validation.
pub trait HeaderBackend<Block: BlockT>: Send + Sync {
/// Get block header. Returns `None` if block is not found.
fn header(&self, hash: Block::Hash) -> Result<Option<Block::Header>>;
/// Get blockchain info.
fn info(&self) -> Info<Block>;
/// Get block status.
fn status(&self, hash: Block::Hash) -> Result<BlockStatus>;
/// Get block number by hash. Returns `None` if the header is not in the chain.
fn number(
&self,
hash: Block::Hash,
) -> Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>>;
/// Get block hash by number. Returns `None` if the header is not in the chain.
fn hash(&self, number: NumberFor<Block>) -> Result<Option<Block::Hash>>;
/// Convert an arbitrary block ID into a block hash.
fn block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Option<Block::Hash>> {
match *id {
BlockId::Hash(h) => Ok(Some(h)),
BlockId::Number(n) => self.hash(n),
}
}
/// Convert an arbitrary block ID into a block hash.
fn block_number_from_id(&self, id: &BlockId<Block>) -> Result<Option<NumberFor<Block>>> {
match *id {
BlockId::Hash(h) => self.number(h),
BlockId::Number(n) => Ok(Some(n)),
}
}
/// Get block header. Returns `UnknownBlock` error if block is not found.
fn | (&self, hash: Block::Hash) -> Result<Block::Header> {
self.header(hash)?
.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", hash)))
}
/// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_number_from_id(&self, id: &BlockId<Block>) -> Result<NumberFor<Block>> {
self.block_number_from_id(id).and_then(|n| {
n.ok_or_else(|| Error::UnknownBlock(format!("Expect block number from id: {}", id)))
})
}
/// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Block::Hash> {
self.block_hash_from_id(id).and_then(|h| {
h.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id)))
})
}
}
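// A minimal sketch, not part of the original trait: `_best_header_example` is a
// hypothetical helper showing how the convenience methods compose. A backend
// only implements the primitive lookups; `expect_header` upgrades a missing
// header from `None` into `Error::UnknownBlock`.
fn _best_header_example<Block, B>(backend: &B) -> Result<Block::Header>
where
    Block: BlockT,
    B: HeaderBackend<Block>,
{
    // `info()` returns a snapshot; the best header could in principle be
    // pruned between the two calls, which `expect_header` reports as an error.
    let best_hash = backend.info().best_hash;
    backend.expect_header(best_hash)
}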
/// Handles stale forks.
pub trait ForkBackend<Block: BlockT>:
HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync
{
/// Best effort to get all the header hashes that are part of the provided forks
/// starting only from the fork heads.
///
/// The function tries to reconstruct the route from the fork head to the canonical chain.
/// If any of the hashes on the route can't be found in the db, the function won't be able
/// to reconstruct the route anymore. In this case it will give up expanding the current fork,
/// move on to the next ones and at the end it will return an error that also contains
/// the partially expanded forks.
fn expand_forks(
&self,
fork_heads: &[Block::Hash],
) -> std::result::Result<BTreeSet<Block::Hash>, (BTreeSet<Block::Hash>, Error)> {
let mut missing_blocks = vec![];
let mut expanded_forks = BTreeSet::new();
for fork_head in fork_heads {
let mut route_head = *fork_head;
// Insert stale blocks hashes until canonical chain is reached.
// If we reach a block that is already part of the `expanded_forks` we can stop
// processing the fork.
while expanded_forks.insert(route_head) {
match self.header_metadata(route_head) {
Ok(meta) => {
// If the parent is part of the canonical chain or there doesn't exist a
// block hash for the parent number (bug?!), we can abort adding blocks.
let parent_number = meta.number.saturating_sub(1u32.into());
match self.hash(parent_number) {
Ok(Some(parent_hash)) =>
if parent_hash == meta.parent {
break
},
Ok(None) | Err(_) => {
missing_blocks.push(BlockId::<Block>::Number(parent_number));
break
},
}
route_head = meta.parent;
},
Err(_e) => {
missing_blocks.push(BlockId::<Block>::Hash(route_head));
break
},
}
}
}
        if !missing_blocks.is_empty() {
return Err((
expanded_forks,
Error::UnknownBlocks(format!(
"Missing stale headers {:?} while expanding forks {:?}.",
fork_heads, missing_blocks
)),
))
}
Ok(expanded_forks)
}
}
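// Hedged usage sketch (hypothetical helper, not in the original file): callers
// that tolerate missing stale headers can keep the partially expanded set that
// `expand_forks` hands back alongside the error.
fn _expand_forks_best_effort<Block, B>(
    backend: &B,
    fork_heads: &[Block::Hash],
) -> BTreeSet<Block::Hash>
where
    Block: BlockT,
    B: ForkBackend<Block>,
{
    match backend.expand_forks(fork_heads) {
        Ok(stale) => stale,
        // The error half still carries every hash that could be expanded.
        Err((partial, _err)) => partial,
    }
}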
impl<Block, T> ForkBackend<Block> for T
where
Block: BlockT,
T: HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync,
{
}
/// Blockchain database backend. Does not perform any validation.
pub trait Backend<Block: BlockT>:
HeaderBackend<Block> + HeaderMetadata<Block, Error = Error>
{
/// Get block body. Returns `None` if block is not found.
fn body(&self, hash: Block::Hash) -> Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
/// Get block justifications. Returns `None` if no justification exists.
fn justifications(&self, hash: Block::Hash) -> Result<Option<Justifications>>;
/// Get last finalized block hash.
fn last_finalized(&self) -> Result<Block::Hash>;
    /// Returns hashes of all blocks that are leaves of the block tree;
    /// in other words, blocks that have no children and are chain heads.
/// Results must be ordered best (longest, highest) chain first.
fn leaves(&self) -> Result<Vec<Block::Hash>>;
/// Returns displaced leaves after the given block would be finalized.
///
/// The returned leaves do not contain the leaves from the same height as `block_number`.
fn displaced_leaves_after_finalizing(
&self,
block_number: NumberFor<Block>,
) -> Result<Vec<Block::Hash>>;
/// Return hashes of all blocks that are children of the block with `parent_hash`.
fn children(&self, parent_hash: Block::Hash) -> Result<Vec<Block::Hash>>;
/// Get the most recent block hash of the longest chain that contains
/// a block with the given `base_hash`.
///
/// The search space is always limited to blocks which are in the finalized
    /// chain or descendants of it.
///
/// Returns `Ok(None)` if `base_hash` is not found in search space.
// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444)
fn longest_containing(
&self,
base_hash: Block::Hash,
import_lock: &RwLock<()>,
) -> Result<Option<Block::Hash>> {
let Some(base_header) = self.header(base_hash)? else { return Ok(None) };
let leaves = {
// ensure no blocks are imported during this code block.
// an import could trigger a reorg which could change the canonical chain.
// we depend on the canonical chain staying the same during this code block.
let _import_guard = import_lock.read();
let info = self.info();
if info.finalized_number > *base_header.number() {
// `base_header` is on a dead fork.
return Ok(None)
}
self.leaves()?
};
// for each chain. longest chain first. shortest last
for leaf_hash in leaves {
let mut current_hash = leaf_hash;
// go backwards through the chain (via parent links)
loop {
if current_hash == base_hash {
return Ok(Some(leaf_hash))
}
let current_header = self
.header(current_hash)?
.ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?;
// stop search in this chain once we go below the target's block number
if current_header.number() < base_header.number() {
break
}
current_hash = *current_header.parent_hash();
}
}
// header may be on a dead fork -- the only leaves that are considered are
// those which can still be finalized.
//
// FIXME #1558 only issue this warning when not on a dead fork
warn!(
"Block {:?} exists in chain but not found when following all leaves backwards",
base_hash,
);
Ok(None)
}
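    // Hedged usage sketch (comments only; `import_lock` stands for whatever
    // lock the client uses to serialize imports and is hypothetical here):
    //
    //     let best = backend.longest_containing(base_hash, &import_lock)?;
    //     // `None` means `base_hash` is unknown or sits on a pruned dead fork.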
/// Get single indexed transaction by content hash. Note that this will only fetch transactions
/// that are indexed by the runtime with `storage_index_transaction`.
fn indexed_transaction(&self, hash: Block::Hash) -> Result<Option<Vec<u8>>>;
/// Check if indexed transaction exists.
fn has_indexed_transaction(&self, hash: Block::Hash) -> Result<bool> {
Ok(self.indexed_transaction(hash)?.is_some())
}
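    /// Get all indexed transactions of a block, if the block body was indexed.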
fn block_indexed_body(&self, hash: Block::Hash) -> Result<Option<Vec<Vec<u8>>>>;
}
/// Blockchain info
#[derive(Debug, Eq, PartialEq)]
pub struct Info<Block: BlockT> {
/// Best block hash.
pub best_hash: Block::Hash,
/// Best block number.
pub best_number: <<Block as BlockT>::Header as HeaderT>::Number,
/// Genesis block hash.
pub genesis_hash: Block::Hash,
/// The head of the finalized chain.
pub finalized_hash: Block::Hash,
/// Last finalized block number.
pub finalized_number: <<Block as BlockT>::Header as HeaderT>::Number,
/// Last finalized state.
pub finalized_state: Option<(Block::Hash, <<Block as BlockT>::Header as HeaderT>::Number)>,
    /// Number of concurrent leaf forks.
pub number_leaves: usize,
/// Missing blocks after warp sync. (start, end).
pub block_gap: Option<(NumberFor<Block>, NumberFor<Block>)>,
}
/// Block status.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BlockStatus {
/// Already in the blockchain.
InChain,
/// Not in the queue or the blockchain.
Unknown,
}
| expect_header | identifier_name |
backend.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Substrate blockchain trait
use log::warn;
use parking_lot::RwLock;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating},
Justifications,
};
use std::collections::btree_set::BTreeSet;
use crate::header_metadata::HeaderMetadata;
use crate::error::{Error, Result};
/// Blockchain database header backend. Does not perform any validation.
pub trait HeaderBackend<Block: BlockT>: Send + Sync {
/// Get block header. Returns `None` if block is not found.
fn header(&self, hash: Block::Hash) -> Result<Option<Block::Header>>;
/// Get blockchain info.
fn info(&self) -> Info<Block>;
/// Get block status.
fn status(&self, hash: Block::Hash) -> Result<BlockStatus>;
/// Get block number by hash. Returns `None` if the header is not in the chain.
fn number(
&self,
hash: Block::Hash,
) -> Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>>;
/// Get block hash by number. Returns `None` if the header is not in the chain.
fn hash(&self, number: NumberFor<Block>) -> Result<Option<Block::Hash>>;
/// Convert an arbitrary block ID into a block hash.
fn block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Option<Block::Hash>> {
match *id {
BlockId::Hash(h) => Ok(Some(h)),
BlockId::Number(n) => self.hash(n),
}
}
/// Convert an arbitrary block ID into a block hash.
fn block_number_from_id(&self, id: &BlockId<Block>) -> Result<Option<NumberFor<Block>>> {
match *id {
BlockId::Hash(h) => self.number(h),
BlockId::Number(n) => Ok(Some(n)),
}
}
/// Get block header. Returns `UnknownBlock` error if block is not found.
fn expect_header(&self, hash: Block::Hash) -> Result<Block::Header> {
self.header(hash)?
.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", hash)))
}
/// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_number_from_id(&self, id: &BlockId<Block>) -> Result<NumberFor<Block>> {
self.block_number_from_id(id).and_then(|n| {
n.ok_or_else(|| Error::UnknownBlock(format!("Expect block number from id: {}", id)))
})
}
/// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Block::Hash> {
self.block_hash_from_id(id).and_then(|h| {
h.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id)))
})
}
}
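// A minimal sketch, not part of the original trait: `_best_header_example` is a
// hypothetical helper showing how the convenience methods compose. A backend
// only implements the primitive lookups; `expect_header` upgrades a missing
// header from `None` into `Error::UnknownBlock`.
fn _best_header_example<Block, B>(backend: &B) -> Result<Block::Header>
where
    Block: BlockT,
    B: HeaderBackend<Block>,
{
    // `info()` returns a snapshot; the best header could in principle be
    // pruned between the two calls, which `expect_header` reports as an error.
    let best_hash = backend.info().best_hash;
    backend.expect_header(best_hash)
}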
/// Handles stale forks.
pub trait ForkBackend<Block: BlockT>:
HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync
{
/// Best effort to get all the header hashes that are part of the provided forks
/// starting only from the fork heads.
///
/// The function tries to reconstruct the route from the fork head to the canonical chain.
/// If any of the hashes on the route can't be found in the db, the function won't be able
/// to reconstruct the route anymore. In this case it will give up expanding the current fork,
/// move on to the next ones and at the end it will return an error that also contains
/// the partially expanded forks.
fn expand_forks(
&self,
fork_heads: &[Block::Hash],
) -> std::result::Result<BTreeSet<Block::Hash>, (BTreeSet<Block::Hash>, Error)> {
let mut missing_blocks = vec![];
let mut expanded_forks = BTreeSet::new();
for fork_head in fork_heads {
let mut route_head = *fork_head;
// Insert stale blocks hashes until canonical chain is reached.
// If we reach a block that is already part of the `expanded_forks` we can stop
// processing the fork.
while expanded_forks.insert(route_head) {
match self.header_metadata(route_head) {
Ok(meta) => {
// If the parent is part of the canonical chain or there doesn't exist a
// block hash for the parent number (bug?!), we can abort adding blocks.
let parent_number = meta.number.saturating_sub(1u32.into());
match self.hash(parent_number) {
Ok(Some(parent_hash)) =>
if parent_hash == meta.parent | ,
Ok(None) | Err(_) => {
missing_blocks.push(BlockId::<Block>::Number(parent_number));
break
},
}
route_head = meta.parent;
},
Err(_e) => {
missing_blocks.push(BlockId::<Block>::Hash(route_head));
break
},
}
}
}
        if !missing_blocks.is_empty() {
return Err((
expanded_forks,
Error::UnknownBlocks(format!(
"Missing stale headers {:?} while expanding forks {:?}.",
fork_heads, missing_blocks
)),
))
}
Ok(expanded_forks)
}
}
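// Hedged usage sketch (hypothetical helper, not in the original file): callers
// that tolerate missing stale headers can keep the partially expanded set that
// `expand_forks` hands back alongside the error.
fn _expand_forks_best_effort<Block, B>(
    backend: &B,
    fork_heads: &[Block::Hash],
) -> BTreeSet<Block::Hash>
where
    Block: BlockT,
    B: ForkBackend<Block>,
{
    match backend.expand_forks(fork_heads) {
        Ok(stale) => stale,
        // The error half still carries every hash that could be expanded.
        Err((partial, _err)) => partial,
    }
}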
impl<Block, T> ForkBackend<Block> for T
where
Block: BlockT,
T: HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync,
{
}
/// Blockchain database backend. Does not perform any validation.
pub trait Backend<Block: BlockT>:
HeaderBackend<Block> + HeaderMetadata<Block, Error = Error>
{
/// Get block body. Returns `None` if block is not found.
fn body(&self, hash: Block::Hash) -> Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
/// Get block justifications. Returns `None` if no justification exists.
fn justifications(&self, hash: Block::Hash) -> Result<Option<Justifications>>;
/// Get last finalized block hash.
fn last_finalized(&self) -> Result<Block::Hash>;
    /// Returns hashes of all blocks that are leaves of the block tree;
    /// in other words, blocks that have no children and are chain heads.
/// Results must be ordered best (longest, highest) chain first.
fn leaves(&self) -> Result<Vec<Block::Hash>>;
/// Returns displaced leaves after the given block would be finalized.
///
/// The returned leaves do not contain the leaves from the same height as `block_number`.
fn displaced_leaves_after_finalizing(
&self,
block_number: NumberFor<Block>,
) -> Result<Vec<Block::Hash>>;
/// Return hashes of all blocks that are children of the block with `parent_hash`.
fn children(&self, parent_hash: Block::Hash) -> Result<Vec<Block::Hash>>;
/// Get the most recent block hash of the longest chain that contains
/// a block with the given `base_hash`.
///
/// The search space is always limited to blocks which are in the finalized
    /// chain or descendants of it.
///
/// Returns `Ok(None)` if `base_hash` is not found in search space.
// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444)
fn longest_containing(
&self,
base_hash: Block::Hash,
import_lock: &RwLock<()>,
) -> Result<Option<Block::Hash>> {
let Some(base_header) = self.header(base_hash)? else { return Ok(None) };
let leaves = {
// ensure no blocks are imported during this code block.
// an import could trigger a reorg which could change the canonical chain.
// we depend on the canonical chain staying the same during this code block.
let _import_guard = import_lock.read();
let info = self.info();
if info.finalized_number > *base_header.number() {
// `base_header` is on a dead fork.
return Ok(None)
}
self.leaves()?
};
// for each chain. longest chain first. shortest last
for leaf_hash in leaves {
let mut current_hash = leaf_hash;
// go backwards through the chain (via parent links)
loop {
if current_hash == base_hash {
return Ok(Some(leaf_hash))
}
let current_header = self
.header(current_hash)?
.ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?;
// stop search in this chain once we go below the target's block number
if current_header.number() < base_header.number() {
break
}
current_hash = *current_header.parent_hash();
}
}
// header may be on a dead fork -- the only leaves that are considered are
// those which can still be finalized.
//
// FIXME #1558 only issue this warning when not on a dead fork
warn!(
"Block {:?} exists in chain but not found when following all leaves backwards",
base_hash,
);
Ok(None)
}
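    // Hedged usage sketch (comments only; `import_lock` stands for whatever
    // lock the client uses to serialize imports and is hypothetical here):
    //
    //     let best = backend.longest_containing(base_hash, &import_lock)?;
    //     // `None` means `base_hash` is unknown or sits on a pruned dead fork.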
/// Get single indexed transaction by content hash. Note that this will only fetch transactions
/// that are indexed by the runtime with `storage_index_transaction`.
fn indexed_transaction(&self, hash: Block::Hash) -> Result<Option<Vec<u8>>>;
/// Check if indexed transaction exists.
fn has_indexed_transaction(&self, hash: Block::Hash) -> Result<bool> {
Ok(self.indexed_transaction(hash)?.is_some())
}
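    /// Get all indexed transactions of a block, if the block body was indexed.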
fn block_indexed_body(&self, hash: Block::Hash) -> Result<Option<Vec<Vec<u8>>>>;
}
/// Blockchain info
#[derive(Debug, Eq, PartialEq)]
pub struct Info<Block: BlockT> {
/// Best block hash.
pub best_hash: Block::Hash,
/// Best block number.
pub best_number: <<Block as BlockT>::Header as HeaderT>::Number,
/// Genesis block hash.
pub genesis_hash: Block::Hash,
/// The head of the finalized chain.
pub finalized_hash: Block::Hash,
/// Last finalized block number.
pub finalized_number: <<Block as BlockT>::Header as HeaderT>::Number,
/// Last finalized state.
pub finalized_state: Option<(Block::Hash, <<Block as BlockT>::Header as HeaderT>::Number)>,
    /// Number of concurrent leaf forks.
pub number_leaves: usize,
/// Missing blocks after warp sync. (start, end).
pub block_gap: Option<(NumberFor<Block>, NumberFor<Block>)>,
}
/// Block status.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BlockStatus {
/// Already in the blockchain.
InChain,
/// Not in the queue or the blockchain.
Unknown,
}
| {
break
} | conditional_block |
backend.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Substrate blockchain trait
use log::warn;
use parking_lot::RwLock;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating},
Justifications,
};
use std::collections::btree_set::BTreeSet;
use crate::header_metadata::HeaderMetadata;
use crate::error::{Error, Result};
/// Blockchain database header backend. Does not perform any validation.
pub trait HeaderBackend<Block: BlockT>: Send + Sync {
/// Get block header. Returns `None` if block is not found.
fn header(&self, hash: Block::Hash) -> Result<Option<Block::Header>>;
/// Get blockchain info.
fn info(&self) -> Info<Block>;
/// Get block status.
fn status(&self, hash: Block::Hash) -> Result<BlockStatus>;
/// Get block number by hash. Returns `None` if the header is not in the chain.
fn number(
&self,
hash: Block::Hash,
) -> Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>>;
/// Get block hash by number. Returns `None` if the header is not in the chain.
fn hash(&self, number: NumberFor<Block>) -> Result<Option<Block::Hash>>;
/// Convert an arbitrary block ID into a block hash.
fn block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Option<Block::Hash>> {
match *id {
BlockId::Hash(h) => Ok(Some(h)),
BlockId::Number(n) => self.hash(n),
}
}
/// Convert an arbitrary block ID into a block hash.
fn block_number_from_id(&self, id: &BlockId<Block>) -> Result<Option<NumberFor<Block>>> {
match *id {
BlockId::Hash(h) => self.number(h),
BlockId::Number(n) => Ok(Some(n)),
}
}
/// Get block header. Returns `UnknownBlock` error if block is not found.
fn expect_header(&self, hash: Block::Hash) -> Result<Block::Header> {
self.header(hash)?
.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", hash)))
}
/// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_number_from_id(&self, id: &BlockId<Block>) -> Result<NumberFor<Block>> {
self.block_number_from_id(id).and_then(|n| {
n.ok_or_else(|| Error::UnknownBlock(format!("Expect block number from id: {}", id)))
})
}
/// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Block::Hash> {
self.block_hash_from_id(id).and_then(|h| {
h.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id)))
})
}
}
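// A minimal sketch, not part of the original trait: `_best_header_example` is a
// hypothetical helper showing how the convenience methods compose. A backend
// only implements the primitive lookups; `expect_header` upgrades a missing
// header from `None` into `Error::UnknownBlock`.
fn _best_header_example<Block, B>(backend: &B) -> Result<Block::Header>
where
    Block: BlockT,
    B: HeaderBackend<Block>,
{
    // `info()` returns a snapshot; the best header could in principle be
    // pruned between the two calls, which `expect_header` reports as an error.
    let best_hash = backend.info().best_hash;
    backend.expect_header(best_hash)
}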
/// Handles stale forks.
pub trait ForkBackend<Block: BlockT>:
HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync
{
/// Best effort to get all the header hashes that are part of the provided forks
/// starting only from the fork heads.
///
/// The function tries to reconstruct the route from the fork head to the canonical chain.
/// If any of the hashes on the route can't be found in the db, the function won't be able
/// to reconstruct the route anymore. In this case it will give up expanding the current fork,
/// move on to the next ones and at the end it will return an error that also contains
/// the partially expanded forks.
fn expand_forks(
&self,
fork_heads: &[Block::Hash],
) -> std::result::Result<BTreeSet<Block::Hash>, (BTreeSet<Block::Hash>, Error)> {
let mut missing_blocks = vec![];
let mut expanded_forks = BTreeSet::new();
for fork_head in fork_heads {
let mut route_head = *fork_head;
// Insert stale blocks hashes until canonical chain is reached.
// If we reach a block that is already part of the `expanded_forks` we can stop
// processing the fork.
while expanded_forks.insert(route_head) {
match self.header_metadata(route_head) {
Ok(meta) => {
// If the parent is part of the canonical chain or there doesn't exist a
// block hash for the parent number (bug?!), we can abort adding blocks.
let parent_number = meta.number.saturating_sub(1u32.into());
match self.hash(parent_number) {
Ok(Some(parent_hash)) =>
if parent_hash == meta.parent {
break
},
Ok(None) | Err(_) => {
missing_blocks.push(BlockId::<Block>::Number(parent_number));
break
},
}
route_head = meta.parent;
},
Err(_e) => {
missing_blocks.push(BlockId::<Block>::Hash(route_head));
break
},
}
}
}
        if !missing_blocks.is_empty() {
return Err((
expanded_forks,
Error::UnknownBlocks(format!(
"Missing stale headers {:?} while expanding forks {:?}.",
fork_heads, missing_blocks
)),
))
}
Ok(expanded_forks)
}
}
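// Hedged usage sketch (hypothetical helper, not in the original file): callers
// that tolerate missing stale headers can keep the partially expanded set that
// `expand_forks` hands back alongside the error.
fn _expand_forks_best_effort<Block, B>(
    backend: &B,
    fork_heads: &[Block::Hash],
) -> BTreeSet<Block::Hash>
where
    Block: BlockT,
    B: ForkBackend<Block>,
{
    match backend.expand_forks(fork_heads) {
        Ok(stale) => stale,
        // The error half still carries every hash that could be expanded.
        Err((partial, _err)) => partial,
    }
}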
impl<Block, T> ForkBackend<Block> for T
where
Block: BlockT,
T: HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync,
{
}
/// Blockchain database backend. Does not perform any validation.
pub trait Backend<Block: BlockT>:
HeaderBackend<Block> + HeaderMetadata<Block, Error = Error>
{
/// Get block body. Returns `None` if block is not found.
fn body(&self, hash: Block::Hash) -> Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
/// Get block justifications. Returns `None` if no justification exists.
fn justifications(&self, hash: Block::Hash) -> Result<Option<Justifications>>;
/// Get last finalized block hash.
fn last_finalized(&self) -> Result<Block::Hash>;
    /// Returns hashes of all blocks that are leaves of the block tree;
    /// in other words, blocks that have no children and are chain heads.
/// Results must be ordered best (longest, highest) chain first.
fn leaves(&self) -> Result<Vec<Block::Hash>>;
/// Returns displaced leaves after the given block would be finalized.
///
/// The returned leaves do not contain the leaves from the same height as `block_number`.
fn displaced_leaves_after_finalizing(
&self,
block_number: NumberFor<Block>,
) -> Result<Vec<Block::Hash>>;
/// Return hashes of all blocks that are children of the block with `parent_hash`.
fn children(&self, parent_hash: Block::Hash) -> Result<Vec<Block::Hash>>;
/// Get the most recent block hash of the longest chain that contains
/// a block with the given `base_hash`.
///
/// The search space is always limited to blocks which are in the finalized
    /// chain or descendants of it.
///
/// Returns `Ok(None)` if `base_hash` is not found in search space.
// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444)
fn longest_containing(
&self,
base_hash: Block::Hash,
import_lock: &RwLock<()>,
) -> Result<Option<Block::Hash>> {
let Some(base_header) = self.header(base_hash)? else { return Ok(None) };
let leaves = {
// ensure no blocks are imported during this code block.
// an import could trigger a reorg which could change the canonical chain.
// we depend on the canonical chain staying the same during this code block.
let _import_guard = import_lock.read();
let info = self.info();
if info.finalized_number > *base_header.number() {
// `base_header` is on a dead fork.
return Ok(None)
}
self.leaves()?
};
// for each chain. longest chain first. shortest last
for leaf_hash in leaves {
let mut current_hash = leaf_hash;
// go backwards through the chain (via parent links)
loop {
if current_hash == base_hash {
return Ok(Some(leaf_hash))
}
let current_header = self
.header(current_hash)?
.ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?;
// stop search in this chain once we go below the target's block number
if current_header.number() < base_header.number() {
break
}
current_hash = *current_header.parent_hash();
}
}
// header may be on a dead fork -- the only leaves that are considered are
// those which can still be finalized.
//
// FIXME #1558 only issue this warning when not on a dead fork
warn!(
"Block {:?} exists in chain but not found when following all leaves backwards",
base_hash,
);
Ok(None)
}
/// Get single indexed transaction by content hash. Note that this will only fetch transactions
/// that are indexed by the runtime with `storage_index_transaction`.
fn indexed_transaction(&self, hash: Block::Hash) -> Result<Option<Vec<u8>>>;
/// Check if indexed transaction exists.
fn has_indexed_transaction(&self, hash: Block::Hash) -> Result<bool> {
Ok(self.indexed_transaction(hash)?.is_some())
}
/// Get all indexed transactions within a block body. Note that, like
/// `indexed_transaction`, this only returns data indexed by the runtime
/// with `storage_index_transaction`.
fn block_indexed_body(&self, hash: Block::Hash) -> Result<Option<Vec<Vec<u8>>>>;
}
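// Illustrative sketch (not part of the trait above): the containment walk that
// `longest_containing` performs, reduced to a toy parent map keyed by `u64`
// hashes. All names here are hypothetical; the real code walks `header()` calls.
fn walk_back_contains(
parents: &std::collections::HashMap<u64, u64>,
numbers: &std::collections::HashMap<u64, u64>,
leaf: u64,
base: u64,
) -> bool {
let base_number = numbers[&base];
let mut current = leaf;
loop {
// Found the base block: the leaf's chain contains it.
if current == base {
return true
}
// Dropped below the base's height: the base cannot appear any deeper.
if numbers[&current] < base_number {
return false
}
match parents.get(&current) {
Some(parent) => current = *parent,
None => return false, // reached genesis without meeting `base`
}
}
}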
/// Blockchain info
#[derive(Debug, Eq, PartialEq)]
pub struct Info<Block: BlockT> {
/// Best block hash.
pub best_hash: Block::Hash,
/// Best block number.
pub best_number: <<Block as BlockT>::Header as HeaderT>::Number,
/// Genesis block hash.
pub genesis_hash: Block::Hash,
/// The head of the finalized chain.
pub finalized_hash: Block::Hash,
/// Last finalized block number.
pub finalized_number: <<Block as BlockT>::Header as HeaderT>::Number,
/// Last finalized state.
pub finalized_state: Option<(Block::Hash, <<Block as BlockT>::Header as HeaderT>::Number)>,
/// Number of concurrent leaf forks.
pub number_leaves: usize,
/// Missing blocks after warp sync. (start, end).
pub block_gap: Option<(NumberFor<Block>, NumberFor<Block>)>,
}
/// Block status.
#[derive(Debug, Clone, Copy, PartialEq, Eq)] | Unknown,
} | pub enum BlockStatus {
/// Already in the blockchain.
InChain,
/// Not in the queue or the blockchain. | random_line_split |
mod.rs | //! General actions
#![allow(unused_imports)]
#![allow(dead_code)]
use chrono::*;
use std::{env,fs};
use std::time;
use std::fmt::Write;
use std::path::{Path,PathBuf};
use util;
use super::BillType;
use storage::{Storage,StorageDir,Storable,StorageResult};
use project::Project;
#[cfg(feature="document_export")]
use fill_docs::fill_template;
pub mod error;
use self::error::*;
/// Sets up an instance of `Storage`.
pub fn setup_luigi() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
/// Sets up an instance of `Storage`, with git turned on.
pub fn setup_luigi_with_git() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new_with_git(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
pub fn simple_with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F)
where F:Fn(&Project)
{
match with_projects(dir, search_terms, |p| {f(p);Ok(())}){
Ok(_) => {},
Err(e) => error!("{}",e)
}
}
/// Helper method that passes projects matching the `search_terms` to the passed closure `f`
pub fn with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F) -> Result<()>
where F:Fn(&Project)->Result<()>
{
trace!("with_projects({:?})", search_terms);
let luigi = try!(setup_luigi());
let projects = try!(luigi.search_projects_any(dir, search_terms));
if projects.is_empty() {
return Err(format!("Nothing found for {:?}", search_terms).into())
}
for project in &projects{
try!(f(project));
}
Ok(())
}
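// Hedged usage sketch for `with_projects`: print the directory of every project
// matching the given search terms in the working directory. The closure must
// return `Result<()>`, so a plain action is wrapped in `Ok(())`.
fn list_matching(search_terms: &[&str]) -> Result<()> {
with_projects(StorageDir::Working, search_terms, |project| {
println!("{}", project.dir().display());
Ok(())
})
}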
pub fn csv(year:i32) -> Result<String> {
let luigi = try!(setup_luigi());
let mut projects = try!(luigi.open_projects(StorageDir::Year(year)));
projects.sort_by(|pa,pb| pa.index().unwrap_or_else(||"zzzz".to_owned()).cmp( &pb.index().unwrap_or("zzzz".to_owned())));
projects_to_csv(&projects)
}
/// Produces a csv string from a list of `Project`s
/// TODO this still contains german terms
pub fn projects_to_csv(projects:&[Project]) -> Result<String>{
let mut string = String::new();
let splitter = ";";
try!(writeln!(&mut string, "{}", [ "Rnum", "Bezeichnung", "Datum", "Rechnungsdatum", "Betreuer", "Verantwortlich", "Bezahlt am", "Betrag", "Canceled"].join(splitter)));
for project in projects{
try!(writeln!(&mut string, "{}", [
project.get("InvoiceNumber").unwrap_or_else(|| String::from(r#""""#)),
project.get("Name").unwrap_or_else(|| String::from(r#""""#)),
project.get("event/dates/0/begin").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Caterers").unwrap_or_else(|| String::from(r#""""#)),
project.get("Responsible").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/payed_date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Final").unwrap_or_else(|| String::from(r#""""#)),
project.canceled_string().to_owned()
].join(splitter)));
}
Ok(string)
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn project_to_doc(project: &Project, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
let template_ext = ::CONFIG.get_str("extensions/output_template").expect("Faulty default config");
let output_ext = ::CONFIG.get_str("extensions/output_file").expect("Faulty default config");
let convert_ext = ::CONFIG.get_str("convert/output_extension").expect("Faulty default config");
let trash_exts = ::CONFIG.get("convert/trash_extensions").expect("Faulty default config")
.as_vec().expect("Faulty default config")
.into_iter()
.map(|v|v.as_str()).collect::<Vec<_>>();
let mut template_path = PathBuf::new();
template_path.push(util::get_storage_path());
template_path.push(::CONFIG.get_str("dirs/templates").expect("Faulty config: dirs/templates does not contain a value"));
template_path.push(template_name);
template_path.set_extension(template_ext);
debug!("template file={:?} exists={}", template_path, template_path.exists());
if !template_path.exists() {
return Err(format!("Template not found at {}", template_path.display()).into())
}
let convert_tool = ::CONFIG.get_str("convert/tool");
let output_folder = ::CONFIG.get_str("output_path").and_then(util::get_valid_path).expect("Faulty config \"output_path\"");
let ready_for_offer = project.is_ready_for_offer();
let ready_for_invoice = project.is_ready_for_invoice();
let project_file = project.file();
// tiny little helper
let to_local_file = |file:&Path, ext| {
let mut _tmpfile = file.to_owned();
_tmpfile.set_extension(ext);
Path::new(_tmpfile.file_name().unwrap()).to_owned()
};
use BillType::*;
let (dyn_bill_type, outfile_tex):
(Option<BillType>, Option<PathBuf>) =
match (bill_type, ready_for_offer, ready_for_invoice)
{
(&Some(Offer), Ok(_), _ ) |
(&None, Ok(_), Err(_)) => (Some(Offer), Some(project.dir().join(project.offer_file_name(output_ext).expect("this should have been caught by ready_for_offer()")))),
(&Some(Invoice), _, Ok(_)) |
(&None, _, Ok(_)) => (Some(Invoice), Some(project.dir().join(project.invoice_file_name(output_ext).expect("this should have been caught by ready_for_invoice()")))),
(&Some(Offer), Err(e), _ ) => {error!("cannot create an offer, check out:{:#?}",e);(None,None)},
(&Some(Invoice), _, Err(e)) => {error!("cannot create an invoice, check out:{:#?}",e);(None,None)},
(_, Err(e), Err(_)) => {error!("Neither an Offer nor an Invoice can be created from this project\n please check out {:#?}", e);(None,None)}
};
//debug!("{:?} -> {:?}",(bill_type, project.is_ready_for_offer(), project.is_ready_for_invoice()), (dyn_bill_type, outfile_tex));
if let (Some(outfile), Some(dyn_bill)) = (outfile_tex, dyn_bill_type) {
let filled = try!(fill_template(project, &dyn_bill, &template_path));
let pdffile = to_local_file(&outfile, convert_ext);
let target = output_folder.join(&pdffile);
// ok, so apparently we can create a tex file, so let's do it
if !force && target.exists() && try!(file_age(&target)) < try!(file_age(&project_file)) {
// no wait, nothing has changed, so let's save ourselves the work
info!("nothing to be done, {} is younger than {}\n use -f if you don't agree", target.display(), project_file.display());
} else {
// \o/ we created a tex file
if dry_run{
warn!("Dry run! This does not produce any output:\n * {}\n * {}", outfile.display(), pdffile.display());
} else {
let outfileb = try!(project.write_to_file(&filled,&dyn_bill,output_ext));
debug!("{} vs\n {}", outfile.display(), outfileb.display());
util::pass_to_command(&convert_tool, &[&outfileb]);
}
// clean up expected trash files
for trash_ext in trash_exts.iter().filter_map(|x|*x){
let trash_file = to_local_file(&outfile, trash_ext);
if trash_file.exists() {
try!(fs::remove_file(&trash_file));
debug!("just deleted: {}", trash_file.display())
}
else {
debug!("I expected there to be a {}, but there wasn't any?", trash_file.display())
}
}
if pdffile.exists(){
debug!("now there is be a {:?} -> {:?}", pdffile, target);
try!(fs::rename(&pdffile, &target));
}
}
}
Ok(())
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn projects_to_doc(dir:StorageDir, search_term:&str, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
with_projects(dir, &[search_term], |p| project_to_doc(p, template_name, bill_type, dry_run, force) )
}
fn file_age(path:&Path) -> Result<time::Duration> |
/// Testing only, tries to run complete spec on all projects.
/// TODO make this not panic :D
/// TODO move this to `spec::all_the_things`
pub fn spec() -> Result<()> {
use project::spec::*;
let luigi = try!(setup_luigi());
//let projects = super::execute(||luigi.open_projects(StorageDir::All));
let projects = try!(luigi.open_projects(StorageDir::Working));
for project in projects{
info!("{}", project.dir().display());
let yaml = project.yaml();
client::validate(&yaml).map_err(|errors|for error in errors{
println!(" error: {}", error);
}).unwrap();
client::full_name(&yaml);
client::first_name(&yaml);
client::title(&yaml);
client::email(&yaml);
hours::caterers_string(&yaml);
invoice::number_long_str(&yaml);
invoice::number_str(&yaml);
offer::number(&yaml);
project.age().map(|a|format!("{} days", a)).unwrap();
project.date().map(|d|d.year().to_string()).unwrap();
project.sum_sold().map(|c|util::currency_to_string(&c)).unwrap();
project::manager(&yaml).map(|s|s.to_owned()).unwrap();
project::name(&yaml).map(|s|s.to_owned()).unwrap();
}
Ok(())
}
pub fn delete_project_confirmation(dir: StorageDir, search_terms:&[&str]) -> Result<()> {
let luigi = try!(setup_luigi());
for project in try!(luigi.search_projects_any(dir, search_terms)) {
try!(project.delete_project_dir_if(
|| util::really(&format!("you want me to delete {:?} [y/N]", project.dir())) && util::really("really? [y/N]")
))
}
Ok(())
}
pub fn archive_projects(search_terms:&[&str], manual_year:Option<i32>, force:bool) -> Result<Vec<PathBuf>>{
trace!("archive_projects matching ({:?},{:?},{:?})", search_terms, manual_year,force);
let luigi = try!(setup_luigi_with_git());
Ok(try!( luigi.archive_projects_if(search_terms, manual_year, || force) ))
}
/// Command UNARCHIVE <YEAR> <NAME>
/// TODO: return a list of files that have to be updated in git
pub fn unarchive_projects(year:i32, search_terms:&[&str]) -> Result<Vec<PathBuf>> {
let luigi = try!(setup_luigi_with_git());
Ok(try!( luigi.unarchive_projects(year, search_terms) ))
}
| {
let metadata = try!(fs::metadata(path));
let accessed = try!(metadata.accessed());
Ok(try!(accessed.elapsed()))
} | identifier_body |
mod.rs | //! General actions
#![allow(unused_imports)]
#![allow(dead_code)]
use chrono::*;
use std::{env,fs};
use std::time;
use std::fmt::Write;
use std::path::{Path,PathBuf};
use util;
use super::BillType;
use storage::{Storage,StorageDir,Storable,StorageResult};
use project::Project;
#[cfg(feature="document_export")]
use fill_docs::fill_template;
pub mod error;
use self::error::*;
/// Sets up an instance of `Storage`.
pub fn setup_luigi() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
/// Sets up an instance of `Storage`, with git turned on.
pub fn setup_luigi_with_git() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new_with_git(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
pub fn simple_with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F)
where F:Fn(&Project)
{
match with_projects(dir, search_terms, |p| {f(p);Ok(())}){
Ok(_) => {},
Err(e) => error!("{}",e)
}
}
/// Helper method that passes projects matching the `search_terms` to the passed closure `f`
pub fn with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F) -> Result<()>
where F:Fn(&Project)->Result<()>
{
trace!("with_projects({:?})", search_terms);
let luigi = try!(setup_luigi());
let projects = try!(luigi.search_projects_any(dir, search_terms));
if projects.is_empty() {
return Err(format!("Nothing found for {:?}", search_terms).into())
}
for project in &projects{
try!(f(project));
}
Ok(())
}
pub fn csv(year:i32) -> Result<String> {
let luigi = try!(setup_luigi());
let mut projects = try!(luigi.open_projects(StorageDir::Year(year)));
projects.sort_by(|pa,pb| pa.index().unwrap_or_else(||"zzzz".to_owned()).cmp( &pb.index().unwrap_or("zzzz".to_owned())));
projects_to_csv(&projects)
}
/// Produces a csv string from a list of `Project`s
/// TODO this still contains german terms
pub fn projects_to_csv(projects:&[Project]) -> Result<String>{
let mut string = String::new();
let splitter = ";";
try!(writeln!(&mut string, "{}", [ "Rnum", "Bezeichnung", "Datum", "Rechnungsdatum", "Betreuer", "Verantwortlich", "Bezahlt am", "Betrag", "Canceled"].join(splitter)));
for project in projects{
try!(writeln!(&mut string, "{}", [
project.get("InvoiceNumber").unwrap_or_else(|| String::from(r#""""#)),
project.get("Name").unwrap_or_else(|| String::from(r#""""#)),
project.get("event/dates/0/begin").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Caterers").unwrap_or_else(|| String::from(r#""""#)),
project.get("Responsible").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/payed_date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Final").unwrap_or_else(|| String::from(r#""""#)),
project.canceled_string().to_owned()
].join(splitter)));
}
Ok(string)
}
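// Minimal caller sketch: dump one year's projects to stdout as CSV. The `csv`
// function above already wires `open_projects` and `projects_to_csv` together.
fn print_year_csv(year: i32) -> Result<()> {
let table = try!(csv(year));
println!("{}", table);
Ok(())
}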
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn project_to_doc(project: &Project, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
let template_ext = ::CONFIG.get_str("extensions/output_template").expect("Faulty default config");
let output_ext = ::CONFIG.get_str("extensions/output_file").expect("Faulty default config");
let convert_ext = ::CONFIG.get_str("convert/output_extension").expect("Faulty default config");
let trash_exts = ::CONFIG.get("convert/trash_extensions").expect("Faulty default config")
.as_vec().expect("Faulty default config")
.into_iter()
.map(|v|v.as_str()).collect::<Vec<_>>();
let mut template_path = PathBuf::new();
template_path.push(util::get_storage_path());
template_path.push(::CONFIG.get_str("dirs/templates").expect("Faulty config: dirs/templates does not contain a value"));
template_path.push(template_name);
template_path.set_extension(template_ext);
debug!("template file={:?} exists={}", template_path, template_path.exists());
if !template_path.exists() {
return Err(format!("Template not found at {}", template_path.display()).into())
}
let convert_tool = ::CONFIG.get_str("convert/tool");
let output_folder = ::CONFIG.get_str("output_path").and_then(util::get_valid_path).expect("Faulty config \"output_path\"");
let ready_for_offer = project.is_ready_for_offer();
let ready_for_invoice = project.is_ready_for_invoice();
let project_file = project.file();
// tiny little helper
let to_local_file = |file:&Path, ext| {
let mut _tmpfile = file.to_owned();
_tmpfile.set_extension(ext);
Path::new(_tmpfile.file_name().unwrap()).to_owned()
};
use BillType::*;
let (dyn_bill_type, outfile_tex):
(Option<BillType>, Option<PathBuf>) =
match (bill_type, ready_for_offer, ready_for_invoice)
{
(&Some(Offer), Ok(_), _ ) |
(&None, Ok(_), Err(_)) => (Some(Offer), Some(project.dir().join(project.offer_file_name(output_ext).expect("this should have been caught by ready_for_offer()")))),
(&Some(Invoice), _, Ok(_)) |
(&None, _, Ok(_)) => (Some(Invoice), Some(project.dir().join(project.invoice_file_name(output_ext).expect("this should have been caught by ready_for_invoice()")))),
(&Some(Offer), Err(e), _ ) => {error!("cannot create an offer, check out:{:#?}",e);(None,None)},
(&Some(Invoice), _, Err(e)) => {error!("cannot create an invoice, check out:{:#?}",e);(None,None)},
(_, Err(e), Err(_)) => {error!("Neither an Offer nor an Invoice can be created from this project\n please check out {:#?}", e);(None,None)}
};
//debug!("{:?} -> {:?}",(bill_type, project.is_ready_for_offer(), project.is_ready_for_invoice()), (dyn_bill_type, outfile_tex));
if let (Some(outfile), Some(dyn_bill)) = (outfile_tex, dyn_bill_type) {
let filled = try!(fill_template(project, &dyn_bill, &template_path));
let pdffile = to_local_file(&outfile, convert_ext);
let target = output_folder.join(&pdffile);
// ok, so apparently we can create a tex file, so let's do it
if !force && target.exists() && try!(file_age(&target)) < try!(file_age(&project_file)) {
// no wait, nothing has changed, so let's save ourselves the work
info!("nothing to be done, {} is younger than {}\n use -f if you don't agree", target.display(), project_file.display());
} else {
// \o/ we created a tex file
if dry_run{
warn!("Dry run! This does not produce any output:\n * {}\n * {}", outfile.display(), pdffile.display());
} else {
let outfileb = try!(project.write_to_file(&filled,&dyn_bill,output_ext));
debug!("{} vs\n {}", outfile.display(), outfileb.display());
util::pass_to_command(&convert_tool, &[&outfileb]);
}
// clean up expected trash files
for trash_ext in trash_exts.iter().filter_map(|x|*x){
let trash_file = to_local_file(&outfile, trash_ext);
if trash_file.exists() {
try!(fs::remove_file(&trash_file));
debug!("just deleted: {}", trash_file.display())
}
else {
debug!("I expected there to be a {}, but there wasn't any?", trash_file.display())
}
}
if pdffile.exists(){
debug!("now there is be a {:?} -> {:?}", pdffile, target);
try!(fs::rename(&pdffile, &target));
}
}
}
Ok(())
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn projects_to_doc(dir:StorageDir, search_term:&str, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
with_projects(dir, &[search_term], |p| project_to_doc(p, template_name, bill_type, dry_run, force) )
}
fn file_age(path:&Path) -> Result<time::Duration> {
let metadata = try!(fs::metadata(path));
let accessed = try!(metadata.accessed());
Ok(try!(accessed.elapsed()))
}
/// Testing only, tries to run complete spec on all projects.
/// TODO make this not panic :D
/// TODO move this to `spec::all_the_things`
pub fn spec() -> Result<()> {
use project::spec::*;
let luigi = try!(setup_luigi());
//let projects = super::execute(||luigi.open_projects(StorageDir::All));
let projects = try!(luigi.open_projects(StorageDir::Working));
for project in projects{
info!("{}", project.dir().display());
let yaml = project.yaml();
client::validate(&yaml).map_err(|errors|for error in errors{
println!(" error: {}", error);
}).unwrap();
client::full_name(&yaml);
client::first_name(&yaml);
client::title(&yaml);
client::email(&yaml);
hours::caterers_string(&yaml);
invoice::number_long_str(&yaml);
invoice::number_str(&yaml);
offer::number(&yaml);
project.age().map(|a|format!("{} days", a)).unwrap();
project.date().map(|d|d.year().to_string()).unwrap();
project.sum_sold().map(|c|util::currency_to_string(&c)).unwrap();
project::manager(&yaml).map(|s|s.to_owned()).unwrap();
project::name(&yaml).map(|s|s.to_owned()).unwrap();
}
Ok(())
}
pub fn delete_project_confirmation(dir: StorageDir, search_terms:&[&str]) -> Result<()> {
let luigi = try!(setup_luigi());
for project in try!(luigi.search_projects_any(dir, search_terms)) {
try!(project.delete_project_dir_if(
|| util::really(&format!("you want me to delete {:?} [y/N]", project.dir())) && util::really("really? [y/N]")
))
}
Ok(())
}
pub fn archive_projects(search_terms:&[&str], manual_year:Option<i32>, force:bool) -> Result<Vec<PathBuf>>{
trace!("archive_projects matching ({:?},{:?},{:?})", search_terms, manual_year,force);
let luigi = try!(setup_luigi_with_git());
Ok(try!( luigi.archive_projects_if(search_terms, manual_year, || force) )) | }
/// Command UNARCHIVE <YEAR> <NAME>
/// TODO: return a list of files that have to be updated in git
pub fn unarchive_projects(year:i32, search_terms:&[&str]) -> Result<Vec<PathBuf>> {
let luigi = try!(setup_luigi_with_git());
Ok(try!( luigi.unarchive_projects(year, search_terms) ))
} | random_line_split |
|
mod.rs | //! General actions
#![allow(unused_imports)]
#![allow(dead_code)]
use chrono::*;
use std::{env,fs};
use std::time;
use std::fmt::Write;
use std::path::{Path,PathBuf};
use util;
use super::BillType;
use storage::{Storage,StorageDir,Storable,StorageResult};
use project::Project;
#[cfg(feature="document_export")]
use fill_docs::fill_template;
pub mod error;
use self::error::*;
/// Sets up an instance of `Storage`.
pub fn setup_luigi() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
/// Sets up an instance of `Storage`, with git turned on.
pub fn setup_luigi_with_git() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new_with_git(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
pub fn simple_with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F)
where F:Fn(&Project)
{
match with_projects(dir, search_terms, |p| {f(p);Ok(())}){
Ok(_) => {},
Err(e) => error!("{}",e)
}
}
/// Helper method that passes projects matching the `search_terms` to the passed closure `f`
pub fn with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F) -> Result<()>
where F:Fn(&Project)->Result<()>
{
trace!("with_projects({:?})", search_terms);
let luigi = try!(setup_luigi());
let projects = try!(luigi.search_projects_any(dir, search_terms));
if projects.is_empty() {
return Err(format!("Nothing found for {:?}", search_terms).into())
}
for project in &projects{
try!(f(project));
}
Ok(())
}
pub fn csv(year:i32) -> Result<String> {
let luigi = try!(setup_luigi());
let mut projects = try!(luigi.open_projects(StorageDir::Year(year)));
projects.sort_by(|pa,pb| pa.index().unwrap_or_else(||"zzzz".to_owned()).cmp( &pb.index().unwrap_or("zzzz".to_owned())));
projects_to_csv(&projects)
}
/// Produces a csv string from a list of `Project`s
/// TODO this still contains german terms
pub fn | (projects:&[Project]) -> Result<String>{
let mut string = String::new();
let splitter = ";";
try!(writeln!(&mut string, "{}", [ "Rnum", "Bezeichnung", "Datum", "Rechnungsdatum", "Betreuer", "Verantwortlich", "Bezahlt am", "Betrag", "Canceled"].join(splitter)));
for project in projects{
try!(writeln!(&mut string, "{}", [
project.get("InvoiceNumber").unwrap_or_else(|| String::from(r#""""#)),
project.get("Name").unwrap_or_else(|| String::from(r#""""#)),
project.get("event/dates/0/begin").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Caterers").unwrap_or_else(|| String::from(r#""""#)),
project.get("Responsible").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/payed_date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Final").unwrap_or_else(|| String::from(r#""""#)),
project.canceled_string().to_owned()
].join(splitter)));
}
Ok(string)
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn project_to_doc(project: &Project, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
let template_ext = ::CONFIG.get_str("extensions/output_template").expect("Faulty default config");
let output_ext = ::CONFIG.get_str("extensions/output_file").expect("Faulty default config");
let convert_ext = ::CONFIG.get_str("convert/output_extension").expect("Faulty default config");
let trash_exts = ::CONFIG.get("convert/trash_extensions").expect("Faulty default config")
.as_vec().expect("Faulty default config")
.into_iter()
.map(|v|v.as_str()).collect::<Vec<_>>();
let mut template_path = PathBuf::new();
template_path.push(util::get_storage_path());
template_path.push(::CONFIG.get_str("dirs/templates").expect("Faulty config: dirs/templates does not contain a value"));
template_path.push(template_name);
template_path.set_extension(template_ext);
debug!("template file={:?} exists={}", template_path, template_path.exists());
if !template_path.exists() {
return Err(format!("Template not found at {}", template_path.display()).into())
}
let convert_tool = ::CONFIG.get_str("convert/tool");
let output_folder = ::CONFIG.get_str("output_path").and_then(util::get_valid_path).expect("Faulty config \"output_path\"");
let ready_for_offer = project.is_ready_for_offer();
let ready_for_invoice = project.is_ready_for_invoice();
let project_file = project.file();
// tiny little helper
let to_local_file = |file:&Path, ext| {
let mut _tmpfile = file.to_owned();
_tmpfile.set_extension(ext);
Path::new(_tmpfile.file_name().unwrap()).to_owned()
};
use BillType::*;
let (dyn_bill_type, outfile_tex):
(Option<BillType>, Option<PathBuf>) =
match (bill_type, ready_for_offer, ready_for_invoice)
{
(&Some(Offer), Ok(_), _ ) |
(&None, Ok(_), Err(_)) => (Some(Offer), Some(project.dir().join(project.offer_file_name(output_ext).expect("this should have been caught by ready_for_offer()")))),
(&Some(Invoice), _, Ok(_)) |
(&None, _, Ok(_)) => (Some(Invoice), Some(project.dir().join(project.invoice_file_name(output_ext).expect("this should have been caught by ready_for_invoice()")))),
(&Some(Offer), Err(e), _ ) => {error!("cannot create an offer, check out:{:#?}",e);(None,None)},
(&Some(Invoice), _, Err(e)) => {error!("cannot create an invoice, check out:{:#?}",e);(None,None)},
(_, Err(e), Err(_)) => {error!("Neither an Offer nor an Invoice can be created from this project\n please check out {:#?}", e);(None,None)}
};
//debug!("{:?} -> {:?}",(bill_type, project.is_ready_for_offer(), project.is_ready_for_invoice()), (dyn_bill_type, outfile_tex));
if let (Some(outfile), Some(dyn_bill)) = (outfile_tex, dyn_bill_type) {
let filled = try!(fill_template(project, &dyn_bill, &template_path));
let pdffile = to_local_file(&outfile, convert_ext);
let target = output_folder.join(&pdffile);
// ok, so apparently we can create a tex file, so let's do it
if !force && target.exists() && try!(file_age(&target)) < try!(file_age(&project_file)) {
// no wait, nothing has changed, so let's save ourselves the work
info!("nothing to be done, {} is younger than {}\n use -f if you don't agree", target.display(), project_file.display());
} else {
// \o/ we created a tex file
if dry_run{
warn!("Dry run! This does not produce any output:\n * {}\n * {}", outfile.display(), pdffile.display());
} else {
let outfileb = try!(project.write_to_file(&filled,&dyn_bill,output_ext));
debug!("{} vs\n {}", outfile.display(), outfileb.display());
util::pass_to_command(&convert_tool, &[&outfileb]);
}
// clean up expected trash files
for trash_ext in trash_exts.iter().filter_map(|x|*x){
let trash_file = to_local_file(&outfile, trash_ext);
if trash_file.exists() {
try!(fs::remove_file(&trash_file));
debug!("just deleted: {}", trash_file.display())
}
else {
debug!("I expected there to be a {}, but there wasn't any?", trash_file.display())
}
}
if pdffile.exists(){
debug!("now there is be a {:?} -> {:?}", pdffile, target);
try!(fs::rename(&pdffile, &target));
}
}
}
Ok(())
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn projects_to_doc(dir:StorageDir, search_term:&str, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
with_projects(dir, &[search_term], |p| project_to_doc(p, template_name, bill_type, dry_run, force) )
}
fn file_age(path:&Path) -> Result<time::Duration> {
let metadata = try!(fs::metadata(path));
let accessed = try!(metadata.accessed());
Ok(try!(accessed.elapsed()))
}
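// Note: `file_age` above keys off the *access* time, which many filesystems
// update lazily (or never, when mounted with `noatime`). If the question is
// "has the source changed since the output was produced?", the modification
// time is the safer signal. A hedged alternative sketch, not wired in here:
fn file_age_modified(path: &Path) -> Result<time::Duration> {
let metadata = try!(fs::metadata(path));
let modified = try!(metadata.modified());
Ok(try!(modified.elapsed()))
}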
/// Testing only, tries to run complete spec on all projects.
/// TODO make this not panic :D
/// TODO move this to `spec::all_the_things`
pub fn spec() -> Result<()> {
use project::spec::*;
let luigi = try!(setup_luigi());
//let projects = super::execute(||luigi.open_projects(StorageDir::All));
let projects = try!(luigi.open_projects(StorageDir::Working));
for project in projects{
info!("{}", project.dir().display());
let yaml = project.yaml();
client::validate(&yaml).map_err(|errors|for error in errors{
println!(" error: {}", error);
}).unwrap();
client::full_name(&yaml);
client::first_name(&yaml);
client::title(&yaml);
client::email(&yaml);
hours::caterers_string(&yaml);
invoice::number_long_str(&yaml);
invoice::number_str(&yaml);
offer::number(&yaml);
project.age().map(|a|format!("{} days", a)).unwrap();
project.date().map(|d|d.year().to_string()).unwrap();
project.sum_sold().map(|c|util::currency_to_string(&c)).unwrap();
project::manager(&yaml).map(|s|s.to_owned()).unwrap();
project::name(&yaml).map(|s|s.to_owned()).unwrap();
}
Ok(())
}
pub fn delete_project_confirmation(dir: StorageDir, search_terms:&[&str]) -> Result<()> {
let luigi = try!(setup_luigi());
for project in try!(luigi.search_projects_any(dir, search_terms)) {
try!(project.delete_project_dir_if(
|| util::really(&format!("you want me to delete {:?} [y/N]", project.dir())) && util::really("really? [y/N]")
))
}
Ok(())
}
pub fn archive_projects(search_terms:&[&str], manual_year:Option<i32>, force:bool) -> Result<Vec<PathBuf>>{
trace!("archive_projects matching ({:?},{:?},{:?})", search_terms, manual_year,force);
let luigi = try!(setup_luigi_with_git());
Ok(try!( luigi.archive_projects_if(search_terms, manual_year, || force) ))
}
/// Command UNARCHIVE <YEAR> <NAME>
/// TODO: return a list of files that have to be updated in git
pub fn unarchive_projects(year:i32, search_terms:&[&str]) -> Result<Vec<PathBuf>> {
let luigi = try!(setup_luigi_with_git());
Ok(try!( luigi.unarchive_projects(year, search_terms) ))
}
| projects_to_csv | identifier_name |
mod.rs | //! General actions
#![allow(unused_imports)]
#![allow(dead_code)]
use chrono::*;
use std::{env,fs};
use std::time;
use std::fmt::Write;
use std::path::{Path,PathBuf};
use util;
use super::BillType;
use storage::{Storage,StorageDir,Storable,StorageResult};
use project::Project;
#[cfg(feature="document_export")]
use fill_docs::fill_template;
pub mod error;
use self::error::*;
/// Sets up an instance of `Storage`.
pub fn setup_luigi() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
/// Sets up an instance of `Storage`, with git turned on.
pub fn setup_luigi_with_git() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new_with_git(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
pub fn simple_with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F)
where F:Fn(&Project)
{
match with_projects(dir, search_terms, |p| {f(p);Ok(())}){
Ok(_) => {},
Err(e) => error!("{}",e)
}
}
/// Helper method that passes projects matching the `search_terms` to the passed closure `f`
pub fn with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F) -> Result<()>
where F:Fn(&Project)->Result<()>
{
trace!("with_projects({:?})", search_terms);
let luigi = try!(setup_luigi());
let projects = try!(luigi.search_projects_any(dir, search_terms));
if projects.is_empty() {
return Err(format!("Nothing found for {:?}", search_terms).into())
}
for project in &projects{
try!(f(project));
}
Ok(())
}
pub fn csv(year:i32) -> Result<String> {
let luigi = try!(setup_luigi());
let mut projects = try!(luigi.open_projects(StorageDir::Year(year)));
projects.sort_by(|pa,pb| pa.index().unwrap_or_else(||"zzzz".to_owned()).cmp( &pb.index().unwrap_or("zzzz".to_owned())));
projects_to_csv(&projects)
}
/// Produces a csv string from a list of `Project`s
/// TODO this still contains german terms
pub fn projects_to_csv(projects:&[Project]) -> Result<String>{
let mut string = String::new();
let splitter = ";";
try!(writeln!(&mut string, "{}", [ "Rnum", "Bezeichnung", "Datum", "Rechnungsdatum", "Betreuer", "Verantwortlich", "Bezahlt am", "Betrag", "Canceled"].join(splitter)));
for project in projects{
try!(writeln!(&mut string, "{}", [
project.get("InvoiceNumber").unwrap_or_else(|| String::from(r#""""#)),
project.get("Name").unwrap_or_else(|| String::from(r#""""#)),
project.get("event/dates/0/begin").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Caterers").unwrap_or_else(|| String::from(r#""""#)),
project.get("Responsible").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/payed_date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Final").unwrap_or_else(|| String::from(r#""""#)),
project.canceled_string().to_owned()
].join(splitter)));
}
Ok(string)
}
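// The joins above break as soon as a field contains the `;` separator, a quote,
// or a newline. A hedged helper sketch for minimal CSV quoting (doubling
// embedded quotes, in the spirit of RFC 4180); not wired into `projects_to_csv`:
fn csv_quote(field: &str) -> String {
if field.contains(';') || field.contains('"') || field.contains('\n') {
format!("\"{}\"", field.replace('"', "\"\""))
} else {
field.to_owned()
}
}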
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn project_to_doc(project: &Project, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
let template_ext = ::CONFIG.get_str("extensions/output_template").expect("Faulty default config");
let output_ext = ::CONFIG.get_str("extensions/output_file").expect("Faulty default config");
let convert_ext = ::CONFIG.get_str("convert/output_extension").expect("Faulty default config");
let trash_exts = ::CONFIG.get("convert/trash_extensions").expect("Faulty default config")
.as_vec().expect("Faulty default config")
.into_iter()
.map(|v|v.as_str()).collect::<Vec<_>>();
let mut template_path = PathBuf::new();
template_path.push(util::get_storage_path());
template_path.push(::CONFIG.get_str("dirs/templates").expect("Faulty config: dirs/templates does not contain a value"));
template_path.push(template_name);
template_path.set_extension(template_ext);
debug!("template file={:?} exists={}", template_path, template_path.exists());
if !template_path.exists() {
return Err(format!("Template not found at {}", template_path.display()).into())
}
let convert_tool = ::CONFIG.get_str("convert/tool");
let output_folder = ::CONFIG.get_str("output_path").and_then(util::get_valid_path).expect("Faulty config \"output_path\"");
let ready_for_offer = project.is_ready_for_offer();
let ready_for_invoice = project.is_ready_for_invoice();
let project_file = project.file();
// tiny little helper
let to_local_file = |file:&Path, ext| {
let mut _tmpfile = file.to_owned();
_tmpfile.set_extension(ext);
Path::new(_tmpfile.file_name().unwrap()).to_owned()
};
use BillType::*;
let (dyn_bill_type, outfile_tex):
(Option<BillType>, Option<PathBuf>) =
match (bill_type, ready_for_offer, ready_for_invoice)
{
(&Some(Offer), Ok(_), _ ) |
(&None, Ok(_), Err(_)) => (Some(Offer), Some(project.dir().join(project.offer_file_name(output_ext).expect("this should have been caught by ready_for_offer()")))),
(&Some(Invoice), _, Ok(_)) |
(&None, _, Ok(_)) => (Some(Invoice), Some(project.dir().join(project.invoice_file_name(output_ext).expect("this should have been caught by ready_for_invoice()")))),
(&Some(Offer), Err(e), _ ) => {error!("cannot create an offer, check out:{:#?}",e);(None,None)},
(&Some(Invoice), _, Err(e)) => {error!("cannot create an invoice, check out:{:#?}",e);(None,None)},
(_, Err(e), Err(_)) => {error!("Neither an Offer nor an Invoice can be created from this project\n please check out {:#?}", e);(None,None)}
};
//debug!("{:?} -> {:?}",(bill_type, project.is_ready_for_offer(), project.is_ready_for_invoice()), (dyn_bill_type, outfile_tex));
if let (Some(outfile), Some(dyn_bill)) = (outfile_tex, dyn_bill_type) {
let filled = try!(fill_template(project, &dyn_bill, &template_path));
let pdffile = to_local_file(&outfile, convert_ext);
let target = output_folder.join(&pdffile);
// ok, so apparently we can create a tex file, so let's do it
if !force && target.exists() && try!(file_age(&target)) < try!(file_age(&project_file)) {
// no wait, nothing has changed, so let's save ourselves the work
info!("nothing to be done, {} is younger than {}\n use -f if you don't agree", target.display(), project_file.display());
} else {
// \o/ we created a tex file
if dry_run{
warn!("Dry run! This does not produce any output:\n * {}\n * {}", outfile.display(), pdffile.display());
} else {
let outfileb = try!(project.write_to_file(&filled,&dyn_bill,output_ext));
debug!("{} vs\n {}", outfile.display(), outfileb.display());
util::pass_to_command(&convert_tool, &[&outfileb]);
}
// clean up expected trash files
for trash_ext in trash_exts.iter().filter_map(|x|*x){
let trash_file = to_local_file(&outfile, trash_ext);
if trash_file.exists() |
else {
debug!("I expected there to be a {}, but there wasn't any?", trash_file.display())
}
}
if pdffile.exists(){
debug!("now there is be a {:?} -> {:?}", pdffile, target);
try!(fs::rename(&pdffile, &target));
}
}
}
Ok(())
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn projects_to_doc(dir:StorageDir, search_term:&str, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
with_projects(dir, &[search_term], |p| project_to_doc(p, template_name, bill_type, dry_run, force) )
}
fn file_age(path:&Path) -> Result<time::Duration> {
let metadata = try!(fs::metadata(path));
let accessed = try!(metadata.accessed());
Ok(try!(accessed.elapsed()))
}
/// Testing only, tries to run complete spec on all projects.
/// TODO make this not panic :D
/// TODO move this to `spec::all_the_things`
pub fn spec() -> Result<()> {
use project::spec::*;
let luigi = try!(setup_luigi());
//let projects = super::execute(||luigi.open_projects(StorageDir::All));
let projects = try!(luigi.open_projects(StorageDir::Working));
for project in projects{
info!("{}", project.dir().display());
let yaml = project.yaml();
client::validate(&yaml).map_err(|errors|for error in errors{
println!(" error: {}", error);
}).unwrap();
client::full_name(&yaml);
client::first_name(&yaml);
client::title(&yaml);
client::email(&yaml);
hours::caterers_string(&yaml);
invoice::number_long_str(&yaml);
invoice::number_str(&yaml);
offer::number(&yaml);
project.age().map(|a|format!("{} days", a)).unwrap();
project.date().map(|d|d.year().to_string()).unwrap();
project.sum_sold().map(|c|util::currency_to_string(&c)).unwrap();
project::manager(&yaml).map(|s|s.to_owned()).unwrap();
project::name(&yaml).map(|s|s.to_owned()).unwrap();
}
Ok(())
}
pub fn delete_project_confirmation(dir: StorageDir, search_terms:&[&str]) -> Result<()> {
let luigi = try!(setup_luigi());
for project in try!(luigi.search_projects_any(dir, search_terms)) {
try!(project.delete_project_dir_if(
|| util::really(&format!("you want me to delete {:?} [y/N]", project.dir())) && util::really("really? [y/N]")
))
}
Ok(())
}
pub fn archive_projects(search_terms:&[&str], manual_year:Option<i32>, force:bool) -> Result<Vec<PathBuf>>{
trace!("archive_projects matching ({:?},{:?},{:?})", search_terms, manual_year,force);
let luigi = try!(setup_luigi_with_git());
Ok(try!( luigi.archive_projects_if(search_terms, manual_year, || force) ))
}
/// Command UNARCHIVE <YEAR> <NAME>
/// TODO: return a list of files that have to be updated in git
pub fn unarchive_projects(year:i32, search_terms:&[&str]) -> Result<Vec<PathBuf>> {
let luigi = try!(setup_luigi_with_git());
Ok(try!( luigi.unarchive_projects(year, search_terms) ))
}
| {
try!(fs::remove_file(&trash_file));
debug!("just deleted: {}", trash_file.display())
} | conditional_block |
main.rs |
use std::time::{SystemTime, UNIX_EPOCH};
fn f0() {
println!("Hello, Rust");
}
fn f1() {
let a = 12;
println!("a is {}", a);
println!("a is {}, a again is {}", a, a);
println!("a is {0}, a again is {0}", a);
}
fn f2() {
println!("{{}}");
}
fn f3() {
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
}
fn f4() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
}
fn f5() {
let sum = 5 + 10; // addition
let difference = 95.5 - 4.3; // subtraction
let product = 4 * 30; // multiplication
let quotient = 56.7 / 32.2; // division
let remainder = 43 % 5; // remainder
}
fn f6() {
let tup: (i32, f64, u8) = (500, 6.4, 1);
// tup.0 equals 500
// tup.1 equals 6.4
// tup.2 equals 1
let (x, y, z) = tup;
// y equals 6.4
let a = [1, 2, 3, 4, 5];
// a is an integer array of length 5
let b = ["January", "February", "March"];
// b is a string array of length 3
let c: [i32; 5] = [1, 2, 3, 4, 5];
// c is an i32 array of length 5
let d = [3; 5];
// equivalent to let d = [3, 3, 3, 3, 3];
let first = a[0];
let second = a[1];
// array access
let mut a = [1, 2, 3];
a[0] = 4; // valid
}
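// A short companion to the array notes above: indexing with a runtime value
// that is out of range compiles but panics when executed; `get` returns an
// Option instead. Sketch:
fn f6_bounds() {
let a = [1, 2, 3, 4, 5];
let i = 7; // runtime index, out of range
match a.get(i) {
Some(v) => println!("a[{}] = {}", i, v),
None => println!("a[{}] is out of bounds", i),
}
// let bad = a[i]; // would panic at runtime: index out of bounds
}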
fn add(a: i32, b: i32) -> i32 {
return a + b;
}
fn f7() {
println!("{}",add(2,3));
}
fn f8() {
println!("Hello, world!");
another_function2();
}
fn another_function2() {
println!("Hello, runoob!");
}
fn f9() {
another_function(5, 6);
}
fn another_function(x: i32, y: i32) {
println!("x 的值为 : {}", x);
println!("y 的值为 : {}", y);
}
fn f10() {
let x = 5;
let y = {
let x = 3;
x + 1
};
println!("x 的值为 : {}", x);
println!("y 的值为 : {}", y);
}
fn f11() {
fn five() -> i32 {
5
}
println!("five() 的值为: {}", five());
}
fn f12() {
let number = 3;
if number < 5 {
println!("条件为 true");
} else {
println!("条件为 false");
}
}
fn f13() {
let a = 12;
let b;
if a > 0 {
b = 1;
}
else if a < 0 {
b = -1;
}
else {
b = 0;
}
println!("b is {}", b);
}
fn f14() {
let a = 3;
let number = if a > 0 { 1 } else { -1 };
println!("number 为 {}", number);
}
fn f15() {
let mut number = 1;
while number != 4 {
println!("{}", number);
number += 1;
}
println!("EXIT");
}
fn f16() {
let a = [10, 20, 30, 40, 50];
for i in a.iter() {
println!("值为 : {}", i);
}
}
fn f17() {
let a = [10, 20, 30, 40, 50];
for i in 0..5 {
println!("a[{}] = {}", i, a[i]);
}
}
fn f18() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
loop {
let ch = s[i];
if ch == 'O' {
break;
}
println!("\'{}\'", ch);
i += 1;
}
}
fn f19() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
let location = loop {
let ch = s[i];
if ch == 'O' {
break i;
}
i += 1;
};
println!(" \'O\' 的索引为 {}", location);
}
fn f20() {
let s1 = String::from("hello");
let s2 = s1.clone();
println!("s1 = {}, s2 = {}", s1, s2);
}
fn f21() {
let s = String::from("hello");
// s is declared and valid from here
takes_ownership(s);
// the value of s is moved into the function,
// so s counts as moved and is no longer valid from here on
let x = 5;
// x is declared and valid
makes_copy(x);
// the value of x is passed into the function,
// but x is a primitive type and stays valid
// so x can still be used here, while s cannot
} // function ends: x goes out of scope, then s; s was moved, so there is nothing to free
fn takes_ownership(some_string: String) {
// a String parameter some_string comes in and is valid here
println!("{}", some_string);
} // function ends: some_string is dropped and freed here
fn makes_copy(some_integer: i32) {
// an i32 parameter some_integer comes in and is valid here
println!("{}", some_integer);
} // function ends: some_integer is a primitive type, nothing to free
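// Follow-up sketch to the ownership notes above: when the caller still needs
// the String after the call, pass a clone (or, better, borrow with &str).
fn f21_keep_ownership() {
let s = String::from("hello");
takes_ownership(s.clone()); // only the clone is moved; s stays valid
println!("still usable: {}", s);
}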
fn f22() {
let s1 = gives_ownership();
// gives_ownership moves its return value into s1
let s2 = String::from("hello");
// s2 is declared and valid
let s3 = takes_and_gives_back(s2);
// s2 is moved in as the argument; s3 takes ownership of the return value
} // s3 goes out of scope and is freed; s2 was moved; s1 goes out of scope and is freed
fn gives_ownership() -> String {
let some_string = String::from("hello");
// some_string is declared and valid
return some_string;
// some_string is moved out of the function as the return value
}
fn takes_and_gives_back(a_string: String) -> String {
// a_string is declared and valid
a_string // a_string is moved out as the return value
}
fn f23() {
let s1 = String::from("hello");
let s2 = &s1;
println!("s1 is {}, s2 is {}", s1, s2);
}
fn f24() {
let s1 = String::from("hello");
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
}
fn calculate_length(s: &String) -> usize {
s.len()
}
fn f25() {
let s1 = String::from("hello");
let s2 = s1.clone();
let s3 = s1;
println!("{}", s2);
}
fn f26() {
let s1 = String::from("hello");
let mut s2 = &s1;
let s3 = s2;
s2 = s3; // reuse the borrow via s3 (&s3 would be a &&String and not compile)
println!("{}", s2);
}
fn f27() {
let s1 = String::from("run");
let s2 = &s1;
println!("{}", s2);
println!("{}", s2);
}
fn f28() {
let mut s1 = String::from("run");
// s1 is mutable
let s2 = &mut s1;
// s2 is a mutable reference
s2.push_str("oob");
println!("{}", s2);
}
fn f29() {
}
fn f30() {
let s = String::from("broadcast");
let part1 = &s[0..5];
let part2 = &s[5..9];
println!("{}={}+{}", s, part1, part2);
}
fn f31() {
let mut s = String::from("runoob");
let slice = &s[0..3];
println!("slice = {}", slice);
}
fn f32() {
let arr = [1, 3, 5, 7, 9];
let part = &arr[0..3];
for i in part.iter() {
println!("{}", i);
}
}
struct Site {
domain: String,
name: String,
nation: String,
found: u32
}
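// `Site` above is declared but never constructed; a hypothetical instantiation,
// showing the field-init shorthand when a local variable has the same name.
// The values are made up for illustration.
fn make_site() -> Site {
let domain = String::from("https://www.runoob.com");
Site {
domain, // shorthand for `domain: domain`
name: String::from("RUNOOB"),
nation: String::from("China"),
found: 2013, // assumed founding year, illustration only
}
}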
struct Color(u8, u8, u8);
struct Point2(f64, f64);
fn f33() {
struct Color(u8, u8, u8);
struct Point(f64, f64);
let black = Color(0, 0, 0);
let origin = Point(0.0, 0.0);
println!("black = ({}, {}, {})", black.0, black.1, black.2);
println!("origin = ({}, {})", origin.0, origin.1);
}
struct Rectangle2 {
width: u32,
height: u32,
}
fn f34() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1 is {:?}", rect1);
}
struct Rectangle3 {
width: u32,
height: u32,
}
fn f35() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1's area is {}", rect1.area());
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn wider(&self, rect: &Rectangle) -> bool {
self.width > rect.width
}
}
fn f36() {
let rect1 = Rectangle { width: 30, height: 50 };
let rect2 = Rectangle { width: 40, height: 20 } | u32,
}
impl Rectangle {
fn create(width: u32, height: u32) -> Rectangle {
Rectangle { width, height }
}
}
fn f37() {
let rect = Rectangle::create(30, 50);
println!("{:?}", rect);
}
#[derive(Debug)]
enum Book {
Papery, Electronic
}
fn f38() {
let book = Book::Papery;
println!("{:?}", book);
}
fn f39() {
enum Book {
Papery {index: u32},
Electronic {url: String},
}
let book = Book::Papery{index: 1001};
let ebook = Book::Electronic{url: String::from("url...")};
match book {
Book::Papery { index } => {
println!("Papery book {}", index);
},
Book::Electronic { url } => {
println!("E-book {}", url);
}
}
}
fn f40() {
let opt = Option::Some("Hello");
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f41() {
let opt: Option<&str> = Option::None;
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f42() {
let t = Some(64);
match t {
Some(64) => println!("Yes"),
_ => println!("No"),
}
}
fn f43() {
enum Book {
Papery(u32),
Electronic(String)
}
let book = Book::Electronic(String::from("url"));
if let Book::Papery(index) = book {
println!("Papery {}", index);
} else {
println!("Not papery book");
}
}
mod nation {
pub mod government {
pub fn govern() {}
}
mod congress {
pub fn legislate() {}
}
mod court {
fn judicial() {
super::congress::legislate();
}
}
}
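// The same call can be shortened with a `use` import; `self::` keeps the path
// valid regardless of edition path rules, since `nation` is defined in this file.
use self::nation::government::govern;
fn f44_with_use() {
govern();
}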
fn f44() {
nation::government::govern();
}
mod back_of_house {
pub struct Breakfast {
pub toast: String,
seasonal_fruit: String,
}
impl Breakfast {
pub fn summer(toast: &str) -> Breakfast {
Breakfast {
toast: String::from(toast),
seasonal_fruit: String::from("peaches"),
}
}
}
}
pub fn eat_at_restaurant() {
let mut meal = back_of_house::Breakfast::summer("Rye");
meal.toast = String::from("Wheat");
println!("I'd like {} toast please", meal.toast);
}
fn f45() {
eat_at_restaurant()
}
mod SomeModule {
pub enum Person {
King {
name: String
},
Queen
}
}
fn f46() {
let person = SomeModule::Person::King{
name: String::from("Blue")
};
match person {
SomeModule::Person::King {name} => {
println!("{}", name);
}
_ => {}
}
}
fn max2(array: &[i32]) -> i32 {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i] > array[max_index] {
max_index = i;
}
i += 1;
}
array[max_index]
}
fn f47() {
let a = [2, 4, 6, 3, 1];
println!("max = {}", max(&a));
}
struct Point<T> {
x: T,
y: T,
}
impl<T> Point<T> {
fn x(&self) -> &T {
&self.x
}
}
fn f48() {
let p = Point { x: 1, y: 2 };
println!("p.x = {}", p.x());
}
trait Descriptive {
fn describe(&self) -> String {
String::from("[Object]")
}
}
struct Person {
name: String,
age: u8
}
impl Descriptive for Person {
fn describe(&self) -> String {
format!("{} {}", self.name, self.age)
}
}
fn f49() {
let cali = Person {
name: String::from("Cali"),
age: 24
};
println!("{}", cali.describe());
}
trait Comparable {
fn compare(&self, object: &Self) -> i8;
}
fn max<T: Comparable>(array: &[T]) -> &T {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i].compare(&array[max_index]) > 0 {
max_index = i;
}
i += 1;
}
&array[max_index]
}
impl Comparable for f64 {
fn compare(&self, object: &f64) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
impl Comparable for i8 {
fn compare(&self, object: &i8) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
fn f50() {
let arr = [1.0, 3.0, 5.0, 4.0, 2.0];
println!("maximum of arr is {}", max(&arr));
}
fn f51() {
let mut vector = vec![1, 2, 4, 8];
vector.push(16);
vector.push(32);
vector.push(64);
println!("{:?}", vector);
}
fn f52() {
let mut v1: Vec<i32> = vec![1, 2, 4, 8];
let mut v2: Vec<i32> = vec![16, 32, 64];
v1.append(&mut v2);
println!("{:?}", v1);
}
fn f53() {
let mut v = vec![1, 2, 4, 8];
println!("{}", match v.get(0) {
Some(value) => value.to_string(),
None => "None".to_string()
});
}
fn f54() {
let v = vec![1, 2, 4, 8];
println!("{}", v[1]);
}
fn f55() {
let v = vec![100, 32, 57];
for i in &v {
println!("{}", i);
}
}
fn f56() {
let mut v = vec![100, 32, 57];
for i in &mut v {
*i += 50;
}
}
fn f57() {
let s = String::from("hello中文");
for c in s.chars() {
println!("{}", c);
}
}
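// Companion note to f57: `len()` counts bytes, not characters. Each Chinese
// character below takes 3 bytes in UTF-8, so the two counts differ.
fn f57_lengths() {
let s = String::from("hello中文");
println!("bytes: {}", s.len()); // 11
println!("chars: {}", s.chars().count()); // 7
}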
fn f58() {
let s = String::from("EN中文");
let a = s.chars().nth(2);
println!("{:?}", a);
}
fn f59() {
let s = String::from("EN中文");
let sub = &s[0..2];
println!("{}", sub);
}
fn f60() {
let s = String::from("ENEEEEEE");
let sub = &s[0..3];
println!("{}", sub);
}
pub struct ClassName {
field: i32,
}
impl ClassName {
pub fn new(value: i32) -> ClassName {
ClassName {
field: value
}
}
pub fn public_method(&self) {
println!("from public method");
self.private_method();
}
fn private_method(&self) {
println!("from private method");
}
}
fn f61() {
let object = ClassName::new(1024);
object.public_method();
}
fn timestamp1() -> i64 {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
let ms = since_the_epoch.as_secs() as i64 * 1000i64 + (since_the_epoch.subsec_nanos() as f64 / 1_000_000.0) as i64;
ms
}
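// Hedged simplification of timestamp1: `Duration::as_millis` (Rust 1.33+)
// folds the seconds/nanoseconds arithmetic into a single call.
fn timestamp_millis() -> u128 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_millis()
}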
fn main() {
let ts1 = timestamp1();
let mut i = 0;
while i < 10000 {
i = i+1;
f0();
f1();
f2();
f3();
f4();
f5();
f6();
f7();
f8();
f9();
f10();
f11();
f12();
f13();
f14();
f15();
f16();
f17();
f18();
f19();
f20();
f21();
f22();
f23();
f24();
f25();
f26();
f27();
f28();
f29();
f30();
f31();
f32();
f33();
f34();
f35();
f36();
f37();
f38();
f39();
f40();
f41();
f42();
f43();
f44();
f45();
f46();
f47();
f48();
f49();
f50();
f51();
f52();
f53();
f54();
f55();
f56();
f57();
f58();
f59();
f60();
f61();
}
let ts2 = timestamp1();
println!("TimeStamp2: {}", ts2);
println!("{:?}", ts2 - ts1);
}
| ;
println!("{}", rect1.wider(&rect2));
}
struct Rectangle4 {
width: u32,
height: | identifier_body |
main.rs |
use std::time::{SystemTime, UNIX_EPOCH};
fn f0() {
println!("Hello, Rust");
}
fn f1() {
let a = 12;
println!("a is {}", a);
println!("a is {}, a again is {}", a, a);
println!("a is {0}, a again is {0}", a);
}
fn f2() {
println!("{{}}");
}
fn f3() {
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
}
fn f4() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
}
fn f5() {
let sum = 5 + 10; // addition
let difference = 95.5 - 4.3; // subtraction
let product = 4 * 30; // multiplication
let quotient = 56.7 / 32.2; // division
let remainder = 43 % 5; // remainder
}
fn f6() {
let tup: (i32, f64, u8) = (500, 6.4, 1);
// tup.0 equals 500
// tup.1 equals 6.4
// tup.2 equals 1
let (x, y, z) = tup;
// y equals 6.4
let a = [1, 2, 3, 4, 5];
// a is an integer array of length 5
let b = ["January", "February", "March"];
// b is a string array of length 3
let c: [i32; 5] = [1, 2, 3, 4, 5];
// c is an i32 array of length 5
let d = [3; 5];
// equivalent to let d = [3, 3, 3, 3, 3];
let first = a[0];
let second = a[1];
// array access
let mut a = [1, 2, 3];
a[0] = 4; // valid
}
fn add(a: i32, b: i32) -> i32 {
return a + b;
}
fn f7() {
println!("{}",add(2,3));
}
fn f8() {
println!("Hello, world!");
another_function2();
}
fn another_function2() {
println!("Hello, runoob!");
}
fn f9() {
another_function(5, 6);
}
fn another_function(x: i32, y: i32) {
println!("x 的值为 : {}", x);
println!("y 的值为 : {}", y);
}
fn f10() {
let x = 5;
let y = {
let x = 3;
x + 1
};
println!("x 的值为 : {}", x);
println!("y 的值为 : {}", y);
}
fn f11() {
fn five() -> i32 {
5
}
println!("five() 的值为: {}", five());
}
fn f12() {
let number = 3;
if number < 5 {
println!("条件为 true");
} else {
println!("条件为 false");
}
}
fn f13() {
let a = 12;
let b;
if a > 0 {
| b = -1;
}
else {
b = 0;
}
println!("b is {}", b);
}
fn f14() {
let a = 3;
let number = if a > 0 { 1 } else { -1 };
println!("number 为 {}", number);
}
fn f15() {
let mut number = 1;
while number != 4 {
println!("{}", number);
number += 1;
}
println!("EXIT");
}
fn f16() {
let a = [10, 20, 30, 40, 50];
for i in a.iter() {
println!("值为 : {}", i);
}
}
fn f17() {
let a = [10, 20, 30, 40, 50];
for i in 0..5 {
println!("a[{}] = {}", i, a[i]);
}
}
fn f18() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
loop {
let ch = s[i];
if ch == 'O' {
break;
}
println!("\'{}\'", ch);
i += 1;
}
}
fn f19() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
let location = loop {
let ch = s[i];
if ch == 'O' {
break i;
}
i += 1;
};
println!(" \'O\' 的索引为 {}", location);
}
fn f20() {
let s1 = String::from("hello");
let s2 = s1.clone();
println!("s1 = {}, s2 = {}", s1, s2);
}
fn f21() {
let s = String::from("hello");
// s is declared and valid
takes_ownership(s);
// the value of s is passed into the function as an argument,
// so s can be treated as moved and is no longer valid from here on
let x = 5;
// x is declared and valid
makes_copy(x);
// the value of x is passed into the function as an argument,
// but x is a primitive type and remains valid;
// x can still be used here, while s cannot
} // function ends: x goes out of scope, then s; but s was already moved, so it needs no freeing
fn takes_ownership(some_string: String) {
// a String argument some_string comes into scope and is valid
println!("{}", some_string);
} // function ends; the argument some_string is freed here
fn makes_copy(some_integer: i32) {
// an i32 argument some_integer comes into scope and is valid
println!("{}", some_integer);
} // function ends; some_integer is a primitive type, so nothing needs freeing
fn f22() {
let s1 = gives_ownership();
// gives_ownership moves its return value into s1
let s2 = String::from("hello");
// s2 is declared and valid
let s3 = takes_and_gives_back(s2);
// s2 is moved in as an argument; s3 takes ownership of the return value
} // s3 goes out of scope and is freed; s2 was moved; s1 goes out of scope and is freed.
fn gives_ownership() -> String {
let some_string = String::from("hello");
// some_string is declared and valid
return some_string;
// some_string is moved out of the function as the return value
}
fn takes_and_gives_back(a_string: String) -> String {
// a_string is declared and valid
a_string // a_string is moved out as the return value
}
fn f23() {
let s1 = String::from("hello");
let s2 = &s1;
println!("s1 is {}, s2 is {}", s1, s2);
}
fn f24() {
let s1 = String::from("hello");
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
}
fn calculate_length(s: &String) -> usize {
s.len()
}
fn f25() {
let s1 = String::from("hello");
let s2 = s1.clone();
let s3 = s1;
println!("{}", s2);
}
fn f26() {
let s1 = String::from("hello");
let mut s2 = &s1;
let s3 = s2;
s2 = &s3; // borrow from s3 again
println!("{}", s2);
}
fn f27() {
let s1 = String::from("run");
let s2 = &s1;
println!("{}", s2);
println!("{}", s2);
}
fn f28() {
let mut s1 = String::from("run");
// s1 is mutable
let s2 = &mut s1;
// s2 is a mutable reference
s2.push_str("oob");
println!("{}", s2);
}
fn f29() {
}
fn f30() {
let s = String::from("broadcast");
let part1 = &s[0..5];
let part2 = &s[5..9];
println!("{}={}+{}", s, part1, part2);
}
fn f31() {
let mut s = String::from("runoob");
let slice = &s[0..3];
println!("slice = {}", slice);
}
fn f32() {
let arr = [1, 3, 5, 7, 9];
let part = &arr[0..3];
for i in part.iter() {
println!("{}", i);
}
}
struct Site {
domain: String,
name: String,
nation: String,
found: u32
}
struct Color(u8, u8, u8);
struct Point2(f64, f64);
fn f33() {
struct Color(u8, u8, u8);
struct Point(f64, f64);
let black = Color(0, 0, 0);
let origin = Point(0.0, 0.0);
println!("black = ({}, {}, {})", black.0, black.1, black.2);
println!("origin = ({}, {})", origin.0, origin.1);
}
struct Rectangle2 {
width: u32,
height: u32,
}
fn f34() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1 is {:?}", rect1);
}
struct Rectangle3 {
width: u32,
height: u32,
}
fn f35() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1's area is {}", rect1.area());
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn wider(&self, rect: &Rectangle) -> bool {
self.width > rect.width
}
}
fn f36() {
let rect1 = Rectangle { width: 30, height: 50 };
let rect2 = Rectangle { width: 40, height: 20 };
println!("{}", rect1.wider(&rect2));
}
struct Rectangle4 {
width: u32,
height: u32,
}
impl Rectangle {
fn create(width: u32, height: u32) -> Rectangle {
Rectangle { width, height }
}
}
fn f37() {
let rect = Rectangle::create(30, 50);
println!("{:?}", rect);
}
#[derive(Debug)]
enum Book {
Papery, Electronic
}
fn f38() {
let book = Book::Papery;
println!("{:?}", book);
}
fn f39() {
enum Book {
Papery {index: u32},
Electronic {url: String},
}
let book = Book::Papery{index: 1001};
let ebook = Book::Electronic{url: String::from("url...")};
match book {
Book::Papery { index } => {
println!("Papery book {}", index);
},
Book::Electronic { url } => {
println!("E-book {}", url);
}
}
}
fn f40() {
let opt = Option::Some("Hello");
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f41() {
let opt: Option<&str> = Option::None;
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f42() {
let t = Some(64);
match t {
Some(64) => println!("Yes"),
_ => println!("No"),
}
}
fn f43() {
enum Book {
Papery(u32),
Electronic(String)
}
let book = Book::Electronic(String::from("url"));
if let Book::Papery(index) = book {
println!("Papery {}", index);
} else {
println!("Not papery book");
}
}
mod nation {
pub mod government {
pub fn govern() {}
}
mod congress {
pub fn legislate() {}
}
mod court {
fn judicial() {
super::congress::legislate();
}
}
}
fn f44() {
nation::government::govern();
}
mod back_of_house {
pub struct Breakfast {
pub toast: String,
seasonal_fruit: String,
}
impl Breakfast {
pub fn summer(toast: &str) -> Breakfast {
Breakfast {
toast: String::from(toast),
seasonal_fruit: String::from("peaches"),
}
}
}
}
pub fn eat_at_restaurant() {
let mut meal = back_of_house::Breakfast::summer("Rye");
meal.toast = String::from("Wheat");
println!("I'd like {} toast please", meal.toast);
}
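// Added note (sketch, not in the original): Breakfast::summer is needed because
// seasonal_fruit is a private field. Outside back_of_house a struct literal
// cannot provide it, so the following would fail to compile:
// let meal = back_of_house::Breakfast {
// toast: String::from("Rye"),
// seasonal_fruit: String::from("peaches"), // error: field `seasonal_fruit` is private
// };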
fn f45() {
eat_at_restaurant()
}
mod SomeModule {
pub enum Person {
King {
name: String
},
Quene
}
}
fn f46() {
let person = SomeModule::Person::King{
name: String::from("Blue")
};
match person {
SomeModule::Person::King {name} => {
println!("{}", name);
}
_ => {}
}
}
fn max2(array: &[i32]) -> i32 {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i] > array[max_index] {
max_index = i;
}
i += 1;
}
array[max_index]
}
fn f47() {
let a = [2, 4, 6, 3, 1];
println!("max = {}", max(&a));
}
struct Point<T> {
x: T,
y: T,
}
impl<T> Point<T> {
fn x(&self) -> &T {
&self.x
}
}
fn f48() {
let p = Point { x: 1, y: 2 };
println!("p.x = {}", p.x());
}
trait Descriptive {
fn describe(&self) -> String {
String::from("[Object]")
}
}
struct Person {
name: String,
age: u8
}
impl Descriptive for Person {
fn describe(&self) -> String {
format!("{} {}", self.name, self.age)
}
}
fn f49() {
let cali = Person {
name: String::from("Cali"),
age: 24
};
println!("{}", cali.describe());
}
trait Comparable {
fn compare(&self, object: &Self) -> i8;
}
fn max<T: Comparable>(array: &[T]) -> &T {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i].compare(&array[max_index]) > 0 {
max_index = i;
}
i += 1;
}
&array[max_index]
}
impl Comparable for f64 {
fn compare(&self, object: &f64) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
impl Comparable for i8 {
fn compare(&self, object: &i8) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
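// Added sketch (not in the original): the f64 and i8 impls above are identical
// except for the type. Assuming a PartialOrd bound is acceptable, one blanket
// impl could replace both; it is kept commented out here because it would
// conflict with the concrete impls above under Rust's coherence rules:
// impl<T: PartialOrd> Comparable for T {
// fn compare(&self, object: &T) -> i8 {
// if self > object { 1 } else if self == object { 0 } else { -1 }
// }
// }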
fn f50() {
let arr = [1.0, 3.0, 5.0, 4.0, 2.0];
println!("maximum of arr is {}", max(&arr));
}
fn f51() {
let mut vector = vec![1, 2, 4, 8];
vector.push(16);
vector.push(32);
vector.push(64);
println!("{:?}", vector);
}
fn f52() {
let mut v1: Vec<i32> = vec![1, 2, 4, 8];
let mut v2: Vec<i32> = vec![16, 32, 64];
v1.append(&mut v2);
println!("{:?}", v1);
}
fn f53() {
let mut v = vec![1, 2, 4, 8];
println!("{}", match v.get(0) {
Some(value) => value.to_string(),
None => "None".to_string()
});
}
fn f54() {
let v = vec![1, 2, 4, 8];
println!("{}", v[1]);
}
fn f55() {
let v = vec![100, 32, 57];
for i in &v {
println!("{}", i);
}
}
fn f56() {
let mut v = vec![100, 32, 57];
for i in &mut v {
*i += 50;
}
}
fn f57() {
let s = String::from("hello中文");
for c in s.chars() {
println!("{}", c);
}
}
fn f58() {
let s = String::from("EN中文");
let a = s.chars().nth(2);
println!("{:?}", a);
}
fn f59() {
let s = String::from("EN中文");
let sub = &s[0..2];
println!("{}", sub);
}
fn f60() {
let s = String::from("ENEEEEEE");
let sub = &s[0..3];
println!("{}", sub);
}
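// Added note with a small sketch (not in the original): byte indices into a
// String must fall on UTF-8 character boundaries. In f59, &s[0..2] works because
// 'E' and 'N' are one byte each, but &s[0..3] on "EN中文" would panic at runtime,
// since index 3 falls inside the three-byte character '中'. str::get returns an
// Option instead of panicking; the helper name safe_prefix is hypothetical.
fn safe_prefix(s: &str, end: usize) -> Option<&str> {
// Returns None when `end` is out of bounds or not a char boundary.
s.get(..end)
}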
pub struct ClassName {
field: i32,
}
impl ClassName {
pub fn new(value: i32) -> ClassName {
ClassName {
field: value
}
}
pub fn public_method(&self) {
println!("from public method");
self.private_method();
}
fn private_method(&self) {
println!("from private method");
}
}
fn f61() {
let object = ClassName::new(1024);
object.public_method();
}
fn timestamp1() -> i64 {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
let ms = since_the_epoch.as_secs() as i64 * 1000i64 + (since_the_epoch.subsec_nanos() as f64 / 1_000_000.0) as i64;
ms
}
fn main() {
let ts1 = timestamp1();
let mut i = 0;
while i < 10000 {
i = i+1;
f0();
f1();
f2();
f3();
f4();
f5();
f6();
f7();
f8();
f9();
f10();
f11();
f12();
f13();
f14();
f15();
f16();
f17();
f18();
f19();
f20();
f21();
f22();
f23();
f24();
f25();
f26();
f27();
f28();
f29();
f30();
f31();
f32();
f33();
f34();
f35();
f36();
f37();
f38();
f39();
f40();
f41();
f42();
f43();
f44();
f45();
f46();
f47();
f48();
f49();
f50();
f51();
f52();
f53();
f54();
f55();
f56();
f57();
f58();
f59();
f60();
f61();
}
let ts2 = timestamp1();
println!("TimeStamp2: {}", ts2);
println!("{:?}", ts2 - ts1);
}
| b = 1;
}
else if a < 0 {
| conditional_block |
main.rs | use std::time::{SystemTime, UNIX_EPOCH};
fn f0() {
println!("Hello, Rust");
}
fn f1() {
let a = 12;
println!("a is {}", a);
println!("a is {}, a again is {}", a, a);
println!("a is {0}, a again is {0}", a);
}
fn f2() {
println!("{{}}");
}
fn f3() {
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
}
fn f4() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
}
fn f5() {
let sum = 5 + 10; // addition
let difference = 95.5 - 4.3; // subtraction
let product = 4 * 30; // multiplication
let quotient = 56.7 / 32.2; // division
let remainder = 43 % 5; // remainder
}
fn f6() {
let tup: (i32, f64, u8) = (500, 6.4, 1);
// tup.0 equals 500
// tup.1 equals 6.4
// tup.2 equals 1
let (x, y, z) = tup;
// y equals 6.4
let a = [1, 2, 3, 4, 5];
// a is an integer array of length 5
let b = ["January", "February", "March"];
// b is a string array of length 3
let c: [i32; 5] = [1, 2, 3, 4, 5];
// c is an i32 array of length 5
let d = [3; 5];
// equivalent to let d = [3, 3, 3, 3, 3];
let first = a[0];
let second = a[1];
// array indexing
let mut a = [1, 2, 3];
a[0] = 4; // OK
}
fn add(a: i32, b: i32) -> i32 {
return a + b;
}
fn f7() {
println!("{}",add(2,3));
}
fn f8() {
println!("Hello, world!");
another_function2();
}
fn another_function2() {
println!("Hello, runoob!");
}
fn f9() {
another_function(5, 6);
}
fn another_function(x: i32, y: i32) {
println!("x 的值为 : {}", x);
println!("y 的值为 : {}", y);
}
fn f10() {
let x = 5;
let y = {
let x = 3;
x + 1
};
println!("x 的值为 : {}", x);
println!("y 的值为 : {}", y);
}
fn f11() {
fn five() -> i32 {
5
}
println!("five() 的值为: {}", five());
}
fn f12() {
let number = 3;
if number < 5 {
println!("条件为 true");
} else {
println!("条件为 false");
}
}
fn f13() {
let a = 12;
let b;
if a > 0 {
b = 1;
}
else if a < 0 {
b = -1;
}
else {
b = 0;
}
println!("b is {}", b);
}
fn f14() {
let a = 3;
let number = if a > 0 { 1 } else { -1 };
println!("number 为 {}", number);
}
fn f15() {
let mut number = 1;
while number != 4 {
println!("{}", number);
number += 1;
}
println!("EXIT");
}
fn f16() {
let a = [10, 20, 30, 40, 50];
for i in a.iter() {
println!("值为 : {}", i);
}
}
fn f17() {
let a = [10, 20, 30, 40, 50];
for i in 0..5 {
println!("a[{}] = {}", i, a[i]);
}
}
fn f18() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
loop {
let ch = s[i];
if ch == 'O' {
break;
}
println!("\'{}\'", ch);
i += 1;
}
}
fn f19() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
let location = loop {
let ch = s[i];
if ch == 'O' {
break i;
}
i += 1;
};
println!(" \'O\' 的索引为 {}", location);
}
fn f20() {
let s1 = String::from("hello");
let s2 = s1.clone();
println!("s1 = {}, s2 = {}", s1, s2);
}
fn f21() {
let s = String::from("hello");
// s is declared and valid
takes_ownership(s);
// the value of s is passed into the function as an argument,
// so s can be treated as moved and is no longer valid from here on
let x = 5;
// x is declared and valid
makes_copy(x);
// the value of x is passed into the function as an argument,
// but x is a primitive type and remains valid;
// x can still be used here, while s cannot
} // function ends: x goes out of scope, then s; but s was already moved, so it needs no freeing
fn takes_ownership(some_string: String) {
// a String argument some_string comes into scope and is valid
println!("{}", some_string);
} // function ends; the argument some_string is freed here
fn makes_copy(some_integer: i32) {
// an i32 argument some_integer comes into scope and is valid
println!("{}", some_integer);
} // function ends; some_integer is a primitive type, so nothing needs freeing
fn f22() {
let s1 = gives_ownership();
// gives_ownership moves its return value into s1
let s2 = String::from("hello");
// s2 is declared and valid
let s3 = takes_and_gives_back(s2);
// s2 is moved in as an argument; s3 takes ownership of the return value
} // s3 goes out of scope and is freed; s2 was moved; s1 goes out of scope and is freed.
fn gives_ownership() -> String {
let some_string = String::from("hello");
// some_string is declared and valid
return some_string;
// some_string is moved out of the function as the return value
}
fn takes_and_gives_back(a_string: String) -> String {
// a_string is declared and valid
a_string // a_string is moved out as the return value
}
fn f23() {
let s1 = String::from("hello");
let s2 = &s1;
println!("s1 is {}, s2 is {}", s1, s2);
}
fn f24() {
let s1 = String::from("hello");
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
}
fn calculate_length(s: &String) -> usize {
s.len()
}
fn f25() {
let s1 = String::from("hello");
let s2 = s1.clone();
let s3 = s1;
println!("{}", s2);
}
fn f26() {
let s1 = String::from("hello");
let mut s2 = &s1;
let s3 = s2;
s2 = &s3; // borrow from s3 again
println!("{}", s2);
}
fn f27() {
let s1 = String::from("run");
let s2 = &s1;
println!("{}", s2);
println!("{}", s2);
}
fn f28() {
let mut s1 = String::from("run");
// s1 is mutable
let s2 = &mut s1;
// s2 is a mutable reference
s2.push_str("oob");
println!("{}", s2);
}
fn f29() {
}
fn f30() {
let s = String::from("broadcast");
let part1 = &s[0..5];
let part2 = &s[5..9];
println!("{}={}+{}", s, part1, part2);
}
fn f31() {
let mut s = String::from("runoob");
let slice = &s[0..3];
println!("slice = {}", slice);
}
fn f32() {
let arr = [1, 3, 5, 7, 9];
let part = &arr[0..3];
for i in part.iter() {
println!("{}", i);
}
}
struct Site {
domain: String,
name: String,
nation: String,
found: u32
}
struct Color(u8, u8, u8);
struct Point2(f64, f64);
fn f33() {
struct Color(u8, u8, u8);
struct Point(f64, f64);
let black = Color(0, 0, 0);
let origin = Point(0.0, 0.0);
println!("black = ({}, {}, {})", black.0, black.1, black.2);
println!("origin = ({}, {})", origin.0, origin.1);
}
struct Rectangle2 {
width: u32,
height: u32,
}
fn f34() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1 is {:?}", rect1);
}
struct Rectangle3 {
width: u32,
height: u32,
}
fn f35() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1's area is {}", rect1.area());
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn wider(&self, rect: &Rectangle) -> bool {
self.width > rect.width
}
}
fn f36() {
let rect1 = Rectangle { width: 30, height: 50 };
let rect2 = Rectangle { width: 40, height: 20 };
println!("{}", rect1.wider(&rect2));
}
struct Rectangle4 {
width: u32,
height: u32,
}
impl Rectangle {
fn create(width: u32, height: u32) -> Rectangle {
Rectangle { width, height }
}
}
fn f37() {
let rect = Rectangle::create(30, 50);
println!("{:?}", rect);
}
#[derive(Debug)]
enum Book {
Papery, Electronic
}
fn f38() {
let book = Book::Papery;
println!("{:?}", book);
}
fn f39() {
enum Book {
Papery {index: u32},
Electronic {url: String},
}
let book = Book::Papery{index: 1001};
let ebook = Book::Electronic{url: String::from("url...")};
match book {
Book::Papery { index } => {
println!("Papery book {}", index);
},
Book::Electronic { url } => {
println!("E-book {}", url);
}
}
}
fn f40() {
let opt = Option::Some("Hello");
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f41() {
let opt: Option<&str> = Option::None;
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f42() {
let t = Some(64);
match t {
Some(64) => println!("Yes"),
_ => println!("No"),
}
}
fn f43() {
enum Book {
Papery(u32),
Electronic(String)
}
let book = Book::Electronic(String::from("url"));
if let Book::Papery(index) = book {
println!("Papery {}", index);
} else {
println!("Not papery book");
}
}
mod nation {
pub mod government {
pub fn govern() {}
}
mod congress {
pub fn legislate() {}
}
mod court {
fn judicial() {
super::congress::legislate();
}
}
}
fn f44() {
nation::government::govern();
}
mod back_of_house {
pub struct Breakfast {
pub toast: String,
seasonal_fruit: String,
}
impl Breakfast {
pub fn summer(toast: &str) -> Breakfast {
Breakfast {
toast: String::from(toast),
seasonal_fruit: String::from("peaches"),
}
}
}
}
pub fn eat_at_restaurant() {
let mut meal = back_of_house::Breakfast::summer("Rye");
meal.toast = String::from("Wheat");
println!("I'd like {} toast please", meal.toast);
}
fn f45() {
eat_at_restaurant()
}
mod SomeModule {
pub enum Person {
King {
name: String
},
Quene
}
}
fn f46() {
let person = SomeModule::Person::King{
name: String::from("Blue")
};
match person {
SomeModule::Person::King {name} => {
println!("{}", name);
}
_ => {}
}
}
fn max2(array: &[i32]) -> i32 {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i] > array[max_index] {
max_index = i;
}
i += 1;
}
array[max_index]
}
fn f47() {
let a = [2, 4, 6, 3, 1];
println!("max = {}", max(&a));
}
struct Point<T> {
x: T,
y: T,
}
impl<T> Point<T> {
fn x(&self) -> &T {
&self.x
}
}
fn f48() {
let p = Point { x: 1, y: 2 };
println!("p.x = {}", p.x());
}
trait Descriptive {
fn describe(&self) -> String {
String::from("[Object]")
}
}
struct Person {
name: String,
age: u8
}
impl Descriptive for Person {
fn describe(&self) -> String {
format!("{} {}", self.name, self.age)
}
}
fn f49() {
let cali = Person {
name: String::from("Cali"),
age: 24
};
println!("{}", cali.describe());
}
trait Comparable {
fn compare(&self, object: &Self) -> i8;
}
fn max<T: Comparable>(array: &[T]) -> &T {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i].compare(&array[max_index]) > 0 {
max_index = i;
}
i += 1;
}
&array[max_index]
}
impl Comparable for f64 {
fn compare(&self, object: &f64) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
impl Comparable for i8 {
fn compare(&self, object: &i8) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
fn f50() {
let arr = [1.0, 3.0, 5.0, 4.0, 2.0];
println!("maximum of arr is {}", max(&arr));
}
fn f51() {
let mut vector = vec![1, 2, 4, 8];
vector.push(16);
vector.push(32);
vector.push(64);
println!("{:?}", vector);
}
fn f52() {
let mut v1: Vec<i32> = vec![1, 2, 4, 8];
let mut v2: Vec<i32> = vec![16, 32, 64];
v1.append(&mut v2);
println!("{:?}", v1);
}
fn f53() {
let mut v = vec![1, 2, 4, 8];
println!("{}", match v.get(0) {
Some(value) => value.to_string(),
None => "None".to_string()
});
}
fn f54() {
let v = vec![1, 2, 4, 8];
println!("{}", v[1]);
}
fn f55() {
let v = vec![100, 32, 57];
for i in &v {
println!("{}", i);
}
}
fn f56() {
let mut v = vec![100, 32, 57];
for i in &mut v {
*i += 50;
}
}
fn f57() {
let s = String::from("hello中文");
for c in s.chars() {
println!("{}", c);
}
}
fn f58() {
let s = String::from("EN中文");
let a = s.chars().nth(2);
println!("{:?}", a); | let sub = &s[0..2];
println!("{}", sub);
}
fn f60() {
let s = String::from("ENEEEEEE");
let sub = &s[0..3];
println!("{}", sub);
}
pub struct ClassName {
field: i32,
}
impl ClassName {
pub fn new(value: i32) -> ClassName {
ClassName {
field: value
}
}
pub fn public_method(&self) {
println!("from public method");
self.private_method();
}
fn private_method(&self) {
println!("from private method");
}
}
fn f61() {
let object = ClassName::new(1024);
object.public_method();
}
fn timestamp1() -> i64 {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
let ms = since_the_epoch.as_secs() as i64 * 1000i64 + (since_the_epoch.subsec_nanos() as f64 / 1_000_000.0) as i64;
ms
}
fn main() {
let ts1 = timestamp1();
let mut i = 0;
while i < 10000 {
i = i+1;
f0();
f1();
f2();
f3();
f4();
f5();
f6();
f7();
f8();
f9();
f10();
f11();
f12();
f13();
f14();
f15();
f16();
f17();
f18();
f19();
f20();
f21();
f22();
f23();
f24();
f25();
f26();
f27();
f28();
f29();
f30();
f31();
f32();
f33();
f34();
f35();
f36();
f37();
f38();
f39();
f40();
f41();
f42();
f43();
f44();
f45();
f46();
f47();
f48();
f49();
f50();
f51();
f52();
f53();
f54();
f55();
f56();
f57();
f58();
f59();
f60();
f61();
}
let ts2 = timestamp1();
println!("TimeStamp2: {}", ts2);
println!("{:?}", ts2 - ts1);
} | }
fn f59() {
let s = String::from("EN中文"); | random_line_split |
main.rs |
use std::time::{SystemTime, UNIX_EPOCH};
fn f0() {
println!("Hello, Rust");
}
fn f1() {
let a = 12;
println!("a is {}", a);
println!("a is {}, a again is {}", a, a);
println!("a is {0}, a again is {0}", a);
}
fn f2() {
println!("{{}}");
}
fn f3() {
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
}
fn f4() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
}
fn f5() {
let sum = 5 + 10; // addition
let difference = 95.5 - 4.3; // subtraction
let product = 4 * 30; // multiplication
let quotient = 56.7 / 32.2; // division
let remainder = 43 % 5; // remainder
}
fn f6() {
let tup: (i32, f64, u8) = (500, 6.4, 1);
// tup.0 equals 500
// tup.1 equals 6.4
// tup.2 equals 1
let (x, y, z) = tup;
// y equals 6.4
let a = [1, 2, 3, 4, 5];
// a is an integer array of length 5
let b = ["January", "February", "March"];
// b is a string array of length 3
let c: [i32; 5] = [1, 2, 3, 4, 5];
// c is an i32 array of length 5
let d = [3; 5];
// equivalent to let d = [3, 3, 3, 3, 3];
let first = a[0];
let second = a[1];
// array indexing
let mut a = [1, 2, 3];
a[0] = 4; // OK
}
fn add(a: i32, b: i32) -> i32 {
return a + b;
}
fn f7() {
println!("{}",add(2,3));
}
fn f8() {
println!("Hello, world!");
another_function2();
}
fn another_function2() {
println!("Hello, runoob!");
}
fn f9() {
another_function(5, 6);
}
fn another_function(x: i32, y: i32) {
println!("x 的值为 : {}", x);
println!("y 的值为 : {}", y);
}
fn f10() {
let x = 5;
let y = {
let x = 3;
x + 1
};
println!("x 的值为 : {}", x);
println!("y 的值为 : {}", y);
}
fn f11() {
fn five() -> i32 {
5
}
println!("five() 的值为: {}", five());
}
fn f12() {
let number = 3;
if number < 5 {
println!("条件为 true");
} else {
println!("条件为 false");
}
}
fn f13() {
let a = 12;
let b;
if a > 0 {
b = 1;
}
else if a < 0 {
b = -1;
}
else {
b = 0;
}
println!("b is {}", b);
}
fn f14() {
let a = 3;
let number = if a > 0 { 1 } else { -1 };
println!("number 为 {}", number);
}
fn f15() {
let mut number = 1;
while number != 4 {
println!("{}", number);
number += 1;
}
println!("EXIT");
}
fn f16() {
let a = [10, 20, 30, 40, 50];
for i in a.iter() {
println!("值为 : {}", i);
}
}
fn f17() {
let a = [10, 20, 30, 40, 50];
for i in 0..5 {
println!("a[{}] = {}", i, a[i]);
}
}
fn f18() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
loop {
let ch = s[i];
if ch == 'O' {
break;
}
println!("\'{}\'", ch);
i += 1;
}
}
fn f19() {
let s = ['R', 'U', 'N', 'O', 'O', 'B'];
let mut i = 0;
let location = loop {
let ch = s[i];
if ch == 'O' {
break i;
}
i += 1;
};
println!(" \'O\' 的索引为 {}", location);
}
fn f20() {
let s1 = String::from("hello");
let s2 = s1.clone();
println!("s1 = {}, s2 = {}", s1, s2);
}
fn f21() {
let s = String::from("hello");
// s is declared and valid
takes_ownership(s);
// the value of s is passed into the function as an argument,
// so s can be treated as moved and is no longer valid from here on
let x = 5;
// x is declared and valid
makes_copy(x);
// the value of x is passed into the function as an argument,
// but x is a primitive type and remains valid;
// x can still be used here, while s cannot
} // function ends: x goes out of scope, then s; but s was already moved, so it needs no freeing
fn takes_ownership(some_string: String) {
// a String argument some_string comes into scope and is valid
println!("{}", some_string);
} // function ends; the argument some_string is freed here
fn makes_copy(some_integer: i32) {
// an i32 argument some_integer comes into scope and is valid
println!("{}", some_integer);
} // function ends; some_integer is a primitive type, so nothing needs freeing
fn f22() {
let s1 = gives_ownership();
// gives_ownership moves its return value into s1
let s2 = String::from("hello");
// s2 is declared and valid
let s3 = takes_and_gives_back(s2);
// s2 is moved in as an argument; s3 takes ownership of the return value
} // s3 goes out of scope and is freed; s2 was moved; s1 goes out of scope and is freed.
fn gives_ownership() -> St | let some_string = String::from("hello");
// some_string is declared and valid
return some_string;
// some_string is moved out of the function as the return value
}
fn takes_and_gives_back(a_string: String) -> String {
// a_string is declared and valid
a_string // a_string is moved out as the return value
}
fn f23() {
let s1 = String::from("hello");
let s2 = &s1;
println!("s1 is {}, s2 is {}", s1, s2);
}
fn f24() {
let s1 = String::from("hello");
let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);
}
fn calculate_length(s: &String) -> usize {
s.len()
}
fn f25() {
let s1 = String::from("hello");
let s2 = s1.clone();
let s3 = s1;
println!("{}", s2);
}
fn f26() {
let s1 = String::from("hello");
let mut s2 = &s1;
let s3 = s2;
s2 = &s3; // borrow from s3 again
println!("{}", s2);
}
fn f27() {
let s1 = String::from("run");
let s2 = &s1;
println!("{}", s2);
println!("{}", s2);
}
fn f28() {
let mut s1 = String::from("run");
// s1 is mutable
let s2 = &mut s1;
// s2 is a mutable reference
s2.push_str("oob");
println!("{}", s2);
}
fn f29() {
}
fn f30() {
let s = String::from("broadcast");
let part1 = &s[0..5];
let part2 = &s[5..9];
println!("{}={}+{}", s, part1, part2);
}
fn f31() {
let mut s = String::from("runoob");
let slice = &s[0..3];
println!("slice = {}", slice);
}
fn f32() {
let arr = [1, 3, 5, 7, 9];
let part = &arr[0..3];
for i in part.iter() {
println!("{}", i);
}
}
struct Site {
domain: String,
name: String,
nation: String,
found: u32
}
struct Color(u8, u8, u8);
struct Point2(f64, f64);
fn f33() {
struct Color(u8, u8, u8);
struct Point(f64, f64);
let black = Color(0, 0, 0);
let origin = Point(0.0, 0.0);
println!("black = ({}, {}, {})", black.0, black.1, black.2);
println!("origin = ({}, {})", origin.0, origin.1);
}
struct Rectangle2 {
width: u32,
height: u32,
}
fn f34() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1 is {:?}", rect1);
}
struct Rectangle3 {
width: u32,
height: u32,
}
fn f35() {
let rect1 = Rectangle { width: 30, height: 50 };
println!("rect1's area is {}", rect1.area());
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn wider(&self, rect: &Rectangle) -> bool {
self.width > rect.width
}
}
fn f36() {
let rect1 = Rectangle { width: 30, height: 50 };
let rect2 = Rectangle { width: 40, height: 20 };
println!("{}", rect1.wider(&rect2));
}
struct Rectangle4 {
width: u32,
height: u32,
}
impl Rectangle {
fn create(width: u32, height: u32) -> Rectangle {
Rectangle { width, height }
}
}
fn f37() {
let rect = Rectangle::create(30, 50);
println!("{:?}", rect);
}
#[derive(Debug)]
enum Book {
Papery, Electronic
}
fn f38() {
let book = Book::Papery;
println!("{:?}", book);
}
fn f39() {
enum Book {
Papery {index: u32},
Electronic {url: String},
}
let book = Book::Papery{index: 1001};
let ebook = Book::Electronic{url: String::from("url...")};
match book {
Book::Papery { index } => {
println!("Papery book {}", index);
},
Book::Electronic { url } => {
println!("E-book {}", url);
}
}
}
fn f40() {
let opt = Option::Some("Hello");
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f41() {
let opt: Option<&str> = Option::None;
match opt {
Option::Some(something) => {
println!("{}", something);
},
Option::None => {
println!("opt is nothing");
}
}
}
fn f42() {
let t = Some(64);
match t {
Some(64) => println!("Yes"),
_ => println!("No"),
}
}
fn f43() {
enum Book {
Papery(u32),
Electronic(String)
}
let book = Book::Electronic(String::from("url"));
if let Book::Papery(index) = book {
println!("Papery {}", index);
} else {
println!("Not papery book");
}
}
mod nation {
pub mod government {
pub fn govern() {}
}
mod congress {
pub fn legislate() {}
}
mod court {
fn judicial() {
super::congress::legislate();
}
}
}
fn f44() {
nation::government::govern();
}
mod back_of_house {
pub struct Breakfast {
pub toast: String,
seasonal_fruit: String,
}
impl Breakfast {
pub fn summer(toast: &str) -> Breakfast {
Breakfast {
toast: String::from(toast),
seasonal_fruit: String::from("peaches"),
}
}
}
}
pub fn eat_at_restaurant() {
let mut meal = back_of_house::Breakfast::summer("Rye");
meal.toast = String::from("Wheat");
println!("I'd like {} toast please", meal.toast);
}
fn f45() {
eat_at_restaurant()
}
mod SomeModule {
pub enum Person {
King {
name: String
},
Quene
}
}
fn f46() {
let person = SomeModule::Person::King{
name: String::from("Blue")
};
match person {
SomeModule::Person::King {name} => {
println!("{}", name);
}
_ => {}
}
}
fn max2(array: &[i32]) -> i32 {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i] > array[max_index] {
max_index = i;
}
i += 1;
}
array[max_index]
}
fn f47() {
let a = [2, 4, 6, 3, 1];
println!("max = {}", max(&a));
}
struct Point<T> {
x: T,
y: T,
}
impl<T> Point<T> {
fn x(&self) -> &T {
&self.x
}
}
fn f48() {
let p = Point { x: 1, y: 2 };
println!("p.x = {}", p.x());
}
trait Descriptive {
fn describe(&self) -> String {
String::from("[Object]")
}
}
struct Person {
name: String,
age: u8
}
impl Descriptive for Person {
fn describe(&self) -> String {
format!("{} {}", self.name, self.age)
}
}
fn f49() {
let cali = Person {
name: String::from("Cali"),
age: 24
};
println!("{}", cali.describe());
}
trait Comparable {
fn compare(&self, object: &Self) -> i8;
}
fn max<T: Comparable>(array: &[T]) -> &T {
let mut max_index = 0;
let mut i = 1;
while i < array.len() {
if array[i].compare(&array[max_index]) > 0 {
max_index = i;
}
i += 1;
}
&array[max_index]
}
impl Comparable for f64 {
fn compare(&self, object: &f64) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
impl Comparable for i8 {
fn compare(&self, object: &i8) -> i8 {
if &self > &object { 1 }
else if &self == &object { 0 }
else { -1 }
}
}
fn f50() {
let arr = [1.0, 3.0, 5.0, 4.0, 2.0];
println!("maximum of arr is {}", max(&arr));
}
fn f51() {
let mut vector = vec![1, 2, 4, 8];
vector.push(16);
vector.push(32);
vector.push(64);
println!("{:?}", vector);
}
fn f52() {
let mut v1: Vec<i32> = vec![1, 2, 4, 8];
let mut v2: Vec<i32> = vec![16, 32, 64];
v1.append(&mut v2);
println!("{:?}", v1);
}
fn f53() {
let mut v = vec![1, 2, 4, 8];
println!("{}", match v.get(0) {
Some(value) => value.to_string(),
None => "None".to_string()
});
}
fn f54() {
let v = vec![1, 2, 4, 8];
println!("{}", v[1]);
}
fn f55() {
let v = vec![100, 32, 57];
for i in &v {
println!("{}", i);
}
}
fn f56() {
let mut v = vec![100, 32, 57];
for i in &mut v {
*i += 50;
}
}
fn f57() {
let s = String::from("hello中文");
for c in s.chars() {
println!("{}", c);
}
}
fn f58() {
let s = String::from("EN中文");
let a = s.chars().nth(2);
println!("{:?}", a);
}
fn f59() {
let s = String::from("EN中文");
let sub = &s[0..2];
println!("{}", sub);
}
fn f60() {
let s = String::from("ENEEEEEE");
let sub = &s[0..3];
println!("{}", sub);
}
pub struct ClassName {
field: i32,
}
impl ClassName {
pub fn new(value: i32) -> ClassName {
ClassName {
field: value
}
}
pub fn public_method(&self) {
println!("from public method");
self.private_method();
}
fn private_method(&self) {
println!("from private method");
}
}
fn f61() {
let object = ClassName::new(1024);
object.public_method();
}
fn timestamp1() -> i64 {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
let ms = since_the_epoch.as_secs() as i64 * 1000i64 + (since_the_epoch.subsec_nanos() as f64 / 1_000_000.0) as i64;
ms
}
fn main() {
let ts1 = timestamp1();
let mut i = 0;
while i < 10000 {
i = i+1;
f0();
f1();
f2();
f3();
f4();
f5();
f6();
f7();
f8();
f9();
f10();
f11();
f12();
f13();
f14();
f15();
f16();
f17();
f18();
f19();
f20();
f21();
f22();
f23();
f24();
f25();
f26();
f27();
f28();
f29();
f30();
f31();
f32();
f33();
f34();
f35();
f36();
f37();
f38();
f39();
f40();
f41();
f42();
f43();
f44();
f45();
f46();
f47();
f48();
f49();
f50();
f51();
f52();
f53();
f54();
f55();
f56();
f57();
f58();
f59();
f60();
f61();
}
let ts2 = timestamp1();
println!("TimeStamp2: {}", ts2);
println!("{:?}", ts2 - ts1);
}
| ring {
| identifier_name |
wakers.rs | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Utilities which allow users to block on some future notification from LDK. These are
//! specifically used by [`ChannelManager`] to allow waiting until the [`ChannelManager`] needs to
//! be re-persisted.
//!
//! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
use alloc::sync::Arc;
use core::mem;
use sync::{Condvar, Mutex};
use prelude::*;
#[cfg(any(test, feature = "std"))]
use std::time::{Duration, Instant};
use core::future::Future as StdFuture;
use core::task::{Context, Poll};
use core::pin::Pin;
/// Used to signal to one of many waiters that the condition they're waiting on has happened.
pub(crate) struct Notifier {
notify_pending: Mutex<(bool, Option<Arc<Mutex<FutureState>>>)>,
condvar: Condvar,
}
impl Notifier {
pub(crate) fn new() -> Self {
Self {
notify_pending: Mutex::new((false, None)),
condvar: Condvar::new(),
}
}
pub(crate) fn wait(&self) {
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return;
}
guard = self.condvar.wait(guard).unwrap();
let result = guard.0;
if result {
guard.0 = false;
return
}
}
}
#[cfg(any(test, feature = "std"))]
pub(crate) fn wait_timeout(&self, max_wait: Duration) -> bool {
let current_time = Instant::now();
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return true;
}
guard = self.condvar.wait_timeout(guard, max_wait).unwrap().0;
// Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
// desired wait time has actually passed, and if not then restart the loop with a reduced wait
// time. Note that this logic can be highly simplified through the use of
// `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
// 1.42.0.
let elapsed = current_time.elapsed();
let result = guard.0;
if result || elapsed >= max_wait {
guard.0 = false;
return result;
}
match max_wait.checked_sub(elapsed) {
None => return result,
Some(_) => continue
}
}
}
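// Added sketch (editor's note, not part of the original file): as the comment
// above says, once the MSRV reaches 1.42 this loop could be collapsed using
// std's Condvar::wait_timeout_while, roughly as follows (assuming the local
// sync shim re-exports that method):
// pub(crate) fn wait_timeout(&self, max_wait: Duration) -> bool {
// let guard = self.notify_pending.lock().unwrap();
// let (mut guard, _timed_out) = self.condvar
// .wait_timeout_while(guard, max_wait, |pending| !pending.0)
// .unwrap();
// let notified = guard.0;
// guard.0 = false;
// notified
// }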
/// Wake waiters, tracking that wake needs to occur even if there are currently no waiters.
pub(crate) fn notify(&self) {
let mut lock = self.notify_pending.lock().unwrap();
lock.0 = true;
if let Some(future_state) = lock.1.take() {
future_state.lock().unwrap().complete();
}
mem::drop(lock);
self.condvar.notify_all();
}
/// Gets a [`Future`] that will get woken up with any waiters
pub(crate) fn get_future(&self) -> Future {
let mut lock = self.notify_pending.lock().unwrap();
if lock.0 {
Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
}
} else if let Some(existing_state) = &lock.1 {
Future { state: Arc::clone(&existing_state) }
} else {
let state = Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}));
lock.1 = Some(Arc::clone(&state));
Future { state }
}
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn notify_pending(&self) -> bool {
self.notify_pending.lock().unwrap().0
}
}
/// A callback which is called when a [`Future`] completes.
///
/// Note that this MUST NOT call back into LDK directly, it must instead schedule actions to be
/// taken later. Rust users should use the [`std::future::Future`] implementation for [`Future`]
/// instead.
///
/// Note that the [`std::future::Future`] implementation may only work for runtimes which schedule
/// futures when they receive a wake, rather than immediately executing them.
pub trait FutureCallback : Send {
/// The method which is called.
fn call(&self);
}
impl<F: Fn() + Send> FutureCallback for F {
fn call(&self) { (self)(); }
}
pub(crate) struct FutureState {
callbacks: Vec<Box<dyn FutureCallback>>,
complete: bool,
}
impl FutureState {
fn complete(&mut self) {
for callback in self.callbacks.drain(..) {
callback.call();
}
self.complete = true;
}
}
/// A simple future which can complete once, and calls some callback(s) when it does so.
pub struct Future {
state: Arc<Mutex<FutureState>>,
}
impl Future {
/// Registers a callback to be called upon completion of this future. If the future has already
/// completed, the callback will be called immediately.
pub fn register_callback(&self, callback: Box<dyn FutureCallback>) {
let mut state = self.state.lock().unwrap();
if state.complete {
mem::drop(state);
callback.call(); | }
}
mod std_future {
use core::task::Waker;
pub struct StdWaker(pub Waker);
impl super::FutureCallback for StdWaker {
fn call(&self) { self.0.wake_by_ref() }
}
}
/// (C-not exported) as Rust Futures aren't usable in language bindings.
impl<'a> StdFuture for Future {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut state = self.state.lock().unwrap();
if state.complete {
Poll::Ready(())
} else {
let waker = cx.waker().clone();
state.callbacks.push(Box::new(std_future::StdWaker(waker)));
Poll::Pending
}
}
}
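// Added usage sketch (not part of the original file): because Future implements
// the std Future trait above, async callers can await it directly, while sync
// callers can register a callback. `notifier` below is a hypothetical Notifier
// instance:
// let future = notifier.get_future();
// future.await; // resolves once Notifier::notify fires
// ...or, without an async runtime:
// notifier.get_future().register_callback(Box::new(|| println!("notified")));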
#[cfg(test)]
mod tests {
use super::*;
use core::sync::atomic::{AtomicBool, Ordering};
use core::future::Future as FutureTrait;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
#[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
use sync::Arc;
use std::thread;
let persistence_notifier = Arc::new(Notifier::new());
let thread_notifier = Arc::clone(&persistence_notifier);
let exit_thread = Arc::new(AtomicBool::new(false));
let exit_thread_clone = exit_thread.clone();
thread::spawn(move || {
loop {
let mut lock = thread_notifier.notify_pending.lock().unwrap();
lock.0 = true;
thread_notifier.condvar.notify_all();
if exit_thread_clone.load(Ordering::SeqCst) {
break
}
}
});
// Check that we can block indefinitely until updates are available.
let _ = persistence_notifier.wait();
// Check that the Notifier will return after the given duration if updates are
// available.
loop {
if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
exit_thread.store(true, Ordering::SeqCst);
// Check that the Notifier will return after the given duration even if no updates
// are available.
loop {
if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
}
#[test]
fn test_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(!callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
assert!(callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
}
#[test]
fn test_pre_completed_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
future.state.lock().unwrap().complete();
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(callback.load(Ordering::SeqCst));
assert!(future.state.lock().unwrap().callbacks.is_empty());
}
// Rather annoyingly, there's no safe way in Rust std to construct a Waker despite it being
// totally possible to construct from a trait implementation (though somewhat less efficient
// compared to a raw VTable). Instead, we have to write out a lot of boilerplate to build a
// waker, which we do here with a trivial Arc<AtomicBool> data element to track woke-ness.
const WAKER_V_TABLE: RawWakerVTable = RawWakerVTable::new(waker_clone, wake, wake_by_ref, drop);
unsafe fn wake_by_ref(ptr: *const ()) { let p = ptr as *const Arc<AtomicBool>; assert!(!(*p).fetch_or(true, Ordering::SeqCst)); }
unsafe fn drop(ptr: *const ()) { let p = ptr as *mut Arc<AtomicBool>; let _freed = Box::from_raw(p); }
unsafe fn wake(ptr: *const ()) { wake_by_ref(ptr); drop(ptr); }
unsafe fn waker_clone(ptr: *const ()) -> RawWaker {
let p = ptr as *const Arc<AtomicBool>;
RawWaker::new(Box::into_raw(Box::new(Arc::clone(&*p))) as *const (), &WAKER_V_TABLE)
}
fn create_waker() -> (Arc<AtomicBool>, Waker) {
let a = Arc::new(AtomicBool::new(false));
let waker = unsafe { Waker::from_raw(waker_clone((&a as *const Arc<AtomicBool>) as *const ())) };
(a, waker)
}
#[test]
fn test_future() {
let mut future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
let mut second_future = Future { state: Arc::clone(&future.state) };
let (woken, waker) = create_waker();
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Pending);
assert!(!woken.load(Ordering::SeqCst));
let (second_woken, second_waker) = create_waker();
assert_eq!(Pin::new(&mut second_future).poll(&mut Context::from_waker(&second_waker)), Poll::Pending);
assert!(!second_woken.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
assert!(woken.load(Ordering::SeqCst));
assert!(second_woken.load(Ordering::SeqCst));
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
assert_eq!(Pin::new(&mut second_future).poll(&mut Context::from_waker(&second_waker)), Poll::Ready(()));
}
} | } else {
state.callbacks.push(callback);
} | random_line_split |
wakers.rs | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Utilities which allow users to block on some future notification from LDK. These are
//! specifically used by [`ChannelManager`] to allow waiting until the [`ChannelManager`] needs to
//! be re-persisted.
//!
//! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
use alloc::sync::Arc;
use core::mem;
use sync::{Condvar, Mutex};
use prelude::*;
#[cfg(any(test, feature = "std"))]
use std::time::{Duration, Instant};
use core::future::Future as StdFuture;
use core::task::{Context, Poll};
use core::pin::Pin;
/// Used to signal to one of many waiters that the condition they're waiting on has happened.
pub(crate) struct Notifier {
notify_pending: Mutex<(bool, Option<Arc<Mutex<FutureState>>>)>,
condvar: Condvar,
}
impl Notifier {
pub(crate) fn new() -> Self {
Self {
notify_pending: Mutex::new((false, None)),
condvar: Condvar::new(),
}
}
pub(crate) fn wait(&self) {
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return;
}
guard = self.condvar.wait(guard).unwrap();
let result = guard.0;
if result {
guard.0 = false;
return
}
}
}
#[cfg(any(test, feature = "std"))]
pub(crate) fn wait_timeout(&self, max_wait: Duration) -> bool {
let current_time = Instant::now();
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return true;
}
guard = self.condvar.wait_timeout(guard, max_wait).unwrap().0;
// Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
// desired wait time has actually passed, and if not then restart the loop with a reduced wait
// time. Note that this logic can be highly simplified through the use of
// `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
// 1.42.0.
let elapsed = current_time.elapsed();
let result = guard.0;
if result || elapsed >= max_wait {
guard.0 = false;
return result;
}
match max_wait.checked_sub(elapsed) {
None => return result,
Some(_) => continue
}
}
}
/// Wake waiters, tracking that wake needs to occur even if there are currently no waiters.
pub(crate) fn notify(&self) {
let mut lock = self.notify_pending.lock().unwrap();
lock.0 = true;
if let Some(future_state) = lock.1.take() {
future_state.lock().unwrap().complete();
}
mem::drop(lock);
self.condvar.notify_all();
}
/// Gets a [`Future`] that will get woken up with any waiters
pub(crate) fn get_future(&self) -> Future {
let mut lock = self.notify_pending.lock().unwrap();
if lock.0 {
Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
}
} else if let Some(existing_state) = &lock.1 {
Future { state: Arc::clone(&existing_state) }
} else {
let state = Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}));
lock.1 = Some(Arc::clone(&state));
Future { state }
}
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn notify_pending(&self) -> bool {
self.notify_pending.lock().unwrap().0
}
}
/// A callback which is called when a [`Future`] completes.
///
/// Note that this MUST NOT call back into LDK directly, it must instead schedule actions to be
/// taken later. Rust users should use the [`std::future::Future`] implementation for [`Future`]
/// instead.
///
/// Note that the [`std::future::Future`] implementation may only work for runtimes which schedule
/// futures when they receive a wake, rather than immediately executing them.
pub trait FutureCallback : Send {
/// The method which is called.
fn call(&self);
}
impl<F: Fn() + Send> FutureCallback for F {
fn | (&self) { (self)(); }
}
pub(crate) struct FutureState {
callbacks: Vec<Box<dyn FutureCallback>>,
complete: bool,
}
impl FutureState {
fn complete(&mut self) {
for callback in self.callbacks.drain(..) {
callback.call();
}
self.complete = true;
}
}
/// A simple future which can complete once, and calls some callback(s) when it does so.
pub struct Future {
state: Arc<Mutex<FutureState>>,
}
impl Future {
/// Registers a callback to be called upon completion of this future. If the future has already
/// completed, the callback will be called immediately.
pub fn register_callback(&self, callback: Box<dyn FutureCallback>) {
let mut state = self.state.lock().unwrap();
if state.complete {
mem::drop(state);
callback.call();
} else {
state.callbacks.push(callback);
}
}
}
mod std_future {
use core::task::Waker;
pub struct StdWaker(pub Waker);
impl super::FutureCallback for StdWaker {
fn call(&self) { self.0.wake_by_ref() }
}
}
/// (C-not exported) as Rust Futures aren't usable in language bindings.
impl<'a> StdFuture for Future {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut state = self.state.lock().unwrap();
if state.complete {
Poll::Ready(())
} else {
let waker = cx.waker().clone();
state.callbacks.push(Box::new(std_future::StdWaker(waker)));
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::sync::atomic::{AtomicBool, Ordering};
use core::future::Future as FutureTrait;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
#[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
use sync::Arc;
use std::thread;
let persistence_notifier = Arc::new(Notifier::new());
let thread_notifier = Arc::clone(&persistence_notifier);
let exit_thread = Arc::new(AtomicBool::new(false));
let exit_thread_clone = exit_thread.clone();
thread::spawn(move || {
loop {
let mut lock = thread_notifier.notify_pending.lock().unwrap();
lock.0 = true;
thread_notifier.condvar.notify_all();
if exit_thread_clone.load(Ordering::SeqCst) {
break
}
}
});
// Check that we can block indefinitely until updates are available.
let _ = persistence_notifier.wait();
// Check that the Notifier will return after the given duration if updates are
// available.
loop {
if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
exit_thread.store(true, Ordering::SeqCst);
// Check that the Notifier will return after the given duration even if no updates
// are available.
loop {
if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
}
#[test]
fn test_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(!callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
assert!(callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
}
#[test]
fn test_pre_completed_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
future.state.lock().unwrap().complete();
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(callback.load(Ordering::SeqCst));
assert!(future.state.lock().unwrap().callbacks.is_empty());
}
// Rather annoyingly, there's no safe way in Rust std to construct a Waker despite it being
// totally possible to construct from a trait implementation (though somewhat less efficient
// compared to a raw VTable). Instead, we have to write out a lot of boilerplate to build a
// waker, which we do here with a trivial Arc<AtomicBool> data element to track woke-ness.
const WAKER_V_TABLE: RawWakerVTable = RawWakerVTable::new(waker_clone, wake, wake_by_ref, drop);
unsafe fn wake_by_ref(ptr: *const ()) { let p = ptr as *const Arc<AtomicBool>; assert!(!(*p).fetch_or(true, Ordering::SeqCst)); }
unsafe fn drop(ptr: *const ()) { let p = ptr as *mut Arc<AtomicBool>; let _freed = Box::from_raw(p); }
unsafe fn wake(ptr: *const ()) { wake_by_ref(ptr); drop(ptr); }
unsafe fn waker_clone(ptr: *const ()) -> RawWaker {
let p = ptr as *const Arc<AtomicBool>;
RawWaker::new(Box::into_raw(Box::new(Arc::clone(&*p))) as *const (), &WAKER_V_TABLE)
}
fn create_waker() -> (Arc<AtomicBool>, Waker) {
let a = Arc::new(AtomicBool::new(false));
let waker = unsafe { Waker::from_raw(waker_clone((&a as *const Arc<AtomicBool>) as *const ())) };
(a, waker)
}
#[test]
fn test_future() {
let mut future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
let mut second_future = Future { state: Arc::clone(&future.state) };
let (woken, waker) = create_waker();
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Pending);
assert!(!woken.load(Ordering::SeqCst));
let (second_woken, second_waker) = create_waker();
assert_eq!(Pin::new(&mut second_future).poll(&mut Context::from_waker(&second_waker)), Poll::Pending);
assert!(!second_woken.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
assert!(woken.load(Ordering::SeqCst));
assert!(second_woken.load(Ordering::SeqCst));
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
assert_eq!(Pin::new(&mut second_future).poll(&mut Context::from_waker(&second_waker)), Poll::Ready(()));
}
}
| call | identifier_name |
wakers.rs | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Utilities which allow users to block on some future notification from LDK. These are
//! specifically used by [`ChannelManager`] to allow waiting until the [`ChannelManager`] needs to
//! be re-persisted.
//!
//! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
use alloc::sync::Arc;
use core::mem;
use sync::{Condvar, Mutex};
use prelude::*;
#[cfg(any(test, feature = "std"))]
use std::time::{Duration, Instant};
use core::future::Future as StdFuture;
use core::task::{Context, Poll};
use core::pin::Pin;
/// Used to signal to one of many waiters that the condition they're waiting on has happened.
pub(crate) struct Notifier {
notify_pending: Mutex<(bool, Option<Arc<Mutex<FutureState>>>)>,
condvar: Condvar,
}
impl Notifier {
pub(crate) fn new() -> Self {
Self {
notify_pending: Mutex::new((false, None)),
condvar: Condvar::new(),
}
}
pub(crate) fn wait(&self) {
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return;
}
guard = self.condvar.wait(guard).unwrap();
let result = guard.0;
if result {
guard.0 = false;
return
}
}
}
#[cfg(any(test, feature = "std"))]
pub(crate) fn wait_timeout(&self, max_wait: Duration) -> bool {
let current_time = Instant::now();
loop {
let mut guard = self.notify_pending.lock().unwrap();
if guard.0 {
guard.0 = false;
return true;
}
guard = self.condvar.wait_timeout(guard, max_wait).unwrap().0;
// Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
// desired wait time has actually passed, and if not then restart the loop with a reduced wait
// time. Note that this logic can be highly simplified through the use of
// `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
// 1.42.0.
let elapsed = current_time.elapsed();
let result = guard.0;
if result || elapsed >= max_wait {
guard.0 = false;
return result;
}
match max_wait.checked_sub(elapsed) {
None => return result,
Some(_) => continue
}
}
}
/// Wake waiters, tracking that wake needs to occur even if there are currently no waiters.
pub(crate) fn notify(&self) {
let mut lock = self.notify_pending.lock().unwrap();
lock.0 = true;
if let Some(future_state) = lock.1.take() {
future_state.lock().unwrap().complete();
}
mem::drop(lock);
self.condvar.notify_all();
}
/// Gets a [`Future`] that will get woken up with any waiters
pub(crate) fn get_future(&self) -> Future |
#[cfg(any(test, feature = "_test_utils"))]
pub fn notify_pending(&self) -> bool {
self.notify_pending.lock().unwrap().0
}
}
/// A callback which is called when a [`Future`] completes.
///
/// Note that this MUST NOT call back into LDK directly, it must instead schedule actions to be
/// taken later. Rust users should use the [`std::future::Future`] implementation for [`Future`]
/// instead.
///
/// Note that the [`std::future::Future`] implementation may only work for runtimes which schedule
/// futures when they receive a wake, rather than immediately executing them.
pub trait FutureCallback : Send {
/// The method which is called.
fn call(&self);
}
impl<F: Fn() + Send> FutureCallback for F {
fn call(&self) { (self)(); }
}
pub(crate) struct FutureState {
callbacks: Vec<Box<dyn FutureCallback>>,
complete: bool,
}
impl FutureState {
fn complete(&mut self) {
for callback in self.callbacks.drain(..) {
callback.call();
}
self.complete = true;
}
}
/// A simple future which can complete once, and calls some callback(s) when it does so.
pub struct Future {
state: Arc<Mutex<FutureState>>,
}
impl Future {
/// Registers a callback to be called upon completion of this future. If the future has already
/// completed, the callback will be called immediately.
pub fn register_callback(&self, callback: Box<dyn FutureCallback>) {
let mut state = self.state.lock().unwrap();
if state.complete {
mem::drop(state);
callback.call();
} else {
state.callbacks.push(callback);
}
}
}
mod std_future {
use core::task::Waker;
pub struct StdWaker(pub Waker);
impl super::FutureCallback for StdWaker {
fn call(&self) { self.0.wake_by_ref() }
}
}
/// (C-not exported) as Rust Futures aren't usable in language bindings.
impl<'a> StdFuture for Future {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut state = self.state.lock().unwrap();
if state.complete {
Poll::Ready(())
} else {
let waker = cx.waker().clone();
state.callbacks.push(Box::new(std_future::StdWaker(waker)));
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::sync::atomic::{AtomicBool, Ordering};
use core::future::Future as FutureTrait;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
#[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
use sync::Arc;
use std::thread;
let persistence_notifier = Arc::new(Notifier::new());
let thread_notifier = Arc::clone(&persistence_notifier);
let exit_thread = Arc::new(AtomicBool::new(false));
let exit_thread_clone = exit_thread.clone();
thread::spawn(move || {
loop {
let mut lock = thread_notifier.notify_pending.lock().unwrap();
lock.0 = true;
thread_notifier.condvar.notify_all();
if exit_thread_clone.load(Ordering::SeqCst) {
break
}
}
});
// Check that we can block indefinitely until updates are available.
let _ = persistence_notifier.wait();
// Check that the Notifier will return after the given duration if updates are
// available.
loop {
if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
exit_thread.store(true, Ordering::SeqCst);
// Check that the Notifier will return after the given duration even if no updates
// are available.
loop {
if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
break
}
}
}
#[test]
fn test_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(!callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
assert!(callback.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
}
#[test]
fn test_pre_completed_future_callbacks() {
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
future.state.lock().unwrap().complete();
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
future.register_callback(Box::new(move || assert!(!callback_ref.fetch_or(true, Ordering::SeqCst))));
assert!(callback.load(Ordering::SeqCst));
assert!(future.state.lock().unwrap().callbacks.is_empty());
}
// Rather annoyingly, there's no safe way in Rust std to construct a Waker, despite it being
// entirely possible to construct one from a trait implementation (though somewhat less efficient
// than a raw VTable). Instead, we have to write out a lot of boilerplate to build a
// waker, which we do here with a trivial Arc<AtomicBool> data element to track woke-ness.
const WAKER_V_TABLE: RawWakerVTable = RawWakerVTable::new(waker_clone, wake, wake_by_ref, drop);
unsafe fn wake_by_ref(ptr: *const ()) { let p = ptr as *const Arc<AtomicBool>; assert!(!(*p).fetch_or(true, Ordering::SeqCst)); }
unsafe fn drop(ptr: *const ()) { let p = ptr as *mut Arc<AtomicBool>; let _freed = Box::from_raw(p); }
unsafe fn wake(ptr: *const ()) { wake_by_ref(ptr); drop(ptr); }
unsafe fn waker_clone(ptr: *const ()) -> RawWaker {
let p = ptr as *const Arc<AtomicBool>;
RawWaker::new(Box::into_raw(Box::new(Arc::clone(&*p))) as *const (), &WAKER_V_TABLE)
}
fn create_waker() -> (Arc<AtomicBool>, Waker) {
let a = Arc::new(AtomicBool::new(false));
let waker = unsafe { Waker::from_raw(waker_clone((&a as *const Arc<AtomicBool>) as *const ())) };
(a, waker)
}
#[test]
fn test_future() {
let mut future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
};
let mut second_future = Future { state: Arc::clone(&future.state) };
let (woken, waker) = create_waker();
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Pending);
assert!(!woken.load(Ordering::SeqCst));
let (second_woken, second_waker) = create_waker();
assert_eq!(Pin::new(&mut second_future).poll(&mut Context::from_waker(&second_waker)), Poll::Pending);
assert!(!second_woken.load(Ordering::SeqCst));
future.state.lock().unwrap().complete();
assert!(woken.load(Ordering::SeqCst));
assert!(second_woken.load(Ordering::SeqCst));
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
assert_eq!(Pin::new(&mut second_future).poll(&mut Context::from_waker(&second_waker)), Poll::Ready(()));
}
}
| {
let mut lock = self.notify_pending.lock().unwrap();
if lock.0 {
Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}))
}
} else if let Some(existing_state) = &lock.1 {
Future { state: Arc::clone(&existing_state) }
} else {
let state = Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
complete: false,
}));
lock.1 = Some(Arc::clone(&state));
Future { state }
}
} | identifier_body |
gamestate.rs | Board;
use crate::common::tile::{ TileId, Tile };
use crate::common::player::{ Player, PlayerId, PlayerColor };
use crate::common::penguin::Penguin;
use crate::common::action::{ Move, Placement };
use crate::common::boardposn::BoardPosn;
use crate::common::util;
use std::collections::HashSet;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fmt;
use serde::{ Serialize, Deserialize };
pub const MIN_PLAYERS_PER_GAME: usize = 2;
pub const MAX_PLAYERS_PER_GAME: usize = 4;
/// Each player receives 6 - player_count penguins to start the game
pub const PENGUIN_FACTOR: usize = 6;
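// e.g. a 2-player game gives each player 6 - 2 = 4 penguins, and a 4-player
// game gives 6 - 4 = 2 (see `GameState::with_players` below).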
/// Rc<RefCell<T>> gives a copiable, mutable reference to its T
///
/// This SharedGameState is a copiable, mutable pointer to the GameState
/// intended for use in the client since gtk requires ownership of the
/// data passed to its callbacks. Using this, one can pass a copy to each
/// callback and maintain a copy to overwrite with server updates as well.
pub type SharedGameState = Rc<RefCell<GameState>>;
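// Sketch of the intended pattern (not from the original source; the gtk-rs
// style `connect_draw` callback shown is an assumption): each callback owns
// its own cloned Rc, and RefCell supplies the interior mutability that gtk's
// 'static callbacks would otherwise rule out.
//
// let shared: SharedGameState = Rc::new(RefCell::new(gamestate));
// let for_draw = Rc::clone(&shared);
// drawing_area.connect_draw(move |_, _ctx| {
//     let state = for_draw.borrow(); // immutable view for rendering
//     /* render `state` here */
//     gtk::Inhibit(false)
// });
// *shared.borrow_mut() = state_from_server; // overwrite on a server update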
/// The GameState contains the entirety of the current state
/// of the game. It is meant to be serialized into json from the server
/// and sent to each client to deserialize to receive the updated game
/// state each turn. The GameState is rendering-agnostic, so each
/// client is free to render the GameState however it wishes.
///
/// Throughout the gamestate, unique ids are usually used over the objects
/// they refer to so that we can (1) avoid excessive cloning from multiple mutable
/// borrows, (2) serialize the data more easily and (3) enable the creation of
/// external mappings on the server from e.g. PlayerId to some private data if needed.
///
/// - Each player's penguin is contained within the Player struct.
/// - Each penguin struct contains either Some(TileId) if it is currently
/// on a tile or None if it hasn't yet been placed.
/// - Each Player is mapped from their unique PlayerId to the Player struct.
/// - The ordering of players is given by the immutable turn_order. The current
/// turn is given by current_turn which will change each time
/// {place,move}_avatar_for_player is called.
/// - The GameState's current_turn player should never be stuck, unless
/// the game is over, i.e. current_player should always have moves.
/// Players' turns will be skipped in turn_order if they cannot move anymore.
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct GameState {
pub board: Board,
pub players: BTreeMap<PlayerId, Player>,
pub turn_order: Vec<PlayerId>, // INVARIANT: turn_order never changes for a given game, unless a player is kicked
pub current_turn: PlayerId,
pub winning_players: Option<Vec<PlayerId>>, // will be None until the game ends
}
impl fmt::Debug for GameState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut board_string = String::new();
for y in 0..self.board.height {
if y % 2 == 1 {
board_string.push_str(" ");
}
for x in 0..self.board.width {
let tile_string = match self.board.get_tile_id(x, y) {
Some(id) => {
match self.players.values().find(|player|
player.penguins.iter().any(|penguin| penguin.tile_id == Some(id)))
{
Some(player) => {
format!("P{}", player.player_id.0)
},
None => format!("{:2}", id.0),
}
},
None => " x".to_string(),
};
board_string.push_str(&tile_string);
board_string.push_str(" ");
}
board_string.push_str("\n");
}
writeln!(f, "{}", board_string)?;
// Write each player, their score, and their penguin positions
for (player_id, player) in self.players.iter() {
let current_player_str = if self.current_turn == *player_id { "<- current turn" } else { "" };
let penguins = util::map_slice(&player.penguins, |penguin| {
match penguin.tile_id {
Some(id) => format!("penguin on tile {}", id.0),
None => "unplaced".to_string(),
}
}).join(", ");
writeln!(f, "Player {} - {:?} - score: {} - penguins: [{}] {}",
player_id.0, player.color, player.score, penguins, current_player_str)?;
}
writeln!(f, "")
}
}
impl GameState {
/// Create a new GameState with the given board and player_count. Generates new
/// player ids for the number of players given.
/// This will panic if player_count is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn new(board: Board, player_count: usize) -> GameState {
GameState::with_players(board, (0..player_count).map(PlayerId).collect())
}
/// Create a new GameState with the given board and turn_order, with the player count equal
/// to the number of players in turn_order.
/// This will panic if turn_order.len() is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn with_players(board: Board, turn_order: Vec<PlayerId>) -> GameState {
// Each player receives 6 - N penguins, where N is the number of players
let penguins_per_player = PENGUIN_FACTOR - turn_order.len();
let players: BTreeMap<_, _> = turn_order.iter().zip(PlayerColor::iter()).map(|(id, color)| {
(*id, Player::new(*id, color, penguins_per_player))
}).collect();
let current_turn = turn_order[0];
GameState {
board,
players,
turn_order,
current_turn,
winning_players: None,
}
}
/// Creates a new gamestate with a board with a given number of rows and columns,
/// the given number of players, and no holes.
pub fn with_default_board(rows: u32, columns: u32, players: usize) -> GameState {
let board = Board::with_no_holes(rows, columns, 3);
GameState::new(board, players)
}
/// Places an unplaced avatar on a position on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the players' avatars are already placed
/// 3. Placement of a penguin that doesn't belong to the current player
pub fn place_avatar_for_player(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
self.place_avatar_without_changing_turn(player, tile)?;
self.advance_turn();
Some(())
}
/// Place a player's avatar but don't change whose turn it is.
/// This is useful to more easily place avatars in bulk during testing.
pub fn place_avatar_without_changing_turn(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
let occupied_tiles = self.get_occupied_tiles();
if occupied_tiles.contains(&tile) {
None
} else {
let player = self.players.get_mut(&player)?;
player.place_penguin(tile, &self.board)
}
}
/// Places an unplaced avatar on the given placement on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the players' avatars are already placed
///
/// This function will choose which penguin to place for the current player, so it is
/// impossible for the player to place a penguin that is not theirs.
pub fn place_avatar_for_current_player(&mut self, placement: Placement) -> Option<()> {
self.place_avatar_for_player(self.current_turn, placement.tile_id)
}
/// Moves a placed avatar from one position to another on the board,
/// removes the tile that penguin was on, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid move.
/// An invalid move is one of:
/// 1. Move to an invalid position (either out of bounds or hole)
/// 2. Move when the current avatar has yet to be placed
/// 3. Move to a tile that is not accessible within a straight line
/// of the current tile, with no holes in between.
/// 4. Move of a penguin that doesn't belong to the player
pub fn move_avatar_for_player_without_changing_turn(&mut self, player: PlayerId, penguin_start_tile: TileId, destination: TileId) -> Option<()> {
let occupied = &self.get_occupied_tiles();
let player = self.players.get_mut(&player)?;
player.move_penguin(penguin_start_tile, destination, &self.board, occupied)?;
player.score += self.board.remove_tile(penguin_start_tile);
Some(())
}
/// Helper function which moves an avatar for the player whose turn it currently is.
pub fn move_avatar_for_current_player(&mut self, move_: Move) -> Option<()> {
self.move_avatar_for_player_without_changing_turn(self.current_turn, move_.from, move_.to)?;
self.advance_turn();
Some(())
}
/// Retrieve a tile by its ID. Will return None if the id
/// does not reference any existing tile. This can happen
/// if the tile was removed and has become a hole in the board.
pub fn get_tile(&self, tile_id: TileId) -> Option<&Tile> {
self.board.tiles.get(&tile_id)
}
/// Gets the color of the player whose penguin is on a certain tile
/// Returns None if there is no penguin on that tile
pub fn get_color_on_tile(&self, tile_id: TileId) -> Option<PlayerColor> {
self.players.iter().find_map(|(_, player)| {
let is_penguin_on_tile = player.penguins.iter().any(|penguin| penguin.tile_id == Some(tile_id));
if is_penguin_on_tile {
Some(player.color)
} else {
None
}
})
}
/// Returns true if any player has a penguin they can move,
/// false if not (and the game is thus over)
pub fn can_any_player_move_penguin(&self) -> bool {
let occupied_tiles = self.get_occupied_tiles();
self.players.iter().any(|(_, player)| player.can_move_a_penguin(&self.board, &occupied_tiles))
}
/// Returns true if the given player can move a penguin
pub fn can_player_move(&self, player: PlayerId) -> bool {
self.players.get(&player).map_or(false, |player|
player.can_move_a_penguin(&self.board, &self.get_occupied_tiles()))
}
/// Returns the set of tiles on this gamestate's board which have a penguin on them
pub fn get_occupied_tiles(&self) -> HashSet<TileId> {
self.players.iter()
.flat_map(|(_, player)| player.penguins.iter().filter_map(|penguin| penguin.tile_id))
.collect()
}
/// Gets all valid moves for the current GameState,
/// meaning only moves the current player can make.
pub fn get_valid_moves(&self) -> Vec<Move> {
let occupied_tiles = self.get_occupied_tiles();
let penguins_to_move = &self.current_player().penguins;
penguins_to_move.iter().flat_map(|penguin| {
// penguins in Games are placed, so should always be Some
let starting_tile_id = penguin.tile_id.expect("A penguin was not placed!");
let starting_tile = self.get_tile(starting_tile_id).expect("A penguin is placed on a hole");
starting_tile.all_reachable_tiles(&self.board, &occupied_tiles)
.into_iter()
.map(move |destination| Move::new(starting_tile_id, destination.tile_id))
}).collect()
}
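// Usage sketch, not part of the original source: a trivial player strategy
// can be built directly on `get_valid_moves`, e.g. always taking the first
// legal move for the current player:
//
// fn first_legal_move(state: &GameState) -> Option<Move> {
//     state.get_valid_moves().into_iter().next()
// }
//
// if let Some(mv) = first_legal_move(&gamestate) {
//     gamestate.move_avatar_for_current_player(mv);
// }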
/// Get a penguin at a position, None if no penguin at that position
#[allow(dead_code)]
pub fn find_penguin_at_position(&self, posn: BoardPosn) -> Option<&Penguin> {
let tile = self.board.get_tile_id(posn.x, posn.y)?;
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Search for the penguin at the given TileId and return it if possible.
/// Returns None if no penguin at that location was found.
pub fn find_penguin(&self, tile: TileId) -> Option<&Penguin> {
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Returns the player whose turn it currently is
pub fn current_player(&self) -> &Player {
self.players.get(&self.current_turn).unwrap()
}
/// Is this game over? We define a game to be "over" if either
/// some players have won, or there are no players left in the game.
pub fn is_game_over(&self) -> bool {
self.winning_players.is_some() || self.players.is_empty()
}
#[allow(dead_code)]
pub fn get_player_by_color_mut(&mut self, color: PlayerColor) -> Option<&mut Player> {
self.players.iter_mut()
.find(|(_, player)| player.color == color)
.map(|(_, player)| player)
}
/// Advance the turn of this game to the next player's turn
/// Will mutate this game's current_turn field.
///
/// Note that this will skip the turn of any player who cannot
/// move any penguins. It is an invalid game state for the current
/// turn to be a player who cannot move any penguins.
pub fn advance_turn(&mut self) {
self.advance_turn_index();
for _ in 0..self.players.len() {
if !self.current_player().has_unplaced_penguins() && self.get_valid_moves().is_empty() {
self.advance_turn_index()
} else {
return;
}
}
// No players have any moves left, find the winning players by those with the maximum score
self.winning_players = Some(util::all_max_by_key(self.players.iter(), |(_, player)| player.score)
.map(|(id, _)| *id).collect());
}
/// Sets the turn of this game to the next player in order
fn advance_turn_index(&mut self) {
if !self.turn_order.is_empty() {
let current_turn_index = self.turn_order.iter().position(|id| id == &self.current_turn).unwrap();
let next_turn_index = (current_turn_index + 1) % self.turn_order.len();
self.current_turn = self.turn_order[next_turn_index];
}
}
/// Sets the turn of the game to the previous player's turn, used when removing a player.
fn previous_turn_index(&mut self) {
let current_turn_index = self.turn_order.iter()
.position(|id| id == &self.current_turn).unwrap();
let prev_turn_index = if current_turn_index == 0 {
self.turn_order.len().saturating_sub(1)
} else {
(current_turn_index - 1) % self.turn_order.len()
};
self.current_turn = self.turn_order[prev_turn_index];
}
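// Worked example of the index arithmetic above (not from the original
// source): with turn_order = [P0, P1, P2] and current_turn = P0,
// advance_turn_index selects P1 ((0 + 1) % 3 == 1), while
// previous_turn_index wraps to P2 (len - 1 == 2). Stepping back before a
// removal lets remove_player advance cleanly past the removed player.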
pub fn player_score(&self, player_id: PlayerId) -> usize {
self.players[&player_id].score
}
/// Returns true if all penguins have a concrete position on the board.
/// If this is false then we are still in the PlacePenguins phase of the game.
pub fn all_penguins_are_placed(&self) -> bool {
self.players.iter().all(|(_, player)| !player.has_unplaced_penguins())
}
/// Removes a player and its penguins from this game
pub fn remove_player(&mut self, player_id: PlayerId) {
if !self.is_game_over() {
let should_advance_turn = self.current_turn == player_id;
// Prepare to advance the current turn past the to-be-removed player
if should_advance_turn {
self.previous_turn_index();
}
self.players.remove(&player_id);
self.turn_order.retain(|id| *id != player_id);
// Now actually advance the turn after the player is removed to properly
// handle the case where we skip the turns of possibly multiple players
// whose penguins are all stuck.
if should_advance_turn {
self.advance_turn();
}
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::common::boardposn::BoardPosn;
#[test]
fn | () {
let board = Board::with_no_holes(3, 3, 3);
let gamestate = GameState::new(board, 4); // create game with 4 players
assert_eq!(gamestate.players.len(), 4);
// should have 6-n penguins per player
assert!(gamestate.players.iter().all(|(_, player)| player.penguins.len() == 2));
// does turn_order contain each of the players' ids exactly once?
assert_eq!(gamestate.turn_order.len(), gamestate.players.len());
assert!(gamestate.players.iter().all(|(id, _)| gamestate.turn_order.contains(id)), "{:?},\nturns={:?}", gamestate.players, gamestate.turn_order);
assert!(gamestate.winning_players.is_none()); // no winners yet
}
#[test]
fn test_can_any_player_move_penguin() {
// Can no players move when there's a penguin on the board, but holes blocking it in all directions?
let holes = util::map_slice(&[(1, 1), (1, 0), (0, 1)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(!gamestate.can_any_player_move_penguin());
// Can a player move when they have a penguin on the board with no holes blocking it?
let board = Board::with_no_holes(3, 3, 3);
let mut gamestate = GameState::new(board, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(gamestate.can_any_player_move_penguin());
// Can no players move when all penguins are blocked by holes or other penguins?
// 0(hole) 2(penguin)
// 1(penguin) 3(hole)
let holes = util::map_slice(&[(1, 1), (0, 0)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(1));
assert!(gamestate.can_any_player_move_penguin()); // no penguin at 2, so can move
gamestate.place_avatar_without_changing_turn(player_id, TileId(2));
assert!(!gamestate.can_any_player_move_penguin()); // penguin at 2, so cannot move
}
#[test]
fn test_place_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
gamestate.board.remove_tile(TileId(5));
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Player places a penguin at a valid spot
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(4)), Some(()));
// Player tried to place a penguin at an invalid location
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(10)), None);
// Player tried to place a penguin at a hole
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(5)), None);
}
#[test]
fn test_move_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Reachable tiles from 0 are [0, 2, 1, 5]
let tile_0 = TileId(0);
let reachable_tile = TileId(5);
let unreachable_tile = TileId(3);
// Move failed: penguin not yet placed
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, reachable_tile), None);
gamestate.place_avatar_without_changing_turn(player_id, tile_0);
// Move failed: tile not reachable from tile 0
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, tile_0), None);
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, unreachable_tile), None);
// success, penguin should now be on tile 5
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, reachable_tile), Some(()));
// Finally, assert that the position of the penguin actually changed
let player = gamestate.players.iter_mut().nth(0).unwrap().1;
let penguin_pos = player.find_penguin_mut(reachable_tile).and_then(|penguin| penguin.tile_id);
assert_eq!(penguin | test_new | identifier_name |
gamestate.rs | Board;
use crate::common::tile::{ TileId, Tile };
use crate::common::player::{ Player, PlayerId, PlayerColor };
use crate::common::penguin::Penguin;
use crate::common::action::{ Move, Placement };
use crate::common::boardposn::BoardPosn;
use crate::common::util;
use std::collections::HashSet;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fmt;
use serde::{ Serialize, Deserialize };
pub const MIN_PLAYERS_PER_GAME: usize = 2;
pub const MAX_PLAYERS_PER_GAME: usize = 4;
/// Each player receives 6 - player_count penguins to start the game
pub const PENGUIN_FACTOR: usize = 6;
/// Rc<RefCell<T>> gives a copiable, mutable reference to its T
///
/// This SharedGameState is a copiable, mutable pointer to the GameState
/// intended for use in the client since gtk requires ownership of the
/// data passed to its callbacks. Using this, one can pass a copy to each
/// callback and maintain a copy to overwrite with server updates as well.
pub type SharedGameState = Rc<RefCell<GameState>>;
/// The GameState contains the entirety of the current state
/// of the game. It is meant to be serialized into json from the server
/// and sent to each client to deserialize to receive the updated game
/// state each turn. The GameState is rendering-agnostic, so each
/// client is free to render the GameState however it wishes.
///
/// Throughout the gamestate, unique ids are usually used over the objects
/// they refer to so that we can (1) avoid excessive cloning from multiple mutable
/// borrows, (2) serialize the data more easily and (3) enable the creation of
/// external mappings on the server from e.g. PlayerId to some private data if needed.
///
/// - Each player's penguin is contained within the Player struct.
/// - Each penguin struct contains either Some(TileId) if it is currently
/// on a tile or None if it hasn't yet been placed.
/// - Each Player is mapped from their unique PlayerId to the Player struct.
/// - The ordering of players is given by the immutable turn_order. The current
/// turn is given by current_turn which will change each time
/// {place,move}_avatar_for_player is called.
/// - The GameState's current_turn player should never be stuck, unless
/// the game is over, i.e. current_player should always have moves.
/// Players' turns will be skipped in turn_order if they cannot move anymore.
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct GameState {
pub board: Board,
pub players: BTreeMap<PlayerId, Player>,
pub turn_order: Vec<PlayerId>, // INVARIANT: turn_order never changes for a given game, unless a player is kicked
pub current_turn: PlayerId,
pub winning_players: Option<Vec<PlayerId>>, // will be None until the game ends
}
impl fmt::Debug for GameState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result | };
board_string.push_str(&tile_string);
board_string.push_str(" ");
}
board_string.push_str("\n");
}
writeln!(f, "{}", board_string)?;
// Write each player, their score, and their penguin positions
for (player_id, player) in self.players.iter() {
let current_player_str = if self.current_turn == *player_id { "<- current turn" } else { "" };
let penguins = util::map_slice(&player.penguins, |penguin| {
match penguin.tile_id {
Some(id) => format!("penguin on tile {}", id.0),
None => "unplaced".to_string(),
}
}).join(", ");
writeln!(f, "Player {} - {:?} - score: {} - penguins: [{}] {}",
player_id.0, player.color, player.score, penguins, current_player_str)?;
}
writeln!(f, "")
}
}
impl GameState {
/// Create a new GameState with the given board and player_count. Generates new
/// player ids for the number of players given.
/// This will panic if player_count is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn new(board: Board, player_count: usize) -> GameState {
GameState::with_players(board, (0..player_count).map(PlayerId).collect())
}
/// Create a new GameState with the given board and turn_order, with the player count equal
/// to the number of players in turn_order.
/// This will panic if turn_order.len() is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn with_players(board: Board, turn_order: Vec<PlayerId>) -> GameState {
// Each player receives 6 - N penguins, where N is the number of players
let penguins_per_player = PENGUIN_FACTOR - turn_order.len();
let players: BTreeMap<_, _> = turn_order.iter().zip(PlayerColor::iter()).map(|(id, color)| {
(*id, Player::new(*id, color, penguins_per_player))
}).collect();
let current_turn = turn_order[0];
GameState {
board,
players,
turn_order,
current_turn,
winning_players: None,
}
}
/// Creates a new gamestate with a board with a given number of rows and columns,
/// the given number of players, and no holes.
pub fn with_default_board(rows: u32, columns: u32, players: usize) -> GameState {
let board = Board::with_no_holes(rows, columns, 3);
GameState::new(board, players)
}
/// Places an unplaced avatar on a position on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the players' avatars are already placed
/// 3. Placement of a penguin that doesn't belong to the current player
pub fn place_avatar_for_player(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
self.place_avatar_without_changing_turn(player, tile)?;
self.advance_turn();
Some(())
}
/// Place a player's avatar but don't change whose turn it is.
/// This is useful to more easily place avatars in bulk during testing.
pub fn place_avatar_without_changing_turn(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
let occupied_tiles = self.get_occupied_tiles();
if occupied_tiles.contains(&tile) {
None
} else {
let player = self.players.get_mut(&player)?;
player.place_penguin(tile, &self.board)
}
}
/// Places an unplaced avatar on the given placement on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the players' avatars are already placed
///
/// This function will choose which penguin to place for the current player, so it is
/// impossible for the player to place a penguin that is not theirs.
pub fn place_avatar_for_current_player(&mut self, placement: Placement) -> Option<()> {
self.place_avatar_for_player(self.current_turn, placement.tile_id)
}
/// Moves a placed avatar from one position to another on the board,
/// removes the tile that penguin was on, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid move.
/// An invalid move is one of:
/// 1. Move to an invalid position (either out of bounds or hole)
/// 2. Move when the current avatar has yet to be placed
/// 3. Move to a tile that is not accessible within a straight line
/// of the current tile, with no holes in between.
/// 4. Move of a penguin that doesn't belong to the player
pub fn move_avatar_for_player_without_changing_turn(&mut self, player: PlayerId, penguin_start_tile: TileId, destination: TileId) -> Option<()> {
let occupied = &self.get_occupied_tiles();
let player = self.players.get_mut(&player)?;
player.move_penguin(penguin_start_tile, destination, &self.board, occupied)?;
player.score += self.board.remove_tile(penguin_start_tile);
Some(())
}
/// Helper function which moves an avatar for the player whose turn it currently is.
pub fn move_avatar_for_current_player(&mut self, move_: Move) -> Option<()> {
self.move_avatar_for_player_without_changing_turn(self.current_turn, move_.from, move_.to)?;
self.advance_turn();
Some(())
}
/// Retrieve a tile by its ID. Will return None if the id
/// does not reference any existing tile. This can happen
/// if the tile was removed and has become a hole in the board.
pub fn get_tile(&self, tile_id: TileId) -> Option<&Tile> {
self.board.tiles.get(&tile_id)
}
/// Gets the color of the player whose penguin is on a certain tile
/// Returns None if there is no penguin on that tile
pub fn get_color_on_tile(&self, tile_id: TileId) -> Option<PlayerColor> {
self.players.iter().find_map(|(_, player)| {
let is_penguin_on_tile = player.penguins.iter().any(|penguin| penguin.tile_id == Some(tile_id));
if is_penguin_on_tile {
Some(player.color)
} else {
None
}
})
}
/// Returns true if any player has a penguin they can move,
/// false if not (and the game is thus over)
pub fn can_any_player_move_penguin(&self) -> bool {
let occupied_tiles = self.get_occupied_tiles();
self.players.iter().any(|(_, player)| player.can_move_a_penguin(&self.board, &occupied_tiles))
}
/// Returns true if the given player can move a penguin
pub fn can_player_move(&self, player: PlayerId) -> bool {
self.players.get(&player).map_or(false, |player|
player.can_move_a_penguin(&self.board, &self.get_occupied_tiles()))
}
/// Returns the set of tiles on this gamestate's board which have a penguin on them
pub fn get_occupied_tiles(&self) -> HashSet<TileId> {
self.players.iter()
.flat_map(|(_, player)| player.penguins.iter().filter_map(|penguin| penguin.tile_id))
.collect()
}
/// Gets all valid moves for the current GameState,
/// meaning only moves the current player can make.
pub fn get_valid_moves(&self) -> Vec<Move> {
let occupied_tiles = self.get_occupied_tiles();
let penguins_to_move = &self.current_player().penguins;
penguins_to_move.iter().flat_map(|penguin| {
// penguins in Games are placed, so should always be Some
let starting_tile_id = penguin.tile_id.expect("A penguin was not placed!");
let starting_tile = self.get_tile(starting_tile_id).expect("A penguin is placed on a hole");
starting_tile.all_reachable_tiles(&self.board, &occupied_tiles)
.into_iter()
.map(move |destination| Move::new(starting_tile_id, destination.tile_id))
}).collect()
}
/// Get a penguin at a position, None if no penguin at that position
#[allow(dead_code)]
pub fn find_penguin_at_position(&self, posn: BoardPosn) -> Option<&Penguin> {
let tile = self.board.get_tile_id(posn.x, posn.y)?;
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Search for the penguin at the given TileId and return it if possible.
/// Returns None if no penguin at that location was found.
pub fn find_penguin(&self, tile: TileId) -> Option<&Penguin> {
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Returns the player whose turn it currently is
pub fn current_player(&self) -> &Player {
self.players.get(&self.current_turn).unwrap()
}
/// Is this game over? We define a game to be "over" if either
/// some players have won, or there are no players left in the game.
pub fn is_game_over(&self) -> bool {
self.winning_players.is_some() || self.players.is_empty()
}
#[allow(dead_code)]
pub fn get_player_by_color_mut(&mut self, color: PlayerColor) -> Option<&mut Player> {
self.players.iter_mut()
.find(|(_, player)| player.color == color)
.map(|(_, player)| player)
}
/// Advance the turn of this game to the next player's turn
/// Will mutate this game's current_turn field.
///
/// Note that this will skip the turn of any player who cannot
/// move any penguins. It is an invalid game state for the current
/// turn to be a player who cannot move any penguins.
pub fn advance_turn(&mut self) {
self.advance_turn_index();
for _ in 0..self.players.len() {
if !self.current_player().has_unplaced_penguins() && self.get_valid_moves().is_empty() {
self.advance_turn_index()
} else {
return;
}
}
// No players have any moves left, find the winning players by those with the maximum score
self.winning_players = Some(util::all_max_by_key(self.players.iter(), |(_, player)| player.score)
.map(|(id, _)| *id).collect());
}
/// Sets the turn of this game to the next player in order
fn advance_turn_index(&mut self) {
if !self.turn_order.is_empty() {
let current_turn_index = self.turn_order.iter().position(|id| id == &self.current_turn).unwrap();
let next_turn_index = (current_turn_index + 1) % self.turn_order.len();
self.current_turn = self.turn_order[next_turn_index];
}
}
/// Sets the turn of the game to the previous player's turn, used when removing a player.
fn previous_turn_index(&mut self) {
let current_turn_index = self.turn_order.iter()
.position(|id| id == &self.current_turn).unwrap();
let prev_turn_index = if current_turn_index == 0 {
self.turn_order.len().saturating_sub(1)
} else {
(current_turn_index - 1) % self.turn_order.len()
};
self.current_turn = self.turn_order[prev_turn_index];
}
pub fn player_score(&self, player_id: PlayerId) -> usize {
self.players[&player_id].score
}
/// Returns true if all penguins have a concrete position on the board.
/// If this is false then we are still in the PlacePenguins phase of the game.
pub fn all_penguins_are_placed(&self) -> bool {
self.players.iter().all(|(_, player)| !player.has_unplaced_penguins())
}
/// Removes a player and its penguins from this game
pub fn remove_player(&mut self, player_id: PlayerId) {
if !self.is_game_over() {
let should_advance_turn = self.current_turn == player_id;
// Prepare to advance the current turn past the to-be-removed player
if should_advance_turn {
self.previous_turn_index();
}
self.players.remove(&player_id);
self.turn_order.retain(|id| *id != player_id);
// Now actually advance the turn after the player is removed to properly
// handle the case where we skip the turns of possibly multiple players
// whose penguins are all stuck.
if should_advance_turn {
self.advance_turn();
}
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::common::boardposn::BoardPosn;
#[test]
fn test_new() {
let board = Board::with_no_holes(3, 3, 3);
let gamestate = GameState::new(board, 4); // create game with 4 players
assert_eq!(gamestate.players.len(), 4);
// should have 6-n penguins per player
assert!(gamestate.players.iter().all(|(_, player)| player.penguins.len() == 2));
// does turn_order contain each of the players' ids exactly once?
assert_eq!(gamestate.turn_order.len(), gamestate.players.len());
assert!(gamestate.players.iter().all(|(id, _)| gamestate.turn_order.contains(id)), "{:?},\nturns={:?}", gamestate.players, gamestate.turn_order);
assert!(gamestate.winning_players.is_none()); // no winners yet
}
#[test]
fn test_can_any_player_move_penguin() {
// Can no players move when there's a penguin on the board, but holes blocking it in all directions?
let holes = util::map_slice(&[(1, 1), (1, 0), (0, 1)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(!gamestate.can_any_player_move_penguin());
// Can a player move when they have a penguin on the board with no holes blocking it?
let board = Board::with_no_holes(3, 3, 3);
let mut gamestate = GameState::new(board, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(gamestate.can_any_player_move_penguin());
// Can no players move when all penguins are blocked by holes or other penguins?
// 0(hole) 2(penguin)
// 1(penguin) 3(hole)
let holes = util::map_slice(&[(1, 1), (0, 0)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(1));
assert!(gamestate.can_any_player_move_penguin()); // no penguin at 2, so can move
gamestate.place_avatar_without_changing_turn(player_id, TileId(2));
assert!(!gamestate.can_any_player_move_penguin()); // penguin at 2, so cannot move
}
#[test]
fn test_place_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
gamestate.board.remove_tile(TileId(5));
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Player places a penguin at a valid spot
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(4)), Some(()));
// Player tried to place a penguin at an invalid location
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(10)), None);
// Player tried to place a penguin at a hole
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(5)), None);
}
#[test]
fn test_move_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Reachable tiles from 0 are [0, 2, 1, 5]
let tile_0 = TileId(0);
let reachable_tile = TileId(5);
let unreachable_tile = TileId(3);
// Move failed: penguin not yet placed
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, reachable_tile), None);
gamestate.place_avatar_without_changing_turn(player_id, tile_0);
// Move failed: tile not reachable from tile 0
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, tile_0), None);
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, unreachable_tile), None);
// success, penguin should now be on tile 5
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, reachable_tile), Some(()));
// Finally, assert that the position of the penguin actually changed
let player = gamestate.players.iter_mut().nth(0).unwrap().1;
let penguin_pos = player.find_penguin_mut(reachable_tile).and_then(|penguin| penguin.tile_id);
assert_eq!(peng | {
let mut board_string = String::new();
for y in 0..self.board.height {
if y % 2 == 1 {
board_string.push_str(" ");
}
for x in 0..self.board.width {
let tile_string = match self.board.get_tile_id(x, y) {
Some(id) => {
match self.players.values().find(|player|
player.penguins.iter().any(|penguin| penguin.tile_id == Some(id)))
{
Some(player) => {
format!("P{}", player.player_id.0)
},
None => format!("{:2}", id.0),
}
},
None => " x".to_string(), | identifier_body |
gamestate.rs | Board;
use crate::common::tile::{ TileId, Tile };
use crate::common::player::{ Player, PlayerId, PlayerColor };
use crate::common::penguin::Penguin;
use crate::common::action::{ Move, Placement };
use crate::common::boardposn::BoardPosn;
use crate::common::util;
use std::collections::HashSet;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fmt;
use serde::{ Serialize, Deserialize };
pub const MIN_PLAYERS_PER_GAME: usize = 2;
pub const MAX_PLAYERS_PER_GAME: usize = 4;
/// Each player receives 6 - player_count penguins to start the game
pub const PENGUIN_FACTOR: usize = 6;
/// Rc<RefCell<T>> gives a copiable, mutable reference to its T
///
/// This SharedGameState is a copiable, mutable pointer to the GameState
/// intended for use in the client since gtk requires ownership of the
/// data passed to its callbacks. Using this, one can pass a copy to each
/// callback and maintain a copy to overwrite with server updates as well.
pub type SharedGameState = Rc<RefCell<GameState>>;
/// The GameState contains the entirety of the current state
/// of the game. It is meant to be serialized into json from the server
/// and sent to each client to deserialize to receive the updated game
/// state each turn. The GameState is rendering-agnostic, so each
/// client is free to render the GameState however it wishes.
///
/// Throughout the gamestate, unique ids are usually used over the objects
/// they refer to so that we can (1) avoid excessive cloning from multiple mutable
/// borrows, (2) serialize the data more easily and (3) enable the creation of
/// external mappings on the server from e.g. PlayerId to some private data if needed.
///
/// - Each player's penguin is contained within the Player struct.
/// - Each penguin struct contains either Some(TileId) if it is currently
/// on a tile or None if it hasn't yet been placed.
/// - Each Player is mapped from their unique PlayerId to the Player struct.
/// - The ordering of players is given by the immutable turn_order. The current
/// turn is given by current_turn which will change each time
/// {place,move}_avatar_for_player is called.
/// - The GameState's current_turn player should never be stuck, unless
/// the game is over, i.e. current_player should always have moves.
/// Players' turns will be skipped in turn_order if they cannot move anymore.
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct GameState {
pub board: Board,
pub players: BTreeMap<PlayerId, Player>,
pub turn_order: Vec<PlayerId>, // INVARIANT: turn_order never changes for a given game, unless a player is kicked
pub current_turn: PlayerId,
pub winning_players: Option<Vec<PlayerId>>, // will be None until the game ends
}
impl fmt::Debug for GameState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut board_string = String::new();
for y in 0..self.board.height {
if y % 2 == 1 {
board_string.push_str(" ");
}
for x in 0..self.board.width {
let tile_string = match self.board.get_tile_id(x, y) {
Some(id) => {
match self.players.values().find(|player|
player.penguins.iter().any(|penguin| penguin.tile_id == Some(id)))
{
Some(player) => {
format!("P{}", player.player_id.0)
},
None => format!("{:2}", id.0),
}
},
None => " x".to_string(),
};
board_string.push_str(&tile_string);
board_string.push_str(" ");
}
board_string.push_str("\n");
}
writeln!(f, "{}", board_string)?;
// Write each player, their score, and their penguin positions
for (player_id, player) in self.players.iter() {
let current_player_str = if self.current_turn == *player_id { "<- current turn" } else { "" };
let penguins = util::map_slice(&player.penguins, |penguin| {
match penguin.tile_id {
Some(id) => format!("penguin on tile {}", id.0),
None => "unplaced".to_string(),
}
}).join(", ");
writeln!(f, "Player {} - {:?} - score: {} - penguins: [{}] {}",
player_id.0, player.color, player.score, penguins, current_player_str)?;
}
writeln!(f, "")
}
}
impl GameState {
/// Create a new GameState with the given board and player_count. Generates new
/// player ids for the number of players given.
/// This will panic if player_count is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn new(board: Board, player_count: usize) -> GameState {
GameState::with_players(board, (0..player_count).map(PlayerId).collect())
}
/// Create a new GameState with the given board and turn_order, with the player count equal
/// to the number of players in turn_order.
/// This will panic if turn_order.len() is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn with_players(board: Board, turn_order: Vec<PlayerId>) -> GameState {
// Each player receives 6 - N penguins, where N is the number of players
let penguins_per_player = PENGUIN_FACTOR - turn_order.len();
let players: BTreeMap<_, _> = turn_order.iter().zip(PlayerColor::iter()).map(|(id, color)| {
(*id, Player::new(*id, color, penguins_per_player))
}).collect();
let current_turn = turn_order[0];
GameState {
board,
players,
turn_order,
current_turn,
winning_players: None,
}
}
/// Creates a new gamestate with a board with a given number of rows and columns,
/// the given number of players, and no holes.
pub fn with_default_board(rows: u32, columns: u32, players: usize) -> GameState {
let board = Board::with_no_holes(rows, columns, 3);
GameState::new(board, players)
}
/// Places an unplaced avatar on a position on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the players' avatars are already placed
/// 3. Placement of a penguin that doesn't belong to the current player
pub fn place_avatar_for_player(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
self.place_avatar_without_changing_turn(player, tile)?;
self.advance_turn();
Some(())
}
/// Place a player's avatar but don't change whose turn it is.
/// This is useful to more easily place avatars in bulk during testing.
pub fn place_avatar_without_changing_turn(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
let occupied_tiles = self.get_occupied_tiles();
if occupied_tiles.contains(&tile) {
None
} else {
let player = self.players.get_mut(&player)?;
player.place_penguin(tile, &self.board)
}
}
/// Places an unplaced avatar on the given placement on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the players' avatars are already placed
///
/// This function will choose which penguin to place for the current player, so it is
/// impossible for the player to place a penguin that is not theirs.
pub fn place_avatar_for_current_player(&mut self, placement: Placement) -> Option<()> {
self.place_avatar_for_player(self.current_turn, placement.tile_id)
}
/// Moves a placed avatar from one position to another on the board,
/// removes the tile that penguin was on, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid move.
/// An invalid move is one of:
/// 1. Move to an invalid position (either out of bounds or hole)
/// 2. Move when the current avatar has yet to be placed
/// 3. Move to a tile that is not accessible within a straight line
/// of the current tile, with no holes in between.
/// 4. Move of a penguin that doesn't belong to the player
pub fn move_avatar_for_player_without_changing_turn(&mut self, player: PlayerId, penguin_start_tile: TileId, destination: TileId) -> Option<()> {
let occupied = &self.get_occupied_tiles();
let player = self.players.get_mut(&player)?;
player.move_penguin(penguin_start_tile, destination, &self.board, occupied)?;
player.score += self.board.remove_tile(penguin_start_tile);
Some(())
}
/// Helper function which moves an avatar for the player whose turn it currently is.
pub fn move_avatar_for_current_player(&mut self, move_: Move) -> Option<()> {
self.move_avatar_for_player_without_changing_turn(self.current_turn, move_.from, move_.to)?;
self.advance_turn();
Some(())
}
/// Retrieve a tile by its ID. Will return None if the id
/// does not reference any existing tile. This can happen
/// if the tile was removed and has become a hole in the board.
pub fn get_tile(&self, tile_id: TileId) -> Option<&Tile> {
self.board.tiles.get(&tile_id)
}
/// Gets the color of the player whose penguin is on a certain tile
/// Returns None if there is no penguin on that tile
pub fn get_color_on_tile(&self, tile_id: TileId) -> Option<PlayerColor> {
self.players.iter().find_map(|(_, player)| {
let is_penguin_on_tile = player.penguins.iter().any(|penguin| penguin.tile_id == Some(tile_id));
if is_penguin_on_tile {
Some(player.color)
} else {
None
}
})
}
/// Returns true if any player has a penguin they can move,
/// false if not (and the game is thus over)
pub fn can_any_player_move_penguin(&self) -> bool {
let occupied_tiles = self.get_occupied_tiles();
self.players.iter().any(|(_, player)| player.can_move_a_penguin(&self.board, &occupied_tiles))
}
/// Returns true if the given player can move a penguin
pub fn can_player_move(&self, player: PlayerId) -> bool {
self.players.get(&player).map_or(false, |player|
player.can_move_a_penguin(&self.board, &self.get_occupied_tiles()))
}
/// Returns the set of tiles on this gamestate's board which have a penguin on them
pub fn get_occupied_tiles(&self) -> HashSet<TileId> {
self.players.iter()
.flat_map(|(_, player)| player.penguins.iter().filter_map(|penguin| penguin.tile_id))
.collect()
}
/// Gets all valid moves for the current GameState,
/// meaning only moves the current player can make.
pub fn get_valid_moves(&self) -> Vec<Move> {
let occupied_tiles = self.get_occupied_tiles();
let penguins_to_move = &self.current_player().penguins;
penguins_to_move.iter().flat_map(|penguin| {
// penguins in Games are placed, so should always be Some
let starting_tile_id = penguin.tile_id.expect("A penguin was not placed!");
let starting_tile = self.get_tile(starting_tile_id).expect("A penguin is placed on a hole");
starting_tile.all_reachable_tiles(&self.board, &occupied_tiles)
.into_iter()
.map(move |destination| Move::new(starting_tile_id, destination.tile_id))
}).collect()
}
/// Get a penguin at a position, None if no penguin at that position
#[allow(dead_code)]
pub fn find_penguin_at_position(&self, posn: BoardPosn) -> Option<&Penguin> {
let tile = self.board.get_tile_id(posn.x, posn.y)?;
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Search for the penguin at the given TileId and return it if possible.
/// Returns None if no penguin at that location was found.
pub fn find_penguin(&self, tile: TileId) -> Option<&Penguin> {
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Returns the player whose turn it currently is
pub fn current_player(&self) -> &Player {
self.players.get(&self.current_turn).unwrap()
}
/// Is this game over? We define a game to be "over" if either
/// some players have won, or there are no players left in the game.
pub fn is_game_over(&self) -> bool {
self.winning_players.is_some() || self.players.is_empty()
}
#[allow(dead_code)]
pub fn get_player_by_color_mut(&mut self, color: PlayerColor) -> Option<&mut Player> {
self.players.iter_mut()
.find(|(_, player)| player.color == color)
.map(|(_, player)| player)
}
/// Advance the turn of this game to the next player's turn
/// Will mutate this game's current_turn field.
///
/// Note that this will skip the turn of any player who cannot
/// move any penguins. It is an invalid game state for the current
/// turn to be a player who cannot move any penguins.
pub fn advance_turn(&mut self) {
self.advance_turn_index();
for _ in 0..self.players.len() {
if !self.current_player().has_unplaced_penguins() && self.get_valid_moves().is_empty() {
self.advance_turn_index()
} else {
return;
}
}
// No players have any moves left, find the winning players by those with the maximum score
self.winning_players = Some(util::all_max_by_key(self.players.iter(), |(_, player)| player.score)
.map(|(id, _)| *id).collect());
}
/// Sets the turn of this game to the next player in order
fn advance_turn_index(&mut self) {
if !self.turn_order.is_empty() {
let current_turn_index = self.turn_order.iter().position(|id| id == &self.current_turn).unwrap();
let next_turn_index = (current_turn_index + 1) % self.turn_order.len();
self.current_turn = self.turn_order[next_turn_index];
}
}
/// Sets the turn of the game to the previous player's turn, used when removing a player.
fn previous_turn_index(&mut self) {
let current_turn_index = self.turn_order.iter()
.position(|id| id == &self.current_turn).unwrap();
let prev_turn_index = if current_turn_index == 0 {
self.turn_order.len().saturating_sub(1)
} else {
(current_turn_index - 1) % self.turn_order.len()
};
self.current_turn = self.turn_order[prev_turn_index];
}
pub fn player_score(&self, player_id: PlayerId) -> usize {
self.players[&player_id].score
}
/// Returns true if all penguins have a concrete position on the board.
/// If this is false then we are still in the PlacePenguins phase of the game.
pub fn all_penguins_are_placed(&self) -> bool {
self.players.iter().all(|(_, player)|!player.has_unplaced_penguins())
}
/// Removes a player and its penguins from this game
pub fn remove_player(&mut self, player_id: PlayerId) {
if !self.is_game_over() {
let should_advance_turn = self.current_turn == player_id;
// Prepare to advance the current turn past the to-be-removed player
if should_advance_turn |
self.players.remove(&player_id);
self.turn_order.retain(|id| *id != player_id);
// Now actually advance the turn after the player is removed to properly
// handle the case where we skip the turns of possibly multiple players
// whose penguins are all stuck.
if should_advance_turn {
self.advance_turn();
}
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::common::boardposn::BoardPosn;
#[test]
fn test_new() {
let board = Board::with_no_holes(3, 3, 3);
let gamestate = GameState::new(board, 4); // create game with 4 players
assert_eq!(gamestate.players.len(), 4);
// should have 6-n penguins per player
assert!(gamestate.players.iter().all(|(_, player)| player.penguins.len() == 2));
// does turn_order contain each of the players' ids exactly once?
assert_eq!(gamestate.turn_order.len(), gamestate.players.len());
assert!(gamestate.players.iter().all(|(id, _)| gamestate.turn_order.contains(id)), "{:?},\nturns={:?}", gamestate.players, gamestate.turn_order);
assert!(gamestate.winning_players.is_none()); // no winners yet
}
#[test]
fn test_can_any_player_move_penguin() {
// Can no players move when there's a penguin on the board, but holes blocking it in all directions?
let holes = util::map_slice(&[(1, 1), (1, 0), (0, 1)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(!gamestate.can_any_player_move_penguin());
// Can a player move when they have a penguin on the board with no holes blocking it?
let board = Board::with_no_holes(3, 3, 3);
let mut gamestate = GameState::new(board, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(gamestate.can_any_player_move_penguin());
// Can no players move when all penguins are blocked by holes or other penguins?
// 0(hole) 2(penguin)
// 1(penguin) 3(hole)
let holes = util::map_slice(&[(1, 1), (0, 0)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(1));
assert!(gamestate.can_any_player_move_penguin()); // no penguin at 2, so can move
gamestate.place_avatar_without_changing_turn(player_id, TileId(2));
assert!(!gamestate.can_any_player_move_penguin()); // penguin at 2, so cannot move
}
#[test]
fn test_place_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
gamestate.board.remove_tile(TileId(5));
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Player places a penguin at a valid spot
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(4)), Some(()));
// Player tried to place a penguin at an invalid location
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(10)), None);
// Player tried to place a penguin at a hole
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(5)), None);
}
#[test]
fn test_move_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Reachable tiles from 0 are [0, 2, 1, 5]
let tile_0 = TileId(0);
let reachable_tile = TileId(5);
let unreachable_tile = TileId(3);
// Move failed: penguin not yet placed
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, reachable_tile), None);
gamestate.place_avatar_without_changing_turn(player_id, tile_0);
// Move failed: tile not reachable from tile 0
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, tile_0), None);
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, unreachable_tile), None);
// success, penguin should now be on tile 5
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, reachable_tile), Some(()));
// Finally, assert that the position of the penguin actually changed
let player = gamestate.players.iter_mut().nth(0).unwrap().1;
let penguin_pos = player.find_penguin_mut(reachable_tile).and_then(|penguin| penguin.tile_id);
assert_eq!(peng | {
self.previous_turn_index();
} | conditional_block |
gamestate.rs | ::Board;
use crate::common::tile::{ TileId, Tile };
use crate::common::player::{ Player, PlayerId, PlayerColor };
use crate::common::penguin::Penguin;
use crate::common::action::{ Move, Placement };
use crate::common::boardposn::BoardPosn;
use crate::common::util;
use std::collections::HashSet;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fmt;
use serde::{ Serialize, Deserialize };
pub const MIN_PLAYERS_PER_GAME: usize = 2;
pub const MAX_PLAYERS_PER_GAME: usize = 4;
/// Each player receives 6 - player_count penguins to start the game
pub const PENGUIN_FACTOR: usize = 6;
/// Rc<RefCell<T>> gives a cheaply clonable, mutable handle to its T.
///
/// This SharedGameState is a clonable, mutable pointer to the GameState,
/// intended for use in the client since gtk requires ownership of the
/// data passed to its callbacks. Using this, one can pass a clone to each
/// callback and keep another clone to overwrite with server updates as well.
pub type SharedGameState = Rc<RefCell<GameState>>;
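// A minimal usage sketch (hypothetical, not part of this module's API): each
// gtk callback gets its own Rc clone, and mutation goes through RefCell's
// runtime borrow check.
//
// let shared: SharedGameState = Rc::new(RefCell::new(gamestate));
// let for_callback = Rc::clone(&shared);
// for_callback.borrow_mut().advance_turn(); // panics if already borrowed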
/// The GameState contains the entirety of the current state
/// of the game. It is meant to be serialized into json from the server
/// and sent to each client to deserialize to receive the updated game
/// state each turn. The GameState is rendering-agnostic, so each
/// client is free to render the GameState however it wishes.
///
/// Throughout the gamestate, unique ids are usually used over the objects
/// they refer to so that we can (1) avoid excessive cloning from multiple mutable
/// borrows, (2) serialize the data more easily and (3) enable the creation of
/// external mappings on the server from e.g. PlayerId to some private data if needed.
///
/// - Each player's penguin is contained within the Player struct.
/// - Each penguin struct contains either Some(TileId) if it is currently
/// on a tile or None if it hasn't yet been placed.
/// - Each Player is mapped from their unique PlayerId to the Player struct.
/// - The ordering of players is given by the immutable turn_order. The current
/// turn is given by current_turn which will change each time
/// {place,move}_avatar_for_player is called.
/// - The GameState's current_turn player should never be stuck, unless
/// the game is over, i.e. current_player should always have moves.
/// Players' turns will be skipped in turn_order if they cannot move anymore.
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct GameState {
pub board: Board,
pub players: BTreeMap<PlayerId, Player>,
pub turn_order: Vec<PlayerId>, // INVARIANT: turn_order never changes for a given game, unless a player is kicked
pub current_turn: PlayerId,
pub winning_players: Option<Vec<PlayerId>>, // will be None until the game ends
}
impl fmt::Debug for GameState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut board_string = String::new();
for y in 0..self.board.height {
if y % 2 == 1 {
board_string.push_str(" ");
}
for x in 0..self.board.width {
let tile_string = match self.board.get_tile_id(x, y) {
Some(id) => {
match self.players.values().find(|player|
player.penguins.iter().any(|penguin| penguin.tile_id == Some(id)))
{
Some(player) => {
format!("P{}", player.player_id.0)
},
None => format!("{:2}", id.0),
}
},
None => " x".to_string(),
};
board_string.push_str(&tile_string);
board_string.push_str(" ");
}
board_string.push_str("\n");
}
writeln!(f, "{}", board_string)?;
// Write each player, their score, and their penguin positions
for (player_id, player) in self.players.iter() {
let current_player_str = if self.current_turn == *player_id { "<- current turn" } else { "" };
let penguins = util::map_slice(&player.penguins, |penguin| {
match penguin.tile_id {
Some(id) => format!("penguin on tile {}", id.0),
None => "unplaced".to_string(),
}
}).join(", ");
writeln!(f, "Player {} - {:?} - score: {} - penguins: [{}] {}",
player_id.0, player.color, player.score, penguins, current_player_str)?;
}
writeln!(f, "")
}
}
impl GameState {
/// Create a new GameState with the given board and player_count. Generates new
/// player ids for the number of players given.
/// This will panic if player_count is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn new(board: Board, player_count: usize) -> GameState {
GameState::with_players(board, (0..player_count).map(PlayerId).collect())
}
/// Create a new GameState with the given board and turn_order, with the player count equal
/// to the number of players in turn_order.
/// This will panic if turn_order.len() is < MIN_PLAYERS_PER_GAME or > MAX_PLAYERS_PER_GAME.
pub fn with_players(board: Board, turn_order: Vec<PlayerId>) -> GameState {
// Each player receives 6 - N penguins, where N is the number of players
let penguins_per_player = PENGUIN_FACTOR - turn_order.len();
let players: BTreeMap<_, _> = turn_order.iter().zip(PlayerColor::iter()).map(|(id, color)| {
(*id, Player::new(*id, color, penguins_per_player))
}).collect();
let current_turn = turn_order[0];
GameState {
board,
players,
turn_order,
current_turn,
winning_players: None,
}
}
/// Creates a new gamestate with a board with a given number of rows and columns,
/// the given number of players, and no holes.
pub fn with_default_board(rows: u32, columns: u32, players: usize) -> GameState {
let board = Board::with_no_holes(rows, columns, 3);
GameState::new(board, players)
}
/// Places an unplaced avatar on a position on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the player's avatars are already placed
/// 3. Placement of a penguin that doesn't belong to the current player
pub fn place_avatar_for_player(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
self.place_avatar_without_changing_turn(player, tile)?;
self.advance_turn();
Some(())
}
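// Placement-phase sketch (assumes a 3x3 board with no holes; TileId(0) being
// free is an assumption about that layout, not a guarantee):
//
// let mut state = GameState::with_default_board(3, 3, 2);
// let first = state.current_turn;
// assert_eq!(state.place_avatar_for_player(first, TileId(0)), Some(()));
// assert_ne!(state.current_turn, first); // the turn advanced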
/// Place a player's avatar but don't change whose turn it is.
/// This is useful to more easily place avatars in bulk during testing.
pub fn place_avatar_without_changing_turn(&mut self, player: PlayerId, tile: TileId) -> Option<()> {
let occupied_tiles = self.get_occupied_tiles();
if occupied_tiles.contains(&tile) {
None
} else {
let player = self.players.get_mut(&player)?;
player.place_penguin(tile, &self.board)
}
}
/// Places an unplaced avatar at the given placement on the board, and advances the turn.
/// Returns Some(()) on success, or None if the player makes an invalid placement.
/// An invalid placement is one of:
/// 1. Placement on an invalid position (either out of bounds or a hole)
/// 2. Placement when the player's avatars are already placed
///
/// This function will choose which penguin to place for the current player, so it is
/// impossible for the player to place a penguin that is not theirs.
pub fn place_avatar_for_current_player(&mut self, placement: Placement) -> Option<()> {
self.place_avatar_for_player(self.current_turn, placement.tile_id)
}
/// Moves a placed avatar from one position to another on the board,
/// removes the tile that penguin was on, and credits its fish to the
/// player's score. This variant does not advance the turn.
/// Returns Some(()) on success, or None if the player makes an invalid move.
/// An invalid move is one of:
/// 1. Move to an invalid position (either out of bounds or hole)
/// 2. Move when the current avatar has yet to be placed
/// 3. Move to a tile that is not accessible within a straight line
/// of the current tile, with no holes in between.
/// 4. Move of a penguin that doesn't belong to the player
pub fn move_avatar_for_player_without_changing_turn(&mut self, player: PlayerId, penguin_start_tile: TileId, destination: TileId) -> Option<()> {
let occupied = &self.get_occupied_tiles();
let player = self.players.get_mut(&player)?;
player.move_penguin(penguin_start_tile, destination, &self.board, occupied)?;
player.score += self.board.remove_tile(penguin_start_tile);
Some(())
}
/// Helper function which moves an avatar for the player whose turn it currently is.
pub fn move_avatar_for_current_player(&mut self, move_: Move) -> Option<()> {
self.move_avatar_for_player_without_changing_turn(self.current_turn, move_.from, move_.to)?;
self.advance_turn();
Some(())
}
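// Move-phase sketch (tile ids are assumptions about the board layout, not
// guarantees): after placement, the current player moves a penguin from
// tile 0 to a reachable tile, and the origin tile becomes a hole.
//
// let mv = Move::new(TileId(0), TileId(5));
// state.move_avatar_for_current_player(mv); // Some(()) only if 5 is reachable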
/// Retrieve a tile by its ID. Will return None if the id
/// does not reference any existing tile. This can happen
/// if the tile was removed and has become a hole in the board.
pub fn get_tile(&self, tile_id: TileId) -> Option<&Tile> {
self.board.tiles.get(&tile_id)
}
/// Gets the color of the player whose penguin is on a certain tile
/// Returns None if there is no penguin on that tile
pub fn get_color_on_tile(&self, tile_id: TileId) -> Option<PlayerColor> {
self.players.iter().find_map(|(_, player)| {
let is_penguin_on_tile = player.penguins.iter().any(|penguin| penguin.tile_id == Some(tile_id));
if is_penguin_on_tile {
Some(player.color)
} else {
None
}
})
}
/// Returns true if any player has a penguin they can move,
/// false if not (and the game is thus over)
pub fn can_any_player_move_penguin(&self) -> bool {
let occupied_tiles = self.get_occupied_tiles();
self.players.iter().any(|(_, player)| player.can_move_a_penguin(&self.board, &occupied_tiles))
}
/// Returns true if the given player can move a penguin
pub fn can_player_move(&self, player: PlayerId) -> bool {
self.players.get(&player).map_or(false, |player|
player.can_move_a_penguin(&self.board, &self.get_occupied_tiles()))
}
/// Returns the set of tiles on this gamestate's board which have a penguin on them
pub fn get_occupied_tiles(&self) -> HashSet<TileId> {
self.players.iter()
.flat_map(|(_, player)| player.penguins.iter().filter_map(|penguin| penguin.tile_id))
.collect()
}
/// Gets all valid moves for the current GameState,
/// meaning only the moves that the current player can make
pub fn get_valid_moves(&self) -> Vec<Move> {
let occupied_tiles = self.get_occupied_tiles();
let penguins_to_move = &self.current_player().penguins;
penguins_to_move.iter().flat_map(|penguin| {
// penguins in Games are placed, so should always be Some
let starting_tile_id = penguin.tile_id.expect("A penguin was not placed!");
let starting_tile = self.get_tile(starting_tile_id).expect("A penguin is placed on a hole");
starting_tile.all_reachable_tiles(&self.board, &occupied_tiles)
.into_iter()
.map(move |destination| Move::new(starting_tile_id, destination.tile_id))
}).collect()
}
/// Get a penguin at a position, None if no penguin at that position
#[allow(dead_code)]
pub fn find_penguin_at_position(&self, posn: BoardPosn) -> Option<&Penguin> {
let tile = self.board.get_tile_id(posn.x, posn.y)?;
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Search for the penguin at the given TileId and return it if possible.
/// Returns None if no penguin at that location was found.
pub fn find_penguin(&self, tile: TileId) -> Option<&Penguin> {
self.players.iter().find_map(|(_, player)| {
player.find_penguin(tile)
})
}
/// Returns the player whose turn it currently is
pub fn current_player(&self) -> &Player {
self.players.get(&self.current_turn).unwrap()
}
/// Is this game over? We define a game to be "over" if either
/// some players have won, or there are no players left in the game.
pub fn is_game_over(&self) -> bool {
self.winning_players.is_some() || self.players.is_empty()
}
#[allow(dead_code)]
pub fn get_player_by_color_mut(&mut self, color: PlayerColor) -> Option<&mut Player> {
self.players.iter_mut()
.find(|(_, player)| player.color == color)
.map(|(_, player)| player)
}
/// Advance the turn of this game to the next player's turn
/// Will mutate this game's current_turn field.
///
/// Note that this will skip the turn of any player who cannot
/// move any penguins. It is an invalid game state for the current
/// turn to be a player who cannot move any penguins.
pub fn advance_turn(&mut self) {
self.advance_turn_index();
for _ in 0..self.players.len() {
if !self.current_player().has_unplaced_penguins() && self.get_valid_moves().is_empty() {
self.advance_turn_index()
} else {
return;
}
}
// No players have any moves left, find the winning players by those with the maximum score
self.winning_players = Some(util::all_max_by_key(self.players.iter(), |(_, player)| player.score)
.map(|(id, _)| *id).collect());
}
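// Worked example (hypothetical): with turn_order [A, B, C] and current_turn A,
// if B has no unplaced penguins and no valid moves, advance_turn() skips to C.
// If nobody can act, the loop above exhausts every player and winning_players
// is filled with each id tied for the maximum score.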
/// Sets the turn of this game to the next player in order
fn advance_turn_index(&mut self) {
if !self.turn_order.is_empty() {
let current_turn_index = self.turn_order.iter().position(|id| id == &self.current_turn).unwrap();
let next_turn_index = (current_turn_index + 1) % self.turn_order.len();
self.current_turn = self.turn_order[next_turn_index];
}
}
/// Sets the turn of the game to the previous player's turn, used when removing a player.
fn previous_turn_index(&mut self) {
let current_turn_index = self.turn_order.iter()
.position(|id| id == &self.current_turn).unwrap();
let prev_turn_index = if current_turn_index == 0 {
self.turn_order.len().saturating_sub(1)
} else {
(current_turn_index - 1) % self.turn_order.len()
};
self.current_turn = self.turn_order[prev_turn_index];
}
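// Index arithmetic example: with turn_order.len() == 4, index 0 wraps back to
// 3 via saturating_sub; for any other index the `% len` in the else branch is
// a no-op, since current_turn_index - 1 is already in range.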
pub fn player_score(&self, player_id: PlayerId) -> usize {
self.players[&player_id].score
}
/// Returns true if all penguins have a concrete position on the board.
/// If this is false then we are still in the PlacePenguins phase of the game.
pub fn all_penguins_are_placed(&self) -> bool {
self.players.iter().all(|(_, player)| !player.has_unplaced_penguins())
}
/// Removes a player and its penguins from this game
pub fn remove_player(&mut self, player_id: PlayerId) {
if !self.is_game_over() {
let should_advance_turn = self.current_turn == player_id;
// Prepare to advance the current turn past the to-be-removed player
if should_advance_turn {
self.previous_turn_index();
}
self.players.remove(&player_id);
self.turn_order.retain(|id| *id != player_id);
// Now actually advance the turn after the player is removed to properly
// handle the case where we skip the turns of possibly multiple players
// whose penguins are all stuck.
if should_advance_turn {
self.advance_turn();
}
}
}
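// Removal walkthrough (assumed scenario): turn_order [A, B, C] with
// current_turn == B. remove_player(B) first rewinds to A, drops B from
// `players` and `turn_order`, then advance_turn() lands on C -- or skips
// past C as well if all of C's penguins are stuck.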
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::common::boardposn::BoardPosn;
#[test]
fn test_new() {
let board = Board::with_no_holes(3, 3, 3);
let gamestate = GameState::new(board, 4); // create game with 4 players
assert_eq!(gamestate.players.len(), 4);
// each player should have 6 - n = 2 penguins, since n = 4 players here
assert!(gamestate.players.iter().all(|(_, player)| player.penguins.len() == 2));
// does turn_order contain each of the players' ids exactly once?
assert_eq!(gamestate.turn_order.len(), gamestate.players.len());
assert!(gamestate.players.iter().all(|(id, _)| gamestate.turn_order.contains(id)), "{:?},\nturns={:?}", gamestate.players, gamestate.turn_order);
assert!(gamestate.winning_players.is_none()); // no winners yet
}
#[test]
fn test_can_any_player_move_penguin() {
// Can no players move when there's a penguin on the board, but holes blocking it in all directions?
let holes = util::map_slice(&[(1, 1), (1, 0), (0, 1)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(!gamestate.can_any_player_move_penguin());
// Can a player move when they have a penguin on the board with no holes blocking it?
let board = Board::with_no_holes(3, 3, 3);
let mut gamestate = GameState::new(board, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(0));
assert!(gamestate.can_any_player_move_penguin());
// Can no players move when all penguins are blocked by holes or other penguins?
// 0(hole) 2(penguin)
// 1(penguin) 3(hole)
let holes = util::map_slice(&[(1, 1), (0, 0)], |pos| BoardPosn::from(*pos));
let board_with_holes = Board::with_holes(2, 2, holes, 1);
let mut gamestate = GameState::new(board_with_holes, 4);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
assert!(!gamestate.can_any_player_move_penguin());
gamestate.place_avatar_without_changing_turn(player_id, TileId(1));
assert!(gamestate.can_any_player_move_penguin()); // no penguin at 2, so can move
gamestate.place_avatar_without_changing_turn(player_id, TileId(2));
assert!(!gamestate.can_any_player_move_penguin()); // penguin at 2, so cannot move
}
#[test]
fn test_place_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
gamestate.board.remove_tile(TileId(5));
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Player places a penguin at a valid spot
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(4)), Some(()));
// Player tried to place a penguin at an invalid location
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(10)), None);
// Player tried to place a penguin at a hole
assert_eq!(gamestate.place_avatar_without_changing_turn(player_id, TileId(5)), None);
}
#[test]
fn test_move_avatar() {
let mut gamestate = GameState::with_default_board(3, 3, 2);
let player_id = *gamestate.players.iter().nth(0).unwrap().0;
// Reachable tiles from 0 are [0, 2, 1, 5]
let tile_0 = TileId(0);
let reachable_tile = TileId(5);
let unreachable_tile = TileId(3);
// Move failed: penguin not yet placed
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, reachable_tile), None);
gamestate.place_avatar_without_changing_turn(player_id, tile_0);
| assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, tile_0), None);
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, unreachable_tile), None);
// success, penguin should now be on tile 5
assert_eq!(gamestate.move_avatar_for_player_without_changing_turn(player_id, tile_0, reachable_tile), Some(()));
// Finally, assert that the position of the penguin actually changed
let player = gamestate.players.iter_mut().nth(0).unwrap().1;
let penguin_pos = player.find_penguin_mut(reachable_tile).and_then(|penguin| penguin.tile_id);
assert_eq!(penguin_ | // Move failed: tile not reachable from tile 0 | random_line_split |
main.rs | //! [![github]](https://github.com/dtolnay/star-history) [![crates-io]](https://crates.io/crates/star-history) [![docs-rs]](https://docs.rs/star-history)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
#![allow(
clippy::cast_lossless,
clippy::default_trait_access,
clippy::let_underscore_untyped,
// Clippy bug: https://github.com/rust-lang/rust-clippy/issues/7422
clippy::nonstandard_macro_braces,
clippy::similar_names,
clippy::single_match_else,
clippy::too_many_lines,
clippy::toplevel_ref_arg,
clippy::uninlined_format_args,
)]
mod log;
use crate::log::Log;
use chrono::{DateTime, Duration, Utc};
use reqwest::blocking::Client;
use reqwest::header::{AUTHORIZATION, USER_AGENT};
use serde::de::{self, Deserialize, Deserializer, IgnoredAny, MapAccess, SeqAccess, Visitor};
use serde_derive::{Deserialize, Serialize};
use std::cmp::{self, Ordering};
use std::collections::{BTreeMap as Map, BTreeSet as Set, VecDeque};
use std::env;
use std::fmt::{self, Display};
use std::fs;
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::process;
use thiserror::Error;
static VERSION: &str = concat!("star-history ", env!("CARGO_PKG_VERSION"));
static HELP: &str = concat!(
"star-history ",
env!("CARGO_PKG_VERSION"),
"
David Tolnay <[email protected]>
Produce a graph showing number of GitHub stars of a user or repo over time.
USAGE:
gh auth login
star-history [USER...] [USER/REPO...]
EXAMPLES:
star-history dtolnay
star-history dtolnay/syn dtolnay/quote
star-history serde-rs/serde
",
);
static MISSING_TOKEN: &str = "\
Error: GitHub auth token is not set up.
(Expected config file: {{path}})
Run `gh auth login` to store a GitHub login token. The `gh` CLI
can be installed from <https://cli.github.com>.
If you prefer not to use the `gh` CLI, you can instead provide
a token to star-history through the GITHUB_TOKEN environment
variable. Head to <https://github.com/settings/tokens> and click
\"Generate new token (classic)\". The default public access
permission is sufficient -- you can leave all the checkboxes
empty. Save the generated token somewhere like ~/.githubtoken
and use `export GITHUB_TOKEN=$(cat ~/.githubtoken)`.
";
#[derive(Error, Debug)]
enum Error {
#[error("Error from GitHub api: {0}")]
GitHub(String),
#[error("failed to decode response body")]
DecodeResponse(#[source] serde_json::Error),
#[error("no such user: {0}")]
NoSuchUser(String),
#[error("no such repository: {0}/{1}")]
NoSuchRepo(String, String),
#[error(transparent)]
GhToken(#[from] gh_token::Error),
#[error(transparent)]
Reqwest(#[from] reqwest::Error),
#[error(transparent)]
Io(#[from] io::Error),
}
type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Eq, Clone)]
enum Series {
Owner(String),
Repo(String, String),
}
impl Display for Series {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
Series::Owner(owner) => formatter.write_str(owner)?,
Series::Repo(owner, repo) => {
formatter.write_str(owner)?;
formatter.write_str("/")?;
formatter.write_str(repo)?;
}
}
Ok(())
}
}
impl Ord for Series {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Series::Owner(lowner), Series::Owner(rowner)) => {
lowner.to_lowercase().cmp(&rowner.to_lowercase())
}
(Series::Repo(lowner, lrepo), Series::Repo(rowner, rrepo)) => {
(lowner.to_lowercase(), lrepo.to_lowercase())
.cmp(&(rowner.to_lowercase(), rrepo.to_lowercase()))
}
(Series::Owner(_), Series::Repo(..)) => Ordering::Less,
(Series::Repo(..), Series::Owner(_)) => Ordering::Greater,
}
}
}
impl PartialOrd for Series {
fn partial_cmp(&self, other: &Series) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Series {
fn eq(&self, other: &Series) -> bool {
self.cmp(other) == Ordering::Equal
}
}
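// Ordering sketch: comparison is case-insensitive and owners sort before
// repos, e.g. Series::Owner("dtolnay".into()) < Series::Repo("Apple".into(),
// "swift".into()), and Owner("A") compares equal to Owner("a").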
#[derive(Serialize, Deserialize, Debug)]
#[serde(transparent)]
struct Cursor(Option<String>);
impl Display for Cursor {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match &self.0 {
Some(cursor) => {
formatter.write_str("\"")?;
formatter.write_str(cursor)?;
formatter.write_str("\"")?;
}
None => formatter.write_str("null")?,
}
Ok(())
}
}
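// Rendered forms, as interpolated into the GraphQL query text below:
// Cursor(None) -> null
// Cursor(Some("abc".into())) -> "abc" (with the quotes)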
struct Work {
series: Series,
cursor: Cursor,
}
#[derive(Serialize)]
struct | {
query: String,
}
#[derive(Deserialize, Debug)]
struct Response {
message: Option<String>,
#[serde(default, deserialize_with = "deserialize_data")]
data: VecDeque<Data>,
#[serde(default)]
errors: Vec<Message>,
}
#[derive(Deserialize, Debug)]
struct Message {
message: String,
}
#[derive(Debug)]
enum Data {
Owner(Option<Owner>),
Repo(Option<Repo>),
}
#[derive(Deserialize, Debug)]
struct Owner {
login: String,
repositories: Repositories,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Repositories {
page_info: PageInfo,
nodes: Vec<Repo>,
}
#[derive(Deserialize, Debug)]
struct Repo {
name: String,
owner: Account,
stargazers: Option<Stargazers>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Default, Debug)]
struct Account {
login: String,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Stargazers {
page_info: PageInfo,
#[serde(deserialize_with = "non_nulls")]
edges: Vec<Star>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
struct Star {
#[serde(rename = "starredAt")]
time: DateTime<Utc>,
node: Account,
}
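// Because `time` is the first field, the derived Ord keeps each BTreeSet of
// stars in chronological order, with the account login breaking ties.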
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct PageInfo {
has_next_page: bool,
end_cursor: Cursor,
}
fn deserialize_data<'de, D>(deserializer: D) -> Result<VecDeque<Data>, D::Error>
where
D: Deserializer<'de>,
{
struct ResponseVisitor;
impl<'de> Visitor<'de> for ResponseVisitor {
type Value = VecDeque<Data>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Map<String, Data>")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut data = VecDeque::new();
while let Some(key) = map.next_key::<String>()? {
if key.starts_with("owner") {
let owner = map.next_value::<Option<Owner>>()?;
data.push_back(Data::Owner(owner));
} else if key.starts_with("repo") {
let repo = map.next_value::<Option<Repo>>()?;
data.push_back(Data::Repo(repo));
} else {
map.next_value::<IgnoredAny>()?;
}
}
Ok(data)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(VecDeque::new())
}
}
deserializer.deserialize_any(ResponseVisitor)
}
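// Shape handled above (aliases are generated as owner0, repo1, ... by the
// query builders at the bottom of this file):
// {"owner0": {...}, "repo3": {...}} -> [Data::Owner(..), Data::Repo(..)]
// and a JSON `"data": null` becomes an empty queue via visit_unit.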
fn non_nulls<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
struct NonNullsVisitor<T>(PhantomData<fn() -> T>);
impl<'de, T> Visitor<'de> for NonNullsVisitor<T>
where
T: Deserialize<'de>,
{
type Value = Vec<T>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("array")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut vec = Vec::new();
while let Some(next) = seq.next_element::<Option<T>>()? {
vec.extend(next);
}
Ok(vec)
}
}
let visitor = NonNullsVisitor(PhantomData);
deserializer.deserialize_seq(visitor)
}
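// e.g. a stargazer edge list of [a, null, b] deserializes to vec![a, b];
// the nulls (presumably deleted accounts) are silently dropped.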
fn main() {
let ref mut log = Log::new();
if let Err(err) = try_main(log) {
log.error(err);
process::exit(1);
}
}
fn try_main(log: &mut Log) -> Result<()> {
let mut args = Vec::new();
for arg in env::args().skip(1) {
if arg == "--help" {
print!("{}", HELP);
process::exit(0);
} else if arg == "--version" {
println!("{}", VERSION);
process::exit(0);
}
let mut parts = arg.splitn(2, '/');
let owner = parts.next().unwrap();
match parts.next() {
Some(repo) => {
let owner = owner.to_owned();
let repo = repo.to_owned();
args.push(Series::Repo(owner, repo));
}
None => {
let owner = owner.strip_prefix('@').unwrap_or(owner).to_owned();
args.push(Series::Owner(owner));
}
}
}
let github_token = match gh_token::get() {
Ok(token) => token,
Err(gh_token::Error::NotConfigured(path)) => {
let path_lossy = path.to_string_lossy();
let message = MISSING_TOKEN.replace("{{path}}", &path_lossy);
eprint!("{}", message);
process::exit(1);
}
Err(error) => return Err(Error::GhToken(error)),
};
let authorization = format!("bearer {}", github_token.trim());
if args.is_empty() {
eprint!("{}", HELP);
process::exit(1);
}
let mut work = Vec::new();
let mut stars = Map::new();
for series in &args {
stars.insert(series.clone(), Set::new());
work.push(Work {
series: series.clone(),
cursor: Cursor(None),
});
}
let client = Client::new();
while !work.is_empty() {
let batch_size = cmp::min(work.len(), 50);
let defer = work.split_off(batch_size);
let batch = mem::replace(&mut work, defer);
let mut query = String::new();
query += "{\n";
for (i, work) in batch.iter().enumerate() {
let cursor = &work.cursor;
query += &match &work.series {
Series::Owner(owner) => query_owner(i, owner, cursor),
Series::Repo(owner, repo) => query_repo(i, owner, repo, cursor),
};
}
query += "}\n";
let json = client
.post("https://api.github.com/graphql")
.header(USER_AGENT, "dtolnay/star-history")
.header(AUTHORIZATION, &authorization)
.json(&Request { query })
.send()?
.text()?;
let response: Response = serde_json::from_str(&json).map_err(Error::DecodeResponse)?;
if let Some(message) = response.message {
return Err(Error::GitHub(message));
}
for err in response.errors {
log.error(Error::GitHub(err.message));
}
let mut data = response.data;
let mut queue = batch.into_iter();
while let Some(node) = data.pop_front() {
let id = queue.next();
match node {
Data::Owner(None) | Data::Repo(None) => match id.unwrap().series {
Series::Owner(owner) => return Err(Error::NoSuchUser(owner)),
Series::Repo(owner, repo) => return Err(Error::NoSuchRepo(owner, repo)),
},
Data::Owner(Some(node)) => {
let owner = node.login;
for repo in node.repositories.nodes {
data.push_back(Data::Repo(Some(repo)));
}
if node.repositories.page_info.has_next_page {
work.push(Work {
series: Series::Owner(owner),
cursor: node.repositories.page_info.end_cursor,
});
}
}
Data::Repo(Some(node)) => {
let owner = node.owner.login;
let repo = node.name;
if let Some(stargazers) = node.stargazers {
let series = Series::Owner(owner.clone());
let owner_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
owner_stars.insert(star.clone());
}
let series = Series::Repo(owner.clone(), repo.clone());
let repo_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
repo_stars.insert(star.clone());
}
if stargazers.page_info.has_next_page {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: stargazers.page_info.end_cursor,
});
}
} else {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: Cursor(None),
});
}
}
}
}
log.tick();
}
let now = Utc::now();
for set in stars.values_mut() {
if let Some(first) = set.iter().next() {
let first_time = first.time;
set.insert(Star {
time: first_time - Duration::seconds(1),
node: Default::default(),
});
}
match set.iter().next_back() {
Some(last) if last.time >= now => {}
_ => {
set.insert(Star {
time: now,
node: Default::default(),
});
}
}
}
let mut data = String::new();
data += "var data = [\n";
for arg in &args {
data += " {\"name\":\"";
data += &arg.to_string();
data += "\", \"values\":[\n";
let stars = &stars[arg];
for (i, star) in stars.iter().enumerate() {
data += " {\"time\":";
data += &star.time.timestamp().to_string();
data += ", \"stars\":";
data += &(i.saturating_sub((star.time == now) as usize)).to_string();
data += "},\n";
}
data += " ]},\n";
}
data += " ];";
let html = include_str!("index.html").replace("var data = [];", &data);
let dir = env::temp_dir().join("star-history");
fs::create_dir_all(&dir)?;
let path = dir.join(format!("{}.html", now.timestamp_millis()));
fs::write(&path, html)?;
if opener::open(&path).is_err() {
writeln!(log, "graph written to {}", path.display());
}
Ok(())
}
fn query_owner(i: usize, login: &str, cursor: &Cursor) -> String {
r#"
owner$i: repositoryOwner(login: "$login") {
login
repositories(after: $cursor, first: 100, isFork: false, privacy: PUBLIC, ownerAffiliations: [OWNER]) {
pageInfo {
hasNextPage
endCursor
}
nodes {
name
owner {
login
}
}
}
}
"#
.replace("$i", &i.to_string())
.replace("$login", login)
.replace("$cursor", &cursor.to_string())
}
fn query_repo(i: usize, owner: &str, repo: &str, cursor: &Cursor) -> String {
r#"
repo$i: repository(owner: "$owner", name: "$repo") {
name
owner {
login
}
stargazers(after: $cursor, first: 100) {
pageInfo {
hasNextPage
endCursor
}
edges {
node {
login
}
starredAt
}
}
}
"#
.replace("$i", &i.to_string())
.replace("$owner", owner)
.replace("$repo", repo)
.replace("$cursor", &cursor.to_string())
}
| Request | identifier_name |
main.rs | //! [![github]](https://github.com/dtolnay/star-history) [![crates-io]](https://crates.io/crates/star-history) [![docs-rs]](https://docs.rs/star-history)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
#![allow(
clippy::cast_lossless,
clippy::default_trait_access,
clippy::let_underscore_untyped,
// Clippy bug: https://github.com/rust-lang/rust-clippy/issues/7422
clippy::nonstandard_macro_braces,
clippy::similar_names,
clippy::single_match_else,
clippy::too_many_lines,
clippy::toplevel_ref_arg,
clippy::uninlined_format_args,
)]
mod log;
use crate::log::Log;
use chrono::{DateTime, Duration, Utc};
use reqwest::blocking::Client;
use reqwest::header::{AUTHORIZATION, USER_AGENT};
use serde::de::{self, Deserialize, Deserializer, IgnoredAny, MapAccess, SeqAccess, Visitor};
use serde_derive::{Deserialize, Serialize};
use std::cmp::{self, Ordering};
use std::collections::{BTreeMap as Map, BTreeSet as Set, VecDeque};
use std::env;
use std::fmt::{self, Display};
use std::fs;
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::process;
use thiserror::Error;
static VERSION: &str = concat!("star-history ", env!("CARGO_PKG_VERSION"));
static HELP: &str = concat!(
"star-history ",
env!("CARGO_PKG_VERSION"),
"
David Tolnay <[email protected]>
Produce a graph showing number of GitHub stars of a user or repo over time.
USAGE:
gh auth login
star-history [USER...] [USER/REPO...]
EXAMPLES:
star-history dtolnay
star-history dtolnay/syn dtolnay/quote
star-history serde-rs/serde
",
);
static MISSING_TOKEN: &str = "\
Error: GitHub auth token is not set up.
(Expected config file: {{path}})
Run `gh auth login` to store a GitHub login token. The `gh` CLI
can be installed from <https://cli.github.com>.
If you prefer not to use the `gh` CLI, you can instead provide
a token to star-history through the GITHUB_TOKEN environment
variable. Head to <https://github.com/settings/tokens> and click
\"Generate new token (classic)\". The default public access
permission is sufficient -- you can leave all the checkboxes
empty. Save the generated token somewhere like ~/.githubtoken
and use `export GITHUB_TOKEN=$(cat ~/.githubtoken)`.
";
#[derive(Error, Debug)]
enum Error {
#[error("Error from GitHub api: {0}")]
GitHub(String),
#[error("failed to decode response body")]
DecodeResponse(#[source] serde_json::Error),
#[error("no such user: {0}")]
NoSuchUser(String),
#[error("no such repository: {0}/{1}")]
NoSuchRepo(String, String),
#[error(transparent)]
GhToken(#[from] gh_token::Error),
#[error(transparent)]
Reqwest(#[from] reqwest::Error),
#[error(transparent)]
Io(#[from] io::Error),
}
type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Eq, Clone)]
enum Series {
Owner(String),
Repo(String, String),
}
impl Display for Series {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
Series::Owner(owner) => formatter.write_str(owner)?,
Series::Repo(owner, repo) => {
formatter.write_str(owner)?;
formatter.write_str("/")?;
formatter.write_str(repo)?;
}
}
Ok(())
}
}
impl Ord for Series {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Series::Owner(lowner), Series::Owner(rowner)) => {
lowner.to_lowercase().cmp(&rowner.to_lowercase())
}
(Series::Repo(lowner, lrepo), Series::Repo(rowner, rrepo)) => {
(lowner.to_lowercase(), lrepo.to_lowercase())
.cmp(&(rowner.to_lowercase(), rrepo.to_lowercase()))
}
(Series::Owner(_), Series::Repo(..)) => Ordering::Less,
(Series::Repo(..), Series::Owner(_)) => Ordering::Greater,
}
}
}
impl PartialOrd for Series {
fn partial_cmp(&self, other: &Series) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Series {
fn eq(&self, other: &Series) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(transparent)]
struct Cursor(Option<String>);
impl Display for Cursor {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match &self.0 {
Some(cursor) => {
formatter.write_str("\"")?;
formatter.write_str(cursor)?;
formatter.write_str("\"")?;
}
None => formatter.write_str("null")?,
}
Ok(())
}
}
struct Work {
series: Series,
cursor: Cursor,
}
#[derive(Serialize)]
struct Request {
query: String,
}
#[derive(Deserialize, Debug)]
struct Response {
message: Option<String>,
#[serde(default, deserialize_with = "deserialize_data")]
data: VecDeque<Data>,
#[serde(default)]
errors: Vec<Message>,
}
#[derive(Deserialize, Debug)]
struct Message {
message: String,
}
#[derive(Debug)]
enum Data {
Owner(Option<Owner>),
Repo(Option<Repo>),
}
#[derive(Deserialize, Debug)]
struct Owner {
login: String,
repositories: Repositories,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Repositories {
page_info: PageInfo,
nodes: Vec<Repo>,
}
#[derive(Deserialize, Debug)]
struct Repo {
name: String,
owner: Account,
stargazers: Option<Stargazers>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Default, Debug)]
struct Account {
login: String,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Stargazers {
page_info: PageInfo,
#[serde(deserialize_with = "non_nulls")]
edges: Vec<Star>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
struct Star {
#[serde(rename = "starredAt")]
time: DateTime<Utc>,
node: Account,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct PageInfo {
has_next_page: bool,
end_cursor: Cursor,
}
fn deserialize_data<'de, D>(deserializer: D) -> Result<VecDeque<Data>, D::Error>
where
D: Deserializer<'de>,
{
struct ResponseVisitor;
impl<'de> Visitor<'de> for ResponseVisitor {
type Value = VecDeque<Data>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Map<String, Data>")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut data = VecDeque::new();
while let Some(key) = map.next_key::<String>()? {
if key.starts_with("owner") {
let owner = map.next_value::<Option<Owner>>()?;
data.push_back(Data::Owner(owner));
} else if key.starts_with("repo") {
let repo = map.next_value::<Option<Repo>>()?;
data.push_back(Data::Repo(repo));
} else {
map.next_value::<IgnoredAny>()?;
}
}
Ok(data)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(VecDeque::new())
}
}
deserializer.deserialize_any(ResponseVisitor)
}
fn non_nulls<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
struct NonNullsVisitor<T>(PhantomData<fn() -> T>);
impl<'de, T> Visitor<'de> for NonNullsVisitor<T>
where
T: Deserialize<'de>,
{
type Value = Vec<T>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("array")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut vec = Vec::new();
while let Some(next) = seq.next_element::<Option<T>>()? {
vec.extend(next);
}
Ok(vec)
}
}
let visitor = NonNullsVisitor(PhantomData);
deserializer.deserialize_seq(visitor)
}
fn main() {
let ref mut log = Log::new();
if let Err(err) = try_main(log) {
log.error(err);
process::exit(1);
}
}
fn try_main(log: &mut Log) -> Result<()> {
let mut args = Vec::new();
for arg in env::args().skip(1) {
if arg == "--help" {
print!("{}", HELP);
process::exit(0);
} else if arg == "--version" {
println!("{}", VERSION);
process::exit(0);
}
let mut parts = arg.splitn(2, '/');
let owner = parts.next().unwrap();
match parts.next() {
Some(repo) => {
let owner = owner.to_owned();
let repo = repo.to_owned();
args.push(Series::Repo(owner, repo));
}
None => {
let owner = owner.strip_prefix('@').unwrap_or(owner).to_owned();
args.push(Series::Owner(owner));
}
}
}
let github_token = match gh_token::get() {
Ok(token) => token,
Err(gh_token::Error::NotConfigured(path)) => {
let path_lossy = path.to_string_lossy();
let message = MISSING_TOKEN.replace("{{path}}", &path_lossy);
eprint!("{}", message);
process::exit(1);
}
Err(error) => return Err(Error::GhToken(error)),
};
let authorization = format!("bearer {}", github_token.trim());
if args.is_empty() {
eprint!("{}", HELP);
process::exit(1);
}
let mut work = Vec::new();
let mut stars = Map::new();
for series in &args {
stars.insert(series.clone(), Set::new());
work.push(Work {
series: series.clone(),
cursor: Cursor(None),
});
}
let client = Client::new();
while !work.is_empty() {
let batch_size = cmp::min(work.len(), 50);
let defer = work.split_off(batch_size);
let batch = mem::replace(&mut work, defer);
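// Batching note: up to 50 pending series are aliased below into a single
// GraphQL document, so one HTTP round trip services many cursors; the
// remainder stays queued in `work` for the next loop iteration.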
let mut query = String::new();
query += "{\n";
for (i, work) in batch.iter().enumerate() {
let cursor = &work.cursor;
query += &match &work.series {
Series::Owner(owner) => query_owner(i, owner, cursor),
Series::Repo(owner, repo) => query_repo(i, owner, repo, cursor),
};
}
query += "}\n";
let json = client
.post("https://api.github.com/graphql")
.header(USER_AGENT, "dtolnay/star-history")
.header(AUTHORIZATION, &authorization)
.json(&Request { query })
.send()?
.text()?;
let response: Response = serde_json::from_str(&json).map_err(Error::DecodeResponse)?;
if let Some(message) = response.message {
return Err(Error::GitHub(message));
}
for err in response.errors {
log.error(Error::GitHub(err.message));
}
let mut data = response.data;
let mut queue = batch.into_iter();
while let Some(node) = data.pop_front() {
let id = queue.next();
match node {
Data::Owner(None) | Data::Repo(None) => match id.unwrap().series {
Series::Owner(owner) => return Err(Error::NoSuchUser(owner)),
Series::Repo(owner, repo) => return Err(Error::NoSuchRepo(owner, repo)),
},
Data::Owner(Some(node)) => {
let owner = node.login;
for repo in node.repositories.nodes {
data.push_back(Data::Repo(Some(repo)));
}
if node.repositories.page_info.has_next_page {
work.push(Work {
series: Series::Owner(owner),
cursor: node.repositories.page_info.end_cursor,
});
}
}
Data::Repo(Some(node)) => {
let owner = node.owner.login;
let repo = node.name;
if let Some(stargazers) = node.stargazers {
let series = Series::Owner(owner.clone());
let owner_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
owner_stars.insert(star.clone());
}
let series = Series::Repo(owner.clone(), repo.clone());
let repo_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
repo_stars.insert(star.clone());
}
if stargazers.page_info.has_next_page {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: stargazers.page_info.end_cursor,
});
}
} else {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: Cursor(None),
});
}
}
}
}
log.tick();
}
let now = Utc::now();
for set in stars.values_mut() {
if let Some(first) = set.iter().next() {
let first_time = first.time;
set.insert(Star {
time: first_time - Duration::seconds(1),
node: Default::default(),
});
}
match set.iter().next_back() {
Some(last) if last.time >= now => {}
_ => |
}
}
let mut data = String::new();
data += "var data = [\n";
for arg in &args {
data += " {\"name\":\"";
data += &arg.to_string();
data += "\", \"values\":[\n";
let stars = &stars[arg];
for (i, star) in stars.iter().enumerate() {
data += " {\"time\":";
data += &star.time.timestamp().to_string();
data += ", \"stars\":";
data += &(i.saturating_sub((star.time == now) as usize)).to_string();
data += "},\n";
}
data += " ]},\n";
}
data += " ];";
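// The string built here is plain JavaScript spliced into index.html, roughly:
// var data = [ {"name":"serde-rs/serde", "values":[{"time":1500000000,
// "stars":0}, ...]}, ]; (values shown are illustrative). The saturating_sub
// keeps the synthetic point at `now` from counting as a real star.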
let html = include_str!("index.html").replace("var data = [];", &data);
let dir = env::temp_dir().join("star-history");
fs::create_dir_all(&dir)?;
let path = dir.join(format!("{}.html", now.timestamp_millis()));
fs::write(&path, html)?;
if opener::open(&path).is_err() {
writeln!(log, "graph written to {}", path.display());
}
Ok(())
}
fn query_owner(i: usize, login: &str, cursor: &Cursor) -> String {
r#"
owner$i: repositoryOwner(login: "$login") {
login
repositories(after: $cursor, first: 100, isFork: false, privacy: PUBLIC, ownerAffiliations: [OWNER]) {
pageInfo {
hasNextPage
endCursor
}
nodes {
name
owner {
login
}
}
}
}
"#
.replace("$i", &i.to_string())
.replace("$login", login)
.replace("$cursor", &cursor.to_string())
}
fn query_repo(i: usize, owner: &str, repo: &str, cursor: &Cursor) -> String {
r#"
repo$i: repository(owner: "$owner", name: "$repo") {
name
owner {
login
}
stargazers(after: $cursor, first: 100) {
pageInfo {
hasNextPage
endCursor
}
edges {
node {
login
}
starredAt
}
}
}
"#
.replace("$i", &i.to_string())
.replace("$owner", owner)
.replace("$repo", repo)
.replace("$cursor", &cursor.to_string())
}
| {
set.insert(Star {
time: now,
node: Default::default(),
});
} | conditional_block |
main.rs | //! [![github]](https://github.com/dtolnay/star-history) [![crates-io]](https://crates.io/crates/star-history) [![docs-rs]](https://docs.rs/star-history)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
#![allow(
clippy::cast_lossless,
clippy::default_trait_access,
clippy::let_underscore_untyped,
// Clippy bug: https://github.com/rust-lang/rust-clippy/issues/7422
clippy::nonstandard_macro_braces,
clippy::similar_names,
clippy::single_match_else,
clippy::too_many_lines,
clippy::toplevel_ref_arg,
clippy::uninlined_format_args,
)]
mod log;
use crate::log::Log;
use chrono::{DateTime, Duration, Utc};
use reqwest::blocking::Client;
use reqwest::header::{AUTHORIZATION, USER_AGENT};
use serde::de::{self, Deserialize, Deserializer, IgnoredAny, MapAccess, SeqAccess, Visitor};
use serde_derive::{Deserialize, Serialize};
use std::cmp::{self, Ordering};
use std::collections::{BTreeMap as Map, BTreeSet as Set, VecDeque};
use std::env;
use std::fmt::{self, Display};
use std::fs;
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::process;
use thiserror::Error;
static VERSION: &str = concat!("star-history ", env!("CARGO_PKG_VERSION"));
static HELP: &str = concat!(
"star-history ",
env!("CARGO_PKG_VERSION"),
"
David Tolnay <[email protected]>
Produce a graph showing number of GitHub stars of a user or repo over time.
USAGE:
gh auth login
star-history [USER...] [USER/REPO...]
EXAMPLES:
star-history dtolnay
star-history dtolnay/syn dtolnay/quote
star-history serde-rs/serde
",
);
static MISSING_TOKEN: &str = "\
Error: GitHub auth token is not set up.
(Expected config file: {{path}})
Run `gh auth login` to store a GitHub login token. The `gh` CLI
can be installed from <https://cli.github.com>.
If you prefer not to use the `gh` CLI, you can instead provide
a token to star-history through the GITHUB_TOKEN environment
variable. Head to <https://github.com/settings/tokens> and click
\"Generate new token (classic)\". The default public access
permission is sufficient -- you can leave all the checkboxes
empty. Save the generated token somewhere like ~/.githubtoken
and use `export GITHUB_TOKEN=$(cat ~/.githubtoken)`.
";
#[derive(Error, Debug)]
enum Error {
#[error("Error from GitHub api: {0}")]
GitHub(String),
#[error("failed to decode response body")]
DecodeResponse(#[source] serde_json::Error),
#[error("no such user: {0}")]
NoSuchUser(String),
#[error("no such repository: {0}/{1}")]
NoSuchRepo(String, String),
#[error(transparent)]
GhToken(#[from] gh_token::Error),
#[error(transparent)]
Reqwest(#[from] reqwest::Error),
#[error(transparent)]
Io(#[from] io::Error),
}
type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Eq, Clone)]
enum Series {
Owner(String),
Repo(String, String),
}
impl Display for Series {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
Series::Owner(owner) => formatter.write_str(owner)?,
Series::Repo(owner, repo) => {
formatter.write_str(owner)?;
formatter.write_str("/")?;
formatter.write_str(repo)?;
}
}
Ok(())
}
}
impl Ord for Series {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Series::Owner(lowner), Series::Owner(rowner)) => {
lowner.to_lowercase().cmp(&rowner.to_lowercase())
}
(Series::Repo(lowner, lrepo), Series::Repo(rowner, rrepo)) => {
(lowner.to_lowercase(), lrepo.to_lowercase())
.cmp(&(rowner.to_lowercase(), rrepo.to_lowercase()))
}
(Series::Owner(_), Series::Repo(..)) => Ordering::Less,
(Series::Repo(..), Series::Owner(_)) => Ordering::Greater,
}
}
}
impl PartialOrd for Series {
fn partial_cmp(&self, other: &Series) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Series {
fn eq(&self, other: &Series) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(transparent)]
struct Cursor(Option<String>);
impl Display for Cursor {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match &self.0 {
Some(cursor) => {
formatter.write_str("\"")?;
formatter.write_str(cursor)?;
formatter.write_str("\"")?;
}
None => formatter.write_str("null")?,
}
Ok(())
}
}
struct Work {
series: Series,
cursor: Cursor,
}
#[derive(Serialize)]
struct Request {
query: String,
}
#[derive(Deserialize, Debug)]
struct Response {
message: Option<String>,
#[serde(default, deserialize_with = "deserialize_data")]
data: VecDeque<Data>,
#[serde(default)]
errors: Vec<Message>,
}
#[derive(Deserialize, Debug)]
struct Message {
message: String,
}
#[derive(Debug)]
enum Data {
Owner(Option<Owner>),
Repo(Option<Repo>),
}
#[derive(Deserialize, Debug)]
struct Owner {
login: String,
repositories: Repositories,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Repositories {
page_info: PageInfo,
nodes: Vec<Repo>,
}
#[derive(Deserialize, Debug)]
struct Repo {
name: String,
owner: Account,
stargazers: Option<Stargazers>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Default, Debug)]
struct Account {
login: String,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Stargazers {
page_info: PageInfo,
#[serde(deserialize_with = "non_nulls")]
edges: Vec<Star>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
struct Star {
#[serde(rename = "starredAt")]
time: DateTime<Utc>,
node: Account,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct PageInfo {
has_next_page: bool,
end_cursor: Cursor,
}
fn deserialize_data<'de, D>(deserializer: D) -> Result<VecDeque<Data>, D::Error>
where
D: Deserializer<'de>,
{
struct ResponseVisitor;
impl<'de> Visitor<'de> for ResponseVisitor {
type Value = VecDeque<Data>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Map<String, Data>")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut data = VecDeque::new();
while let Some(key) = map.next_key::<String>()? {
if key.starts_with("owner") {
let owner = map.next_value::<Option<Owner>>()?;
data.push_back(Data::Owner(owner));
} else if key.starts_with("repo") {
let repo = map.next_value::<Option<Repo>>()?;
data.push_back(Data::Repo(repo));
} else {
map.next_value::<IgnoredAny>()?;
}
}
Ok(data)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where | E: de::Error,
{
Ok(VecDeque::new())
}
}
deserializer.deserialize_any(ResponseVisitor)
}
fn non_nulls<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
struct NonNullsVisitor<T>(PhantomData<fn() -> T>);
impl<'de, T> Visitor<'de> for NonNullsVisitor<T>
where
T: Deserialize<'de>,
{
type Value = Vec<T>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("array")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut vec = Vec::new();
while let Some(next) = seq.next_element::<Option<T>>()? {
vec.extend(next);
}
Ok(vec)
}
}
let visitor = NonNullsVisitor(PhantomData);
deserializer.deserialize_seq(visitor)
}
fn main() {
let ref mut log = Log::new();
if let Err(err) = try_main(log) {
log.error(err);
process::exit(1);
}
}
fn try_main(log: &mut Log) -> Result<()> {
let mut args = Vec::new();
for arg in env::args().skip(1) {
if arg == "--help" {
print!("{}", HELP);
process::exit(0);
} else if arg == "--version" {
println!("{}", VERSION);
process::exit(0);
}
let mut parts = arg.splitn(2, '/');
let owner = parts.next().unwrap();
match parts.next() {
Some(repo) => {
let owner = owner.to_owned();
let repo = repo.to_owned();
args.push(Series::Repo(owner, repo));
}
None => {
let owner = owner.strip_prefix('@').unwrap_or(owner).to_owned();
args.push(Series::Owner(owner));
}
}
}
let github_token = match gh_token::get() {
Ok(token) => token,
Err(gh_token::Error::NotConfigured(path)) => {
let path_lossy = path.to_string_lossy();
let message = MISSING_TOKEN.replace("{{path}}", &path_lossy);
eprint!("{}", message);
process::exit(1);
}
Err(error) => return Err(Error::GhToken(error)),
};
let authorization = format!("bearer {}", github_token.trim());
if args.is_empty() {
eprint!("{}", HELP);
process::exit(1);
}
let mut work = Vec::new();
let mut stars = Map::new();
for series in &args {
stars.insert(series.clone(), Set::new());
work.push(Work {
series: series.clone(),
cursor: Cursor(None),
});
}
let client = Client::new();
while !work.is_empty() {
let batch_size = cmp::min(work.len(), 50);
let defer = work.split_off(batch_size);
let batch = mem::replace(&mut work, defer);
let mut query = String::new();
query += "{\n";
for (i, work) in batch.iter().enumerate() {
let cursor = &work.cursor;
query += &match &work.series {
Series::Owner(owner) => query_owner(i, owner, cursor),
Series::Repo(owner, repo) => query_repo(i, owner, repo, cursor),
};
}
query += "}\n";
let json = client
.post("https://api.github.com/graphql")
.header(USER_AGENT, "dtolnay/star-history")
.header(AUTHORIZATION, &authorization)
.json(&Request { query })
.send()?
.text()?;
let response: Response = serde_json::from_str(&json).map_err(Error::DecodeResponse)?;
if let Some(message) = response.message {
return Err(Error::GitHub(message));
}
for err in response.errors {
log.error(Error::GitHub(err.message));
}
let mut data = response.data;
let mut queue = batch.into_iter();
while let Some(node) = data.pop_front() {
let id = queue.next();
match node {
Data::Owner(None) | Data::Repo(None) => match id.unwrap().series {
Series::Owner(owner) => return Err(Error::NoSuchUser(owner)),
Series::Repo(owner, repo) => return Err(Error::NoSuchRepo(owner, repo)),
},
Data::Owner(Some(node)) => {
let owner = node.login;
for repo in node.repositories.nodes {
data.push_back(Data::Repo(Some(repo)));
}
if node.repositories.page_info.has_next_page {
work.push(Work {
series: Series::Owner(owner),
cursor: node.repositories.page_info.end_cursor,
});
}
}
Data::Repo(Some(node)) => {
let owner = node.owner.login;
let repo = node.name;
if let Some(stargazers) = node.stargazers {
let series = Series::Owner(owner.clone());
let owner_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
owner_stars.insert(star.clone());
}
let series = Series::Repo(owner.clone(), repo.clone());
let repo_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
repo_stars.insert(star.clone());
}
if stargazers.page_info.has_next_page {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: stargazers.page_info.end_cursor,
});
}
} else {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: Cursor(None),
});
}
}
}
}
log.tick();
}
let now = Utc::now();
for set in stars.values_mut() {
if let Some(first) = set.iter().next() {
let first_time = first.time;
set.insert(Star {
time: first_time - Duration::seconds(1),
node: Default::default(),
});
}
match set.iter().next_back() {
Some(last) if last.time >= now => {}
_ => {
set.insert(Star {
time: now,
node: Default::default(),
});
}
}
}
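// The two synthetic entries above pad every series: a zero-star point one
// second before the first real star, and a point at `now`, so each plotted
// line starts at zero and extends to the present.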
let mut data = String::new();
data += "var data = [\n";
for arg in &args {
data += " {\"name\":\"";
data += &arg.to_string();
data += "\", \"values\":[\n";
let stars = &stars[arg];
for (i, star) in stars.iter().enumerate() {
data += " {\"time\":";
data += &star.time.timestamp().to_string();
data += ", \"stars\":";
data += &(i.saturating_sub((star.time == now) as usize)).to_string();
data += "},\n";
}
data += " ]},\n";
}
data += " ];";
let html = include_str!("index.html").replace("var data = [];", &data);
let dir = env::temp_dir().join("star-history");
fs::create_dir_all(&dir)?;
let path = dir.join(format!("{}.html", now.timestamp_millis()));
fs::write(&path, html)?;
if opener::open(&path).is_err() {
writeln!(log, "graph written to {}", path.display());
}
Ok(())
}
fn query_owner(i: usize, login: &str, cursor: &Cursor) -> String {
r#"
owner$i: repositoryOwner(login: "$login") {
login
repositories(after: $cursor, first: 100, isFork: false, privacy: PUBLIC, ownerAffiliations: [OWNER]) {
pageInfo {
hasNextPage
endCursor
}
nodes {
name
owner {
login
}
}
}
}
"#
.replace("$i", &i.to_string())
.replace("$login", login)
.replace("$cursor", &cursor.to_string())
}
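// For illustration, `query_owner(0, "dtolnay", &Cursor(None))` renders
// (roughly, after substitution):
//
//   owner0: repositoryOwner(login: "dtolnay") {
//       login
//       repositories(after: null, first: 100, ...) { ... }
//   }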
fn query_repo(i: usize, owner: &str, repo: &str, cursor: &Cursor) -> String {
r#"
repo$i: repository(owner: "$owner", name: "$repo") {
name
owner {
login
}
stargazers(after: $cursor, first: 100) {
pageInfo {
hasNextPage
endCursor
}
edges {
node {
login
}
starredAt
}
}
}
"#
.replace("$i", &i.to_string())
.replace("$owner", owner)
.replace("$repo", repo)
.replace("$cursor", &cursor.to_string())
} | random_line_split |
|
main.rs | //! [![github]](https://github.com/dtolnay/star-history) [![crates-io]](https://crates.io/crates/star-history) [![docs-rs]](https://docs.rs/star-history)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
#![allow(
clippy::cast_lossless,
clippy::default_trait_access,
clippy::let_underscore_untyped,
// Clippy bug: https://github.com/rust-lang/rust-clippy/issues/7422
clippy::nonstandard_macro_braces,
clippy::similar_names,
clippy::single_match_else,
clippy::too_many_lines,
clippy::toplevel_ref_arg,
clippy::uninlined_format_args,
)]
mod log;
use crate::log::Log;
use chrono::{DateTime, Duration, Utc};
use reqwest::blocking::Client;
use reqwest::header::{AUTHORIZATION, USER_AGENT};
use serde::de::{self, Deserialize, Deserializer, IgnoredAny, MapAccess, SeqAccess, Visitor};
use serde_derive::{Deserialize, Serialize};
use std::cmp::{self, Ordering};
use std::collections::{BTreeMap as Map, BTreeSet as Set, VecDeque};
use std::env;
use std::fmt::{self, Display};
use std::fs;
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::process;
use thiserror::Error;
static VERSION: &str = concat!("star-history ", env!("CARGO_PKG_VERSION"));
static HELP: &str = concat!(
"star-history ",
env!("CARGO_PKG_VERSION"),
"
David Tolnay <[email protected]>
Produce a graph showing number of GitHub stars of a user or repo over time.
USAGE:
gh auth login
star-history [USER...] [USER/REPO...]
EXAMPLES:
star-history dtolnay
star-history dtolnay/syn dtolnay/quote
star-history serde-rs/serde
",
);
static MISSING_TOKEN: &str = "\
Error: GitHub auth token is not set up.
(Expected config file: {{path}})
Run `gh auth login` to store a GitHub login token. The `gh` CLI
can be installed from <https://cli.github.com>.
If you prefer not to use the `gh` CLI, you can instead provide
a token to star-history through the GITHUB_TOKEN environment
variable. Head to <https://github.com/settings/tokens> and click
\"Generate new token (classic)\". The default public access
permission is sufficient -- you can leave all the checkboxes
empty. Save the generated token somewhere like ~/.githubtoken
and use `export GITHUB_TOKEN=$(cat ~/.githubtoken)`.
";
#[derive(Error, Debug)]
enum Error {
#[error("Error from GitHub api: {0}")]
GitHub(String),
#[error("failed to decode response body")]
DecodeResponse(#[source] serde_json::Error),
#[error("no such user: {0}")]
NoSuchUser(String),
#[error("no such repository: {0}/{1}")]
NoSuchRepo(String, String),
#[error(transparent)]
GhToken(#[from] gh_token::Error),
#[error(transparent)]
Reqwest(#[from] reqwest::Error),
#[error(transparent)]
Io(#[from] io::Error),
}
type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Eq, Clone)]
enum Series {
Owner(String),
Repo(String, String),
}
impl Display for Series {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
Series::Owner(owner) => formatter.write_str(owner)?,
Series::Repo(owner, repo) => {
formatter.write_str(owner)?;
formatter.write_str("/")?;
formatter.write_str(repo)?;
}
}
Ok(())
}
}
impl Ord for Series {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Series::Owner(lowner), Series::Owner(rowner)) => {
lowner.to_lowercase().cmp(&rowner.to_lowercase())
}
(Series::Repo(lowner, lrepo), Series::Repo(rowner, rrepo)) => {
(lowner.to_lowercase(), lrepo.to_lowercase())
.cmp(&(rowner.to_lowercase(), rrepo.to_lowercase()))
}
(Series::Owner(_), Series::Repo(..)) => Ordering::Less,
(Series::Repo(..), Series::Owner(_)) => Ordering::Greater,
}
}
}
impl PartialOrd for Series {
fn partial_cmp(&self, other: &Series) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Series {
fn eq(&self, other: &Series) -> bool {
self.cmp(other) == Ordering::Equal
}
}
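// `PartialEq` is defined through `cmp` so that equality and ordering agree
// on the same case-insensitive comparison; the `BTreeMap`/`BTreeSet` keyed
// by `Series` below rely on that consistency.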
#[derive(Serialize, Deserialize, Debug)]
#[serde(transparent)]
struct Cursor(Option<String>);
impl Display for Cursor {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result |
}
struct Work {
series: Series,
cursor: Cursor,
}
#[derive(Serialize)]
struct Request {
query: String,
}
#[derive(Deserialize, Debug)]
struct Response {
message: Option<String>,
#[serde(default, deserialize_with = "deserialize_data")]
data: VecDeque<Data>,
#[serde(default)]
errors: Vec<Message>,
}
#[derive(Deserialize, Debug)]
struct Message {
message: String,
}
#[derive(Debug)]
enum Data {
Owner(Option<Owner>),
Repo(Option<Repo>),
}
#[derive(Deserialize, Debug)]
struct Owner {
login: String,
repositories: Repositories,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Repositories {
page_info: PageInfo,
nodes: Vec<Repo>,
}
#[derive(Deserialize, Debug)]
struct Repo {
name: String,
owner: Account,
stargazers: Option<Stargazers>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Default, Debug)]
struct Account {
login: String,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Stargazers {
page_info: PageInfo,
#[serde(deserialize_with = "non_nulls")]
edges: Vec<Star>,
}
#[derive(Deserialize, Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
struct Star {
#[serde(rename = "starredAt")]
time: DateTime<Utc>,
node: Account,
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct PageInfo {
has_next_page: bool,
end_cursor: Cursor,
}
fn deserialize_data<'de, D>(deserializer: D) -> Result<VecDeque<Data>, D::Error>
where
D: Deserializer<'de>,
{
struct ResponseVisitor;
impl<'de> Visitor<'de> for ResponseVisitor {
type Value = VecDeque<Data>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Map<String, Data>")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut data = VecDeque::new();
while let Some(key) = map.next_key::<String>()? {
if key.starts_with("owner") {
let owner = map.next_value::<Option<Owner>>()?;
data.push_back(Data::Owner(owner));
} else if key.starts_with("repo") {
let repo = map.next_value::<Option<Repo>>()?;
data.push_back(Data::Repo(repo));
} else {
map.next_value::<IgnoredAny>()?;
}
}
Ok(data)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(VecDeque::new())
}
}
deserializer.deserialize_any(ResponseVisitor)
}
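// Illustrative response shape handled above (abbreviated):
//   {"data": {"owner0": {...}, "repo1": {...}, "other": ...}}
// Keys are dispatched on their "owner"/"repo" prefix, matching the aliases
// generated by `query_owner` and `query_repo`; anything else is skipped
// with `IgnoredAny`.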
fn non_nulls<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
struct NonNullsVisitor<T>(PhantomData<fn() -> T>);
impl<'de, T> Visitor<'de> for NonNullsVisitor<T>
where
T: Deserialize<'de>,
{
type Value = Vec<T>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("array")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut vec = Vec::new();
while let Some(next) = seq.next_element::<Option<T>>()? {
vec.extend(next);
}
Ok(vec)
}
}
let visitor = NonNullsVisitor(PhantomData);
deserializer.deserialize_seq(visitor)
}
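// GraphQL may return null entries inside `edges`; this visitor keeps only
// the non-null elements (`vec.extend(next)` is a no-op for `None`).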
fn main() {
let ref mut log = Log::new();
if let Err(err) = try_main(log) {
log.error(err);
process::exit(1);
}
}
fn try_main(log: &mut Log) -> Result<()> {
let mut args = Vec::new();
for arg in env::args().skip(1) {
if arg == "--help" {
print!("{}", HELP);
process::exit(0);
} else if arg == "--version" {
println!("{}", VERSION);
process::exit(0);
}
let mut parts = arg.splitn(2, '/');
let owner = parts.next().unwrap();
match parts.next() {
Some(repo) => {
let owner = owner.to_owned();
let repo = repo.to_owned();
args.push(Series::Repo(owner, repo));
}
None => {
let owner = owner.strip_prefix('@').unwrap_or(owner).to_owned();
args.push(Series::Owner(owner));
}
}
}
let github_token = match gh_token::get() {
Ok(token) => token,
Err(gh_token::Error::NotConfigured(path)) => {
let path_lossy = path.to_string_lossy();
let message = MISSING_TOKEN.replace("{{path}}", &path_lossy);
eprint!("{}", message);
process::exit(1);
}
Err(error) => return Err(Error::GhToken(error)),
};
let authorization = format!("bearer {}", github_token.trim());
if args.is_empty() {
eprint!("{}", HELP);
process::exit(1);
}
let mut work = Vec::new();
let mut stars = Map::new();
for series in &args {
stars.insert(series.clone(), Set::new());
work.push(Work {
series: series.clone(),
cursor: Cursor(None),
});
}
let client = Client::new();
while !work.is_empty() {
let batch_size = cmp::min(work.len(), 50);
let defer = work.split_off(batch_size);
let batch = mem::replace(&mut work, defer);
let mut query = String::new();
query += "{\n";
for (i, work) in batch.iter().enumerate() {
let cursor = &work.cursor;
query += &match &work.series {
Series::Owner(owner) => query_owner(i, owner, cursor),
Series::Repo(owner, repo) => query_repo(i, owner, repo, cursor),
};
}
query += "}\n";
let json = client
.post("https://api.github.com/graphql")
.header(USER_AGENT, "dtolnay/star-history")
.header(AUTHORIZATION, &authorization)
.json(&Request { query })
.send()?
.text()?;
let response: Response = serde_json::from_str(&json).map_err(Error::DecodeResponse)?;
if let Some(message) = response.message {
return Err(Error::GitHub(message));
}
for err in response.errors {
log.error(Error::GitHub(err.message));
}
let mut data = response.data;
let mut queue = batch.into_iter();
while let Some(node) = data.pop_front() {
let id = queue.next();
match node {
Data::Owner(None) | Data::Repo(None) => match id.unwrap().series {
Series::Owner(owner) => return Err(Error::NoSuchUser(owner)),
Series::Repo(owner, repo) => return Err(Error::NoSuchRepo(owner, repo)),
},
Data::Owner(Some(node)) => {
let owner = node.login;
for repo in node.repositories.nodes {
data.push_back(Data::Repo(Some(repo)));
}
if node.repositories.page_info.has_next_page {
work.push(Work {
series: Series::Owner(owner),
cursor: node.repositories.page_info.end_cursor,
});
}
}
Data::Repo(Some(node)) => {
let owner = node.owner.login;
let repo = node.name;
if let Some(stargazers) = node.stargazers {
let series = Series::Owner(owner.clone());
let owner_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
owner_stars.insert(star.clone());
}
let series = Series::Repo(owner.clone(), repo.clone());
let repo_stars = stars.entry(series).or_default();
for star in &stargazers.edges {
repo_stars.insert(star.clone());
}
if stargazers.page_info.has_next_page {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: stargazers.page_info.end_cursor,
});
}
} else {
work.push(Work {
series: Series::Repo(owner, repo),
cursor: Cursor(None),
});
}
}
}
}
log.tick();
}
let now = Utc::now();
for set in stars.values_mut() {
if let Some(first) = set.iter().next() {
let first_time = first.time;
set.insert(Star {
time: first_time - Duration::seconds(1),
node: Default::default(),
});
}
match set.iter().next_back() {
Some(last) if last.time >= now => {}
_ => {
set.insert(Star {
time: now,
node: Default::default(),
});
}
}
}
let mut data = String::new();
data += "var data = [\n";
for arg in &args {
data += " {\"name\":\"";
data += &arg.to_string();
data += "\", \"values\":[\n";
let stars = &stars[arg];
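// The generated snippet is plain JavaScript consumed by index.html, e.g.
// (illustrative):
//   var data = [ {"name":"dtolnay", "values":[ {"time":1572566400, "stars":0}, ... ]}, ];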
for (i, star) in stars.iter().enumerate() {
data += " {\"time\":";
data += &star.time.timestamp().to_string();
data += ", \"stars\":";
data += &(i.saturating_sub((star.time == now) as usize)).to_string();
data += "},\n";
}
data += " ]},\n";
}
data += " ];";
let html = include_str!("index.html").replace("var data = [];", &data);
let dir = env::temp_dir().join("star-history");
fs::create_dir_all(&dir)?;
let path = dir.join(format!("{}.html", now.timestamp_millis()));
fs::write(&path, html)?;
if opener::open(&path).is_err() {
writeln!(log, "graph written to {}", path.display());
}
Ok(())
}
fn query_owner(i: usize, login: &str, cursor: &Cursor) -> String {
r#"
owner$i: repositoryOwner(login: "$login") {
login
repositories(after: $cursor, first: 100, isFork: false, privacy: PUBLIC, ownerAffiliations: [OWNER]) {
pageInfo {
hasNextPage
endCursor
}
nodes {
name
owner {
login
}
}
}
}
"#
.replace("$i", &i.to_string())
.replace("$login", login)
.replace("$cursor", &cursor.to_string())
}
fn query_repo(i: usize, owner: &str, repo: &str, cursor: &Cursor) -> String {
r#"
repo$i: repository(owner: "$owner", name: "$repo") {
name
owner {
login
}
stargazers(after: $cursor, first: 100) {
pageInfo {
hasNextPage
endCursor
}
edges {
node {
login
}
starredAt
}
}
}
"#
.replace("$i", &i.to_string())
.replace("$owner", owner)
.replace("$repo", repo)
.replace("$cursor", &cursor.to_string())
}
| {
match &self.0 {
Some(cursor) => {
formatter.write_str("\"")?;
formatter.write_str(cursor)?;
formatter.write_str("\"")?;
}
None => formatter.write_str("null")?,
}
Ok(())
} | identifier_body |
lib.rs | //! Implements sampling of loot tables.
use ahash::AHashMap;
use feather_items::{Item, ItemStack};
use feather_loot_model as model;
use inlinable_string::InlinableString;
use itertools::Itertools;
use model::{Condition, Entry, EntryKind, Function, FunctionKind, LootTableSet, Pool};
use once_cell::sync::Lazy;
use rand::Rng;
use smallvec::SmallVec;
use std::iter;
use thiserror::Error;
/// The global loot table store, initialized at runtime from
/// the embedded loot table dump. (Generated by the build script)
static STORE: Lazy<AHashMap<InlinableString, LootTable>> = Lazy::new(|| {
static BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/dump.json"));
serde_json::from_slice::<LootTableSet>(BYTES)
.expect("invalid loot table dump")
.0
.into_iter()
.map(|(k, v)| (k, LootTable(v)))
.collect()
});
/// Returns the loot table with the given ID, if it exists.
/// IDs are the same as those used in MC data packs. For example,
/// the loot table for stone blocks has ID "blocks/stone".
pub fn loot_table(id: &str) -> Option<&'static LootTable> {
STORE.get(id)
}
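// Illustrative use (assuming the dump contains "blocks/stone"):
//
//     let table = loot_table("blocks/stone").expect("unknown table");
//     let drops = table.sample(&mut rand::thread_rng(), &Conditions::default());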
/// Condition context used to determine whether loot table conditions are satisfied.
#[derive(Debug, Default)]
pub struct Conditions {
/// The item used to break a block
pub item: Option<ItemStack>,
}
/// Opaque wrapper over `model::LootTable`.
#[derive(Debug)]
pub struct LootTable(model::LootTable);
/// Error returned when a loot table sample fails.
#[derive(Debug, Error)]
pub enum SampleError {
#[error("invalid item identifier {0}")]
InvalidItem(String),
#[error("missing loot table {0}")]
MissingLootTable(String),
/// Should be handled gracefully.
#[error("dynamic loot table {0:?}")]
IsDynamic(DynamicKind),
}
/// Indicates that the yielded item should
/// be computed based on a block entity.
#[derive(Debug)]
pub enum DynamicKind {
/// Drop e.g. contents of chest
Contents,
/// Drop the block itself (e.g. player head, banner)
This,
}
impl LootTable {
/// Samples a value from the table.
///
/// The returned set of item stacks is the result of sampling,
/// i.e. the loot which should be yielded. May return multiple
/// item stacks. No guarantee is made about the ordering
/// or distinction of the returned vector.
pub fn sample(
&self,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<SmallVec<[ItemStack; 2]>, SampleError> {
let mut results = SmallVec::new();
let pools = &self.0.pools;
// Algorithm: sample each pool.
// For each pool, evaluate `rolls` entries based on `Entry.weight`
// and yield their results.
for pool in pools {
sample_pool(pool, rng, &mut results, conditions)?;
}
Ok(results)
}
}
fn sample_pool(
pool: &Pool,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
// `rolls` times, choose an entry at random based on weighting
// and yield its results.
// Only select from entries with their conditions satisfied
let entries = pool
.entries
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
let weight_sum = entries.iter().map(|entry| entry.weight).sum::<u32>();
for _ in 0..pool.rolls.sample(rng) {
// We choose an integer at random from [0, weight_sum) and
// determine which entry has a cumulative weight matching
// the result. This algorithm is O(n) computationally, but this is unlikely
// to matter in practice, because loot tables rarely
// have more than one or two entries per pool.
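// Worked example: weights [1, 3, 6] give weight_sum = 10; n in 0..1 picks
// the first entry, 1..4 the second, and 4..10 the third, so each entry is
// chosen with probability weight / weight_sum.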
let n = rng.gen_range(0, weight_sum);
let mut cumulative_weight = 0;
let entry = entries
.iter()
.find(|entry| {
if n >= cumulative_weight && n < cumulative_weight + entry.weight {
true
} else {
cumulative_weight += entry.weight;
false
}
})
.expect("entry finding algorithm incorrect");
sample_entry(entry, rng, results, conditions)?;
}
// apply functions to results
results
.iter_mut()
.try_for_each(|item| apply_functions(pool.functions.iter(), item, rng, conditions))?;
Ok(())
}
fn sample_entry(
entry: &Entry,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
let mut single;
let mut none = iter::empty();
let mut sampled;
let items: &mut dyn Iterator<Item = ItemStack> = match &entry.kind {
EntryKind::Empty => &mut none,
EntryKind::Item => {
let item = Item::from_identifier(&entry.name)
.ok_or_else(|| SampleError::InvalidItem(entry.name.to_string()))?;
single = iter::once(ItemStack::new(item, 1));
&mut single
}
EntryKind::Tag => &mut none, // TODO
EntryKind::LootTable => {
let table = loot_table(&entry.name)
.ok_or_else(|| SampleError::MissingLootTable(entry.name.to_string()))?;
sampled = table.sample(rng, conditions)?.into_iter();
&mut sampled
}
EntryKind::Group => {
// Return an iterator over the child entries
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Alternatives => {
// Only sample first entry whose conditions are satisfied, if any
let mut temp = SmallVec::new();
if let Some(entry) = entry
.children
.iter()
.find(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
{
sample_entry(entry, rng, &mut temp, conditions)?;
}
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Sequence => {
// Apply all entries until one does not satisfy conditions
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.map(|entry| {
if satisfies_conditions(entry.conditions.iter(), conditions, rng) {
Some(entry)
} else {
None
}
})
.while_some()
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Dynamic => {
let kind = if entry.name == "contents" || entry.name == "minecraft:contents" {
DynamicKind::Contents
} else {
DynamicKind::This
};
return Err(SampleError::IsDynamic(kind));
}
};
results.extend(
items
.map(|mut item| {
apply_functions(entry.functions.iter(), &mut item, rng, conditions)?;
Ok(item)
})
.filter_map(|item: Result<ItemStack, SampleError>| item.ok()),
);
Ok(())
}
fn apply_functions<'a>(
functions: impl Iterator<Item = &'a Function>,
item: &mut ItemStack,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<(), SampleError> {
let functions = functions
.filter(|f| satisfies_conditions(f.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Function; 4]>>();
for function in functions {
match &function.kind {
FunctionKind::SetCount { count } => {
let count = count.sample(rng);
item.amount = count as u8;
}
FunctionKind::Unknown => (),
}
}
Ok(())
}
fn satisfies_conditions<'a>(
mut conditions: impl Iterator<Item = &'a Condition>,
input: &Conditions,
rng: &mut impl Rng,
) -> bool {
conditions.all(|condition| match condition {
Condition::MatchTool { predicate } => {
if let Some(item) = &predicate.item {
match &input.item {
Some(stack) => {
if stack.ty.identifier() != item {
return false;
}
}
None => return false,
}
}
// enchantments are not yet supported
if !predicate.enchantments.is_empty() {
return false;
}
true
}
Condition::RandomChance { chance } => {
let chance = chance.max(0.0).min(1.0);
rng.gen_bool(chance)
}
Condition::Unknown => true,
})
}
#[cfg(test)]
mod tests {
use super::*;
use rand::rngs::mock::StepRng;
#[test]
fn store_deserializes_successfully() {
Lazy::force(&STORE);
}
#[test]
fn sample_dirt() |
#[test]
fn grass_block_condition() {
let table = loot_table("blocks/grass_block").unwrap_or_else(|| {
panic!(
"missing loot table for grass block\nnote: loaded keys: {:?}",
STORE.keys()
);
});
let mut rng = StepRng::new(0, 1);
let items = table.sample(&mut rng, &Conditions { item: None }).unwrap();
assert_eq!(items.as_slice(), &[ItemStack::new(Item::Dirt, 1)]);
}
}
| {
let table = loot_table("blocks/dirt").expect("missing loot table for dirt block");
let mut rng = StepRng::new(0, 1);
let items = table.sample(&mut rng, &Conditions::default()).unwrap();
assert_eq!(items.as_slice(), &[ItemStack::new(Item::Dirt, 1)]);
} | identifier_body |
lib.rs | //! Implements sampling of loot tables.
use ahash::AHashMap;
use feather_items::{Item, ItemStack};
use feather_loot_model as model;
use inlinable_string::InlinableString;
use itertools::Itertools;
use model::{Condition, Entry, EntryKind, Function, FunctionKind, LootTableSet, Pool};
use once_cell::sync::Lazy;
use rand::Rng;
use smallvec::SmallVec;
use std::iter;
use thiserror::Error;
/// The global loot table store, initialized at runtime from
/// the embedded loot table dump. (Generated by the build script)
static STORE: Lazy<AHashMap<InlinableString, LootTable>> = Lazy::new(|| {
static BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/dump.json"));
serde_json::from_slice::<LootTableSet>(BYTES)
.expect("invalid loot table dump")
.0
.into_iter()
.map(|(k, v)| (k, LootTable(v)))
.collect()
});
/// Returns the loot table with the given ID, if it exists.
/// IDs are the same as those used in MC data packs. For example,
/// the loot table for stone blocks has ID "blocks/stone".
pub fn loot_table(id: &str) -> Option<&'static LootTable> {
STORE.get(id)
}
/// Condition context used to determine whether loot table conditions are satisfied.
#[derive(Debug, Default)]
pub struct Conditions {
/// The item used to break a block
pub item: Option<ItemStack>,
}
/// Opaque wrapper over `model::LootTable`.
#[derive(Debug)]
pub struct LootTable(model::LootTable);
/// Error returned when a loot table sample fails.
#[derive(Debug, Error)]
pub enum SampleError {
#[error("invalid item identifier {0}")]
InvalidItem(String),
#[error("missing loot table {0}")]
MissingLootTable(String),
/// Should be handled gracefully.
#[error("dynamic loot table {0:?}")]
IsDynamic(DynamicKind),
}
/// Indicates that the yielded item should
/// be computed based on a block entity.
#[derive(Debug)]
pub enum DynamicKind {
/// Drop e.g. contents of chest
Contents,
/// Drop the block itself (e.g. player head, banner)
This,
}
impl LootTable {
/// Samples a value from the table.
///
/// The returned set of item stacks is the result of sampling,
/// i.e. the loot which should be yielded. May return multiple
/// item stacks. No guarantee is made about the ordering
/// or distinction of the returned vector.
pub fn sample(
&self,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<SmallVec<[ItemStack; 2]>, SampleError> {
let mut results = SmallVec::new();
let pools = &self.0.pools;
// Algorithm: sample each pool.
// For each pool, evaluate `rolls` entries based on `Entry.weight`
// and yield their results.
for pool in pools {
sample_pool(pool, rng, &mut results, conditions)?;
}
Ok(results)
}
}
fn sample_pool(
pool: &Pool,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
// `rolls` times, choose an entry at random based on weighting
// and yield its results.
// Only select from entries with their conditions satisfied
let entries = pool
.entries
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
let weight_sum = entries.iter().map(|entry| entry.weight).sum::<u32>();
for _ in 0..pool.rolls.sample(rng) {
// We choose an integer at random from [0, weight_sum) and
// determine which entry has a cumulative weight matching
// the result. This algorithm is O(n) computationally, but this is unlikely
// to matter in practice, because loot tables rarely
// have more than one or two entries per pool.
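// Caveat: if every entry was filtered out by its conditions, `weight_sum`
// is 0 and `gen_range(0, 0)` would panic; the tables in the dump are
// assumed to always leave at least one eligible entry.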
let n = rng.gen_range(0, weight_sum);
let mut cumulative_weight = 0;
let entry = entries
.iter()
.find(|entry| {
if n >= cumulative_weight && n < cumulative_weight + entry.weight {
true
} else |
})
.expect("entry finding algorithm incorrect");
sample_entry(entry, rng, results, conditions)?;
}
// apply functions to results
results
.iter_mut()
.try_for_each(|item| apply_functions(pool.functions.iter(), item, rng, conditions))?;
Ok(())
}
fn sample_entry(
entry: &Entry,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
let mut single;
let mut none = iter::empty();
let mut sampled;
let items: &mut dyn Iterator<Item = ItemStack> = match &entry.kind {
EntryKind::Empty => &mut none,
EntryKind::Item => {
let item = Item::from_identifier(&entry.name)
.ok_or_else(|| SampleError::InvalidItem(entry.name.to_string()))?;
single = iter::once(ItemStack::new(item, 1));
&mut single
}
EntryKind::Tag => &mut none, // TODO
EntryKind::LootTable => {
let table = loot_table(&entry.name)
.ok_or_else(|| SampleError::MissingLootTable(entry.name.to_string()))?;
sampled = table.sample(rng, conditions)?.into_iter();
&mut sampled
}
EntryKind::Group => {
// Return an iterator over the child entries
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Alternatives => {
// Only sample first entry whose conditions are satisfied, if any
let mut temp = SmallVec::new();
if let Some(entry) = entry
.children
.iter()
.find(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
{
sample_entry(entry, rng, &mut temp, conditions)?;
}
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Sequence => {
// Apply all entries until one does not satisfy conditions
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.map(|entry| {
if satisfies_conditions(entry.conditions.iter(), conditions, rng) {
Some(entry)
} else {
None
}
})
.while_some()
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Dynamic => {
let kind = if entry.name == "contents" || entry.name == "minecraft:contents" {
DynamicKind::Contents
} else {
DynamicKind::This
};
return Err(SampleError::IsDynamic(kind));
}
};
results.extend(
items
.map(|mut item| {
apply_functions(entry.functions.iter(), &mut item, rng, conditions)?;
Ok(item)
})
.filter_map(|item: Result<ItemStack, SampleError>| item.ok()),
);
Ok(())
}
fn apply_functions<'a>(
functions: impl Iterator<Item = &'a Function>,
item: &mut ItemStack,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<(), SampleError> {
let functions = functions
.filter(|f| satisfies_conditions(f.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Function; 4]>>();
for function in functions {
match &function.kind {
FunctionKind::SetCount { count } => {
let count = count.sample(rng);
item.amount = count as u8;
}
FunctionKind::Unknown => (),
}
}
Ok(())
}
fn satisfies_conditions<'a>(
mut conditions: impl Iterator<Item = &'a Condition>,
input: &Conditions,
rng: &mut impl Rng,
) -> bool {
conditions.all(|condition| match condition {
Condition::MatchTool { predicate } => {
if let Some(item) = &predicate.item {
match &input.item {
Some(stack) => {
if stack.ty.identifier() != item {
return false;
}
}
None => return false,
}
}
// enchantments are not yet supported
if !predicate.enchantments.is_empty() {
return false;
}
true
}
Condition::RandomChance { chance } => {
let chance = chance.max(0.0).min(1.0);
rng.gen_bool(chance)
}
Condition::Unknown => true,
})
}
#[cfg(test)]
mod tests {
use super::*;
use rand::rngs::mock::StepRng;
#[test]
fn store_deserializes_successfully() {
Lazy::force(&STORE);
}
#[test]
fn sample_dirt() {
let table = loot_table("blocks/dirt").expect("missing loot table for dirt block");
let mut rng = StepRng::new(0, 1);
let items = table.sample(&mut rng, &Conditions::default()).unwrap();
assert_eq!(items.as_slice(), &[ItemStack::new(Item::Dirt, 1)]);
}
#[test]
fn grass_block_condition() {
let table = loot_table("blocks/grass_block").unwrap_or_else(|| {
panic!(
"missing loot table for grass block\nnote: loaded keys: {:?}",
STORE.keys()
);
});
let mut rng = StepRng::new(0, 1);
let items = table.sample(&mut rng, &Conditions { item: None }).unwrap();
assert_eq!(items.as_slice(), &[ItemStack::new(Item::Dirt, 1)]);
}
}
| {
cumulative_weight += entry.weight;
false
} | conditional_block |
lib.rs | //! Implements sampling of loot tables.
use ahash::AHashMap;
use feather_items::{Item, ItemStack};
use feather_loot_model as model;
use inlinable_string::InlinableString;
use itertools::Itertools;
use model::{Condition, Entry, EntryKind, Function, FunctionKind, LootTableSet, Pool};
use once_cell::sync::Lazy;
use rand::Rng;
use smallvec::SmallVec;
use std::iter;
use thiserror::Error;
/// The global loot table store, initialized at runtime from
/// the embedded loot table dump. (Generated by the build script)
static STORE: Lazy<AHashMap<InlinableString, LootTable>> = Lazy::new(|| {
static BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/dump.json"));
serde_json::from_slice::<LootTableSet>(BYTES)
.expect("invalid loot table dump")
.0
.into_iter()
.map(|(k, v)| (k, LootTable(v)))
.collect()
});
/// Returns the loot table with the given ID, if it exists.
/// IDs are the same as those used in MC data packs. For example,
/// the loot table for stone blocks has ID "blocks/stone".
pub fn loot_table(id: &str) -> Option<&'static LootTable> {
STORE.get(id)
}
/// Condition context used to determine whether loot table conditions are satisfied.
#[derive(Debug, Default)]
pub struct Conditions {
/// The item used to break a block
pub item: Option<ItemStack>,
}
/// Opaque wrapper over `model::LootTable`.
#[derive(Debug)]
pub struct LootTable(model::LootTable);
/// Error returned when a loot table sample fails.
#[derive(Debug, Error)]
pub enum SampleError {
#[error("invalid item identifier {0}")]
InvalidItem(String),
#[error("missing loot table {0}")]
MissingLootTable(String),
/// Should be handled gracefully.
#[error("dynamic loot table {0:?}")]
IsDynamic(DynamicKind),
}
/// Indicates that the yielded item should
/// be computed based on a block entity.
#[derive(Debug)]
pub enum DynamicKind {
/// Drop e.g. contents of chest
Contents,
/// Drop the block itself (e.g. player head, banner)
This,
}
impl LootTable {
/// Samples a value from the table.
///
/// The returned set of item stacks is the result of sampling,
/// i.e. the loot which should be yielded. May return multiple
/// item stacks. No guarantee is made about the ordering
/// or distinction of the returned vector.
pub fn sample(
&self,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<SmallVec<[ItemStack; 2]>, SampleError> {
let mut results = SmallVec::new();
let pools = &self.0.pools;
// Algorithm: sample each pool.
// For each pool, evaluate `rolls` entries based on `Entry.weight`
// and yield their results.
for pool in pools {
sample_pool(pool, rng, &mut results, conditions)?;
}
Ok(results)
}
}
fn sample_pool(
pool: &Pool,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
// `rolls` times, choose an entry at random based on weighting
// and yield its results.
// Only select from entries with their conditions satisfied
let entries = pool
.entries
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
let weight_sum = entries.iter().map(|entry| entry.weight).sum::<u32>(); | // the result. This algorithm is O(n) computationally, but this is unlikely
// to matter in practice, because loot tables rarely
// have more than one or two entries per pool.
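// A cumulative-weight table with binary search (or an alias table) would
// make this pick O(log n) or O(1), but the linear scan is simpler and the
// n here is tiny.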
let n = rng.gen_range(0, weight_sum);
let mut cumulative_weight = 0;
let entry = entries
.iter()
.find(|entry| {
if n >= cumulative_weight && n < cumulative_weight + entry.weight {
true
} else {
cumulative_weight += entry.weight;
false
}
})
.expect("entry finding algorithm incorrect");
sample_entry(entry, rng, results, conditions)?;
}
// apply functions to results
results
.iter_mut()
.try_for_each(|item| apply_functions(pool.functions.iter(), item, rng, conditions))?;
Ok(())
}
fn sample_entry(
entry: &Entry,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
let mut single;
let mut none = iter::empty();
let mut sampled;
let items: &mut dyn Iterator<Item = ItemStack> = match &entry.kind {
EntryKind::Empty => &mut none,
EntryKind::Item => {
let item = Item::from_identifier(&entry.name)
.ok_or_else(|| SampleError::InvalidItem(entry.name.to_string()))?;
single = iter::once(ItemStack::new(item, 1));
&mut single
}
EntryKind::Tag => &mut none, // TODO
EntryKind::LootTable => {
let table = loot_table(&entry.name)
.ok_or_else(|| SampleError::MissingLootTable(entry.name.to_string()))?;
sampled = table.sample(rng, conditions)?.into_iter();
&mut sampled
}
EntryKind::Group => {
// Return an iterator over the child entries
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Alternatives => {
// Only sample first entry whose conditions are satisfied, if any
let mut temp = SmallVec::new();
if let Some(entry) = entry
.children
.iter()
.find(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
{
sample_entry(entry, rng, &mut temp, conditions)?;
}
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Sequence => {
// Apply all entries until one does not satisfy conditions
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.map(|entry| {
if satisfies_conditions(entry.conditions.iter(), conditions, rng) {
Some(entry)
} else {
None
}
})
.while_some()
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Dynamic => {
let kind = if entry.name == "contents" || entry.name == "minecraft:contents" {
DynamicKind::Contents
} else {
DynamicKind::This
};
return Err(SampleError::IsDynamic(kind));
}
};
results.extend(
items
.map(|mut item| {
apply_functions(entry.functions.iter(), &mut item, rng, conditions)?;
Ok(item)
})
.filter_map(|item: Result<ItemStack, SampleError>| item.ok()),
);
Ok(())
}
fn apply_functions<'a>(
functions: impl Iterator<Item = &'a Function>,
item: &mut ItemStack,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<(), SampleError> {
let functions = functions
.filter(|f| satisfies_conditions(f.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Function; 4]>>();
for function in functions {
match &function.kind {
FunctionKind::SetCount { count } => {
let count = count.sample(rng);
item.amount = count as u8;
}
FunctionKind::Unknown => (),
}
}
Ok(())
}
fn satisfies_conditions<'a>(
mut conditions: impl Iterator<Item = &'a Condition>,
input: &Conditions,
rng: &mut impl Rng,
) -> bool {
conditions.all(|condition| match condition {
Condition::MatchTool { predicate } => {
if let Some(item) = &predicate.item {
match &input.item {
Some(stack) => {
if stack.ty.identifier() != item {
return false;
}
}
None => return false,
}
}
// enchantments are not yet supported
if !predicate.enchantments.is_empty() {
return false;
}
true
}
Condition::RandomChance { chance } => {
let chance = chance.max(0.0).min(1.0);
rng.gen_bool(chance)
}
Condition::Unknown => true,
})
}
#[cfg(test)]
mod tests {
use super::*;
use rand::rngs::mock::StepRng;
#[test]
fn store_deserializes_successfully() {
Lazy::force(&STORE);
}
#[test]
fn sample_dirt() {
let table = loot_table("blocks/dirt").expect("missing loot table for dirt block");
let mut rng = StepRng::new(0, 1);
let items = table.sample(&mut rng, &Conditions::default()).unwrap();
assert_eq!(items.as_slice(), &[ItemStack::new(Item::Dirt, 1)]);
}
#[test]
fn grass_block_condition() {
let table = loot_table("blocks/grass_block").unwrap_or_else(|| {
panic!(
"missing loot table for grass block\nnote: loaded keys: {:?}",
STORE.keys()
);
});
let mut rng = StepRng::new(0, 1);
let items = table.sample(&mut rng, &Conditions { item: None }).unwrap();
assert_eq!(items.as_slice(), &[ItemStack::new(Item::Dirt, 1)]);
}
} | for _ in 0..pool.rolls.sample(rng) {
// We choose an integer at random from [0, weight_sum) and
// determine which entry has a cumulative weight matching | random_line_split |
lib.rs | //! Implements sampling of loot tables.
use ahash::AHashMap;
use feather_items::{Item, ItemStack};
use feather_loot_model as model;
use inlinable_string::InlinableString;
use itertools::Itertools;
use model::{Condition, Entry, EntryKind, Function, FunctionKind, LootTableSet, Pool};
use once_cell::sync::Lazy;
use rand::Rng;
use smallvec::SmallVec;
use std::iter;
use thiserror::Error;
/// The global loot table store, initialized at runtime from
/// the embedded loot table dump. (Generated by the build script)
static STORE: Lazy<AHashMap<InlinableString, LootTable>> = Lazy::new(|| {
static BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/dump.json"));
serde_json::from_slice::<LootTableSet>(BYTES)
.expect("invalid loot table dump")
.0
.into_iter()
.map(|(k, v)| (k, LootTable(v)))
.collect()
});
/// Returns the loot table with the given ID, if it exists.
/// IDs are the same as those used in MC data packs. For example,
/// the loot table for stone blocks has ID "blocks/stone".
pub fn loot_table(id: &str) -> Option<&'static LootTable> {
STORE.get(id)
}
/// Condition context used to determine whether loot table conditions are satisfied.
#[derive(Debug, Default)]
pub struct Conditions {
/// The item used to break a block
pub item: Option<ItemStack>,
}
/// Opaque wrapper over `model::LootTable`.
#[derive(Debug)]
pub struct LootTable(model::LootTable);
/// Error returned when a loot table sample fails.
#[derive(Debug, Error)]
pub enum SampleError {
#[error("invalid item identifier {0}")]
InvalidItem(String),
#[error("missing loot table {0}")]
MissingLootTable(String),
/// Should be handled gracefully.
#[error("dynamic loot table {0:?}")]
IsDynamic(DynamicKind),
}
/// Indicates that the yielded item should
/// be computed based on a block entity.
#[derive(Debug)]
pub enum DynamicKind {
/// Drop e.g. contents of chest
Contents,
/// Drop the block itself (e.g. player head, banner)
This,
}
impl LootTable {
/// Samples a value from the table.
///
/// The returned set of item stacks is the result of sampling,
/// i.e. the loot which should be yielded. May return multiple
/// item stacks. No guarantee is made about the ordering
/// or distinction of the returned vector.
pub fn sample(
&self,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<SmallVec<[ItemStack; 2]>, SampleError> {
let mut results = SmallVec::new();
let pools = &self.0.pools;
// Algorithm: sample each pool.
// For each pool, evaluate `rolls` entries based on `Entry.weight`
// and yield their results.
for pool in pools {
sample_pool(pool, rng, &mut results, conditions)?;
}
Ok(results)
}
}
fn sample_pool(
pool: &Pool,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
// `rolls` times, choose an entry at random based on weighting
// and yield its results.
// Only select from entries with their conditions satisfied
let entries = pool
.entries
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
let weight_sum = entries.iter().map(|entry| entry.weight).sum::<u32>();
for _ in 0..pool.rolls.sample(rng) {
// We choose an integer at random from [0, weight_sum) and
// determine which entry has a cumulative weight matching
// the result. This algorithm is O(n) computationally, but this is unlikely
// to matter in practice, because loot tables rarely
// have more than one or two entries per pool.
let n = rng.gen_range(0, weight_sum);
let mut cumulative_weight = 0;
let entry = entries
.iter()
.find(|entry| {
if n >= cumulative_weight && n < cumulative_weight + entry.weight {
true
} else {
cumulative_weight += entry.weight;
false
}
})
.expect("entry finding algorithm incorrect");
sample_entry(entry, rng, results, conditions)?;
}
// apply functions to results
results
.iter_mut()
.try_for_each(|item| apply_functions(pool.functions.iter(), item, rng, conditions))?;
Ok(())
}
fn sample_entry(
entry: &Entry,
rng: &mut impl Rng,
results: &mut SmallVec<[ItemStack; 2]>,
conditions: &Conditions,
) -> Result<(), SampleError> {
let mut single;
let mut none = iter::empty();
let mut sampled;
let items: &mut dyn Iterator<Item = ItemStack> = match &entry.kind {
EntryKind::Empty => &mut none,
EntryKind::Item => {
let item = Item::from_identifier(&entry.name)
.ok_or_else(|| SampleError::InvalidItem(entry.name.to_string()))?;
single = iter::once(ItemStack::new(item, 1));
&mut single
}
EntryKind::Tag => &mut none, // TODO
EntryKind::LootTable => {
let table = loot_table(&entry.name)
.ok_or_else(|| SampleError::MissingLootTable(entry.name.to_string()))?;
sampled = table.sample(rng, conditions)?.into_iter();
&mut sampled
}
EntryKind::Group => {
// Return an iterator over the child entries
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.filter(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Alternatives => {
// Only sample first entry whose conditions are satisfied, if any
let mut temp = SmallVec::new();
if let Some(entry) = entry
.children
.iter()
.find(|entry| satisfies_conditions(entry.conditions.iter(), conditions, rng))
{
sample_entry(entry, rng, &mut temp, conditions)?;
}
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Sequence => {
// Apply all entries until one does not satisfy conditions
let mut temp = SmallVec::new();
let entries = entry
.children
.iter()
.map(|entry| {
if satisfies_conditions(entry.conditions.iter(), conditions, rng) {
Some(entry)
} else {
None
}
})
.while_some()
.collect::<SmallVec<[&Entry; 4]>>();
entries
.into_iter()
.try_for_each(|entry| sample_entry(entry, rng, &mut temp, conditions))?;
sampled = temp.into_iter();
&mut sampled
}
EntryKind::Dynamic => {
let kind = if entry.name == "contents" || entry.name == "minecraft:contents" {
DynamicKind::Contents
} else {
DynamicKind::This
};
return Err(SampleError::IsDynamic(kind));
}
};
results.extend(
items
.map(|mut item| {
apply_functions(entry.functions.iter(), &mut item, rng, conditions)?;
Ok(item)
})
.filter_map(|item: Result<ItemStack, SampleError>| item.ok()),
);
Ok(())
}
fn apply_functions<'a>(
functions: impl Iterator<Item = &'a Function>,
item: &mut ItemStack,
rng: &mut impl Rng,
conditions: &Conditions,
) -> Result<(), SampleError> {
let functions = functions
.filter(|f| satisfies_conditions(f.conditions.iter(), conditions, rng))
.collect::<SmallVec<[&Function; 4]>>();
for function in functions {
match &function.kind {
FunctionKind::SetCount { count } => {
let count = count.sample(rng);
item.amount = count as u8;
}
FunctionKind::Unknown => (),
}
}
Ok(())
}
fn | <'a>(
mut conditions: impl Iterator<Item = &'a Condition>,
input: &Conditions,
rng: &mut impl Rng,
) -> bool {
conditions.all(|condition| match condition {
Condition::MatchTool { predicate } => {
if let Some(item) = &predicate.item {
match &input.item {
Some(stack) => {
if stack.ty.identifier() != item {
return false;
}
}
None => return false,
}
}
// enchantments are not yet supported
if !predicate.enchantments.is_empty() {
return false;
}
true
}
Condition::RandomChance { chance } => {
let chance = chance.max(0.0).min(1.0);
rng.gen_bool(chance)
}
Condition::Unknown => true,
})
}
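// Semantics worth noting: unknown condition kinds are treated as satisfied
// (permissive default), and `chance` is clamped to [0, 1] before
// `gen_bool`, which panics outside that range.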
#[cfg(test)]
mod tests {
use super::*;
use rand::rngs::mock::StepRng;
#[test]
fn store_deserializes_successfully() {
Lazy::force(&STORE);
}
#[test]
fn sample_dirt() {
let table = loot_table("blocks/dirt").expect("missing loot table for dirt block");
let mut rng = StepRng::new(0, 1);
let items = table.sample(&mut rng, &Conditions::default()).unwrap();
assert_eq!(items.as_slice(), &[ItemStack::new(Item::Dirt, 1)]);
}
#[test]
fn grass_block_condition() {
let table = loot_table("blocks/grass_block").unwrap_or_else(|| {
panic!(
"missing loot table for grass block\nnote: loaded keys: {:?}",
STORE.keys()
);
});
let mut rng = StepRng::new(0, 1);
let items = table.sample(&mut rng, &Conditions { item: None }).unwrap();
assert_eq!(items.as_slice(), &[ItemStack::new(Item::Dirt, 1)]);
}
}
| satisfies_conditions | identifier_name |
memory_index.rs | -> (PolicyDecision, Box<dyn Policy>);
}
/// Placeholder Policy that keeps everything.
struct KeepPolicy;
impl KeepPolicy {
fn new() -> KeepPolicy {
KeepPolicy
}
}
impl Policy for KeepPolicy {
fn handle(&mut self, property: &str, object: Object)
-> (PolicyDecision, Box<dyn Policy>) {
(PolicyDecision::Keep, Box::new(KeepPolicy))
}
}
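// `handle` returns the decision together with the policy to use for the
// next property, so policies can be written as small state machines; this
// placeholder always keeps the value and continues with a fresh
// `KeepPolicy`.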
/// Key of a reference, used in the backward reference map.
///
/// A reference is a value, and can appear in both types of schema objects: in a
/// dict, it is associated with a string key, and in a list, with an index.
#[derive(PartialEq, Eq, Hash)]
enum Backkey {
/// Reference from a dict under this key.
Key(String),
/// Reference from a list from this index.
Index(usize),
}
enum PermanodeType {
Set,
Single,
}
struct Permanode {
sort: Sort,
nodetype: PermanodeType,
claims: BTreeMap<Property, ID>,
}
impl Permanode {
fn index_claim(&mut self, claim: &Dict, permanode_id: &ID, claim_id: &ID) {
// We require the claim to have the sort key
let sort_value: &Property = match claim.get(self.sort.field()) {
Some(ref prop) => prop,
None => {
debug!("Claim {} is invalid for permanode {}: \
missing sort key",
claim_id, permanode_id);
return;
}
};
// Currently, no validation is done; every claim is accepted
// In the future, we'd have ways of checking a claim, such as public
// key signatures (permanode has key, claim has signature)
self.claims.insert(sort_value.clone(), claim_id.clone());
match self.nodetype {
PermanodeType::Set => {
// Keep the whole set of values
// TODO: handle set deletion claims
}
PermanodeType::Single => {
// Keep one value, the latest by sorting order
if self.claims.len() > 1 {
let mut map = BTreeMap::new();
swap(&mut self.claims, &mut map);
let mut map = map.into_iter();
let (k, v) = match self.sort {
Sort::Ascending(_) => map.next_back().unwrap(),
Sort::Descending(_) => map.next().unwrap(),
};
self.claims.insert(k, v);
}
}
}
}
}
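// Example of the `Single` trimming above: with `Sort::Ascending` and
// claims keyed at t1 < t2, only the (t2, claim) pair survives the
// swap-and-reinsert; `Descending` keeps the t1 pair instead.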
fn insert_into_multimap<K: Clone + Eq + ::std::hash::Hash,
V: Eq + ::std::hash::Hash>(
multimap: &mut HashMap<K, HashSet<V>>,
key: &K,
value: V)
{
if let Some(set) = multimap.get_mut(key) {
set.insert(value);
return;
}
let mut set = HashSet::new();
set.insert(value); | /// Directory where objects are stored on disk.
path: PathBuf,
/// All objects, indexed by their ID.
objects: HashMap<ID, Object>,
/// Back references: value is all references pointing to the key.
backlinks: HashMap<ID, HashSet<(Backkey, ID)>>,
/// All claim objects, whether they are valid for permanode or not.
claims: HashMap<ID, HashSet<ID>>,
/// All permanodes, with valid associated claims.
permanodes: HashMap<ID, Permanode>,
root: ID,
log: Option<ID>,
policy: Box<dyn Policy>,
}
impl MemoryIndex {
/// Reads all the objects from a directory into memory.
pub fn open<P: AsRef<Path>>(path: P, root: ID)
-> errors::Result<MemoryIndex>
{
let path = path.as_ref();
let mut index = MemoryIndex {
path: path.to_path_buf(),
objects: HashMap::new(),
backlinks: HashMap::new(),
claims: HashMap::new(),
permanodes: HashMap::new(),
root: root.clone(),
log: None,
policy: Box::new(KeepPolicy::new()),
};
let dirlist = path.read_dir()
.map_err(|e| ("Error listing objects directory", e))?;
for first in dirlist {
let first = first
.map_err(|e| ("Error listing objects directory", e))?;
let dirlist = first.path().read_dir()
.map_err(|e| ("Error listing objects subdirectory", e))?;
for second in dirlist {
let second = second
.map_err(|e| ("Error listing objects subdirectory", e))?;
let filename = second.path();
// Read object
let fp = File::open(filename)
.map_err(|e| ("Error opening object", e))?;
let object = match serialize::deserialize(fp) {
Err(e) => {
let mut path: PathBuf = first.file_name().into();
path.push(second.file_name());
error!("Error deserializing object: {:?}", path);
return Err(("Error deserializing object", e).into());
}
Ok(o) => o,
};
index.insert_object_in_index(object);
}
}
// Parse root config
index.log = {
let config = index.get_object(&root)?
.ok_or(Error::CorruptedStore("Missing root object"))?;
let config = match config.data {
ObjectData::Dict(ref dict) => dict,
_ => return Err(Error::CorruptedStore(
"Root object is not a dict")),
};
match config.get("log") {
Some(&Property::Reference(ref id)) => {
let log_obj = index.get_object(id)?
.ok_or(Error::CorruptedStore("Missing log object"))?;
match log_obj.data {
ObjectData::Dict(_) => {
debug!("Activated log: {}", id);
}
_ => {
return Err(Error::CorruptedStore(
"Log is not a permanode"));
}
}
Some(id.clone())
}
Some(_) => return Err(Error::CorruptedStore(
"Log is not a reference")),
None => None,
}
};
Ok(index)
}
pub fn create<'a, P: AsRef<Path>, I: Iterator<Item=&'a Object>>(
path: P, objects: I)
-> io::Result<()>
{
for object in objects {
MemoryIndex::write_object(path.as_ref(), object)?;
}
Ok(())
}
fn write_object(dir: &Path, object: &Object) -> io::Result<()> {
let hashstr = object.id.str();
let mut path = dir.join(&hashstr[..4]);
if !path.exists() {
fs::create_dir(&path)?;
}
path.push(&hashstr[4..]);
let mut fp = OpenOptions::new()
.write(true)
.create_new(true)
.open(&path)?;
serialize::serialize(&mut fp, object)
}
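// Objects are sharded by the first four characters of their hash, so an
// object with ID "deadbeef..." lands at "<dir>/dead/beef...". This keeps
// individual directories small.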
/// Utility to insert a new object in the store.
///
/// Insert the object, indexing the back references, and parsing the object
/// to handle permanodes.
fn insert_object_in_index(&mut self, object: Object) {
assert!(!self.objects.contains_key(&object.id));
{
// Record reverse references
// This is run on all values of type reference on the object,
// whether it is a list or a dict
let mut insert = |target: &ID, key: Backkey, source: ID| {
if log_enabled!(Level::Debug) {
match key {
Backkey::Key(ref k) => {
debug!("Reference {} -> {} ({})",
source, target, k);
}
Backkey::Index(i) => {
debug!("Reference {} -> {} ({})",
source, target, i);
}
}
}
// Add backlink
insert_into_multimap(&mut self.backlinks,
target, (key, source));
};
// Go over the object, calling insert() above on all its values of
// type reference
match object.data {
ObjectData::Dict(ref dict) => {
for (k, v) in dict {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Key(k.clone()),
object.id.clone());
}
}
}
ObjectData::List(ref list) => {
for (k, v) in list.into_iter().enumerate() {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Index(k),
object.id.clone());
}
}
}
}
}
// Check for special objects
if let ObjectData::Dict(ref dict) = object.data {
match dict.get("dhstore_kind") {
Some(&Property::String(ref kind)) => match kind as &str {
"permanode" => {
info!("Found permanode: {}", object.id);
self.index_permanode(&object);
}
"claim" => {
info!("Found claim: {}", object.id);
self.index_claim(&object);
}
kind => debug!("Found unknown kind {:?}", kind),
},
Some(_) => {
info!("Object has dhstore_kind with non-string value");
}
None => {}
}
}
// Now inserts the object
self.objects.insert(object.id.clone(), object);
}
fn index_permanode(&mut self, permanode: &Object) {
// Validate the permanode
let ref id = permanode.id;
let permanode = match permanode.data {
ObjectData::Dict(ref d) => d,
ObjectData::List(_) => {
panic!("Invalid permanode {}: not a dict", id);
}
};
match permanode.get("random") {
Some(&Property::String(ref s)) => {
if s.len()!= HASH_STR_SIZE {
warn!("Invalid permanode {}: invalid random size {}",
id, s.len());
return;
}
}
_ => {
warn!("Invalid permanode {}: missing random", id);
return;
}
}
let sort = match permanode.get("sort") {
Some(&Property::String(ref s)) => match s.parse() {
Ok(f) => f,
Err(()) => {
warn!("Invalid permanode {}: invalid sort", id);
return;
}
},
_ => {
warn!("Invalid permanode {}: invalid sort", id);
return;
}
};
let nodetype = match permanode.get("type") {
Some(&Property::String(ref s)) => match s as &str {
"set" | "single" => PermanodeType::Set,
_ => {
warn!("Unknown permanode type {:?}, ignoring permanode {}",
s, id);
return;
}
},
None => PermanodeType::Single,
Some(_) => {
warn!("Invalid permanode {}: invalid type", id);
return;
}
};
debug!("Permanode is well-formed, adding to index");
let mut node = Permanode { sort: sort,
nodetype: nodetype,
claims: BTreeMap::new() };
// Process claims
if let Some(set) = self.claims.get(id) {
for claim_id in set {
let claim = self.objects.get(claim_id).unwrap();
let claim = match claim.data {
ObjectData::Dict(ref d) => d,
_ => panic!("Invalid claim {}: not a dict", claim_id),
};
node.index_claim(claim, id, claim_id);
}
}
// Insert the permanode in the index
self.permanodes.insert(id.clone(), node);
}
fn index_claim(&mut self, claim: &Object) {
// Validate the claim
let id = &claim.id;
let claim = match claim.data {
ObjectData::Dict(ref d) => d,
_ => panic!("Invalid claim {}: not a dict", id),
};
let permanode = match (claim.get("node"), claim.get("value")) {
(Some(&Property::Reference(ref r)),
Some(&Property::Reference(_))) => r,
_ => {
warn!("Invalid claim {}: wrong content", id);
return;
}
};
// Insert the claim in the index
// Note that this means it is well-formed, not that it is valid;
// validity needs to be checked with the permanode
debug!("Claim is well-formed, adding to index");
insert_into_multimap(&mut self.claims, permanode, id.clone());
// If we have the permanode, index a valid claim
if let Some(node) = self.permanodes.get_mut(permanode) {
node.index_claim(claim, permanode, id);
}
}
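// Claims can arrive before their permanode: they are parked in
// `self.claims` above and replayed by `index_permanode` when the permanode
// itself is inserted.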
/// Common logic for `verify()` and `collect_garbage()`.
///
/// Goes over the tree of objects, checking for errors. If `collect` is
/// true, unreferenced objects are deleted, and the set of referenced blobs
/// is returned; else, an empty `HashSet` is returned.
fn walk(&mut self, collect: bool) -> errors::Result<HashSet<ID>> {
let mut alive = HashSet::new(); // ids
let mut live_blobs = HashSet::new(); // ids
let mut open = VecDeque::new(); // ids
if self.objects.get(&self.root).is_none() {
error!("Root is missing: {}", self.root);
} else {
open.push_front(self.root.clone());
}
while let Some(id) = open.pop_front() {
debug!("Walking, open={}, alive={}/{}, id={}",
open.len(), alive.len(), self.objects.len(), id);
let object = match self.objects.get(&id) {
Some(o) => o,
None => {
info!("Don't have object {}", id);
continue;
}
};
if alive.contains(&id) {
debug!(" already alive");
continue;
}
alive.insert(id);
let mut handle = |value: &Property| {
match *value {
Property::Reference(ref id) => {
open.push_back(id.clone());
}
Property::Blob(ref id) => {
if collect {
live_blobs.insert(id.clone());
}
}
_ => {}
}
};
match object.data {
ObjectData::Dict(ref dict) | multimap.insert(key.clone(), set);
}
/// The in-memory index, that loads all objects from the disk on startup.
pub struct MemoryIndex { | random_line_split |
memory_index.rs

trait Policy {
    fn handle(&mut self, property: &str, object: Object)
        -> (PolicyDecision, Box<dyn Policy>);
}
/// Placeholder Policy that keeps everything.
struct KeepPolicy;
impl KeepPolicy {
fn new() -> KeepPolicy {
KeepPolicy
}
}
impl Policy for KeepPolicy {
fn handle(&mut self, property: &str, object: Object)
-> (PolicyDecision, Box<dyn Policy>) {
(PolicyDecision::Keep, Box::new(KeepPolicy))
}
}
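Because `handle` returns the next policy along with each decision, a policy can evolve as it walks over successive properties. As a sketch of a non-trivial policy (illustrative only: it assumes a `PolicyDecision::Drop` variant, which does not appear in this file), one that keeps the first `limit` objects and drops the rest:

// Illustrative sketch, not part of the original source.
// Assumes `PolicyDecision` has a `Drop` variant next to `Keep`.
struct CountingPolicy {
    seen: usize,
    limit: usize,
}

impl Policy for CountingPolicy {
    fn handle(&mut self, _property: &str, _object: Object)
        -> (PolicyDecision, Box<dyn Policy>) {
        self.seen += 1;
        let decision = if self.seen <= self.limit {
            PolicyDecision::Keep
        } else {
            PolicyDecision::Drop // assumed variant
        };
        let next = CountingPolicy { seen: self.seen, limit: self.limit };
        (decision, Box::new(next))
    }
}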
/// Key of a reference, used in the backward reference map.
///
/// A reference is a value, and can appear in both types of schema objects: in a
/// dict, it is associated with a string key, and in a list, with an index.
#[derive(PartialEq, Eq, Hash)]
enum Backkey {
/// Reference from a dict under this key.
Key(String),
/// Reference from a list from this index.
Index(usize),
}
enum PermanodeType {
Set,
Single,
}
struct Permanode {
sort: Sort,
nodetype: PermanodeType,
claims: BTreeMap<Property, ID>,
}
impl Permanode {
fn index_claim(&mut self, claim: &Dict, permanode_id: &ID, claim_id: &ID) {
// We require the claim to have the sort key
let sort_value: &Property = match claim.get(self.sort.field()) {
Some(prop) => prop,
None => {
debug!("Claim {} is invalid for permanode {}: \
missing sort key",
claim_id, permanode_id);
return;
}
};
// Currently, no validation is done; every claim is accepted
// In the future, we'd have ways of checking a claim, such as public
// key signatures (permanode has key, claim has signature)
self.claims.insert(sort_value.clone(), claim_id.clone());
match self.nodetype {
PermanodeType::Set => {
// Keep the whole set of values
// TODO: handle set deletion claims
}
PermanodeType::Single => {
// Keep one value, the latest by sorting order
if self.claims.len() > 1 {
let mut map = BTreeMap::new();
swap(&mut self.claims, &mut map);
let mut map = map.into_iter();
let (k, v) = match self.sort {
Sort::Ascending(_) => map.next_back().unwrap(),
Sort::Descending(_) => map.next().unwrap(),
};
self.claims.insert(k, v);
}
}
}
}
}
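The `Single` arm keeps exactly one claim: the map is drained and the entry that wins under the permanode's sort order is reinserted (the greatest key for ascending, the smallest for descending). A minimal standalone sketch of that rule, with plain `String`/`u32` standing in for `Property` and `ID`:

// Illustrative sketch, not part of the original source.
use std::collections::BTreeMap;
use std::mem::swap;

// Keep only the greatest key, mirroring the ascending case above.
fn keep_latest(claims: &mut BTreeMap<String, u32>) {
    if claims.len() > 1 {
        let mut map = BTreeMap::new();
        swap(claims, &mut map);
        let (k, v) = map.into_iter().next_back().unwrap();
        claims.insert(k, v);
    }
}

fn main() {
    let mut claims = BTreeMap::new();
    claims.insert("2021-01-01".to_string(), 1);
    claims.insert("2021-06-01".to_string(), 2);
    keep_latest(&mut claims);
    assert!(claims.len() == 1 && claims.contains_key("2021-06-01"));
}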
fn insert_into_multimap<K: Clone + Eq + ::std::hash::Hash,
V: Eq + ::std::hash::Hash>(
multimap: &mut HashMap<K, HashSet<V>>,
key: &K,
value: V)
{
if let Some(set) = multimap.get_mut(key) {
set.insert(value);
return;
}
let mut set = HashSet::new();
set.insert(value);
multimap.insert(key.clone(), set);
}
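Usage is as expected for a multimap; repeated inserts under one key accumulate in the same set (types chosen for the example):

// Illustrative usage, not part of the original source.
use std::collections::{HashMap, HashSet};

fn main() {
    let mut multimap: HashMap<String, HashSet<u32>> = HashMap::new();
    insert_into_multimap(&mut multimap, &"a".to_string(), 1);
    insert_into_multimap(&mut multimap, &"a".to_string(), 2);
    insert_into_multimap(&mut multimap, &"b".to_string(), 3);
    assert_eq!(multimap["a"].len(), 2);
    assert_eq!(multimap["b"].len(), 1);
}

The same effect could be had with `multimap.entry(key.clone()).or_insert_with(HashSet::new).insert(value)`; the hand-rolled version merely avoids cloning the key when the set already exists.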
/// The in-memory index, that loads all objects from the disk on startup.
pub struct MemoryIndex {
/// Directory where objects are stored on disk.
path: PathBuf,
/// All objects, indexed by their ID.
objects: HashMap<ID, Object>,
/// Back references: value is all references pointing to the key.
backlinks: HashMap<ID, HashSet<(Backkey, ID)>>,
/// All claim objects, whether they are valid for permanode or not.
claims: HashMap<ID, HashSet<ID>>,
/// All permanodes, with valid associated claims.
permanodes: HashMap<ID, Permanode>,
root: ID,
log: Option<ID>,
policy: Box<dyn Policy>,
}
impl MemoryIndex {
/// Reads all the objects from a directory into memory.
pub fn open<P: AsRef<Path>>(path: P, root: ID)
-> errors::Result<MemoryIndex>
{
let path = path.as_ref();
let mut index = MemoryIndex {
path: path.to_path_buf(),
objects: HashMap::new(),
backlinks: HashMap::new(),
claims: HashMap::new(),
permanodes: HashMap::new(),
root: root.clone(),
log: None,
policy: Box::new(KeepPolicy::new()),
};
let dirlist = path.read_dir()
.map_err(|e| ("Error listing objects directory", e))?;
for first in dirlist {
let first = first
.map_err(|e| ("Error listing objects directory", e))?;
let dirlist = first.path().read_dir()
.map_err(|e| ("Error listing objects subdirectory", e))?;
for second in dirlist {
let second = second
.map_err(|e| ("Error listing objects subdirectory", e))?;
let filename = second.path();
// Read object
let fp = File::open(filename)
.map_err(|e| ("Error opening object", e))?;
let object = match serialize::deserialize(fp) {
Err(e) => {
let mut path: PathBuf = first.file_name().into();
path.push(second.file_name());
error!("Error deserializing object: {:?}", path);
return Err(("Error deserializing object", e).into());
}
Ok(o) => o,
};
index.insert_object_in_index(object);
}
}
// Parse root config
index.log = {
let config = index.get_object(&root)?
.ok_or(Error::CorruptedStore("Missing root object"))?;
let config = match config.data {
ObjectData::Dict(ref dict) => dict,
_ => return Err(Error::CorruptedStore(
"Root object is not a dict")),
};
match config.get("log") {
Some(&Property::Reference(ref id)) => {
let log_obj = index.get_object(id)?
.ok_or(Error::CorruptedStore("Missing log object"))?;
match log_obj.data {
ObjectData::Dict(_) => {
debug!("Activated log: {}", id);
}
_ => {
return Err(Error::CorruptedStore(
"Log is not a permanode"));
}
}
Some(id.clone())
}
Some(_) => return Err(Error::CorruptedStore(
"Log is not a reference")),
None => None,
}
};
Ok(index)
}
pub fn create<'a, P: AsRef<Path>, I: Iterator<Item=&'a Object>>(
path: P, objects: I)
-> io::Result<()>
{
for object in objects {
MemoryIndex::write_object(path.as_ref(), object)?;
}
Ok(())
}
fn write_object(dir: &Path, object: &Object) -> io::Result<()> {
let hashstr = object.id.str();
let mut path = dir.join(&hashstr[..4]);
if !path.exists() {
fs::create_dir(&path)?;
}
path.push(&hashstr[4..]);
let mut fp = OpenOptions::new()
.write(true)
.create_new(true)
.open(&path)?;
serialize::serialize(&mut fp, object)
}
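So an object whose hash renders as `ab12cdef…` is stored at `objects/ab12/cdef…`, which keeps any single directory from growing too large. A small sketch of just the path computation, with a plain string standing in for `ID`:

// Illustrative sketch, not part of the original source.
use std::path::{Path, PathBuf};

fn object_path(dir: &Path, hashstr: &str) -> PathBuf {
    let mut path = dir.join(&hashstr[..4]); // two-level sharding
    path.push(&hashstr[4..]);
    path
}

fn main() {
    let p = object_path(Path::new("objects"), "ab12cdef0123");
    assert_eq!(p, Path::new("objects").join("ab12").join("cdef0123"));
}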
/// Utility to insert a new object in the store.
///
/// Insert the object, indexing the back references, and parsing the object
/// to handle permanodes.
fn insert_object_in_index(&mut self, object: Object) {
assert!(!self.objects.contains_key(&object.id));
{
// Record reverse references
// This is run on all values of type reference on the object,
// whether it is a list or a dict
let mut insert = |target: &ID, key: Backkey, source: ID| {
if log_enabled!(Level::Debug) {
match key {
Backkey::Key(ref k) => {
debug!("Reference {} -> {} ({})",
source, target, k);
}
Backkey::Index(i) => {
debug!("Reference {} -> {} ({})",
source, target, i);
}
}
}
// Add backlink
insert_into_multimap(&mut self.backlinks,
target, (key, source));
};
// Go over the object, calling insert() above on all its values of
// type reference
match object.data {
ObjectData::Dict(ref dict) => {
for (k, v) in dict {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Key(k.clone()),
object.id.clone());
}
}
}
ObjectData::List(ref list) => {
for (k, v) in list.into_iter().enumerate() {
if let Property::Reference(ref id) = *v {
insert(id,
Backkey::Index(k),
object.id.clone());
}
}
}
}
}
// Check for special objects
if let ObjectData::Dict(ref dict) = object.data {
match dict.get("dhstore_kind") {
Some(&Property::String(ref kind)) => match kind as &str {
"permanode" => {
info!("Found permanode: {}", object.id);
self.index_permanode(&object);
}
"claim" => {
info!("Found claim: {}", object.id);
self.index_claim(&object);
}
kind => debug!("Found unknown kind {:?}", kind),
},
Some(_) => {
info!("Object has dhstore_kind with non-string value");
}
None => {}
}
}
// Now insert the object
self.objects.insert(object.id.clone(), object);
}
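The net effect on the backlink map: for a dict object S containing {"log": Reference(T)}, `backlinks[T]` gains the pair `(Backkey::Key("log"), S)`, while references held in lists record their index instead. A simplified mirror with `String` ids:

// Illustrative sketch, not part of the original source.
use std::collections::{HashMap, HashSet};

#[derive(PartialEq, Eq, Hash, Debug)]
enum SimpleBackkey {
    Key(String),
    Index(usize),
}

fn main() {
    let mut backlinks: HashMap<String, HashSet<(SimpleBackkey, String)>> =
        HashMap::new();
    // Object "S" references object "T" under the dict key "log".
    backlinks
        .entry("T".to_string())
        .or_insert_with(HashSet::new)
        .insert((SimpleBackkey::Key("log".to_string()), "S".to_string()));
    assert!(backlinks["T"]
        .contains(&(SimpleBackkey::Key("log".to_string()), "S".to_string())));
}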
fn index_permanode(&mut self, permanode: &Object) {
// Validate the permanode
let id = &permanode.id;
let permanode = match permanode.data {
ObjectData::Dict(ref d) => d,
ObjectData::List(_) => {
panic!("Invalid permanode {}: not a dict", id);
}
};
match permanode.get("random") {
Some(&Property::String(ref s)) => {
if s.len() != HASH_STR_SIZE {
warn!("Invalid permanode {}: invalid random size {}",
id, s.len());
return;
}
}
_ => {
warn!("Invalid permanode {}: missing random", id);
return;
}
}
let sort = match permanode.get("sort") {
Some(&Property::String(ref s)) => match s.parse() {
Ok(f) => f,
Err(()) => {
warn!("Invalid permanode {}: invalid sort", id);
return;
}
},
_ => {
warn!("Invalid permanode {}: invalid sort", id);
return;
}
};
let nodetype = match permanode.get("type") {
Some(&Property::String(ref s)) => match s as &str {
"set" | "single" => PermanodeType::Set,
_ => {
warn!("Unknown permanode type {:?}, ignoring permanode {}",
s, id);
return;
}
},
None => PermanodeType::Single,
Some(_) => {
warn!("Invalid permanode {}: invalid type", id);
return;
}
};
debug!("Permanode is well-formed, adding to index");
let mut node = Permanode {
    sort,
    nodetype,
    claims: BTreeMap::new(),
};
// Process claims
if let Some(set) = self.claims.get(id) {
for claim_id in set {
let claim = self.objects.get(claim_id).unwrap();
let claim = match claim.data {
ObjectData::Dict(ref d) => d,
_ => panic!("Invalid claim {}: not a dict", claim_id),
};
node.index_claim(claim, id, claim_id);
}
}
// Insert the permanode in the index
self.permanodes.insert(id.clone(), node);
}
fn index_claim(&mut self, claim: &Object) {
// Validate the claim
let id = &claim.id;
let claim = match claim.data {
ObjectData::Dict(ref d) => d,
_ => panic!("Invalid claim {}: not a dict", id),
};
let permanode = match (claim.get("node"), claim.get("value")) {
(Some(&Property::Reference(ref r)),
Some(&Property::Reference(_))) => r,
_ => {
warn!("Invalid claim {}: wrong content", id);
return;
}
};
// Insert the claim in the index
// Note that this means it is well-formed, not that it is valid;
// validity needs to be checked with the permanode
debug!("Claim is well-formed, adding to index");
insert_into_multimap(&mut self.claims, permanode, id.clone());
// If we have the permanode, index a valid claim
if let Some(node) = self.permanodes.get_mut(permanode) {
node.index_claim(claim, permanode, id);
}
}
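Taken together, the two indexers accept shapes along these lines. The mirror below is illustrative only: the real `Dict` and `Property` types are not fully shown in this file, and the "date" sort field is a made-up example.

// Illustrative sketch, not part of the original source: a local mirror
// of the schema, showing the fields the indexers above look for.
use std::collections::BTreeMap;

#[derive(Debug, Clone)]
enum SimpleProperty {
    String(String),
    Reference(String), // object id
}

fn example_permanode() -> BTreeMap<String, SimpleProperty> {
    let mut d = BTreeMap::new();
    d.insert("dhstore_kind".into(), SimpleProperty::String("permanode".into()));
    d.insert("random".into(), SimpleProperty::String("<random>".into()));
    d.insert("sort".into(), SimpleProperty::String("date".into()));
    d.insert("type".into(), SimpleProperty::String("set".into()));
    d
}

fn example_claim(permanode_id: &str, target_id: &str)
    -> BTreeMap<String, SimpleProperty> {
    let mut d = BTreeMap::new();
    d.insert("dhstore_kind".into(), SimpleProperty::String("claim".into()));
    d.insert("node".into(), SimpleProperty::Reference(permanode_id.into()));
    d.insert("value".into(), SimpleProperty::Reference(target_id.into()));
    d.insert("date".into(), SimpleProperty::String("2021-06-01".into()));
    d
}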
/// Common logic for `verify()` and `collect_garbage()`.
///
/// Goes over the tree of objects, checking for errors. If `collect` is
/// true, unreferenced objects are deleted, and the set of referenced blobs
/// is returned; else, an empty `HashSet` is returned.
fn walk(&mut self, collect: bool) -> errors::Result<HashSet<ID>> {
let mut alive = HashSet::new(); // ids
let mut live_blobs = HashSet::new(); // ids
let mut open = VecDeque::new(); // ids
if self.objects.get(&self.root).is_none() {
error!("Root is missing: {}", self.root);
} else {
open.push_front(self.root.clone());
}
while let Some(id) = open.pop_front() {
debug!("Walking, open={}, alive={}/{}, id={}",
open.len(), alive.len(), self.objects.len(), id);
let object = match self.objects.get(&id) {
Some(o) => o,
None => {
info!("Don't have object {}", id);
continue;
}
};
if alive.contains(&id) {
debug!(" already alive");
continue;
}
alive.insert(id);
let mut handle = |value: &Property| {
match *value {
Property::Reference(ref id) => {
open.push_back(id.clone());
}
Property::Blob(ref id) => {
if collect {
live_blobs.insert(id.clone());
}
}
_ => {}
}
};
match object.data {
ObjectData::Dict(ref dict)