file_name: large_string (lengths 4-69)
| prefix: large_string (lengths 0-26.7k)
| suffix: large_string (lengths 0-24.8k)
| middle: large_string (lengths 0-2.12k)
| fim_type: large_string (4 classes)
values |
---|---|---|---|---|
mod.rs | use std::mem;
use std::time::SystemTime; | #[cfg(not(feature="dynamic_mem"))]
const MAX_MEMORY_SLOTS: usize = 1024 * 128;
type Bits = u128;
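// NOTE: mem::size_of::<Bits>() counts bytes (16 for u128), not bits, so each
// u128 word below carries only 16 usable mark bits; the / and % indexing in
// mark_object/is_marked stays consistent with that, it just leaves 112 of the
// 128 bits in each word unused.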
const MARK_BITS_PER_SLOT: usize = mem::size_of::<Bits>();
const MARK_BITS: usize = MAX_MEMORY_SLOTS / MARK_BITS_PER_SLOT;
#[cfg(feature="dynamic_mem")]
type Mem = Vec<usize>;
#[cfg(not(feature="dynamic_mem"))]
type Mem = [usize; MAX_MEMORY_SLOTS];
pub const OBJECT_HEADER_SLOTS: usize = 1;
pub struct Memory {
head: usize,
mem: Mem,
mark_bits: [u128; MARK_BITS],
roots: Vec<usize>,
gc_count: usize,
allocates: usize,
last_gc_ms: u128,
total_gc_ms: u128,
lastgc_live_mem: usize,
lastgc_free_mem: usize,
show_gc: bool,
show_allocates: bool,
show_heap_map: bool,
show_free_list: bool,
}
impl<'a> IntoIterator for &'a Memory {
type Item = usize;
type IntoIter = MemoryIntoIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
MemoryIntoIterator {
mem: self,
scan: 0,
free: 0,
}
}
}
pub struct MemoryIntoIterator<'a> {
mem: &'a Memory,
scan: usize,
free: usize,
}
impl<'a> Iterator for MemoryIntoIterator<'a> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.scan == 0 {
self.scan = 1;
self.free = self.mem.head;
} else {
self.scan = self.mem.next_object_in_heap(self.scan);
}
while self.scan == self.free {
self.scan = self.mem.next_object_in_heap(self.free);
self.free = self.mem.get_fl_next(self.free);
}
if self.scan >= MAX_MEMORY_SLOTS - 1 {
return None;
} else {
return Some(self.scan);
}
}
}
#[cfg(feature = "dynamic_mem")]
fn im() -> Mem {
return vec![0; MAX_MEMORY_SLOTS];
}
#[cfg(not(feature = "dynamic_mem"))]
fn im() -> Mem {
return [0; MAX_MEMORY_SLOTS];
}
impl Memory {
pub fn initialize_memory() -> Memory {
let mut mem = Memory {
head: 1,
mem: im(),
mark_bits: [0; MARK_BITS],
roots: Vec::new(),
gc_count: 0,
allocates: 0,
lastgc_live_mem: 0,
lastgc_free_mem: 0,
last_gc_ms: 0,
total_gc_ms: 0,
show_gc: false,
show_allocates: false,
show_heap_map: false,
show_free_list: false,
};
mem.set_size(0, MAX_MEMORY_SLOTS); // magic memory at zero is heap_size
mem.set_size(mem.head, MAX_MEMORY_SLOTS - 2); // set initial object size as all heap
mem.set_fl_next(mem.head, 0);
mem
}
// objects API
// allocate_object (size) --- size is number of indexable slots
// add/remove_root() --- add to or remove from the gc root set.
// element_size() --- number of indexable slots: get_size() - OBJECT_HEADER_SLOTS
// at_put - store into object slot at index
// at -- fetch object slot at index
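// A minimal usage sketch (illustrative only -- the call sequence below is an
// assumed example, not taken from the original source):
//
//   let mut mem = Memory::initialize_memory();
//   let a = mem.allocate_object(2); // 2 indexable slots
//   let b = mem.allocate_object(2);
//   mem.add_root(a);                // roots survive gc()
//   mem.at_put(a, 0, b);            // slots hold object indices, so `b` is reachable from `a`
//   mem.gc();                       // `a` and `b` both survive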
pub fn allocate_object(&mut self, unrounded_size: usize) -> usize {
self.allocates += 1;
let mut result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.gc();
result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.print_freelist();
self.print_heap();
panic!("out of memory");
}
}
result
}
pub fn live_objects(&self) -> MemoryIntoIterator {
return self.into_iter();
}
pub fn add_root(&mut self, obj: usize) {
self.roots.push(obj);
}
pub fn remove_root(&mut self, obj: usize) {
for i in 0..self.roots.len() {
if obj == self.roots[i] {
self.roots.remove(i);
return;
}
}
}
pub fn at_put(&mut self, obj: usize, index: usize, value: usize) {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object = &mut self.mem[base..base + slots];
object[index] = value;
}
pub fn at(&self, obj: usize, index: usize) -> usize {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object = &self.mem[base..base + slots];
return object[index];
}
pub fn element_size(&self, obj: usize) -> usize {
return self.mem[obj] - OBJECT_HEADER_SLOTS;
}
pub fn enable_show_heap_map(&mut self, enabled: bool) {
self.show_heap_map = enabled;
}
pub fn enable_show_freelist(&mut self, enabled: bool) {
self.show_free_list = enabled;
}
pub fn enable_show_gc(&mut self, enabled: bool) {
self.show_gc = enabled;
}
pub fn enable_show_allocates(&mut self, enabled: bool) {
self.show_allocates = enabled;
}
fn rounded_size(unrounded_size: usize) -> usize {
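// e.g. 3 -> 4, 4 -> 4, 5 -> 6: allocation sizes are kept even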
(unrounded_size + 1) & !1 // round up to an even number of slots
}
fn get_size(&self, obj: usize) -> usize {
return self.mem[obj];
}
fn set_size(&mut self, obj: usize, size: usize) {
self.mem[obj] = size;
}
fn next_object_in_heap(&self, obj: usize) -> usize {
return obj + self.get_size(obj);
}
// free-list next pointer lives in the slot after the size header (obj + 1)
fn get_fl_next(&self, obj: usize) -> usize {
return self.mem[obj + 1];
}
fn set_fl_next(&mut self, obj: usize, next: usize) {
self.mem[obj + 1] = next;
}
fn mark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] |= 1 << (obj % MARK_BITS_PER_SLOT);
}
fn unmark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] &= !(1 << (obj % MARK_BITS_PER_SLOT));
}
fn is_marked(&self, obj: usize) -> bool {
(self.mark_bits[obj / MARK_BITS_PER_SLOT] & (1 << (obj % MARK_BITS_PER_SLOT))) != 0
}
fn allocate_object_nocompress(&mut self, unrounded_size: usize) -> usize {
let size = Memory::rounded_size(unrounded_size + OBJECT_HEADER_SLOTS);
let mut free = self.head;
while free != 0 {
let avail = self.get_size(free);
if avail > size {
let newsize = avail - size;
if newsize < 2 {
panic!("remaining size is less than 2");
}
// shrink current free to smaller size
self.set_size(free, newsize);
// new object is on the end of current free object
let new_object = free + newsize;
self.set_size(new_object, size);
for index in 0..self.element_size(new_object) {
self.at_put(new_object, index, 0);
}
if self.show_allocates {
println!(
"Success: allocate_object returning -> {} size {}",
new_object, size
);
}
if self.head != free {
if self.show_allocates {
println!("Reset head past intermediate free blocks \n");
let mut show = self.head;
while show != free {
println!("Abandon {} size {}\n", show, self.get_size(show));
show = self.get_fl_next(show);
}
}
self.head = free;
}
return new_object;
}
free = self.get_fl_next(free);
}
0
}
pub fn gc(&mut self) {
let start = SystemTime::now();
for i in 0..self.roots.len() {
self.mark_and_scan(self.roots[i]);
}
self.sweep();
self.gc_count += 1;
if self.show_gc {
self.print_gc_stats();
}
match start.elapsed() {
Ok(elapsed) => {
self.last_gc_ms = elapsed.as_millis();
self.total_gc_ms += self.last_gc_ms;
}
Err(e) => {
println!("Error: {:?}", e);
}
}
}
fn sweep(&mut self) {
let mut scan = 1;
self.head = 0;
let mut tail = self.head;
self.lastgc_free_mem = 0;
self.lastgc_live_mem = 0;
while scan < MAX_MEMORY_SLOTS - 1 {
if self.is_marked(scan) {
self.unmark_object(scan);
self.lastgc_live_mem += self.get_size(scan);
} else {
self.lastgc_free_mem += self.get_size(scan);
if tail == 0 {
self.head = scan;
self.set_fl_next(scan, 0);
tail = scan;
} else {
if self.next_object_in_heap(tail) == scan {
self.set_size(tail, self.get_size(tail) + self.get_size(scan));
} else {
self.set_fl_next(tail, scan);
self.set_fl_next(scan, 0);
tail = scan;
}
}
}
scan = self.next_object_in_heap(scan);
}
if self.show_free_list {
self.print_freelist();
}
if self.show_heap_map {
self.print_heap();
}
}
fn mark_and_scan(&mut self, object: usize) {
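// Depth-first mark: every non-header slot is treated as a potential object
// reference (slot values are object indices into `mem`), so reachability
// follows stored indices recursively.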
if object == 0 || self.is_marked(object) {
return;
}
let slots = self.get_size(object);
self.mark_object(object);
for i in OBJECT_HEADER_SLOTS..slots {
self.mark_and_scan(self.mem[object + i]);
}
}
pub fn print_gc_stats(&self) {
println!(
"{} gcs, {} object allocates, Last GC: Live {} Dead {} in {} ms, Lifetime GC {} ms\n",
self.gc_count,
self.allocates,
self.lastgc_live_mem,
self.lastgc_free_mem,
self.last_gc_ms,
self.total_gc_ms,
);
}
fn print_heap(&mut self) {
print!("\x1B[{};{}H", 1, 1);
let mut scan = 1;
let mut count = 0;
let mut free = self.head;
while scan < MAX_MEMORY_SLOTS - 1 {
// free runs print as 'x', live objects as '.'
let mut num_chars_to_print = 0;
let mut char_to_print = '?';
if scan == free {
while scan == free {
char_to_print = 'x';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(free);
free = self.get_fl_next(free);
}
} else {
char_to_print = '.';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(scan);
}
for _i in 1..num_chars_to_print / 2 {
print!("{}", char_to_print);
count += 1;
if count % 120 == 0 {
print!("\n");
}
}
}
self.print_gc_stats();
}
pub fn print_freelist(&mut self) {
println!("\nprint_freelist: Head = {}", self.head);
let mut free = self.head;
let mut count = 0;
let mut total_free = 0;
while free != 0 {
let size = self.get_size(free);
let next = self.get_fl_next(free);
total_free += self.get_size(free);
println!("{}: Free = {} {} slots next = {}", count, free, size, next);
free = next;
count += 1;
if count > MAX_MEMORY_SLOTS {
panic!()
}
}
println!(
"print_freelist {} elements, total free = {}\n",
count, total_free
);
}
} |
#[cfg(feature="dynamic_mem")]
const MAX_MEMORY_SLOTS: usize = 1024 * 1024 * 2; | random_line_split |
mod.rs | use std::mem;
use std::time::SystemTime;
#[cfg(feature="dynamic_mem")]
const MAX_MEMORY_SLOTS: usize = 1024 * 1024 * 2;
#[cfg(not(feature="dynamic_mem"))]
const MAX_MEMORY_SLOTS: usize = 1024 * 128;
type Bits = u128;
const MARK_BITS_PER_SLOT: usize = mem::size_of::<Bits>();
const MARK_BITS: usize = MAX_MEMORY_SLOTS / MARK_BITS_PER_SLOT;
#[cfg(feature="dynamic_mem")]
type Mem = Vec<usize>;
#[cfg(not(feature="dynamic_mem"))]
type Mem = [usize; MAX_MEMORY_SLOTS];
pub const OBJECT_HEADER_SLOTS: usize = 1;
pub struct Memory {
head: usize,
mem: Mem,
mark_bits: [u128; MARK_BITS],
roots: Vec<usize>,
gc_count: usize,
allocates: usize,
last_gc_ms: u128,
total_gc_ms: u128,
lastgc_live_mem: usize,
lastgc_free_mem: usize,
show_gc: bool,
show_allocates: bool,
show_heap_map: bool,
show_free_list: bool,
}
impl<'a> IntoIterator for &'a Memory {
type Item = usize;
type IntoIter = MemoryIntoIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
MemoryIntoIterator {
mem: self,
scan: 0,
free: 0,
}
}
}
pub struct MemoryIntoIterator<'a> {
mem: &'a Memory,
scan: usize,
free: usize,
}
impl<'a> Iterator for MemoryIntoIterator<'a> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.scan == 0 {
self.scan = 1;
self.free = self.mem.head;
} else {
self.scan = self.mem.next_object_in_heap(self.scan);
}
while self.scan == self.free {
self.scan = self.mem.next_object_in_heap(self.free);
self.free = self.mem.get_fl_next(self.free);
}
if self.scan >= MAX_MEMORY_SLOTS - 1 {
return None;
} else {
return Some(self.scan);
}
}
}
#[cfg(feature = "dynamic_mem")]
fn im() -> Mem {
return vec![0; MAX_MEMORY_SLOTS];
}
#[cfg(not(feature = "dynamic_mem"))]
fn im() -> Mem {
return [0; MAX_MEMORY_SLOTS];
}
impl Memory {
pub fn initialize_memory() -> Memory {
let mut mem = Memory {
head: 1,
mem: im(),
mark_bits: [0; MARK_BITS],
roots: Vec::new(),
gc_count: 0,
allocates: 0,
lastgc_live_mem: 0,
lastgc_free_mem: 0,
last_gc_ms: 0,
total_gc_ms: 0,
show_gc: false,
show_allocates: false,
show_heap_map: false,
show_free_list: false,
};
mem.set_size(0, MAX_MEMORY_SLOTS); // magic memory at zero is heap_size
mem.set_size(mem.head, MAX_MEMORY_SLOTS - 2); // set initial object size as all heap
mem.set_fl_next(mem.head, 0);
mem
}
// objects API
// allocate_object (size) --- size is number of indexable slots
// add/remove_root() --- add to or remove from the gc root set.
// element_size() --- number of indexable slots: get_size() - OBJECT_HEADER_SLOTS
// at_put - store into object slot at index
// at -- fetch object slot at index
pub fn allocate_object(&mut self, unrounded_size: usize) -> usize {
self.allocates += 1;
let mut result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.gc();
result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.print_freelist();
self.print_heap();
panic!("out of memory");
}
}
result
}
pub fn live_objects(&self) -> MemoryIntoIterator {
return self.into_iter();
}
pub fn add_root(&mut self, obj: usize) {
self.roots.push(obj);
}
pub fn remove_root(&mut self, obj: usize) {
for i in 0..self.roots.len() {
if obj == self.roots[i] {
self.roots.remove(i);
return;
}
}
}
pub fn at_put(&mut self, obj: usize, index: usize, value: usize) {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object = &mut self.mem[base..base + slots];
object[index] = value;
}
pub fn at(&self, obj: usize, index: usize) -> usize {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object = &self.mem[base..base + slots];
return object[index];
}
pub fn element_size(&self, obj: usize) -> usize {
return self.mem[obj] - OBJECT_HEADER_SLOTS;
}
pub fn enable_show_heap_map(&mut self, enabled: bool) {
self.show_heap_map = enabled;
}
pub fn enable_show_freelist(&mut self, enabled: bool) {
self.show_free_list = enabled;
}
pub fn enable_show_gc(&mut self, enabled: bool) {
self.show_gc = enabled;
}
pub fn enable_show_allocates(&mut self, enabled: bool) {
self.show_allocates = enabled;
}
fn rounded_size(unrounded_size: usize) -> usize {
(unrounded_size + 1) & !1 // round up to an even number of slots
}
fn get_size(&self, obj: usize) -> usize {
return self.mem[obj];
}
fn set_size(&mut self, obj: usize, size: usize) {
self.mem[obj] = size;
}
fn next_object_in_heap(&self, obj: usize) -> usize {
return obj + self.get_size(obj);
}
// free-list next pointer lives in the slot after the size header (obj + 1)
fn get_fl_next(&self, obj: usize) -> usize {
return self.mem[obj + 1];
}
fn set_fl_next(&mut self, obj: usize, next: usize) {
self.mem[obj + 1] = next;
}
fn mark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] |= 1 << (obj % MARK_BITS_PER_SLOT);
}
fn unmark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] &= !(1 << (obj % MARK_BITS_PER_SLOT));
}
fn is_marked(&self, obj: usize) -> bool {
(self.mark_bits[obj / MARK_BITS_PER_SLOT] & (1 << (obj % MARK_BITS_PER_SLOT))) != 0
}
fn allocate_object_nocompress(&mut self, unrounded_size: usize) -> usize {
let size = Memory::rounded_size(unrounded_size + OBJECT_HEADER_SLOTS);
let mut free = self.head;
while free != 0 {
let avail = self.get_size(free);
if avail > size {
let newsize = avail - size;
if newsize < 2 {
panic!("remaining size is less than 2");
}
// shrink current free to smaller size
self.set_size(free, newsize);
// new object is on the end of current free object
let new_object = free + newsize;
self.set_size(new_object, size);
for index in 0..self.element_size(new_object) {
self.at_put(new_object, index, 0);
}
if self.show_allocates {
println!(
"Success: allocate_object returning -> {} size {}",
new_object, size
);
}
if self.head != free {
if self.show_allocates {
println!("Reset head past intermediate free blocks \n");
let mut show = self.head;
while show != free {
println!("Abandon {} size {}\n", show, self.get_size(show));
show = self.get_fl_next(show);
}
}
self.head = free;
}
return new_object;
}
free = self.get_fl_next(free);
}
0
}
pub fn gc(&mut self) {
let start = SystemTime::now();
for i in 0..self.roots.len() {
self.mark_and_scan(self.roots[i]);
}
self.sweep();
self.gc_count += 1;
if self.show_gc {
self.print_gc_stats();
}
match start.elapsed() {
Ok(elapsed) => {
self.last_gc_ms = elapsed.as_millis();
self.total_gc_ms += self.last_gc_ms;
}
Err(e) => {
println!("Error: {:?}", e);
}
}
}
fn sweep(&mut self) {
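// Single pass over the heap: marked objects are unmarked and counted live;
// unmarked ones are appended to a fresh free list, coalescing blocks that
// are adjacent in the heap (the next_object_in_heap(tail) == scan case).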
let mut scan = 1;
self.head = 0;
let mut tail = self.head;
self.lastgc_free_mem = 0;
self.lastgc_live_mem = 0;
while scan < MAX_MEMORY_SLOTS - 1 {
if self.is_marked(scan) {
self.unmark_object(scan);
self.lastgc_live_mem += self.get_size(scan);
} else {
self.lastgc_free_mem += self.get_size(scan);
if tail == 0 {
self.head = scan;
self.set_fl_next(scan, 0);
tail = scan;
} else {
if self.next_object_in_heap(tail) == scan {
self.set_size(tail, self.get_size(tail) + self.get_size(scan));
} else {
self.set_fl_next(tail, scan);
self.set_fl_next(scan, 0);
tail = scan;
}
}
}
scan = self.next_object_in_heap(scan);
}
if self.show_free_list {
self.print_freelist();
}
if self.show_heap_map {
self.print_heap();
}
}
fn mark_and_scan(&mut self, object: usize) {
if object == 0 || self.is_marked(object) |
let slots = self.get_size(object);
self.mark_object(object);
for i in OBJECT_HEADER_SLOTS..slots {
self.mark_and_scan(self.mem[object + i]);
}
}
pub fn print_gc_stats(&self) {
println!(
"{} gcs, {} object allocates, Last GC: Live {} Dead {} in {} ms, Lifetime GC {} ms\n",
self.gc_count,
self.allocates,
self.lastgc_live_mem,
self.lastgc_free_mem,
self.last_gc_ms,
self.total_gc_ms,
);
}
fn print_heap(&mut self) {
print!("\x1B[{};{}H", 1, 1);
let mut scan = 1;
let mut count = 0;
let mut free = self.head;
while scan < MAX_MEMORY_SLOTS - 1 {
// free runs print as 'x', live objects as '.'
let mut num_chars_to_print = 0;
let mut char_to_print = '?';
if scan == free {
while scan == free {
char_to_print = 'x';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(free);
free = self.get_fl_next(free);
}
} else {
char_to_print = '.';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(scan);
}
for _i in 1..num_chars_to_print / 2 {
print!("{}", char_to_print);
count += 1;
if count % 120 == 0 {
print!("\n");
}
}
}
self.print_gc_stats();
}
pub fn print_freelist(&mut self) {
println!("\nprint_freelist: Head = {}", self.head);
let mut free = self.head;
let mut count = 0;
let mut total_free = 0;
while free != 0 {
let size = self.get_size(free);
let next = self.get_fl_next(free);
total_free += self.get_size(free);
println!("{}: Free = {} {} slots next = {}", count, free, size, next);
free = next;
count += 1;
if count > MAX_MEMORY_SLOTS {
panic!()
}
}
println!(
"print_freelist {} elements, total free = {}\n",
count, total_free
);
}
}
| {
return;
} | conditional_block |
gaia_executor.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
#![allow(bare_trait_objects)]
extern crate futures;
extern crate grpcio;
#[macro_use]
extern crate log;
extern crate gaia_pegasus;
extern crate gs_gremlin;
extern crate log4rs;
extern crate maxgraph_common;
extern crate maxgraph_runtime;
extern crate maxgraph_server;
extern crate maxgraph_store;
extern crate pegasus;
extern crate pegasus_server;
extern crate protobuf;
extern crate structopt;
use gaia_runtime::server::init_with_rpc_service;
use gaia_runtime::server::manager::GaiaServerManager;
use grpcio::ChannelBuilder;
use grpcio::EnvBuilder;
use gs_gremlin::{InitializeJobCompiler, QueryVineyard};
use maxgraph_common::proto::data::*;
use maxgraph_common::proto::hb::*;
use maxgraph_common::proto::query_flow::*;
use maxgraph_common::util;
use maxgraph_common::util::get_local_ip;
use maxgraph_common::util::log4rs::init_log4rs;
use maxgraph_runtime::server::manager::*;
use maxgraph_runtime::server::RuntimeInfo;
use maxgraph_server::StoreContext;
use maxgraph_store::api::graph_partition::GraphPartitionManager;
use maxgraph_store::api::prelude::*;
use maxgraph_store::config::{StoreConfig, VINEYARD_GRAPH};
use pegasus_server::rpc::start_rpc_server;
use pegasus_server::service::Service;
use protobuf::Message;
use std::collections::HashMap;
use std::env;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use tokio::runtime::Runtime;
fn main() {
if let Some(_) = env::args().find(|arg| arg == "--show-build-info") {
util::get_build_info();
return;
}
init_log4rs();
let mut store_config = {
let args: Vec<String> = std::env::args().collect();
if args.len() <= 6 && args[1] == "--config" {
let mut store_config = StoreConfig::init_from_file(&args[2], &args[4]);
if args.len() == 6 {
store_config.graph_name = (&args[5]).to_string();
}
store_config
} else {
StoreConfig::init()
}
};
let (alive_id, partitions) = get_init_info(&store_config);
info!("alive_id: {:?}, partitions: {:?}", alive_id, partitions);
store_config.update_alive_id(alive_id);
info!("{:?}", store_config);
let worker_num = store_config.timely_worker_per_process;
let store_config = Arc::new(store_config);
if store_config.graph_type.to_lowercase().eq(VINEYARD_GRAPH) {
if cfg!(target_os = "linux") {
info!(
"Start executor with vineyard graph object id {:?}",
store_config.vineyard_graph_id
);
use maxgraph_runtime::store::ffi::FFIGraphStore;
let ffi_store = FFIGraphStore::new(store_config.vineyard_graph_id, worker_num as i32);
let partition_manager = ffi_store.get_partition_manager();
run_main(
store_config,
Arc::new(ffi_store),
Arc::new(partition_manager),
);
} else {
unimplemented!("Mac not support vineyard graph")
}
} else {
unimplemented!("only start vineyard graph from executor")
}
}
fn run_main<V, VI, E, EI>(
store_config: Arc<StoreConfig>,
graph: Arc<GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<GraphPartitionManager>,
) where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
let process_partition_list = partition_manager.get_process_partition_list();
info!("process_partition_list: {:?}", process_partition_list);
let runtime_info = Arc::new(Mutex::new(RuntimeInfo::new(
store_config.timely_worker_per_process,
process_partition_list,
)));
let runtime_info_clone = runtime_info.clone();
let (hb_resp_sender, hb_resp_receiver) = channel();
let signal = Arc::new(AtomicBool::new(false));
let gaia_server_manager =
GaiaServerManager::new(hb_resp_receiver, runtime_info, signal.clone());
let partition_worker_mapping = gaia_server_manager.get_partition_worker_mapping();
let worker_partition_list_mapping = gaia_server_manager.get_worker_partition_list_mapping();
let server_manager = Box::new(gaia_server_manager);
let _manager_guards = ServerManager::start_server(
server_manager,
store_config.clone(),
Box::new(recover_prepare),
)
.unwrap();
let gaia_service = GaiaService::new(
store_config.clone(),
graph.clone(),
partition_manager.clone(),
partition_worker_mapping,
worker_partition_list_mapping,
);
let (_, gaia_rpc_service_port) = gaia_service.start_rpc_service();
let store_context = StoreContext::new(graph, partition_manager);
start_hb_rpc_service(
runtime_info_clone,
store_config,
gaia_rpc_service_port,
hb_resp_sender,
store_context,
);
thread::sleep(Duration::from_secs(u64::max_value()));
}
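// recover_prepare validates persisted QueryFlow bytes by parsing and then
// re-serializing them, so only well-formed plans are handed back to the
// server manager.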
fn recover_prepare(prepared: &[u8]) -> Result<Vec<u8>, String> {
::protobuf::parse_from_bytes::<QueryFlow>(prepared)
.map_err(|err| err.to_string())
.and_then(move |desc| {
info!("parse {} bytes to {:?} ", prepared.len(), desc);
Ok(desc.write_to_bytes().expect("query flow to bytes"))
})
}
fn start_hb_rpc_service<VV, VVI, EE, EEI>(
runtime_info: Arc<Mutex<RuntimeInfo>>,
store_config: Arc<StoreConfig>,
gaia_service_port: u16,
hb_resp_sender: Sender<Arc<ServerHBResp>>,
store_context: StoreContext<VV, VVI, EE, EEI>,
) where
VV: 'static + Vertex,
VVI: 'static + Iterator<Item = VV> + Send,
EE: 'static + Edge,
EEI: 'static + Iterator<Item = EE> + Send,
{
// build hb information
let mut hb_providers = Vec::new();
let mut hb_resp_senders = Vec::new();
let hb_provider = move |ref mut server_hb_req: &mut ServerHBReq| {
server_hb_req.set_runtimeReq(build_runtime_req(runtime_info.clone()));
server_hb_req
.mut_endpoint()
.set_runtimCtrlAndAsyncPort(gaia_service_port as i32);
};
hb_providers.push(Box::new(hb_provider));
hb_resp_senders.push(hb_resp_sender);
let store_config_clone = store_config.clone();
init_with_rpc_service(
store_config_clone,
hb_providers,
hb_resp_senders,
store_context,
);
}
fn build_runtime_req(runtime_info: Arc<Mutex<RuntimeInfo>>) -> RuntimeHBReq {
let hb_req = runtime_info.lock().expect("Lock runtime hb req failed");
let mut runtime_req = RuntimeHBReq::new();
runtime_req.set_serverStatus(hb_req.get_server_status());
runtime_req.set_runtimePort(hb_req.get_server_port() as i32);
runtime_req.set_worker_num_per_process(hb_req.get_worker_num_per_process());
runtime_req.set_process_partition_list(hb_req.get_process_partition_list().to_vec());
debug!("Build runtime request {:?} in heartbeat", &runtime_req);
runtime_req
}
/// return: (aliveId, partition assignments)
fn get_init_info(config: &StoreConfig) -> (u64, Vec<PartitionId>) {
use maxgraph_common::proto::data_grpc::*;
use maxgraph_common::util::ip;
use maxgraph_server::client::ZKClient;
let zk_url = format!("{}/{}", config.zk_url, config.graph_name);
let zk = ZKClient::new(&zk_url, config.zk_timeout_ms, config.get_zk_auth());
let addr = zk.get_coordinator_addr();
let channel =
ChannelBuilder::new(Arc::new(EnvBuilder::new().build())).connect(addr.to_string().as_str());
let client = ServerDataApiClient::new(channel);
let mut request = GetExecutorAliveIdRequest::new();
request.set_serverId(config.worker_id);
request.set_ip(ip::get_local_ip());
let response = client.get_executor_alive_id(&request).unwrap();
let alive_id = response.get_aliveId();
let mut request = GetPartitionAssignmentRequest::new();
request.set_serverId(config.worker_id);
let response = client.get_partition_assignment(&request).unwrap();
let partitions = response.get_partitionId().to_vec();
(alive_id, partitions)
}
pub struct GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
// mapping of partition id -> worker id
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
// mapping of worker id -> partition list
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
rpc_runtime: Runtime,
}
impl<V, VI, E, EI> GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
pub fn new(
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
) -> GaiaService<V, VI, E, EI> {
GaiaService {
store_config,
graph,
partition_manager,
partition_worker_mapping,
worker_partition_list_mapping,
rpc_runtime: Runtime::new().unwrap(),
}
}
pub fn start_rpc_service(&self) -> (String, u16) | (ip, rpc_port)
}
}
| {
let rpc_port = self.rpc_runtime.block_on(async {
let query_vineyard = QueryVineyard::new(
self.graph.clone(),
self.partition_manager.clone(),
self.partition_worker_mapping.clone(),
self.worker_partition_list_mapping.clone(),
self.store_config.worker_num as usize,
self.store_config.worker_id as u64,
);
let job_compiler = query_vineyard.initialize_job_compiler();
let service = Service::new(job_compiler);
let addr = format!("{}:{}", "0.0.0.0", self.store_config.rpc_port);
let local_addr = start_rpc_server(addr.parse().unwrap(), service, true, false)
.await
.unwrap();
local_addr.port()
});
let ip = get_local_ip();
info!("start rpc server on {} {}", ip, rpc_port); | identifier_body |
gaia_executor.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
#![allow(bare_trait_objects)]
extern crate futures;
extern crate grpcio;
#[macro_use]
extern crate log;
extern crate gaia_pegasus;
extern crate gs_gremlin;
extern crate log4rs;
extern crate maxgraph_common;
extern crate maxgraph_runtime;
extern crate maxgraph_server;
extern crate maxgraph_store;
extern crate pegasus;
extern crate pegasus_server;
extern crate protobuf;
extern crate structopt;
use gaia_runtime::server::init_with_rpc_service;
use gaia_runtime::server::manager::GaiaServerManager;
use grpcio::ChannelBuilder;
use grpcio::EnvBuilder;
use gs_gremlin::{InitializeJobCompiler, QueryVineyard};
use maxgraph_common::proto::data::*;
use maxgraph_common::proto::hb::*;
use maxgraph_common::proto::query_flow::*;
use maxgraph_common::util;
use maxgraph_common::util::get_local_ip;
use maxgraph_common::util::log4rs::init_log4rs;
use maxgraph_runtime::server::manager::*;
use maxgraph_runtime::server::RuntimeInfo;
use maxgraph_server::StoreContext;
use maxgraph_store::api::graph_partition::GraphPartitionManager;
use maxgraph_store::api::prelude::*;
use maxgraph_store::config::{StoreConfig, VINEYARD_GRAPH};
use pegasus_server::rpc::start_rpc_server;
use pegasus_server::service::Service;
use protobuf::Message;
use std::collections::HashMap;
use std::env;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use tokio::runtime::Runtime;
fn main() {
if let Some(_) = env::args().find(|arg| arg == "--show-build-info") |
init_log4rs();
let mut store_config = {
let args: Vec<String> = std::env::args().collect();
if args.len() <= 6 && args[1] == "--config" {
let mut store_config = StoreConfig::init_from_file(&args[2], &args[4]);
if args.len() == 6 {
store_config.graph_name = (&args[5]).to_string();
}
store_config
} else {
StoreConfig::init()
}
};
let (alive_id, partitions) = get_init_info(&store_config);
info!("alive_id: {:?}, partitions: {:?}", alive_id, partitions);
store_config.update_alive_id(alive_id);
info!("{:?}", store_config);
let worker_num = store_config.timely_worker_per_process;
let store_config = Arc::new(store_config);
if store_config.graph_type.to_lowercase().eq(VINEYARD_GRAPH) {
if cfg!(target_os = "linux") {
info!(
"Start executor with vineyard graph object id {:?}",
store_config.vineyard_graph_id
);
use maxgraph_runtime::store::ffi::FFIGraphStore;
let ffi_store = FFIGraphStore::new(store_config.vineyard_graph_id, worker_num as i32);
let partition_manager = ffi_store.get_partition_manager();
run_main(
store_config,
Arc::new(ffi_store),
Arc::new(partition_manager),
);
} else {
unimplemented!("Mac not support vineyard graph")
}
} else {
unimplemented!("only start vineyard graph from executor")
}
}
fn run_main<V, VI, E, EI>(
store_config: Arc<StoreConfig>,
graph: Arc<GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<GraphPartitionManager>,
) where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
let process_partition_list = partition_manager.get_process_partition_list();
info!("process_partition_list: {:?}", process_partition_list);
let runtime_info = Arc::new(Mutex::new(RuntimeInfo::new(
store_config.timely_worker_per_process,
process_partition_list,
)));
let runtime_info_clone = runtime_info.clone();
let (hb_resp_sender, hb_resp_receiver) = channel();
let signal = Arc::new(AtomicBool::new(false));
let gaia_server_manager =
GaiaServerManager::new(hb_resp_receiver, runtime_info, signal.clone());
let partition_worker_mapping = gaia_server_manager.get_partition_worker_mapping();
let worker_partition_list_mapping = gaia_server_manager.get_worker_partition_list_mapping();
let server_manager = Box::new(gaia_server_manager);
let _manager_guards = ServerManager::start_server(
server_manager,
store_config.clone(),
Box::new(recover_prepare),
)
.unwrap();
let gaia_service = GaiaService::new(
store_config.clone(),
graph.clone(),
partition_manager.clone(),
partition_worker_mapping,
worker_partition_list_mapping,
);
let (_, gaia_rpc_service_port) = gaia_service.start_rpc_service();
let store_context = StoreContext::new(graph, partition_manager);
start_hb_rpc_service(
runtime_info_clone,
store_config,
gaia_rpc_service_port,
hb_resp_sender,
store_context,
);
thread::sleep(Duration::from_secs(u64::max_value()));
}
fn recover_prepare(prepared: &[u8]) -> Result<Vec<u8>, String> {
::protobuf::parse_from_bytes::<QueryFlow>(prepared)
.map_err(|err| err.to_string())
.and_then(move |desc| {
info!("parse {} bytes to {:?} ", prepared.len(), desc);
Ok(desc.write_to_bytes().expect("query flow to bytes"))
})
}
fn start_hb_rpc_service<VV, VVI, EE, EEI>(
runtime_info: Arc<Mutex<RuntimeInfo>>,
store_config: Arc<StoreConfig>,
gaia_service_port: u16,
hb_resp_sender: Sender<Arc<ServerHBResp>>,
store_context: StoreContext<VV, VVI, EE, EEI>,
) where
VV: 'static + Vertex,
VVI: 'static + Iterator<Item = VV> + Send,
EE: 'static + Edge,
EEI: 'static + Iterator<Item = EE> + Send,
{
// build hb information
let mut hb_providers = Vec::new();
let mut hb_resp_senders = Vec::new();
let hb_provider = move |ref mut server_hb_req: &mut ServerHBReq| {
server_hb_req.set_runtimeReq(build_runtime_req(runtime_info.clone()));
server_hb_req
.mut_endpoint()
.set_runtimCtrlAndAsyncPort(gaia_service_port as i32);
};
hb_providers.push(Box::new(hb_provider));
hb_resp_senders.push(hb_resp_sender);
let store_config_clone = store_config.clone();
init_with_rpc_service(
store_config_clone,
hb_providers,
hb_resp_senders,
store_context,
);
}
fn build_runtime_req(runtime_info: Arc<Mutex<RuntimeInfo>>) -> RuntimeHBReq {
let hb_req = runtime_info.lock().expect("Lock runtime hb req failed");
let mut runtime_req = RuntimeHBReq::new();
runtime_req.set_serverStatus(hb_req.get_server_status());
runtime_req.set_runtimePort(hb_req.get_server_port() as i32);
runtime_req.set_worker_num_per_process(hb_req.get_worker_num_per_process());
runtime_req.set_process_partition_list(hb_req.get_process_partition_list().to_vec());
debug!("Build runtime request {:?} in heartbeat", &runtime_req);
runtime_req
}
/// return: (aliveId, partition assignments)
fn get_init_info(config: &StoreConfig) -> (u64, Vec<PartitionId>) {
use maxgraph_common::proto::data_grpc::*;
use maxgraph_common::util::ip;
use maxgraph_server::client::ZKClient;
let zk_url = format!("{}/{}", config.zk_url, config.graph_name);
let zk = ZKClient::new(&zk_url, config.zk_timeout_ms, config.get_zk_auth());
let addr = zk.get_coordinator_addr();
let channel =
ChannelBuilder::new(Arc::new(EnvBuilder::new().build())).connect(addr.to_string().as_str());
let client = ServerDataApiClient::new(channel);
let mut request = GetExecutorAliveIdRequest::new();
request.set_serverId(config.worker_id);
request.set_ip(ip::get_local_ip());
let response = client.get_executor_alive_id(&request).unwrap();
let alive_id = response.get_aliveId();
let mut request = GetPartitionAssignmentRequest::new();
request.set_serverId(config.worker_id);
let response = client.get_partition_assignment(&request).unwrap();
let partitions = response.get_partitionId().to_vec();
(alive_id, partitions)
}
pub struct GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
// mapping of partition id -> worker id
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
// mapping of worker id -> partition list
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
rpc_runtime: Runtime,
}
impl<V, VI, E, EI> GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
pub fn new(
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
) -> GaiaService<V, VI, E, EI> {
GaiaService {
store_config,
graph,
partition_manager,
partition_worker_mapping,
worker_partition_list_mapping,
rpc_runtime: Runtime::new().unwrap(),
}
}
pub fn start_rpc_service(&self) -> (String, u16) {
let rpc_port = self.rpc_runtime.block_on(async {
let query_vineyard = QueryVineyard::new(
self.graph.clone(),
self.partition_manager.clone(),
self.partition_worker_mapping.clone(),
self.worker_partition_list_mapping.clone(),
self.store_config.worker_num as usize,
self.store_config.worker_id as u64,
);
let job_compiler = query_vineyard.initialize_job_compiler();
let service = Service::new(job_compiler);
let addr = format!("{}:{}", "0.0.0.0", self.store_config.rpc_port);
let local_addr = start_rpc_server(addr.parse().unwrap(), service, true, false)
.await
.unwrap();
local_addr.port()
});
let ip = get_local_ip();
info!("start rpc server on {} {}", ip, rpc_port);
(ip, rpc_port)
}
}
| {
util::get_build_info();
return;
} | conditional_block |
gaia_executor.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
#![allow(bare_trait_objects)]
extern crate futures;
extern crate grpcio;
#[macro_use]
extern crate log;
extern crate gaia_pegasus;
extern crate gs_gremlin;
extern crate log4rs;
extern crate maxgraph_common;
extern crate maxgraph_runtime;
extern crate maxgraph_server;
extern crate maxgraph_store;
extern crate pegasus;
extern crate pegasus_server;
extern crate protobuf;
extern crate structopt;
use gaia_runtime::server::init_with_rpc_service;
use gaia_runtime::server::manager::GaiaServerManager;
use grpcio::ChannelBuilder;
use grpcio::EnvBuilder;
use gs_gremlin::{InitializeJobCompiler, QueryVineyard};
use maxgraph_common::proto::data::*;
use maxgraph_common::proto::hb::*;
use maxgraph_common::proto::query_flow::*;
use maxgraph_common::util;
use maxgraph_common::util::get_local_ip;
use maxgraph_common::util::log4rs::init_log4rs;
use maxgraph_runtime::server::manager::*;
use maxgraph_runtime::server::RuntimeInfo;
use maxgraph_server::StoreContext;
use maxgraph_store::api::graph_partition::GraphPartitionManager;
use maxgraph_store::api::prelude::*;
use maxgraph_store::config::{StoreConfig, VINEYARD_GRAPH};
use pegasus_server::rpc::start_rpc_server;
use pegasus_server::service::Service;
use protobuf::Message;
use std::collections::HashMap;
use std::env;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use tokio::runtime::Runtime;
fn main() {
if let Some(_) = env::args().find(|arg| arg == "--show-build-info") {
util::get_build_info();
return;
}
init_log4rs();
let mut store_config = {
let args: Vec<String> = std::env::args().collect();
if args.len() <= 6 && args[1] == "--config" {
let mut store_config = StoreConfig::init_from_file(&args[2], &args[4]);
if args.len() == 6 {
store_config.graph_name = (&args[5]).to_string();
}
store_config
} else {
StoreConfig::init()
}
};
let (alive_id, partitions) = get_init_info(&store_config);
info!("alive_id: {:?}, partitions: {:?}", alive_id, partitions);
store_config.update_alive_id(alive_id);
info!("{:?}", store_config);
let worker_num = store_config.timely_worker_per_process;
let store_config = Arc::new(store_config);
if store_config.graph_type.to_lowercase().eq(VINEYARD_GRAPH) {
if cfg!(target_os = "linux") {
info!(
"Start executor with vineyard graph object id {:?}",
store_config.vineyard_graph_id
);
use maxgraph_runtime::store::ffi::FFIGraphStore;
let ffi_store = FFIGraphStore::new(store_config.vineyard_graph_id, worker_num as i32);
let partition_manager = ffi_store.get_partition_manager();
run_main(
store_config,
Arc::new(ffi_store),
Arc::new(partition_manager),
);
} else {
unimplemented!("Mac not support vineyard graph")
}
} else {
unimplemented!("only start vineyard graph from executor")
}
}
fn run_main<V, VI, E, EI>(
store_config: Arc<StoreConfig>,
graph: Arc<GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<GraphPartitionManager>,
) where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
let process_partition_list = partition_manager.get_process_partition_list();
info!("process_partition_list: {:?}", process_partition_list);
let runtime_info = Arc::new(Mutex::new(RuntimeInfo::new(
store_config.timely_worker_per_process,
process_partition_list,
)));
let runtime_info_clone = runtime_info.clone();
let (hb_resp_sender, hb_resp_receiver) = channel();
let signal = Arc::new(AtomicBool::new(false));
let gaia_server_manager =
GaiaServerManager::new(hb_resp_receiver, runtime_info, signal.clone());
let partition_worker_mapping = gaia_server_manager.get_partition_worker_mapping();
let worker_partition_list_mapping = gaia_server_manager.get_worker_partition_list_mapping();
let server_manager = Box::new(gaia_server_manager);
let _manager_guards = ServerManager::start_server( | server_manager,
store_config.clone(),
Box::new(recover_prepare),
)
.unwrap();
let gaia_service = GaiaService::new(
store_config.clone(),
graph.clone(),
partition_manager.clone(),
partition_worker_mapping,
worker_partition_list_mapping,
);
let (_, gaia_rpc_service_port) = gaia_service.start_rpc_service();
let store_context = StoreContext::new(graph, partition_manager);
start_hb_rpc_service(
runtime_info_clone,
store_config,
gaia_rpc_service_port,
hb_resp_sender,
store_context,
);
thread::sleep(Duration::from_secs(u64::max_value()));
}
fn recover_prepare(prepared: &[u8]) -> Result<Vec<u8>, String> {
::protobuf::parse_from_bytes::<QueryFlow>(prepared)
.map_err(|err| err.to_string())
.and_then(move |desc| {
info!("parse {} bytes to {:?} ", prepared.len(), desc);
Ok(desc.write_to_bytes().expect("query flow to bytes"))
})
}
fn start_hb_rpc_service<VV, VVI, EE, EEI>(
runtime_info: Arc<Mutex<RuntimeInfo>>,
store_config: Arc<StoreConfig>,
gaia_service_port: u16,
hb_resp_sender: Sender<Arc<ServerHBResp>>,
store_context: StoreContext<VV, VVI, EE, EEI>,
) where
VV: 'static + Vertex,
VVI: 'static + Iterator<Item = VV> + Send,
EE: 'static + Edge,
EEI: 'static + Iterator<Item = EE> + Send,
{
// build hb information
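// Each provider closure fills in part of the heartbeat request; this one
// attaches the runtime state and advertises the gaia RPC port via the
// endpoint field.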
let mut hb_providers = Vec::new();
let mut hb_resp_senders = Vec::new();
let hb_provider = move |ref mut server_hb_req: &mut ServerHBReq| {
server_hb_req.set_runtimeReq(build_runtime_req(runtime_info.clone()));
server_hb_req
.mut_endpoint()
.set_runtimCtrlAndAsyncPort(gaia_service_port as i32);
};
hb_providers.push(Box::new(hb_provider));
hb_resp_senders.push(hb_resp_sender);
let store_config_clone = store_config.clone();
init_with_rpc_service(
store_config_clone,
hb_providers,
hb_resp_senders,
store_context,
);
}
fn build_runtime_req(runtime_info: Arc<Mutex<RuntimeInfo>>) -> RuntimeHBReq {
let hb_req = runtime_info.lock().expect("Lock runtime hb req failed");
let mut runtime_req = RuntimeHBReq::new();
runtime_req.set_serverStatus(hb_req.get_server_status());
runtime_req.set_runtimePort(hb_req.get_server_port() as i32);
runtime_req.set_worker_num_per_process(hb_req.get_worker_num_per_process());
runtime_req.set_process_partition_list(hb_req.get_process_partition_list().to_vec());
debug!("Build runtime request {:?} in heartbeat", &runtime_req);
runtime_req
}
/// return: (aliveId, partition assignments)
fn get_init_info(config: &StoreConfig) -> (u64, Vec<PartitionId>) {
use maxgraph_common::proto::data_grpc::*;
use maxgraph_common::util::ip;
use maxgraph_server::client::ZKClient;
let zk_url = format!("{}/{}", config.zk_url, config.graph_name);
let zk = ZKClient::new(&zk_url, config.zk_timeout_ms, config.get_zk_auth());
let addr = zk.get_coordinator_addr();
let channel =
ChannelBuilder::new(Arc::new(EnvBuilder::new().build())).connect(addr.to_string().as_str());
let client = ServerDataApiClient::new(channel);
let mut request = GetExecutorAliveIdRequest::new();
request.set_serverId(config.worker_id);
request.set_ip(ip::get_local_ip());
let response = client.get_executor_alive_id(&request).unwrap();
let alive_id = response.get_aliveId();
let mut request = GetPartitionAssignmentRequest::new();
request.set_serverId(config.worker_id);
let response = client.get_partition_assignment(&request).unwrap();
let partitions = response.get_partitionId().to_vec();
(alive_id, partitions)
}
pub struct GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
// mapping of partition id -> worker id
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
// mapping of worker id -> partition list
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
rpc_runtime: Runtime,
}
impl<V, VI, E, EI> GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
pub fn new(
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
) -> GaiaService<V, VI, E, EI> {
GaiaService {
store_config,
graph,
partition_manager,
partition_worker_mapping,
worker_partition_list_mapping,
rpc_runtime: Runtime::new().unwrap(),
}
}
pub fn start_rpc_service(&self) -> (String, u16) {
let rpc_port = self.rpc_runtime.block_on(async {
let query_vineyard = QueryVineyard::new(
self.graph.clone(),
self.partition_manager.clone(),
self.partition_worker_mapping.clone(),
self.worker_partition_list_mapping.clone(),
self.store_config.worker_num as usize,
self.store_config.worker_id as u64,
);
let job_compiler = query_vineyard.initialize_job_compiler();
let service = Service::new(job_compiler);
let addr = format!("{}:{}", "0.0.0.0", self.store_config.rpc_port);
let local_addr = start_rpc_server(addr.parse().unwrap(), service, true, false)
.await
.unwrap();
local_addr.port()
});
let ip = get_local_ip();
info!("start rpc server on {} {}", ip, rpc_port);
(ip, rpc_port)
}
} | random_line_split |
|
gaia_executor.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
#![allow(bare_trait_objects)]
extern crate futures;
extern crate grpcio;
#[macro_use]
extern crate log;
extern crate gaia_pegasus;
extern crate gs_gremlin;
extern crate log4rs;
extern crate maxgraph_common;
extern crate maxgraph_runtime;
extern crate maxgraph_server;
extern crate maxgraph_store;
extern crate pegasus;
extern crate pegasus_server;
extern crate protobuf;
extern crate structopt;
use gaia_runtime::server::init_with_rpc_service;
use gaia_runtime::server::manager::GaiaServerManager;
use grpcio::ChannelBuilder;
use grpcio::EnvBuilder;
use gs_gremlin::{InitializeJobCompiler, QueryVineyard};
use maxgraph_common::proto::data::*;
use maxgraph_common::proto::hb::*;
use maxgraph_common::proto::query_flow::*;
use maxgraph_common::util;
use maxgraph_common::util::get_local_ip;
use maxgraph_common::util::log4rs::init_log4rs;
use maxgraph_runtime::server::manager::*;
use maxgraph_runtime::server::RuntimeInfo;
use maxgraph_server::StoreContext;
use maxgraph_store::api::graph_partition::GraphPartitionManager;
use maxgraph_store::api::prelude::*;
use maxgraph_store::config::{StoreConfig, VINEYARD_GRAPH};
use pegasus_server::rpc::start_rpc_server;
use pegasus_server::service::Service;
use protobuf::Message;
use std::collections::HashMap;
use std::env;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use tokio::runtime::Runtime;
fn main() {
if let Some(_) = env::args().find(|arg| arg == "--show-build-info") {
util::get_build_info();
return;
}
init_log4rs();
let mut store_config = {
let args: Vec<String> = std::env::args().collect();
if args.len() <= 6 && args[1] == "--config" {
let mut store_config = StoreConfig::init_from_file(&args[2], &args[4]);
if args.len() == 6 {
store_config.graph_name = (&args[5]).to_string();
}
store_config
} else {
StoreConfig::init()
}
};
let (alive_id, partitions) = get_init_info(&store_config);
info!("alive_id: {:?}, partitions: {:?}", alive_id, partitions);
store_config.update_alive_id(alive_id);
info!("{:?}", store_config);
let worker_num = store_config.timely_worker_per_process;
let store_config = Arc::new(store_config);
if store_config.graph_type.to_lowercase().eq(VINEYARD_GRAPH) {
if cfg!(target_os = "linux") {
info!(
"Start executor with vineyard graph object id {:?}",
store_config.vineyard_graph_id
);
use maxgraph_runtime::store::ffi::FFIGraphStore;
let ffi_store = FFIGraphStore::new(store_config.vineyard_graph_id, worker_num as i32);
let partition_manager = ffi_store.get_partition_manager();
run_main(
store_config,
Arc::new(ffi_store),
Arc::new(partition_manager),
);
} else {
unimplemented!("Mac not support vineyard graph")
}
} else {
unimplemented!("only start vineyard graph from executor")
}
}
fn | <V, VI, E, EI>(
store_config: Arc<StoreConfig>,
graph: Arc<GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<GraphPartitionManager>,
) where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
let process_partition_list = partition_manager.get_process_partition_list();
info!("process_partition_list: {:?}", process_partition_list);
let runtime_info = Arc::new(Mutex::new(RuntimeInfo::new(
store_config.timely_worker_per_process,
process_partition_list,
)));
let runtime_info_clone = runtime_info.clone();
let (hb_resp_sender, hb_resp_receiver) = channel();
let signal = Arc::new(AtomicBool::new(false));
let gaia_server_manager =
GaiaServerManager::new(hb_resp_receiver, runtime_info, signal.clone());
let partition_worker_mapping = gaia_server_manager.get_partition_worker_mapping();
let worker_partition_list_mapping = gaia_server_manager.get_worker_partition_list_mapping();
let server_manager = Box::new(gaia_server_manager);
let _manager_guards = ServerManager::start_server(
server_manager,
store_config.clone(),
Box::new(recover_prepare),
)
.unwrap();
let gaia_service = GaiaService::new(
store_config.clone(),
graph.clone(),
partition_manager.clone(),
partition_worker_mapping,
worker_partition_list_mapping,
);
let (_, gaia_rpc_service_port) = gaia_service.start_rpc_service();
let store_context = StoreContext::new(graph, partition_manager);
start_hb_rpc_service(
runtime_info_clone,
store_config,
gaia_rpc_service_port,
hb_resp_sender,
store_context,
);
thread::sleep(Duration::from_secs(u64::max_value()));
}
fn recover_prepare(prepared: &[u8]) -> Result<Vec<u8>, String> {
::protobuf::parse_from_bytes::<QueryFlow>(prepared)
.map_err(|err| err.to_string())
.and_then(move |desc| {
info!("parse {} bytes to {:?} ", prepared.len(), desc);
Ok(desc.write_to_bytes().expect("query flow to bytes"))
})
}
fn start_hb_rpc_service<VV, VVI, EE, EEI>(
runtime_info: Arc<Mutex<RuntimeInfo>>,
store_config: Arc<StoreConfig>,
gaia_service_port: u16,
hb_resp_sender: Sender<Arc<ServerHBResp>>,
store_context: StoreContext<VV, VVI, EE, EEI>,
) where
VV: 'static + Vertex,
VVI: 'static + Iterator<Item = VV> + Send,
EE: 'static + Edge,
EEI: 'static + Iterator<Item = EE> + Send,
{
// build hb information
let mut hb_providers = Vec::new();
let mut hb_resp_senders = Vec::new();
let hb_provider = move |ref mut server_hb_req: &mut ServerHBReq| {
server_hb_req.set_runtimeReq(build_runtime_req(runtime_info.clone()));
server_hb_req
.mut_endpoint()
.set_runtimCtrlAndAsyncPort(gaia_service_port as i32);
};
hb_providers.push(Box::new(hb_provider));
hb_resp_senders.push(hb_resp_sender);
let store_config_clone = store_config.clone();
init_with_rpc_service(
store_config_clone,
hb_providers,
hb_resp_senders,
store_context,
);
}
fn build_runtime_req(runtime_info: Arc<Mutex<RuntimeInfo>>) -> RuntimeHBReq {
let hb_req = runtime_info.lock().expect("Lock runtime hb req failed");
let mut runtime_req = RuntimeHBReq::new();
runtime_req.set_serverStatus(hb_req.get_server_status());
runtime_req.set_runtimePort(hb_req.get_server_port() as i32);
runtime_req.set_worker_num_per_process(hb_req.get_worker_num_per_process());
runtime_req.set_process_partition_list(hb_req.get_process_partition_list().to_vec());
debug!("Build runtime request {:?} in heartbeat", &runtime_req);
runtime_req
}
/// return: (aliveId, partition assignments)
fn get_init_info(config: &StoreConfig) -> (u64, Vec<PartitionId>) {
use maxgraph_common::proto::data_grpc::*;
use maxgraph_common::util::ip;
use maxgraph_server::client::ZKClient;
let zk_url = format!("{}/{}", config.zk_url, config.graph_name);
let zk = ZKClient::new(&zk_url, config.zk_timeout_ms, config.get_zk_auth());
let addr = zk.get_coordinator_addr();
let channel =
ChannelBuilder::new(Arc::new(EnvBuilder::new().build())).connect(addr.to_string().as_str());
let client = ServerDataApiClient::new(channel);
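// Two coordinator RPCs follow: first fetch this executor's alive id, then
// the partition assignment for this worker id.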
let mut request = GetExecutorAliveIdRequest::new();
request.set_serverId(config.worker_id);
request.set_ip(ip::get_local_ip());
let response = client.get_executor_alive_id(&request).unwrap();
let alive_id = response.get_aliveId();
let mut request = GetPartitionAssignmentRequest::new();
request.set_serverId(config.worker_id);
let response = client.get_partition_assignment(&request).unwrap();
let partitions = response.get_partitionId().to_vec();
(alive_id, partitions)
}
pub struct GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
// mapping of partition id -> worker id
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
// mapping of worker id -> partition list
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
rpc_runtime: Runtime,
}
impl<V, VI, E, EI> GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
pub fn new(
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
) -> GaiaService<V, VI, E, EI> {
GaiaService {
store_config,
graph,
partition_manager,
partition_worker_mapping,
worker_partition_list_mapping,
rpc_runtime: Runtime::new().unwrap(),
}
}
pub fn start_rpc_service(&self) -> (String, u16) {
let rpc_port = self.rpc_runtime.block_on(async {
let query_vineyard = QueryVineyard::new(
self.graph.clone(),
self.partition_manager.clone(),
self.partition_worker_mapping.clone(),
self.worker_partition_list_mapping.clone(),
self.store_config.worker_num as usize,
self.store_config.worker_id as u64,
);
let job_compiler = query_vineyard.initialize_job_compiler();
let service = Service::new(job_compiler);
let addr = format!("{}:{}", "0.0.0.0", self.store_config.rpc_port);
let local_addr = start_rpc_server(addr.parse().unwrap(), service, true, false)
.await
.unwrap();
local_addr.port()
});
let ip = get_local_ip();
info!("start rpc server on {} {}", ip, rpc_port);
(ip, rpc_port)
}
}
| run_main | identifier_name |
edgehub.rs | use std::{
env, fs,
future::Future,
path::{Path, PathBuf},
time::Duration as StdDuration,
};
use anyhow::{bail, Context, Result};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use futures_util::{
future::{self, Either},
FutureExt,
};
use tracing::{debug, error, info};
use mqtt_bridge::{settings::BridgeSettings, BridgeController};
use mqtt_broker::{
auth::Authorizer,
sidecar::{Sidecar, SidecarShutdownHandle},
Broker, BrokerBuilder, BrokerHandle, BrokerReady, BrokerSnapshot, FilePersistor,
MakeMqttPacketProcessor, Message, Persist, Server, ServerCertificate, SystemEvent,
VersionedFileFormat,
};
use mqtt_edgehub::{
auth::{
EdgeHubAuthenticator, EdgeHubAuthorizer, LocalAuthenticator, LocalAuthorizer,
PolicyAuthorizer,
},
command::{
AuthorizedIdentitiesCommand, BridgeUpdateCommand, CommandHandler, DisconnectCommand,
PolicyUpdateCommand,
},
connection::MakeEdgeHubPacketProcessor,
settings::Settings,
};
use super::{shutdown, Bootstrap};
const DEVICE_ID_ENV: &str = "IOTEDGE_DEVICEID";
const IOTHUB_HOSTNAME_ENV: &str = "IOTEDGE_IOTHUBHOSTNAME";
#[derive(Default)]
pub struct EdgeHubBootstrap {
broker_ready: BrokerReady,
}
#[async_trait]
impl Bootstrap for EdgeHubBootstrap {
type Settings = Settings;
fn load_config<P: AsRef<Path>>(&self, path: P) -> Result<Self::Settings> {
info!("loading settings from a file {}", path.as_ref().display());
Ok(Self::Settings::from_file(path)?)
}
type Authorizer = LocalAuthorizer<EdgeHubAuthorizer<PolicyAuthorizer>>;
async fn make_broker(
&self,
settings: &Self::Settings,
) -> Result<(Broker<Self::Authorizer>, FilePersistor<VersionedFileFormat>)> {
info!("loading state...");
let persistence_config = settings.broker().persistence();
let state_dir = persistence_config.file_path();
fs::create_dir_all(state_dir.clone())?;
let mut persistor = FilePersistor::new(state_dir, VersionedFileFormat::default());
let state = persistor.load().await?;
info!("state loaded.");
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let iothub_id = env::var(IOTHUB_HOSTNAME_ENV).context(IOTHUB_HOSTNAME_ENV)?;
let authorizer = LocalAuthorizer::new(EdgeHubAuthorizer::new(
PolicyAuthorizer::new(device_id.clone(), self.broker_ready.handle()),
device_id,
iothub_id,
self.broker_ready.handle(),
));
let broker = BrokerBuilder::default()
.with_authorizer(authorizer)
.with_state(state.unwrap_or_default())
.with_config(settings.broker().clone())
.build();
Ok((broker, persistor))
}
fn snapshot_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().persistence().time_interval()
}
fn session_expiration(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().expiration()
}
fn session_cleanup_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().cleanup_interval()
}
async fn run(
self,
config: Self::Settings,
broker: Broker<Self::Authorizer>,
) -> Result<BrokerSnapshot> {
let broker_handle = broker.handle();
let sidecars = make_sidecars(&broker_handle, &config)?;
info!("starting server...");
let server = make_server(config, broker, self.broker_ready).await?;
let shutdown_signal = shutdown_signal(&server);
let server = tokio::spawn(server.serve(shutdown_signal));
info!("starting sidecars...");
let mut shutdowns = Vec::new();
let mut sidecar_joins = Vec::new();
for sidecar in sidecars {
shutdowns.push(sidecar.shutdown_handle()?);
sidecar_joins.push(tokio::spawn(sidecar.run()));
}
let state = match future::select(server, future::select_all(sidecar_joins)).await {
// server exited first
Either::Left((snapshot, sidecars)) => {
// send shutdown event to each sidecar
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for at least one sidecar to finish
let (_res, _stopped, sidecars) = sidecars.await;
|
snapshot??
}
// one of the sidecars exited first
Either::Right(((res, stopped, sidecars), server)) => {
debug!("a sidecar has stopped. shutting down all sidecars...");
if let Err(e) = res {
error!(message = "failed waiting for sidecar shutdown", error = %e);
}
// send a shutdown event to each of the remaining sidecars
shutdowns.remove(stopped);
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for the rest to exit
future::join_all(sidecars).await;
// signal server
broker_handle.send(Message::System(SystemEvent::Shutdown))?;
server.await??
}
};
Ok(state)
}
}
async fn make_server<Z>(
config: Settings,
broker: Broker<Z>,
broker_ready: BrokerReady,
) -> Result<Server<Z, MakeEdgeHubPacketProcessor<MakeMqttPacketProcessor>>>
where
Z: Authorizer + Send + 'static,
{
let broker_handle = broker.handle();
let make_processor = MakeEdgeHubPacketProcessor::new_default(broker_handle.clone());
let mut server = Server::from_broker(broker).with_packet_processor(make_processor);
// Add system transport to allow communication between edgehub components
let authenticator = LocalAuthenticator::new();
server.with_tcp(config.listener().system().addr(), authenticator, None)?;
// Add regular MQTT over TCP transport
let authenticator = EdgeHubAuthenticator::new(config.auth().url());
if let Some(tcp) = config.listener().tcp() {
let broker_ready = Some(broker_ready.signal());
server.with_tcp(tcp.addr(), authenticator.clone(), broker_ready)?;
}
// Add regular MQTT over TLS transport
if let Some(tls) = config.listener().tls() {
let identity = if let Some(config) = tls.certificate() {
info!("loading identity from {}", config.cert_path().display());
ServerCertificate::from_pem(config.cert_path(), config.private_key_path())
.with_context(|| {
ServerCertificateLoadError::File(
config.cert_path().to_path_buf(),
config.private_key_path().to_path_buf(),
)
})?
} else {
info!("downloading identity from edgelet");
download_server_certificate()
.await
.with_context(|| ServerCertificateLoadError::Edgelet)?
};
let broker_ready = Some(broker_ready.signal());
server.with_tls(tls.addr(), identity, authenticator.clone(), broker_ready)?;
};
Ok(server)
}
fn make_sidecars(
broker_handle: &BrokerHandle,
config: &Settings,
) -> Result<Vec<Box<dyn Sidecar + Send>>> {
let mut sidecars: Vec<Box<dyn Sidecar + Send>> = Vec::new();
let system_address = config.listener().system().addr().to_string();
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let settings = BridgeSettings::new()?;
let bridge_controller =
BridgeController::new(system_address.clone(), device_id.to_owned(), settings);
let bridge_controller_handle = bridge_controller.handle();
sidecars.push(Box::new(bridge_controller));
let mut command_handler = CommandHandler::new(system_address, &device_id);
command_handler.add_command(DisconnectCommand::new(&broker_handle));
command_handler.add_command(AuthorizedIdentitiesCommand::new(&broker_handle));
command_handler.add_command(PolicyUpdateCommand::new(broker_handle));
command_handler.add_command(BridgeUpdateCommand::new(bridge_controller_handle));
sidecars.push(Box::new(command_handler));
Ok(sidecars)
}
pub const WORKLOAD_URI: &str = "IOTEDGE_WORKLOADURI";
pub const EDGE_DEVICE_HOST_NAME: &str = "EdgeDeviceHostName";
pub const MODULE_ID: &str = "IOTEDGE_MODULEID";
pub const MODULE_GENERATION_ID: &str = "IOTEDGE_MODULEGENERATIONID";
pub const CERTIFICATE_VALIDITY_DAYS: i64 = 90;
async fn download_server_certificate() -> Result<ServerCertificate> {
let uri = env::var(WORKLOAD_URI).context(WORKLOAD_URI)?;
let hostname = env::var(EDGE_DEVICE_HOST_NAME).context(EDGE_DEVICE_HOST_NAME)?;
let module_id = env::var(MODULE_ID).context(MODULE_ID)?;
let generation_id = env::var(MODULE_GENERATION_ID).context(MODULE_GENERATION_ID)?;
let expiration = Utc::now() + Duration::days(CERTIFICATE_VALIDITY_DAYS);
let client = edgelet_client::workload(&uri)?;
let cert = client
.create_server_cert(&module_id, &generation_id, &hostname, expiration)
.await?;
if cert.private_key().type_() != "key" {
bail!(
"unknown type of private key: {}",
cert.private_key().type_()
);
}
if let Some(private_key) = cert.private_key().bytes() {
let identity = ServerCertificate::from_pem_pair(cert.certificate(), private_key)?;
Ok(identity)
} else {
bail!("missing private key");
}
}
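// Hedged sketch of the environment this helper expects. The variable names
// are the constants declared above; the values below are illustrative only.
//
//     IOTEDGE_WORKLOADURI=unix:///var/run/iotedge/workload.sock
//     EdgeDeviceHostName=my-edge-device
//     IOTEDGE_MODULEID=$edgeHub
//     IOTEDGE_MODULEGENERATIONID=636958363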
fn shutdown_signal<Z, P>(server: &Server<Z, P>) -> impl Future<Output = ()> {
server
.listeners()
.iter()
.find_map(|listener| listener.transport().identity())
.map_or_else(
|| Either::Left(shutdown::shutdown()),
|identity| {
let system_or_cert_expired = future::select(
Box::pin(server_certificate_renewal(identity.not_after())),
Box::pin(shutdown::shutdown()),
);
Either::Right(system_or_cert_expired.map(drop))
},
)
}
async fn server_certificate_renewal(renew_at: DateTime<Utc>) {
let delay = renew_at - Utc::now();
if delay > Duration::zero() {
info!(
"scheduled server certificate renewal timer for {}",
renew_at
);
let delay = delay.to_std().expect("duration must not be negative");
crate::time::sleep(delay).await;
info!("restarting the broker to perform certificate renewal");
} else {
error!("server certificate expired at {}", renew_at);
}
}
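// Hedged sketch (added): `chrono::Duration::to_std` fails on negative spans,
// which is why `server_certificate_renewal` checks `delay > Duration::zero()`
// before converting. The same guard expressed as an `Option`:
#[allow(dead_code)]
fn example_delay_until(renew_at: DateTime<Utc>) -> Option<StdDuration> {
    // `None` means the certificate is already expired.
    (renew_at - Utc::now()).to_std().ok()
}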
#[derive(Debug, thiserror::Error)]
pub enum ServerCertificateLoadError {
#[error("unable to load server certificate from file {0} and private key {1}")]
File(PathBuf, PathBuf),
#[error("unable to download certificate from edgelet")]
Edgelet,
} | // wait for the rest to exit
future::join_all(sidecars).await; | random_line_split |
edgehub.rs | use std::{
env, fs,
future::Future,
path::{Path, PathBuf},
time::Duration as StdDuration,
};
use anyhow::{bail, Context, Result};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use futures_util::{
future::{self, Either},
FutureExt,
};
use tracing::{debug, error, info};
use mqtt_bridge::{settings::BridgeSettings, BridgeController};
use mqtt_broker::{
auth::Authorizer,
sidecar::{Sidecar, SidecarShutdownHandle},
Broker, BrokerBuilder, BrokerHandle, BrokerReady, BrokerSnapshot, FilePersistor,
MakeMqttPacketProcessor, Message, Persist, Server, ServerCertificate, SystemEvent,
VersionedFileFormat,
};
use mqtt_edgehub::{
auth::{
EdgeHubAuthenticator, EdgeHubAuthorizer, LocalAuthenticator, LocalAuthorizer,
PolicyAuthorizer,
},
command::{
AuthorizedIdentitiesCommand, BridgeUpdateCommand, CommandHandler, DisconnectCommand,
PolicyUpdateCommand,
},
connection::MakeEdgeHubPacketProcessor,
settings::Settings,
};
use super::{shutdown, Bootstrap};
const DEVICE_ID_ENV: &str = "IOTEDGE_DEVICEID";
const IOTHUB_HOSTNAME_ENV: &str = "IOTEDGE_IOTHUBHOSTNAME";
#[derive(Default)]
pub struct EdgeHubBootstrap {
broker_ready: BrokerReady,
}
#[async_trait]
impl Bootstrap for EdgeHubBootstrap {
type Settings = Settings;
fn load_config<P: AsRef<Path>>(&self, path: P) -> Result<Self::Settings> {
info!("loading settings from a file {}", path.as_ref().display());
Ok(Self::Settings::from_file(path)?)
}
type Authorizer = LocalAuthorizer<EdgeHubAuthorizer<PolicyAuthorizer>>;
async fn make_broker(
&self,
settings: &Self::Settings,
) -> Result<(Broker<Self::Authorizer>, FilePersistor<VersionedFileFormat>)> {
info!("loading state...");
let persistence_config = settings.broker().persistence();
let state_dir = persistence_config.file_path();
fs::create_dir_all(state_dir.clone())?;
let mut persistor = FilePersistor::new(state_dir, VersionedFileFormat::default());
let state = persistor.load().await?;
info!("state loaded.");
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let iothub_id = env::var(IOTHUB_HOSTNAME_ENV).context(IOTHUB_HOSTNAME_ENV)?;
let authorizer = LocalAuthorizer::new(EdgeHubAuthorizer::new(
PolicyAuthorizer::new(device_id.clone(), self.broker_ready.handle()),
device_id,
iothub_id,
self.broker_ready.handle(),
));
let broker = BrokerBuilder::default()
.with_authorizer(authorizer)
.with_state(state.unwrap_or_default())
.with_config(settings.broker().clone())
.build();
Ok((broker, persistor))
}
fn snapshot_interval(&self, settings: &Self::Settings) -> StdDuration |
fn session_expiration(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().expiration()
}
fn session_cleanup_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().cleanup_interval()
}
async fn run(
self,
config: Self::Settings,
broker: Broker<Self::Authorizer>,
) -> Result<BrokerSnapshot> {
let broker_handle = broker.handle();
let sidecars = make_sidecars(&broker_handle, &config)?;
info!("starting server...");
let server = make_server(config, broker, self.broker_ready).await?;
let shutdown_signal = shutdown_signal(&server);
let server = tokio::spawn(server.serve(shutdown_signal));
info!("starting sidecars...");
let mut shutdowns = Vec::new();
let mut sidecar_joins = Vec::new();
for sidecar in sidecars {
shutdowns.push(sidecar.shutdown_handle()?);
sidecar_joins.push(tokio::spawn(sidecar.run()));
}
let state = match future::select(server, future::select_all(sidecar_joins)).await {
// server exited first
Either::Left((snapshot, sidecars)) => {
// send shutdown event to each sidecar
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for at least one sidecar to finish
let (_res, _stopped, sidecars) = sidecars.await;
// wait for the rest to exit
future::join_all(sidecars).await;
snapshot??
}
// one of the sidecars exited first
Either::Right(((res, stopped, sidecars), server)) => {
debug!("a sidecar has stopped. shutting down all sidecars...");
if let Err(e) = res {
error!(message = "failed waiting for sidecar shutdown", error = %e);
}
// send a shutdown event to each of the remaining sidecars
shutdowns.remove(stopped);
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for the rest to exit
future::join_all(sidecars).await;
// signal server
broker_handle.send(Message::System(SystemEvent::Shutdown))?;
server.await??
}
};
Ok(state)
}
}
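// Hedged illustration (added): the `futures_util::future::select_all`
// contract that `run` relies on above — it resolves to
// (output, index_of_finished, remaining_futures), which is why the finished
// sidecar's shutdown handle can be removed by index.
#[allow(dead_code)]
async fn example_select_all_shape() {
    use std::pin::Pin;
    let futs: Vec<Pin<Box<dyn Future<Output = u8> + Send>>> = vec![
        Box::pin(future::ready(1)),
        Box::pin(future::ready(2)),
    ];
    // One future completes; the other is handed back for further handling.
    let (_output, _index, remaining) = future::select_all(futs).await;
    debug_assert_eq!(remaining.len(), 1);
}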
async fn make_server<Z>(
config: Settings,
broker: Broker<Z>,
broker_ready: BrokerReady,
) -> Result<Server<Z, MakeEdgeHubPacketProcessor<MakeMqttPacketProcessor>>>
where
Z: Authorizer + Send + 'static,
{
let broker_handle = broker.handle();
let make_processor = MakeEdgeHubPacketProcessor::new_default(broker_handle.clone());
let mut server = Server::from_broker(broker).with_packet_processor(make_processor);
// Add system transport to allow communication between edgehub components
let authenticator = LocalAuthenticator::new();
server.with_tcp(config.listener().system().addr(), authenticator, None)?;
// Add regular MQTT over TCP transport
let authenticator = EdgeHubAuthenticator::new(config.auth().url());
if let Some(tcp) = config.listener().tcp() {
let broker_ready = Some(broker_ready.signal());
server.with_tcp(tcp.addr(), authenticator.clone(), broker_ready)?;
}
// Add regular MQTT over TLS transport
if let Some(tls) = config.listener().tls() {
let identity = if let Some(config) = tls.certificate() {
info!("loading identity from {}", config.cert_path().display());
ServerCertificate::from_pem(config.cert_path(), config.private_key_path())
.with_context(|| {
ServerCertificateLoadError::File(
config.cert_path().to_path_buf(),
config.private_key_path().to_path_buf(),
)
})?
} else {
info!("downloading identity from edgelet");
download_server_certificate()
.await
.with_context(|| ServerCertificateLoadError::Edgelet)?
};
let broker_ready = Some(broker_ready.signal());
server.with_tls(tls.addr(), identity, authenticator.clone(), broker_ready)?;
};
Ok(server)
}
fn make_sidecars(
broker_handle: &BrokerHandle,
config: &Settings,
) -> Result<Vec<Box<dyn Sidecar + Send>>> {
let mut sidecars: Vec<Box<dyn Sidecar + Send>> = Vec::new();
let system_address = config.listener().system().addr().to_string();
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let settings = BridgeSettings::new()?;
let bridge_controller =
BridgeController::new(system_address.clone(), device_id.to_owned(), settings);
let bridge_controller_handle = bridge_controller.handle();
sidecars.push(Box::new(bridge_controller));
let mut command_handler = CommandHandler::new(system_address, &device_id);
command_handler.add_command(DisconnectCommand::new(&broker_handle));
command_handler.add_command(AuthorizedIdentitiesCommand::new(&broker_handle));
command_handler.add_command(PolicyUpdateCommand::new(broker_handle));
command_handler.add_command(BridgeUpdateCommand::new(bridge_controller_handle));
sidecars.push(Box::new(command_handler));
Ok(sidecars)
}
pub const WORKLOAD_URI: &str = "IOTEDGE_WORKLOADURI";
pub const EDGE_DEVICE_HOST_NAME: &str = "EdgeDeviceHostName";
pub const MODULE_ID: &str = "IOTEDGE_MODULEID";
pub const MODULE_GENERATION_ID: &str = "IOTEDGE_MODULEGENERATIONID";
pub const CERTIFICATE_VALIDITY_DAYS: i64 = 90;
async fn download_server_certificate() -> Result<ServerCertificate> {
let uri = env::var(WORKLOAD_URI).context(WORKLOAD_URI)?;
let hostname = env::var(EDGE_DEVICE_HOST_NAME).context(EDGE_DEVICE_HOST_NAME)?;
let module_id = env::var(MODULE_ID).context(MODULE_ID)?;
let generation_id = env::var(MODULE_GENERATION_ID).context(MODULE_GENERATION_ID)?;
let expiration = Utc::now() + Duration::days(CERTIFICATE_VALIDITY_DAYS);
let client = edgelet_client::workload(&uri)?;
let cert = client
.create_server_cert(&module_id, &generation_id, &hostname, expiration)
.await?;
if cert.private_key().type_() != "key" {
bail!(
"unknown type of private key: {}",
cert.private_key().type_()
);
}
if let Some(private_key) = cert.private_key().bytes() {
let identity = ServerCertificate::from_pem_pair(cert.certificate(), private_key)?;
Ok(identity)
} else {
bail!("missing private key");
}
}
fn shutdown_signal<Z, P>(server: &Server<Z, P>) -> impl Future<Output = ()> {
server
.listeners()
.iter()
.find_map(|listener| listener.transport().identity())
.map_or_else(
|| Either::Left(shutdown::shutdown()),
|identity| {
let system_or_cert_expired = future::select(
Box::pin(server_certificate_renewal(identity.not_after())),
Box::pin(shutdown::shutdown()),
);
Either::Right(system_or_cert_expired.map(drop))
},
)
}
async fn server_certificate_renewal(renew_at: DateTime<Utc>) {
let delay = renew_at - Utc::now();
if delay > Duration::zero() {
info!(
"scheduled server certificate renewal timer for {}",
renew_at
);
let delay = delay.to_std().expect("duration must not be negative");
crate::time::sleep(delay).await;
info!("restarting the broker to perform certificate renewal");
} else {
error!("server certificate expired at {}", renew_at);
}
}
#[derive(Debug, thiserror::Error)]
pub enum ServerCertificateLoadError {
#[error("unable to load server certificate from file {0} and private key {1}")]
File(PathBuf, PathBuf),
#[error("unable to download certificate from edgelet")]
Edgelet,
}
| {
settings.broker().persistence().time_interval()
} | identifier_body |
edgehub.rs | use std::{
env, fs,
future::Future,
path::{Path, PathBuf},
time::Duration as StdDuration,
};
use anyhow::{bail, Context, Result};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use futures_util::{
future::{self, Either},
FutureExt,
};
use tracing::{debug, error, info};
use mqtt_bridge::{settings::BridgeSettings, BridgeController};
use mqtt_broker::{
auth::Authorizer,
sidecar::{Sidecar, SidecarShutdownHandle},
Broker, BrokerBuilder, BrokerHandle, BrokerReady, BrokerSnapshot, FilePersistor,
MakeMqttPacketProcessor, Message, Persist, Server, ServerCertificate, SystemEvent,
VersionedFileFormat,
};
use mqtt_edgehub::{
auth::{
EdgeHubAuthenticator, EdgeHubAuthorizer, LocalAuthenticator, LocalAuthorizer,
PolicyAuthorizer,
},
command::{
AuthorizedIdentitiesCommand, BridgeUpdateCommand, CommandHandler, DisconnectCommand,
PolicyUpdateCommand,
},
connection::MakeEdgeHubPacketProcessor,
settings::Settings,
};
use super::{shutdown, Bootstrap};
const DEVICE_ID_ENV: &str = "IOTEDGE_DEVICEID";
const IOTHUB_HOSTNAME_ENV: &str = "IOTEDGE_IOTHUBHOSTNAME";
#[derive(Default)]
pub struct EdgeHubBootstrap {
broker_ready: BrokerReady,
}
#[async_trait]
impl Bootstrap for EdgeHubBootstrap {
type Settings = Settings;
fn load_config<P: AsRef<Path>>(&self, path: P) -> Result<Self::Settings> {
info!("loading settings from a file {}", path.as_ref().display());
Ok(Self::Settings::from_file(path)?)
}
type Authorizer = LocalAuthorizer<EdgeHubAuthorizer<PolicyAuthorizer>>;
async fn make_broker(
&self,
settings: &Self::Settings,
) -> Result<(Broker<Self::Authorizer>, FilePersistor<VersionedFileFormat>)> {
info!("loading state...");
let persistence_config = settings.broker().persistence();
let state_dir = persistence_config.file_path();
fs::create_dir_all(state_dir.clone())?;
let mut persistor = FilePersistor::new(state_dir, VersionedFileFormat::default());
let state = persistor.load().await?;
info!("state loaded.");
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let iothub_id = env::var(IOTHUB_HOSTNAME_ENV).context(IOTHUB_HOSTNAME_ENV)?;
let authorizer = LocalAuthorizer::new(EdgeHubAuthorizer::new(
PolicyAuthorizer::new(device_id.clone(), self.broker_ready.handle()),
device_id,
iothub_id,
self.broker_ready.handle(),
));
let broker = BrokerBuilder::default()
.with_authorizer(authorizer)
.with_state(state.unwrap_or_default())
.with_config(settings.broker().clone())
.build();
Ok((broker, persistor))
}
fn snapshot_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().persistence().time_interval()
}
fn session_expiration(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().expiration()
}
fn | (&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().cleanup_interval()
}
async fn run(
self,
config: Self::Settings,
broker: Broker<Self::Authorizer>,
) -> Result<BrokerSnapshot> {
let broker_handle = broker.handle();
let sidecars = make_sidecars(&broker_handle, &config)?;
info!("starting server...");
let server = make_server(config, broker, self.broker_ready).await?;
let shutdown_signal = shutdown_signal(&server);
let server = tokio::spawn(server.serve(shutdown_signal));
info!("starting sidecars...");
let mut shutdowns = Vec::new();
let mut sidecar_joins = Vec::new();
for sidecar in sidecars {
shutdowns.push(sidecar.shutdown_handle()?);
sidecar_joins.push(tokio::spawn(sidecar.run()));
}
let state = match future::select(server, future::select_all(sidecar_joins)).await {
// server exited first
Either::Left((snapshot, sidecars)) => {
// send shutdown event to each sidecar
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for at least one sidecar to finish
let (_res, _stopped, sidecars) = sidecars.await;
// wait for the rest to exit
future::join_all(sidecars).await;
snapshot??
}
// one of the sidecars exited first
Either::Right(((res, stopped, sidecars), server)) => {
debug!("a sidecar has stopped. shutting down all sidecars...");
if let Err(e) = res {
error!(message = "failed waiting for sidecar shutdown", error = %e);
}
// send a shutdown event to each of the remaining sidecars
shutdowns.remove(stopped);
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for the rest to exit
future::join_all(sidecars).await;
// signal server
broker_handle.send(Message::System(SystemEvent::Shutdown))?;
server.await??
}
};
Ok(state)
}
}
async fn make_server<Z>(
config: Settings,
broker: Broker<Z>,
broker_ready: BrokerReady,
) -> Result<Server<Z, MakeEdgeHubPacketProcessor<MakeMqttPacketProcessor>>>
where
Z: Authorizer + Send + 'static,
{
let broker_handle = broker.handle();
let make_processor = MakeEdgeHubPacketProcessor::new_default(broker_handle.clone());
let mut server = Server::from_broker(broker).with_packet_processor(make_processor);
// Add system transport to allow communication between edgehub components
let authenticator = LocalAuthenticator::new();
server.with_tcp(config.listener().system().addr(), authenticator, None)?;
// Add regular MQTT over TCP transport
let authenticator = EdgeHubAuthenticator::new(config.auth().url());
if let Some(tcp) = config.listener().tcp() {
let broker_ready = Some(broker_ready.signal());
server.with_tcp(tcp.addr(), authenticator.clone(), broker_ready)?;
}
// Add regular MQTT over TLS transport
if let Some(tls) = config.listener().tls() {
let identity = if let Some(config) = tls.certificate() {
info!("loading identity from {}", config.cert_path().display());
ServerCertificate::from_pem(config.cert_path(), config.private_key_path())
.with_context(|| {
ServerCertificateLoadError::File(
config.cert_path().to_path_buf(),
config.private_key_path().to_path_buf(),
)
})?
} else {
info!("downloading identity from edgelet");
download_server_certificate()
.await
.with_context(|| ServerCertificateLoadError::Edgelet)?
};
let broker_ready = Some(broker_ready.signal());
server.with_tls(tls.addr(), identity, authenticator.clone(), broker_ready)?;
};
Ok(server)
}
fn make_sidecars(
broker_handle: &BrokerHandle,
config: &Settings,
) -> Result<Vec<Box<dyn Sidecar + Send>>> {
let mut sidecars: Vec<Box<dyn Sidecar + Send>> = Vec::new();
let system_address = config.listener().system().addr().to_string();
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let settings = BridgeSettings::new()?;
let bridge_controller =
BridgeController::new(system_address.clone(), device_id.to_owned(), settings);
let bridge_controller_handle = bridge_controller.handle();
sidecars.push(Box::new(bridge_controller));
let mut command_handler = CommandHandler::new(system_address, &device_id);
command_handler.add_command(DisconnectCommand::new(&broker_handle));
command_handler.add_command(AuthorizedIdentitiesCommand::new(&broker_handle));
command_handler.add_command(PolicyUpdateCommand::new(broker_handle));
command_handler.add_command(BridgeUpdateCommand::new(bridge_controller_handle));
sidecars.push(Box::new(command_handler));
Ok(sidecars)
}
pub const WORKLOAD_URI: &str = "IOTEDGE_WORKLOADURI";
pub const EDGE_DEVICE_HOST_NAME: &str = "EdgeDeviceHostName";
pub const MODULE_ID: &str = "IOTEDGE_MODULEID";
pub const MODULE_GENERATION_ID: &str = "IOTEDGE_MODULEGENERATIONID";
pub const CERTIFICATE_VALIDITY_DAYS: i64 = 90;
async fn download_server_certificate() -> Result<ServerCertificate> {
let uri = env::var(WORKLOAD_URI).context(WORKLOAD_URI)?;
let hostname = env::var(EDGE_DEVICE_HOST_NAME).context(EDGE_DEVICE_HOST_NAME)?;
let module_id = env::var(MODULE_ID).context(MODULE_ID)?;
let generation_id = env::var(MODULE_GENERATION_ID).context(MODULE_GENERATION_ID)?;
let expiration = Utc::now() + Duration::days(CERTIFICATE_VALIDITY_DAYS);
let client = edgelet_client::workload(&uri)?;
let cert = client
.create_server_cert(&module_id, &generation_id, &hostname, expiration)
.await?;
if cert.private_key().type_() != "key" {
bail!(
"unknown type of private key: {}",
cert.private_key().type_()
);
}
if let Some(private_key) = cert.private_key().bytes() {
let identity = ServerCertificate::from_pem_pair(cert.certificate(), private_key)?;
Ok(identity)
} else {
bail!("missing private key");
}
}
fn shutdown_signal<Z, P>(server: &Server<Z, P>) -> impl Future<Output = ()> {
server
.listeners()
.iter()
.find_map(|listener| listener.transport().identity())
.map_or_else(
|| Either::Left(shutdown::shutdown()),
|identity| {
let system_or_cert_expired = future::select(
Box::pin(server_certificate_renewal(identity.not_after())),
Box::pin(shutdown::shutdown()),
);
Either::Right(system_or_cert_expired.map(drop))
},
)
}
async fn server_certificate_renewal(renew_at: DateTime<Utc>) {
let delay = renew_at - Utc::now();
if delay > Duration::zero() {
info!(
"scheduled server certificate renewal timer for {}",
renew_at
);
let delay = delay.to_std().expect("duration must not be negative");
crate::time::sleep(delay).await;
info!("restarting the broker to perform certificate renewal");
} else {
error!("server certificate expired at {}", renew_at);
}
}
#[derive(Debug, thiserror::Error)]
pub enum ServerCertificateLoadError {
#[error("unable to load server certificate from file {0} and private key {1}")]
File(PathBuf, PathBuf),
#[error("unable to download certificate from edgelet")]
Edgelet,
}
| session_cleanup_interval | identifier_name |
edgehub.rs | use std::{
env, fs,
future::Future,
path::{Path, PathBuf},
time::Duration as StdDuration,
};
use anyhow::{bail, Context, Result};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use futures_util::{
future::{self, Either},
FutureExt,
};
use tracing::{debug, error, info};
use mqtt_bridge::{settings::BridgeSettings, BridgeController};
use mqtt_broker::{
auth::Authorizer,
sidecar::{Sidecar, SidecarShutdownHandle},
Broker, BrokerBuilder, BrokerHandle, BrokerReady, BrokerSnapshot, FilePersistor,
MakeMqttPacketProcessor, Message, Persist, Server, ServerCertificate, SystemEvent,
VersionedFileFormat,
};
use mqtt_edgehub::{
auth::{
EdgeHubAuthenticator, EdgeHubAuthorizer, LocalAuthenticator, LocalAuthorizer,
PolicyAuthorizer,
},
command::{
AuthorizedIdentitiesCommand, BridgeUpdateCommand, CommandHandler, DisconnectCommand,
PolicyUpdateCommand,
},
connection::MakeEdgeHubPacketProcessor,
settings::Settings,
};
use super::{shutdown, Bootstrap};
const DEVICE_ID_ENV: &str = "IOTEDGE_DEVICEID";
const IOTHUB_HOSTNAME_ENV: &str = "IOTEDGE_IOTHUBHOSTNAME";
#[derive(Default)]
pub struct EdgeHubBootstrap {
broker_ready: BrokerReady,
}
#[async_trait]
impl Bootstrap for EdgeHubBootstrap {
type Settings = Settings;
fn load_config<P: AsRef<Path>>(&self, path: P) -> Result<Self::Settings> {
info!("loading settings from a file {}", path.as_ref().display());
Ok(Self::Settings::from_file(path)?)
}
type Authorizer = LocalAuthorizer<EdgeHubAuthorizer<PolicyAuthorizer>>;
async fn make_broker(
&self,
settings: &Self::Settings,
) -> Result<(Broker<Self::Authorizer>, FilePersistor<VersionedFileFormat>)> {
info!("loading state...");
let persistence_config = settings.broker().persistence();
let state_dir = persistence_config.file_path();
fs::create_dir_all(state_dir.clone())?;
let mut persistor = FilePersistor::new(state_dir, VersionedFileFormat::default());
let state = persistor.load().await?;
info!("state loaded.");
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let iothub_id = env::var(IOTHUB_HOSTNAME_ENV).context(IOTHUB_HOSTNAME_ENV)?;
let authorizer = LocalAuthorizer::new(EdgeHubAuthorizer::new(
PolicyAuthorizer::new(device_id.clone(), self.broker_ready.handle()),
device_id,
iothub_id,
self.broker_ready.handle(),
));
let broker = BrokerBuilder::default()
.with_authorizer(authorizer)
.with_state(state.unwrap_or_default())
.with_config(settings.broker().clone())
.build();
Ok((broker, persistor))
}
fn snapshot_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().persistence().time_interval()
}
fn session_expiration(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().expiration()
}
fn session_cleanup_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().cleanup_interval()
}
async fn run(
self,
config: Self::Settings,
broker: Broker<Self::Authorizer>,
) -> Result<BrokerSnapshot> {
let broker_handle = broker.handle();
let sidecars = make_sidecars(&broker_handle, &config)?;
info!("starting server...");
let server = make_server(config, broker, self.broker_ready).await?;
let shutdown_signal = shutdown_signal(&server);
let server = tokio::spawn(server.serve(shutdown_signal));
info!("starting sidecars...");
let mut shutdowns = Vec::new();
let mut sidecar_joins = Vec::new();
for sidecar in sidecars {
shutdowns.push(sidecar.shutdown_handle()?);
sidecar_joins.push(tokio::spawn(sidecar.run()));
}
let state = match future::select(server, future::select_all(sidecar_joins)).await {
// server exited first
Either::Left((snapshot, sidecars)) => {
// send shutdown event to each sidecar
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for at least one sidecar to finish
let (_res, _stopped, sidecars) = sidecars.await;
// wait for the rest to exit
future::join_all(sidecars).await;
snapshot??
}
// one of the sidecars exited first
Either::Right(((res, stopped, sidecars), server)) => |
};
Ok(state)
}
}
async fn make_server<Z>(
config: Settings,
broker: Broker<Z>,
broker_ready: BrokerReady,
) -> Result<Server<Z, MakeEdgeHubPacketProcessor<MakeMqttPacketProcessor>>>
where
Z: Authorizer + Send + 'static,
{
let broker_handle = broker.handle();
let make_processor = MakeEdgeHubPacketProcessor::new_default(broker_handle.clone());
let mut server = Server::from_broker(broker).with_packet_processor(make_processor);
// Add system transport to allow communication between edgehub components
let authenticator = LocalAuthenticator::new();
server.with_tcp(config.listener().system().addr(), authenticator, None)?;
// Add regular MQTT over TCP transport
let authenticator = EdgeHubAuthenticator::new(config.auth().url());
if let Some(tcp) = config.listener().tcp() {
let broker_ready = Some(broker_ready.signal());
server.with_tcp(tcp.addr(), authenticator.clone(), broker_ready)?;
}
// Add regular MQTT over TLS transport
if let Some(tls) = config.listener().tls() {
let identity = if let Some(config) = tls.certificate() {
info!("loading identity from {}", config.cert_path().display());
ServerCertificate::from_pem(config.cert_path(), config.private_key_path())
.with_context(|| {
ServerCertificateLoadError::File(
config.cert_path().to_path_buf(),
config.private_key_path().to_path_buf(),
)
})?
} else {
info!("downloading identity from edgelet");
download_server_certificate()
.await
.with_context(|| ServerCertificateLoadError::Edgelet)?
};
let broker_ready = Some(broker_ready.signal());
server.with_tls(tls.addr(), identity, authenticator.clone(), broker_ready)?;
};
Ok(server)
}
fn make_sidecars(
broker_handle: &BrokerHandle,
config: &Settings,
) -> Result<Vec<Box<dyn Sidecar + Send>>> {
let mut sidecars: Vec<Box<dyn Sidecar + Send>> = Vec::new();
let system_address = config.listener().system().addr().to_string();
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let settings = BridgeSettings::new()?;
let bridge_controller =
BridgeController::new(system_address.clone(), device_id.to_owned(), settings);
let bridge_controller_handle = bridge_controller.handle();
sidecars.push(Box::new(bridge_controller));
let mut command_handler = CommandHandler::new(system_address, &device_id);
command_handler.add_command(DisconnectCommand::new(&broker_handle));
command_handler.add_command(AuthorizedIdentitiesCommand::new(&broker_handle));
command_handler.add_command(PolicyUpdateCommand::new(broker_handle));
command_handler.add_command(BridgeUpdateCommand::new(bridge_controller_handle));
sidecars.push(Box::new(command_handler));
Ok(sidecars)
}
pub const WORKLOAD_URI: &str = "IOTEDGE_WORKLOADURI";
pub const EDGE_DEVICE_HOST_NAME: &str = "EdgeDeviceHostName";
pub const MODULE_ID: &str = "IOTEDGE_MODULEID";
pub const MODULE_GENERATION_ID: &str = "IOTEDGE_MODULEGENERATIONID";
pub const CERTIFICATE_VALIDITY_DAYS: i64 = 90;
async fn download_server_certificate() -> Result<ServerCertificate> {
let uri = env::var(WORKLOAD_URI).context(WORKLOAD_URI)?;
let hostname = env::var(EDGE_DEVICE_HOST_NAME).context(EDGE_DEVICE_HOST_NAME)?;
let module_id = env::var(MODULE_ID).context(MODULE_ID)?;
let generation_id = env::var(MODULE_GENERATION_ID).context(MODULE_GENERATION_ID)?;
let expiration = Utc::now() + Duration::days(CERTIFICATE_VALIDITY_DAYS);
let client = edgelet_client::workload(&uri)?;
let cert = client
.create_server_cert(&module_id, &generation_id, &hostname, expiration)
.await?;
if cert.private_key().type_() != "key" {
bail!(
"unknown type of private key: {}",
cert.private_key().type_()
);
}
if let Some(private_key) = cert.private_key().bytes() {
let identity = ServerCertificate::from_pem_pair(cert.certificate(), private_key)?;
Ok(identity)
} else {
bail!("missing private key");
}
}
fn shutdown_signal<Z, P>(server: &Server<Z, P>) -> impl Future<Output = ()> {
server
.listeners()
.iter()
.find_map(|listener| listener.transport().identity())
.map_or_else(
|| Either::Left(shutdown::shutdown()),
|identity| {
let system_or_cert_expired = future::select(
Box::pin(server_certificate_renewal(identity.not_after())),
Box::pin(shutdown::shutdown()),
);
Either::Right(system_or_cert_expired.map(drop))
},
)
}
async fn server_certificate_renewal(renew_at: DateTime<Utc>) {
let delay = renew_at - Utc::now();
if delay > Duration::zero() {
info!(
"scheduled server certificate renewal timer for {}",
renew_at
);
let delay = delay.to_std().expect("duration must not be negative");
crate::time::sleep(delay).await;
info!("restarting the broker to perform certificate renewal");
} else {
error!("server certificate expired at {}", renew_at);
}
}
#[derive(Debug, thiserror::Error)]
pub enum ServerCertificateLoadError {
#[error("unable to load server certificate from file {0} and private key {1}")]
File(PathBuf, PathBuf),
#[error("unable to download certificate from edgelet")]
Edgelet,
}
| {
debug!("a sidecar has stopped. shutting down all sidecars...");
if let Err(e) = res {
error!(message = "failed waiting for sidecar shutdown", error = %e);
}
// send a shutdown event to each of the remaining sidecars
shutdowns.remove(stopped);
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for the rest to exit
future::join_all(sidecars).await;
// signal server
broker_handle.send(Message::System(SystemEvent::Shutdown))?;
server.await??
} | conditional_block |
cli.rs | fd: c_int) -> IoResult<Self> {
let mut attrs = MaybeUninit::uninit();
to_io_result(unsafe { libc::tcgetattr(fd, attrs.as_mut_ptr()) })?;
Ok(TerminalAttributes {
inner: unsafe { attrs.assume_init() },
})
}
/// Create a new TerminalAttributes, with an "empty" state (no flags
/// enabled).
pub fn new_empty() -> Self {
TerminalAttributes {
inner: unsafe { MaybeUninit::zeroed().assume_init() },
}
}
fn apply(&self, fd: c_int) -> IoResult<()> {
to_io_result(unsafe { libc::tcsetattr(fd, libc::TCSANOW, &self.inner) })
}
/// Test whether or not the given `TerminalFlag` is currently enabled.
pub fn is_enabled(&self, flag: TerminalFlag) -> bool {
self.inner.c_lflag & flag.to_value() != 0
}
}
impl PartialEq for TerminalAttributes {
fn eq(&self, other: &Self) -> bool {
self.inner.c_iflag == other.inner.c_iflag
&& self.inner.c_oflag == other.inner.c_oflag
&& self.inner.c_cflag == other.inner.c_cflag
&& self.inner.c_lflag == other.inner.c_lflag
&& self.inner.c_line == other.inner.c_line
&& self.inner.c_cc == other.inner.c_cc
&& self.inner.c_ispeed == other.inner.c_ispeed
&& self.inner.c_ospeed == other.inner.c_ospeed
}
}
impl Eq for TerminalAttributes {}
fn debug_format_flag_field(
v: libc::tcflag_t,
fs: &'static [(&'static str, libc::tcflag_t)],
) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
let mut remaining_v: libc::tcflag_t = v;
let mut s = String::new();
for &(fname, fvalue) in fs {
if (v & fvalue)!= 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}",
match was_empty {
true => "",
false => " | ",
},
fname
)?;
remaining_v &= !fvalue;
}
}
if remaining_v != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}(extra: {:x})",
match was_empty {
true => "",
false => " ",
},
remaining_v
)?;
}
Ok(s)
}
fn debug_format_c_cc_field(c_cc: &[libc::cc_t; 32]) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
const INDICES: &'static [(&'static str, usize)] = &[
("VDISCARD", libc::VDISCARD),
("VEOF", libc::VEOF),
("VEOL", libc::VEOL),
("VEOL2", libc::VEOL2),
("VERASE", libc::VERASE),
("VINTR", libc::VINTR),
("VKILL", libc::VKILL),
("VLNEXT", libc::VLNEXT),
("VMIN", libc::VMIN),
("VQUIT", libc::VQUIT),
("VREPRINT", libc::VREPRINT),
("VSTART", libc::VSTART),
("VSTOP", libc::VSTOP),
("VSUSP", libc::VSUSP),
("VSWTC", libc::VSWTC),
("VTIME", libc::VTIME),
("VWERASE", libc::VWERASE),
];
let mut s = String::new();
for &(name, idx) in INDICES {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}:{}",
match was_empty {
true => "",
false => ", ",
},
name,
c_cc[idx]
)?;
}
Ok(s)
}
impl fmt::Debug for TerminalAttributes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TerminalAttributes")
.field(
"c_iflag",
&debug_format_flag_field(
self.inner.c_iflag,
&[
("IGNBRK", libc::IGNBRK),
("BRKINT", libc::BRKINT),
("IGNPAR", libc::IGNPAR),
("PARMRK", libc::PARMRK),
("INPCK", libc::INPCK),
("ISTRIP", libc::ISTRIP),
("INLCR", libc::INLCR),
("IGNCR", libc::IGNCR),
("ICRNL", libc::ICRNL),
("IXON", libc::IXON),
("IXANY", libc::IXANY),
("IXOFF", libc::IXOFF),
("IMAXBEL", libc::IMAXBEL),
("IUTF8", libc::IUTF8),
],
)?,
)
.field(
"c_oflag",
&debug_format_flag_field(
self.inner.c_oflag,
&[
("OPOST", libc::OPOST),
("OLCUC", libc::OLCUC),
("ONLCR", libc::ONLCR),
("ONOCR", libc::ONOCR),
("ONLRET", libc::ONLRET),
("OFILL", libc::OFILL),
("OFDEL", libc::OFDEL),
("NLDLY", libc::NLDLY),
("CRDLY", libc::CRDLY),
("TABDLY", libc::TABDLY),
("BSDLY", libc::BSDLY),
("VTDLY", libc::VTDLY),
("FFDLY", libc::FFDLY),
],
)?,
)
.field(
"c_cflag",
&debug_format_flag_field(
self.inner.c_cflag,
&[
("CBAUD", libc::CBAUD),
("CBAUDEX", libc::CBAUDEX),
("CSIZE", libc::CSIZE),
("CSTOPB", libc::CSTOPB),
("CREAD", libc::CREAD),
("PARENB", libc::PARENB),
("PARODD", libc::PARODD),
("HUPCL", libc::HUPCL),
("CLOCAL", libc::CLOCAL),
("CIBAUD", libc::CIBAUD),
("CMSPAR", libc::CMSPAR),
("CRTSCTS", libc::CRTSCTS),
],
)?,
)
.field(
"c_lflag",
&debug_format_flag_field(
self.inner.c_lflag,
&[
("ISIG", libc::ISIG),
("ICANON", libc::ICANON),
("ECHO", libc::ECHO),
("ECHOE", libc::ECHOE),
("ECHOK", libc::ECHOK),
("ECHONL", libc::ECHONL),
("ECHOCTL", libc::ECHOCTL),
("ECHOPRT", libc::ECHOPRT),
("ECHOKE", libc::ECHOKE),
("FLUSHO", libc::FLUSHO),
("NOFLSH", libc::NOFLSH),
("TOSTOP", libc::TOSTOP),
("PENDIN", libc::PENDIN),
("IEXTEN", libc::IEXTEN),
],
)?,
)
.field("c_cc", &debug_format_c_cc_field(&self.inner.c_cc)?)
.field("c_ispeed", unsafe { &libc::cfgetispeed(&self.inner) })
.field("c_ospeed", unsafe { &libc::cfgetospeed(&self.inner) })
.finish()
}
}
impl AbstractTerminalAttributes for TerminalAttributes {
fn enable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag |= flag.to_value();
}
fn disable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag &= !flag.to_value();
}
}
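// Hedged sketch (added): the enable/disable/is_enabled round trip on an
// empty attribute set, using only items defined in this module.
#[allow(dead_code)]
fn example_flag_roundtrip() {
    let mut attrs = TerminalAttributes::new_empty();
    attrs.enable(TerminalFlag::Echo);
    debug_assert!(attrs.is_enabled(TerminalFlag::Echo));
    attrs.disable(TerminalFlag::Echo);
    debug_assert!(!attrs.is_enabled(TerminalFlag::Echo));
}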
/// This trait describes an abstract input or output stream.
///
/// This trait primarily exists for testing purposes. In almost all cases, users
/// will instead just use the concrete type `Stream` defined below.
pub trait AbstractStream {
/// A type which describes the attributes of this stream / terminal.
type Attributes: AbstractTerminalAttributes + fmt::Debug;
/// Returns whether or not this stream refers to an interactive terminal (a
/// TTY), as opposed to, for example, a pipe.
fn isatty(&self) -> bool;
/// Retrieve the current attributes of this stream / terminal.
fn get_attributes(&self) -> IoResult<Self::Attributes>;
/// Modify this stream's / terminal's attributes to match the given state.
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()>;
/// Return a `Read` for this stream, if reading is supported.
fn as_reader(&self) -> Option<Box<dyn Read>>;
/// Return a `Write` for this stream, if writing is supported.
fn as_writer(&self) -> Option<Box<dyn Write>>;
}
/// Standard input / output streams.
#[derive(Debug)]
pub enum Stream {
/// Standard output.
Stdout,
/// Standard error.
Stderr,
/// Standard input.
Stdin,
}
impl Stream {
fn to_fd(&self) -> c_int |
}
impl AbstractStream for Stream {
type Attributes = TerminalAttributes;
fn isatty(&self) -> bool {
let ret = unsafe { libc::isatty(self.to_fd()) };
let error: i32 = errno::errno().into();
match ret {
1 => true,
0 => match error {
libc::EBADF => false,
libc::ENOTTY => false,
_ => {
debug!(
"Unrecognized isatty errno: {}; assuming {:?} is not a TTY",
error, *self
);
false
}
},
_ => {
debug!(
"Unrecognized isatty return code: {}; assuming {:?} is not a TTY",
ret, *self
);
false
}
}
}
fn get_attributes(&self) -> IoResult<Self::Attributes> {
TerminalAttributes::new(self.to_fd())
}
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()> {
let ret = attributes.apply(self.to_fd());
debug_assert!(ret.is_err() || *attributes == Self::Attributes::new(self.to_fd()).unwrap());
ret
}
fn as_reader(&self) -> Option<Box<dyn Read>> {
match *self {
Stream::Stdin => Some(Box::new(io::stdin())),
_ => None,
}
}
fn as_writer(&self) -> Option<Box<dyn Write>> {
match *self {
Stream::Stdout => Some(Box::new(io::stdout())),
Stream::Stderr => Some(Box::new(io::stderr())),
_ => None,
}
}
}
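// Hedged sketch (added): gate interactive behavior on both streams being
// real TTYs, mirroring the checks `require_isatty` performs further down.
#[allow(dead_code)]
fn example_can_prompt_interactively() -> bool {
    Stream::Stdin.isatty() && Stream::Stderr.isatty()
}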
/// This structure handles a) disabling the echoing of characters typed to
/// `Stdin`, and b) remembering to reset the terminal attributes afterwards
/// (via `Drop`).
struct DisableEcho<'s, S: AbstractStream> {
stream: &'s mut S,
initial_attributes: S::Attributes,
}
impl<'s, S: AbstractStream> DisableEcho<'s, S> {
fn new(stream: &'s mut S) -> Result<Self> {
let initial_attributes = stream.get_attributes()?;
debug!("Initial stream attributes: {:#?}", initial_attributes);
let mut attributes = stream.get_attributes()?;
// Don't echo characters typed to stdin.
attributes.disable(TerminalFlag::Echo);
// But, *do* echo the newline when the user hits ENTER.
attributes.enable(TerminalFlag::EchoNewlines);
debug!("Setting attributes to: {:#?}", attributes);
stream.set_attributes(&attributes)?;
Ok(DisableEcho {
stream: stream,
initial_attributes: initial_attributes,
})
}
}
impl<'s, S: AbstractStream> Drop for DisableEcho<'s, S> {
fn drop(&mut self) {
self.stream
.set_attributes(&self.initial_attributes)
.unwrap();
}
}
fn require_isatty<S: AbstractStream>(s: &mut S) -> Result<()> {
if !s.isatty() {
Err(Error::Precondition(format!(
"cannot prompt interactively when the I/O streams are not TTYs"
)))
} else {
Ok(())
}
}
fn build_input_reader<IS: AbstractStream>(
input_stream: &mut IS,
) -> Result<io::BufReader<Box<dyn Read>>> {
require_isatty(input_stream)?;
Ok(io::BufReader::new(match input_stream.as_reader() {
None => {
return Err(Error::Precondition(format!(
"the given input stream must support `Read`"
)))
}
Some(r) => r,
}))
}
fn remove_newline(mut s: String) -> Result<String> {
// Remove the trailing newline (if any - not finding one is an error).
if !s.ends_with('\n') {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected end of input").into());
}
s.pop();
// If this is windows and so there's also a \r, remove that too.
if s.ends_with('\r') {
s.pop();
}
Ok(s)
}
fn prompt_for_string_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
// We have to take the reader as a parameter, since it must be "global",
// even if this function is e.g. called in a loop. Otherwise, because it's
// buffered, we might buffer some input and then discard it.
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
use io::BufRead;
require_isatty(output_stream)?;
// It's fine to construct a separate writer, potentially on each loop
// iteration or whatever, because we flush immediately, and don't do any
// buffering.
let mut writer = match output_stream.as_writer() {
None => {
return Err(Error::Precondition(format!(
"the given output stream must support `Write`"
)))
}
Some(w) => w,
};
write!(writer, "{}", prompt)?;
// We have to flush so the user sees the prompt immediately.
writer.flush()?;
Ok({
let _disable_echo = match is_sensitive {
false => None,
true => Some(DisableEcho::new(input_stream)?),
};
let mut ret = String::new();
input_reader.read_line(&mut ret)?;
remove_newline(ret)?
})
}
/// Prompt the user for a string (read from the given input stream) using the
/// given output stream (typically standard output or standard error) to display
/// the given prompt message.
///
/// If `is_sensitive` is true, then the user's characters will not be echoed back
/// (e.g. this will behave like a password prompt).
///
/// Note that there are various requirements for the given streams, and this
/// function will return an error if any of them are not met:
///
/// - Both `input_stream` and `output_stream` must be TTYs.
/// - `input_stream` must return a valid `Read` instance.
/// - `output_stream` must return a valid `Write` instance.
pub fn prompt_for_string<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
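// Hedged usage sketch (added): a plain, echoed prompt on the controlling
// terminal. The crate-level `Result` alias used throughout is assumed.
#[allow(dead_code)]
fn example_prompt_username() -> Result<String> {
    prompt_for_string(Stream::Stdin, Stream::Stderr, "Username: ", false)
}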
fn prompt_for_string_confirm_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
loop {
let string = prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
prompt,
is_sensitive,
)?;
if string
== prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
"Confirm: ",
is_sensitive,
)?
{
return Ok(string);
}
}
}
/// Prompt for a string as per `prompt_for_string`, but additionally have the
/// user enter the value again to confirm we get the same answer twice. This is
/// useful for e.g. password entry.
pub fn prompt_for_string_confirm<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_confirm_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
/// MaybePromptedString is a wrapper for | {
match *self {
Stream::Stdout => libc::STDOUT_FILENO,
Stream::Stderr => libc::STDERR_FILENO,
Stream::Stdin => libc::STDIN_FILENO,
}
} | identifier_body |
cli.rs | fd: c_int) -> IoResult<Self> {
let mut attrs = MaybeUninit::uninit();
to_io_result(unsafe { libc::tcgetattr(fd, attrs.as_mut_ptr()) })?;
Ok(TerminalAttributes {
inner: unsafe { attrs.assume_init() },
})
}
/// Create a new TerminalAttributes, with an "empty" state (no flags
/// enabled).
pub fn new_empty() -> Self {
TerminalAttributes {
inner: unsafe { MaybeUninit::zeroed().assume_init() },
}
}
fn apply(&self, fd: c_int) -> IoResult<()> {
to_io_result(unsafe { libc::tcsetattr(fd, libc::TCSANOW, &self.inner) })
}
/// Test whether or not the given `TerminalFlag` is currently enabled.
pub fn is_enabled(&self, flag: TerminalFlag) -> bool {
self.inner.c_lflag & flag.to_value() != 0
}
}
impl PartialEq for TerminalAttributes {
fn eq(&self, other: &Self) -> bool {
self.inner.c_iflag == other.inner.c_iflag
&& self.inner.c_oflag == other.inner.c_oflag
&& self.inner.c_cflag == other.inner.c_cflag
&& self.inner.c_lflag == other.inner.c_lflag
&& self.inner.c_line == other.inner.c_line
&& self.inner.c_cc == other.inner.c_cc
&& self.inner.c_ispeed == other.inner.c_ispeed
&& self.inner.c_ospeed == other.inner.c_ospeed
}
}
impl Eq for TerminalAttributes {}
fn debug_format_flag_field(
v: libc::tcflag_t,
fs: &'static [(&'static str, libc::tcflag_t)],
) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
let mut remaining_v: libc::tcflag_t = v;
let mut s = String::new();
for &(fname, fvalue) in fs {
if (v & fvalue)!= 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}",
match was_empty {
true => "",
false => " | ",
},
fname
)?;
remaining_v &= !fvalue;
}
}
if remaining_v != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}(extra: {:x})",
match was_empty {
true => "",
false => " ",
},
remaining_v
)?;
}
Ok(s)
}
fn debug_format_c_cc_field(c_cc: &[libc::cc_t; 32]) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
const INDICES: &'static [(&'static str, usize)] = &[
("VDISCARD", libc::VDISCARD),
("VEOF", libc::VEOF),
("VEOL", libc::VEOL),
("VEOL2", libc::VEOL2),
("VERASE", libc::VERASE),
("VINTR", libc::VINTR),
("VKILL", libc::VKILL),
("VLNEXT", libc::VLNEXT),
("VMIN", libc::VMIN),
("VQUIT", libc::VQUIT),
("VREPRINT", libc::VREPRINT),
("VSTART", libc::VSTART),
("VSTOP", libc::VSTOP),
("VSUSP", libc::VSUSP),
("VSWTC", libc::VSWTC),
("VTIME", libc::VTIME),
("VWERASE", libc::VWERASE),
];
let mut s = String::new();
for &(name, idx) in INDICES {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}:{}",
match was_empty {
true => "",
false => ", ",
},
name,
c_cc[idx]
)?;
}
Ok(s)
}
impl fmt::Debug for TerminalAttributes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TerminalAttributes")
.field(
"c_iflag",
&debug_format_flag_field(
self.inner.c_iflag,
&[
("IGNBRK", libc::IGNBRK),
("BRKINT", libc::BRKINT),
("IGNPAR", libc::IGNPAR),
("PARMRK", libc::PARMRK),
("INPCK", libc::INPCK),
("ISTRIP", libc::ISTRIP),
("INLCR", libc::INLCR),
("IGNCR", libc::IGNCR),
("ICRNL", libc::ICRNL),
("IXON", libc::IXON),
("IXANY", libc::IXANY),
("IXOFF", libc::IXOFF),
("IMAXBEL", libc::IMAXBEL),
("IUTF8", libc::IUTF8),
],
)?,
)
.field(
"c_oflag",
&debug_format_flag_field(
self.inner.c_oflag,
&[
("OPOST", libc::OPOST),
("OLCUC", libc::OLCUC),
("ONLCR", libc::ONLCR),
("ONOCR", libc::ONOCR),
("ONLRET", libc::ONLRET),
("OFILL", libc::OFILL),
("OFDEL", libc::OFDEL),
("NLDLY", libc::NLDLY),
("CRDLY", libc::CRDLY),
("TABDLY", libc::TABDLY),
("BSDLY", libc::BSDLY),
("VTDLY", libc::VTDLY),
("FFDLY", libc::FFDLY),
],
)?,
)
.field(
"c_cflag",
&debug_format_flag_field(
self.inner.c_cflag,
&[
("CBAUD", libc::CBAUD),
("CBAUDEX", libc::CBAUDEX),
("CSIZE", libc::CSIZE),
("CSTOPB", libc::CSTOPB),
("CREAD", libc::CREAD),
("PARENB", libc::PARENB),
("PARODD", libc::PARODD),
("HUPCL", libc::HUPCL),
("CLOCAL", libc::CLOCAL),
("CIBAUD", libc::CIBAUD),
("CMSPAR", libc::CMSPAR),
("CRTSCTS", libc::CRTSCTS),
],
)?,
)
.field(
"c_lflag",
&debug_format_flag_field(
self.inner.c_lflag,
&[
("ISIG", libc::ISIG),
("ICANON", libc::ICANON),
("ECHO", libc::ECHO),
("ECHOE", libc::ECHOE),
("ECHOK", libc::ECHOK),
("ECHONL", libc::ECHONL),
("ECHOCTL", libc::ECHOCTL),
("ECHOPRT", libc::ECHOPRT),
("ECHOKE", libc::ECHOKE),
("FLUSHO", libc::FLUSHO),
("NOFLSH", libc::NOFLSH),
("TOSTOP", libc::TOSTOP),
("PENDIN", libc::PENDIN),
("IEXTEN", libc::IEXTEN),
],
)?,
)
.field("c_cc", &debug_format_c_cc_field(&self.inner.c_cc)?)
.field("c_ispeed", unsafe { &libc::cfgetispeed(&self.inner) })
.field("c_ospeed", unsafe { &libc::cfgetospeed(&self.inner) })
.finish()
}
}
impl AbstractTerminalAttributes for TerminalAttributes {
fn enable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag |= flag.to_value();
}
fn disable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag &= !flag.to_value();
}
}
/// This trait describes an abstract input or output stream.
///
/// This trait primarily exists for testing purposes. In almost all cases, users
/// will instead just use the concrete type `Stream` defined below.
pub trait AbstractStream {
/// A type which describes the attributes of this stream / terminal.
type Attributes: AbstractTerminalAttributes + fmt::Debug;
/// Returns whether or not this stream refers to an interactive terminal (a
/// TTY), as opposed to, for example, a pipe.
fn isatty(&self) -> bool;
/// Retrieve the current attributes of this stream / terminal.
fn get_attributes(&self) -> IoResult<Self::Attributes>;
/// Modify this stream's / terminal's attributes to match the given state.
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()>;
/// Return a `Read` for this stream, if reading is supported.
fn as_reader(&self) -> Option<Box<dyn Read>>;
/// Return a `Write` for this stream, if writing is supported.
fn as_writer(&self) -> Option<Box<dyn Write>>;
}
/// Standard input / output streams.
#[derive(Debug)]
pub enum Stream {
/// Standard output.
Stdout,
/// Standard error.
Stderr,
/// Standard input.
Stdin,
}
impl Stream {
fn to_fd(&self) -> c_int {
match *self {
Stream::Stdout => libc::STDOUT_FILENO,
Stream::Stderr => libc::STDERR_FILENO,
Stream::Stdin => libc::STDIN_FILENO,
}
}
}
impl AbstractStream for Stream {
type Attributes = TerminalAttributes;
fn isatty(&self) -> bool {
let ret = unsafe { libc::isatty(self.to_fd()) };
let error: i32 = errno::errno().into();
match ret {
1 => true,
0 => match error {
libc::EBADF => false,
libc::ENOTTY => false,
_ => {
debug!(
"Unrecognized isatty errno: {}; assuming {:?} is not a TTY",
error, *self
);
false
}
},
_ => {
debug!(
"Unrecognized isatty return code: {}; assuming {:?} is not a TTY",
ret, *self
);
false
}
}
}
fn get_attributes(&self) -> IoResult<Self::Attributes> {
TerminalAttributes::new(self.to_fd())
}
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()> {
let ret = attributes.apply(self.to_fd());
debug_assert!(ret.is_err() || *attributes == Self::Attributes::new(self.to_fd()).unwrap());
ret
}
fn as_reader(&self) -> Option<Box<dyn Read>> {
match *self {
Stream::Stdin => Some(Box::new(io::stdin())),
_ => None,
}
}
fn as_writer(&self) -> Option<Box<dyn Write>> {
match *self {
Stream::Stdout => Some(Box::new(io::stdout())),
Stream::Stderr => Some(Box::new(io::stderr())),
_ => None,
}
}
}
/// This structure handles a) disabling the echoing of characters typed to
/// `Stdin`, and b) remembering to reset the terminal attributes afterwards
/// (via `Drop`).
struct DisableEcho<'s, S: AbstractStream> {
stream: &'s mut S,
initial_attributes: S::Attributes,
}
impl<'s, S: AbstractStream> DisableEcho<'s, S> {
fn new(stream: &'s mut S) -> Result<Self> {
let initial_attributes = stream.get_attributes()?;
debug!("Initial stream attributes: {:#?}", initial_attributes);
let mut attributes = stream.get_attributes()?;
// Don't echo characters typed to stdin.
attributes.disable(TerminalFlag::Echo);
// But, *do* echo the newline when the user hits ENTER.
attributes.enable(TerminalFlag::EchoNewlines);
debug!("Setting attributes to: {:#?}", attributes);
stream.set_attributes(&attributes)?;
Ok(DisableEcho {
stream: stream,
initial_attributes: initial_attributes,
})
}
}
impl<'s, S: AbstractStream> Drop for DisableEcho<'s, S> {
fn drop(&mut self) {
self.stream
.set_attributes(&self.initial_attributes)
.unwrap();
}
}
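// Illustrative sketch (the function below is hypothetical, not from the
// original source): the guard returned by `DisableEcho::new` is bound to a
// local so that the `Drop` impl above restores the initial attributes when
// the scope ends, even on early return.
#[allow(dead_code)]
fn with_echo_disabled<S: AbstractStream>(stream: &mut S) -> Result<()> {
    let _guard = DisableEcho::new(stream)?;
    // ... read from the stream while echo is off ...
    Ok(())
    // `_guard` drops here and the initial attributes are re-applied
}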
fn require_isatty<S: AbstractStream>(s: &mut S) -> Result<()> {
if !s.isatty() {
Err(Error::Precondition(format!(
"cannot prompt interactively when the I/O streams are not TTYs"
)))
} else {
Ok(())
}
}
fn build_input_reader<IS: AbstractStream>(
input_stream: &mut IS,
) -> Result<io::BufReader<Box<dyn Read>>> {
require_isatty(input_stream)?;
Ok(io::BufReader::new(match input_stream.as_reader() {
None => {
return Err(Error::Precondition(format!(
"the given input stream must support `Read`"
)))
}
Some(r) => r,
}))
}
fn remove_newline(mut s: String) -> Result<String> {
// Remove the trailing newline (if any - not finding one is an error).
if !s.ends_with('\n') {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected end of input").into());
}
s.pop();
// If this is windows and so there's also a \r, remove that too.
if s.ends_with('\r') {
s.pop();
}
Ok(s)
}
fn prompt_for_string_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
// We have to take the reader as a parameter, since it must be "global",
// even if this function is e.g. called in a loop. Otherwise, because it's
// buffered, we might buffer some input and then discard it.
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
use io::BufRead;
require_isatty(output_stream)?;
// It's fine to construct a separate writer, potentially on each loop
// iteration or whatever, because we flush immediately, and don't do any
// buffering.
let mut writer = match output_stream.as_writer() {
None => {
return Err(Error::Precondition(format!(
"the given output stream must support `Write`"
)))
}
Some(w) => w,
};
write!(writer, "{}", prompt)?;
// We have to flush so the user sees the prompt immediately.
writer.flush()?;
Ok({
let _disable_echo = match is_sensitive {
false => None,
true => Some(DisableEcho::new(input_stream)?),
};
let mut ret = String::new();
input_reader.read_line(&mut ret)?;
remove_newline(ret)?
})
}
/// Prompt the user for a string (read from the given input stream) using the
/// given output stream (typically standard output or standard error) to display
/// the given prompt message.
///
/// If `is_sensitive` is true, then the user's characters will not be echoed back
/// (e.g. this will behave like a password prompt).
///
/// Note that there are various requirements for the given streams, and this
/// function will return an error if any of them are not met:
///
/// - Both `input_stream` and `output_stream` must be TTYs.
/// - `input_stream` must return a valid `Read` instance.
/// - `output_stream` must return a valid `Write` instance.
pub fn prompt_for_string<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
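// Usage sketch (this call site is hypothetical, not from the original
// source): prompting on stderr keeps stdout clean for pipeable output.
#[allow(dead_code)]
fn ask_for_password() -> Result<String> {
    prompt_for_string(Stream::Stdin, Stream::Stderr, "Password: ", true)
}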
fn prompt_for_string_confirm_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
loop {
let string = prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
prompt,
is_sensitive,
)?;
if string
== prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
"Confirm: ",
is_sensitive,
)?
{
return Ok(string);
}
}
}
/// Prompt for a string as per `prompt_for_string`, but additionally have the
/// user enter the value again to confirm we get the same answer twice. This is
/// useful for e.g. password entry.
pub fn prompt_for_string_confirm<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_confirm_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
/// MaybePromptedString is a wrapper for
cli.rs

impl TerminalAttributes {
// (the impl header and function name were cut off at this row boundary;
// `fn new` is reconstructed from the call site `TerminalAttributes::new` below)
fn new(fd: c_int) -> IoResult<Self> {
let mut attrs = MaybeUninit::uninit();
to_io_result(unsafe { libc::tcgetattr(fd, attrs.as_mut_ptr()) })?;
Ok(TerminalAttributes {
inner: unsafe { attrs.assume_init() },
})
}
/// Create a new TerminalAttributes, with an "empty" state (no flags
/// enabled).
pub fn new_empty() -> Self {
TerminalAttributes {
inner: unsafe { MaybeUninit::zeroed().assume_init() },
}
}
fn apply(&self, fd: c_int) -> IoResult<()> {
to_io_result(unsafe { libc::tcsetattr(fd, libc::TCSANOW, &self.inner) })
}
/// Test whether or not the given `TerminalFlag` is currently enabled.
pub fn is_enabled(&self, flag: TerminalFlag) -> bool {
self.inner.c_lflag & flag.to_value() != 0
}
}
impl PartialEq for TerminalAttributes {
fn eq(&self, other: &Self) -> bool {
self.inner.c_iflag == other.inner.c_iflag
&& self.inner.c_oflag == other.inner.c_oflag
&& self.inner.c_cflag == other.inner.c_cflag
&& self.inner.c_lflag == other.inner.c_lflag
&& self.inner.c_line == other.inner.c_line
&& self.inner.c_cc == other.inner.c_cc
&& self.inner.c_ispeed == other.inner.c_ispeed
&& self.inner.c_ospeed == other.inner.c_ospeed
}
}
impl Eq for TerminalAttributes {}
fn debug_format_flag_field(
v: libc::tcflag_t,
fs: &'static [(&'static str, libc::tcflag_t)],
) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
let mut remaining_v: libc::tcflag_t = v;
let mut s = String::new();
for &(fname, fvalue) in fs {
if (v & fvalue) != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}",
match was_empty {
true => "",
false => " | ",
},
fname
)?;
remaining_v &= !fvalue; // clear only the bits of the flag we just matched
}
}
if remaining_v != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}(extra: {:x})",
match was_empty {
true => "",
false => " ",
},
remaining_v
)?;
}
Ok(s)
}
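// A small check of the formatter above (the flag values are illustrative):
// named bits are joined with " | " and bits not covered by the table are
// reported in hex as "extra". This depends on `remaining_v` clearing only
// the matched flag's bits on each iteration.
#[cfg(test)]
#[test]
fn debug_format_flag_field_reports_extra_bits() {
    const TABLE: &'static [(&'static str, libc::tcflag_t)] =
        &[("ICANON", libc::ICANON), ("ECHO", libc::ECHO)];
    let v = libc::ECHO | libc::ICANON | 0x8000_0000;
    let s = debug_format_flag_field(v, TABLE).unwrap();
    assert!(s.contains("ICANON"));
    assert!(s.contains("ECHO"));
    assert!(s.contains("extra"));
}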
fn debug_format_c_cc_field(c_cc: &[libc::cc_t; 32]) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
const INDICES: &'static [(&'static str, usize)] = &[
("VDISCARD", libc::VDISCARD),
("VEOF", libc::VEOF),
("VEOL", libc::VEOL),
("VEOL2", libc::VEOL2),
("VERASE", libc::VERASE),
("VINTR", libc::VINTR),
("VKILL", libc::VKILL),
("VLNEXT", libc::VLNEXT),
("VMIN", libc::VMIN),
("VQUIT", libc::VQUIT),
("VREPRINT", libc::VREPRINT),
("VSTART", libc::VSTART),
("VSTOP", libc::VSTOP),
("VSUSP", libc::VSUSP),
("VSWTC", libc::VSWTC),
("VTIME", libc::VTIME),
("VWERASE", libc::VWERASE),
];
let mut s = String::new();
for &(name, idx) in INDICES {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}:{}",
match was_empty {
true => "",
false => ", ",
},
name,
c_cc[idx]
)?;
}
Ok(s)
}
impl fmt::Debug for TerminalAttributes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TerminalAttributes")
.field(
"c_iflag",
&debug_format_flag_field(
self.inner.c_iflag,
&[
("IGNBRK", libc::IGNBRK),
("BRKINT", libc::BRKINT),
("IGNPAR", libc::IGNPAR),
("PARMRK", libc::PARMRK),
("INPCK", libc::INPCK),
("ISTRIP", libc::ISTRIP),
("INLCR", libc::INLCR),
("IGNCR", libc::IGNCR),
("ICRNL", libc::ICRNL),
("IXON", libc::IXON),
("IXANY", libc::IXANY),
("IXOFF", libc::IXOFF),
("IMAXBEL", libc::IMAXBEL),
("IUTF8", libc::IUTF8),
],
)?,
)
.field(
"c_oflag",
&debug_format_flag_field(
self.inner.c_oflag,
&[
("OPOST", libc::OPOST),
("OLCUC", libc::OLCUC),
("ONLCR", libc::ONLCR),
("ONOCR", libc::ONOCR),
("ONLRET", libc::ONLRET),
("OFILL", libc::OFILL),
("OFDEL", libc::OFDEL),
("NLDLY", libc::NLDLY),
("CRDLY", libc::CRDLY),
("TABDLY", libc::TABDLY),
("BSDLY", libc::BSDLY),
("VTDLY", libc::VTDLY),
("FFDLY", libc::FFDLY),
],
)?,
)
.field(
"c_cflag",
&debug_format_flag_field(
self.inner.c_cflag,
&[
("CBAUD", libc::CBAUD),
("CBAUDEX", libc::CBAUDEX),
("CSIZE", libc::CSIZE),
("CSTOPB", libc::CSTOPB),
("CREAD", libc::CREAD),
("PARENB", libc::PARENB),
("PARODD", libc::PARODD),
("HUPCL", libc::HUPCL),
("CLOCAL", libc::CLOCAL),
("CIBAUD", libc::CIBAUD),
("CMSPAR", libc::CMSPAR),
("CRTSCTS", libc::CRTSCTS),
],
)?,
)
.field(
"c_lflag",
&debug_format_flag_field(
self.inner.c_lflag,
&[
("ISIG", libc::ISIG),
("ICANON", libc::ICANON),
("ECHO", libc::ECHO),
("ECHOE", libc::ECHOE),
("ECHOK", libc::ECHOK),
("ECHONL", libc::ECHONL),
("ECHOCTL", libc::ECHOCTL),
("ECHOPRT", libc::ECHOPRT),
("ECHOKE", libc::ECHOKE),
("FLUSHO", libc::FLUSHO),
("NOFLSH", libc::NOFLSH),
("TOSTOP", libc::TOSTOP),
("PENDIN", libc::PENDIN),
("IEXTEN", libc::IEXTEN),
],
)?,
)
.field("c_cc", &debug_format_c_cc_field(&self.inner.c_cc)?)
.field("c_ispeed", unsafe { &libc::cfgetispeed(&self.inner) })
.field("c_ospeed", unsafe { &libc::cfgetospeed(&self.inner) })
.finish()
}
}
impl AbstractTerminalAttributes for TerminalAttributes {
fn enable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag |= flag.to_value();
}
fn disable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag &= !flag.to_value();
}
}
/// This trait describes an abstract input or output stream.
///
/// This trait primarily exists for testing purposes. In almost all cases, users
/// will instead just use the concrete type `Stream` defined below.
pub trait AbstractStream {
/// A type which describes the attributes of this stream / terminal.
type Attributes: AbstractTerminalAttributes + fmt::Debug;
/// Returns whether or not this stream refers to an interactive terminal (a
/// TTY), as opposed to, for example, a pipe.
fn isatty(&self) -> bool;
/// Retrieve the current attributes of this stream / terminal.
fn get_attributes(&self) -> IoResult<Self::Attributes>;
/// Modify this stream's / terminal's attributes to match the given state.
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()>;
/// Return a `Read` for this stream, if reading is supported.
fn as_reader(&self) -> Option<Box<dyn Read>>;
/// Return a `Write` for this stream, if writing is supported.
fn as_writer(&self) -> Option<Box<dyn Write>>;
}
/// Standard input / output streams.
#[derive(Debug)]
pub enum Stream {
/// Standard output.
Stdout,
/// Standard error.
Stderr,
/// Standard input.
Stdin,
}
impl Stream {
fn to_fd(&self) -> c_int {
match *self {
Stream::Stdout => libc::STDOUT_FILENO,
Stream::Stderr => libc::STDERR_FILENO,
Stream::Stdin => libc::STDIN_FILENO,
}
}
}
impl AbstractStream for Stream {
type Attributes = TerminalAttributes;
fn isatty(&self) -> bool {
let ret = unsafe { libc::isatty(self.to_fd()) };
let error: i32 = errno::errno().into();
match ret {
1 => true,
0 => match error {
libc::EBADF => false,
libc::ENOTTY => false,
_ => {
debug!(
"Unrecognized isatty errno: {}; assuming {:?} is not a TTY",
error, *self
);
false
}
},
_ => {
debug!(
"Unrecognized isatty return code: {}; assuming {:?} is not a TTY",
ret, *self
);
false
}
}
}
fn get_attributes(&self) -> IoResult<Self::Attributes> {
TerminalAttributes::new(self.to_fd())
}
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()> {
let ret = attributes.apply(self.to_fd());
debug_assert!(ret.is_err() || *attributes == Self::Attributes::new(self.to_fd()).unwrap());
ret
}
fn as_reader(&self) -> Option<Box<dyn Read>> {
match *self {
Stream::Stdin => Some(Box::new(io::stdin())),
_ => None,
}
}
fn as_writer(&self) -> Option<Box<dyn Write>> {
match *self {
Stream::Stdout => Some(Box::new(io::stdout())),
Stream::Stderr => Some(Box::new(io::stderr())),
_ => None,
}
}
}
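// Sketch of the kind of test double the `AbstractStream` docs above allude
// to (the type and field names here are hypothetical, not from the original
// source): it pretends to be a TTY, serves canned input, and discards output.
#[cfg(test)]
struct FakeStream {
    input: Vec<u8>,
}

#[cfg(test)]
impl AbstractStream for FakeStream {
    type Attributes = TerminalAttributes;
    fn isatty(&self) -> bool {
        true // pretend to be interactive so the prompt helpers accept us
    }
    fn get_attributes(&self) -> IoResult<Self::Attributes> {
        Ok(TerminalAttributes::new_empty())
    }
    fn set_attributes(&mut self, _attributes: &Self::Attributes) -> IoResult<()> {
        Ok(())
    }
    fn as_reader(&self) -> Option<Box<dyn Read>> {
        Some(Box::new(io::Cursor::new(self.input.clone())))
    }
    fn as_writer(&self) -> Option<Box<dyn Write>> {
        Some(Box::new(io::sink()))
    }
}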
/// This structure handles a) disabling the echoing of characters typed to
/// `Stdin`, and b) remembering to reset the terminal attributes afterwards
/// (via `Drop`).
struct DisableEcho<'s, S: AbstractStream> {
stream: &'s mut S,
initial_attributes: S::Attributes,
}
impl<'s, S: AbstractStream> DisableEcho<'s, S> {
fn new(stream: &'s mut S) -> Result<Self> {
let initial_attributes = stream.get_attributes()?;
debug!("Initial stream attributes: {:#?}", initial_attributes);
let mut attributes = stream.get_attributes()?;
// Don't echo characters typed to stdin.
attributes.disable(TerminalFlag::Echo);
// But, *do* echo the newline when the user hits ENTER.
attributes.enable(TerminalFlag::EchoNewlines);
debug!("Setting attributes to: {:#?}", attributes);
stream.set_attributes(&attributes)?;
Ok(DisableEcho {
stream: stream,
initial_attributes: initial_attributes,
})
}
}
impl<'s, S: AbstractStream> Drop for DisableEcho<'s, S> {
fn drop(&mut self) {
self.stream
.set_attributes(&self.initial_attributes)
.unwrap();
}
}
fn require_isatty<S: AbstractStream>(s: &mut S) -> Result<()> {
if !s.isatty() {
Err(Error::Precondition(format!(
"cannot prompt interactively when the I/O streams are not TTYs"
)))
} else {
Ok(())
}
}
fn build_input_reader<IS: AbstractStream>(
input_stream: &mut IS,
) -> Result<io::BufReader<Box<dyn Read>>> {
require_isatty(input_stream)?;
Ok(io::BufReader::new(match input_stream.as_reader() {
None => {
return Err(Error::Precondition(format!(
"the given input stream must support `Read`"
)))
}
Some(r) => r,
}))
}
fn remove_newline(mut s: String) -> Result<String> {
// Remove the trailing newline (if any - not finding one is an error).
if !s.ends_with('\n') {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected end of input").into());
}
s.pop();
// If this is windows and so there's also a \r, remove that too.
if s.ends_with('\r') {
s.pop();
}
Ok(s)
}
fn prompt_for_string_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
// We have to take the reader as a parameter, since it must be "global",
// even if this function is e.g. called in a loop. Otherwise, because it's
// buffered, we might buffer some input and then discard it.
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
use io::BufRead;
require_isatty(output_stream)?;
// It's fine to construct a separate writer, potentially on each loop
// iteration or whatever, because we flush immediately, and don't do any
// buffering.
let mut writer = match output_stream.as_writer() {
None => {
return Err(Error::Precondition(format!(
"the given output stream must support `Write`"
)))
}
Some(w) => w,
};
write!(writer, "{}", prompt)?;
// We have to flush so the user sees the prompt immediately.
writer.flush()?;
Ok({
let _disable_echo = match is_sensitive {
false => None,
true => Some(DisableEcho::new(input_stream)?),
};
let mut ret = String::new();
input_reader.read_line(&mut ret)?;
remove_newline(ret)?
})
}
/// Prompt the user for a string (read from the given input stream) using the
/// given output stream (typically standard output or standard error) to display
/// the given prompt message.
///
/// If `is_sensitive` is true, then the user's characters will not be echoed back
/// (e.g. this will behave like a password prompt).
///
/// Note that there are various requirements for the given streams, and this
/// function will return an error if any of them are not met:
///
/// - Both `input_stream` and `output_stream` must be TTYs.
/// - `input_stream` must return a valid `Read` instance.
/// - `output_stream` must return a valid `Write` instance.
pub fn prompt_for_string<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
fn prompt_for_string_confirm_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
loop {
let string = prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
prompt,
is_sensitive,
)?;
if string
== prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
"Confirm: ",
is_sensitive,
)?
{
return Ok(string);
}
}
}
/// Prompt for a string as per `prompt_for_string`, but additionally have the
/// user enter the value again to confirm we get the same answer twice. This is
/// useful for e.g. password entry.
pub fn prompt_for_string_confirm<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_confirm_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
/// MaybePromptedString is a wrapper for getting
functions_and_their_processes.rs

use rand::Rng;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn factorial(n: i128) -> i128 {
if n == 1 {
1
} else {
n * factorial(n - 1)
}
}
pub fn fact_iter(n: i128) -> i128 {
fn helper(p: i128, c: i128, max_count: i128) -> i128 {
if c > max_count {
p
} else {
helper(p * c, c + 1, max_count)
}
}
helper(1, 1, n)
}
pub fn inc(a: i128) -> i128 {
a + 1
}
pub fn dec(a: i128) -> i128 {
a - 1
}
pub fn plus(a: i128, b: i128) -> i128 {
if a == 0 {
b
} else {
plus(dec(a), inc(b))
}
}
pub fn ackermann(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
if a == 0 {
2 * b
} else {
if b == 1 {
2
} else {
ackermann(a - 1, ackermann(a, b - 1))
}
}
}
}
fn f(n: i128) -> i128 {
ackermann(0, n)
}
fn g(n: i128) -> i128 {
ackermann(1, n)
}
fn h(n: i128) -> i128 {
ackermann(2, n)
}
pub fn fac(n: i128) -> i128 {
if n == 1 {
1
} else {
n * fac(n - 1)
}
}
pub fn fib(n: i128) -> i128 {
if n < 2 {
n
} else {
fib(n - 2) + fib(n - 1)
}
}
// a nested Rust `fn` cannot capture local variables from the enclosing scope,
// so all state is threaded through the helper's parameters
pub fn fib_iter(n: i128) -> i128 {
fn helper(a: i128, b: i128, i: i128, n: i128) -> i128 {
if i == n {
b
} else {
helper(b, a + b, i + 1, n)
}
}
helper(0, 1, 1, n)
}
/*
The number of ways to change amount a using n kinds of coins equals
- the number of ways to change amount a using all but the first kind of coin, plus
- the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin
*/
fn count_change(amount: i128) -> i128 {
cc(amount, 6)
}
fn cc(amount: i128, coin_kind: i8) -> i128 {
if amount == 0 {
1
} else {
if amount < 0 || coin_kind == 0 {
0
} else {
cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind)
}
}
}
fn get_value(coin_kind: i8) -> i128 {
match coin_kind {
6 => 100,
5 => 50,
4 => 25,
3 => 10,
2 => 5,
1 => 1,
_ => 0,
}
}
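// A hand-checkable instance of the recurrence above: with only pennies and
// nickels (coin kinds 1 and 2), cc(10, 2) = cc(10, 1) + cc(5, 2)
// = 1 + (cc(5, 1) + cc(0, 2)) = 1 + (1 + 1) = 3, matching the three ways
// {10 pennies}, {5 pennies + 1 nickel}, {2 nickels}.
#[cfg(test)]
#[test]
fn count_change_small_case() {
    assert_eq!(cc(10, 2), 3);
}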
/*
Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3.
Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f
by means of an iterative process.
*/
fn fn3(n: i128) -> i128 {
if n < 3 {
n
} else {
fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3)
}
}
fn fn3_iter(n: i128) -> i128 {
fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 {
if k == n {
p1
} else {
helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n)
}
}
return helper(0, 1, 2, 2, n);
}
// m >= n
pub fn pascal(m: i128, n: i128) -> i128 {
if n == 0 || m == n {
1
} else {
pascal(m - 1, n - 1) + pascal(m - 1, n)
}
}
// pascal triangle with iterative process
pub fn pascal_iter(m: usize, n: usize) -> i128 {
fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 {
if m == 0 || m == n {
1
} else {
if l == m {
pre_vec[n - 1] + pre_vec[n]
} else {
let mut new_vec = vec![];
for (i, _) in pre_vec.iter().enumerate() {
if i == 0 {
new_vec.push(1);
} else {
new_vec.push(pre_vec[i - 1] + pre_vec[i])
}
}
new_vec.push(1);
helper(m, n, l + 1, new_vec.to_vec())
}
}
}
helper(m, n, 2, vec![1, 1])
}
pub fn cube(x: f32) -> f32 {
x * x * x
}
fn p(x: f32) -> f32 {
3.0 * x - 4.0 * cube(x)
}
pub fn sine(angle: f32) -> f32 {
if f32::abs(angle) <= 0.1 {
angle
} else {
p(sine(angle / 3.0))
}
}
pub fn expt(b: i128, n: i128) -> i128 {
if n == 0 {
1
} else {
b * expt(b, n - 1)
}
}
pub fn expt_iter(b: i128, n: i128) -> i128 {
fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 {
if c == n {
p
} else {
helper(c + 1, b * p, b, n)
}
}
helper(0, 1, b, n)
}
pub fn is_even(n: i128) -> bool {
n % 2 == 0
}
pub fn square(i: i128) -> i128 {
i * i
}
pub fn half(i: i128) -> i128 {
i / 2
}
pub fn fast_expt(b: i128, n: i128) -> i128 {
if n == 1 {
b
} else {
if is_even(n) {
square(fast_expt(b, half(n)))
} else {
b * fast_expt(b, n - 1)
}
}
}
pub fn fast_expt_iter(b: i128, n: i128) -> i128 {
fn helper(p: i128, b: i128, n: i128) -> i128 {
if n == 0 {
p
} else {
if is_even(n) {
helper(p, square(b), half(n))
} else {
helper(b * p, b, n - 1)
}
}
}
helper(1, b, n)
}
pub fn double(x: i128) -> i128 {
x * 2
}
pub fn times(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
a + times(a, b - 1)
}
}
pub fn times_iter(a: i128, b: i128) -> i128 {
fn helper(s: i128, a: i128, b: i128) -> i128 {
if b == 0 {
s
} else {
if is_even(b) {
helper(s, double(a), half(b))
} else {
helper(s + a, a, b - 1)
}
}
}
helper(0, a, b)
}
pub fn fast_fib(n: i128) -> i128 {
fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 {
if count == 0 {
b
} else {
if is_even(count) {
helper(
a,
b,
square(p) + square(q),
2 * p * q + square(q),
half(count),
)
} else {
helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1)
}
}
}
helper(1, 0, 0, 1, n)
}
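// Why fast_fib is O(log n) (SICP exercise 1.19): one application of the
// transformation T_{p,q} maps (a, b) to (b*q + a*q + a*p, b*p + a*q), and
// Fibonacci stepping is the special case p = 0, q = 1. Composing T_{p,q}
// with itself yields another transformation of the same family with
//     p' = p*p + q*q,    q' = 2*p*q + q*q,
// which is exactly what the even branch computes, so the remaining step
// count is halved at each squaring.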
pub fn gcd(a: i128, b: i128) -> i128 {
if b == 0 {
a
} else {
gcd(b, a % b)
}
}
pub fn devides(test_divisor: i128, n: i128) -> bool {
n % test_divisor == 0
}
fn find_divisor(n: i128, test_divisor: i128) -> i128 {
if square(test_divisor) > n {
n
} else {
if devides(test_divisor, n) {
test_divisor
} else {
find_divisor(n, test_divisor + 1)
}
}
}
pub fn smallest_divisor(n: i128) -> i128 {
find_divisor(n, 2)
}
pub fn is_prime(n: i128) -> bool {
smallest_divisor(n) == n
}
pub fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// square after expmod, otherwise it will overflow easily
square(expmod(base, half(exp), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
// Fermat test
pub fn fermat_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n, n) == a
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n);
println!("fermat_test testing {}", a);
try_it(a, n)
}
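// Fermat's little theorem backs the test above: if n is prime, then a^n is
// congruent to a modulo n for every a < n; e.g. 3^5 = 243 = 48*5 + 3, so
// expmod(3, 5, 5) == 3. The converse fails for Carmichael numbers such as
// 561 and 2821, which satisfy the congruence for every base despite being
// composite -- see test_carmichael_number below.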
pub fn fast_is_prime(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if fermat_test(n) {
fast_is_prime(n, times - 1)
} else {
false
}
}
}
// Exercise 1.22
fn timed_prime_test(n: i128) -> bool {
println!(" start testing: {}", n);
let now = SystemTime::now();
start_prime_test(n, now)
}
fn start_prime_test(n: i128, now: SystemTime) -> bool {
if is_prime(n) {
report_prime(now, n)
} else {
true
}
}
fn report_prime(now: SystemTime, n: i128) -> bool {
println!(" *** ");
println!(" prime number is: {}", n);
println!("Time used: {}", get_lapsed_time_millis(now));
/*
match now.elapsed() {
Ok(elapsed) => println!("Time used: {}", elapsed.as_millis()),
Err(e) => println!("Error: {:?}", e),
}
*/
false
}
fn get_lapsed_time_millis(then: SystemTime) -> u128 {
let new_now = SystemTime::now();
new_now
.duration_since(UNIX_EPOCH)
.expect("Time")
.as_millis()
- then.duration_since(UNIX_EPOCH).expect("Time").as_millis()
}
// `start` is assumed to be an odd number
fn search_for_prime(start: i128, count: i128) {
fn helper(start: i128, count: i128) {
if count == 0 {
return;
} else {
if timed_prime_test(start) {
helper(start + 2, count)
} else {
helper(start + 2, count - 1)
}
}
}
helper(start, count)
}
// Exercise 1.27
fn test_carmichael_number(n: i128) {
for i in 2..n {
if expmod(i, n, n) == i {
println!(" testing {}", i);
}
}
}
// Exercise 1.28 Miller-Rabin test
fn miller_rabin_test(n: i128, times: i128) -> bool {
fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// pass the intermediate result through trivial_test so that discovering a
// nontrivial square root of 1 mod m flags n as composite (the point of
// Miller-Rabin; trivial_test was previously defined but never called);
// square after expmod, otherwise it will overflow easily
square(trivial_test(expmod(base, half(exp), m), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
fn trivial_test(r: i128, m: i128) -> i128 {
if r == 1 || r == m - 1 {
r
} else if square(r) % m == 1 {
0
} else {
r
}
}
fn helper_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n - 1, n) == 1
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n - 1);
println!("miller_rabin testing {}", a);
try_it(a, n)
}
fn test_times(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if helper_test(n) {
test_times(n, times - 1)
} else {
false
}
}
}
test_times(n, times)
}
#[test]
fn functions_and_their_processes_tests() {
println!("{}", factorial(5));
println!("{}", fact_iter(5));
println!("{}", plus(5, 13));
println!("{}", ackermann(1, 10));
println!("{}", ackermann(2, 4));
println!("{}", ackermann(3, 3));
println!("{}", f(3));
println!("{}", g(3));
println!("{}", h(4));
println!("{}", fac(5));
println!("{}", fib(5));
println!("{}", fib_iter(5));
println!("{}", count_change(100));
println!("{}", fn3(3));
println!("{}", fn3(4));
println!("{}", fn3_iter(3));
println!("{}", fn3_iter(4));
println!("{}", pascal(3, 2));
println!("{}", pascal_iter(3, 2));
println!("{}", pascal(4, 2));
println!("{}", pascal_iter(4, 2));
println!("{}", expt(4, 2));
println!("{}", expt_iter(4, 2));
println!("{}", fast_expt(4, 2));
println!("{}", fast_expt_iter(4, 2));
println!("{}", times(4, 2));
println!("{}", times_iter(4, 2));
println!("{}", fast_fib(5));
println!("{}", gcd(2, 5));
println!("{}", smallest_divisor(45));
println!("{}", is_prime(5));
println!("{}", fermat_test(5));
println!("{}", timed_prime_test(16769023));
search_for_prime(1111, 3);
search_for_prime(11111, 3);
search_for_prime(111111, 3);
search_for_prime(1111111, 3);
search_for_prime(11111111, 3);
search_for_prime(111111111, 3);
// carmichael number
test_carmichael_number(2821);
println!("is 2821 prime by fermat_test? {}", fast_is_prime(2821, 100));
println!(
"is 2821 prime by miller_rabin test? {}",
miller_rabin_test(2821, 100)
);
}
functions_and_their_processes.rs

use rand::Rng;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn factorial(n: i128) -> i128 {
if n == 1 {
1
} else {
n * factorial(n - 1)
}
}
pub fn fact_iter(n: i128) -> i128 {
fn helper(p: i128, c: i128, max_count: i128) -> i128 {
if c > max_count {
p
} else {
helper(p * c, c + 1, max_count)
}
}
helper(1, 1, n)
}
pub fn inc(a: i128) -> i128 {
a + 1
}
pub fn dec(a: i128) -> i128 {
a - 1
}
pub fn plus(a: i128, b: i128) -> i128 {
if a == 0 {
b
} else {
plus(dec(a), inc(b))
}
}
pub fn ackermann(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
if a == 0 {
2 * b
} else {
if b == 1 {
2
} else {
ackermann(a - 1, ackermann(a, b - 1))
}
}
}
}
fn f(n: i128) -> i128 {
ackermann(0, n)
}
fn g(n: i128) -> i128 {
ackermann(1, n)
}
fn h(n: i128) -> i128 {
ackermann(2, n)
}
pub fn fac(n: i128) -> i128 {
if n == 1 {
1
} else {
n * fac(n - 1)
}
}
pub fn fib(n: i128) -> i128 {
if n < 2 {
n
} else {
fib(n - 2) + fib(n - 1)
}
}
// a nested Rust `fn` cannot capture local variables from the enclosing scope,
// so all state is threaded through the helper's parameters
pub fn fib_iter(n: i128) -> i128 {
fn helper(a: i128, b: i128, i: i128, n: i128) -> i128 {
if i == n {
b
} else {
helper(b, a + b, i + 1, n)
}
}
helper(0, 1, 1, n)
}
/*
The number of ways to change amount a using n kinds of coins equals
- the number of ways to change amount a using all but the first kind of coin, plus
- the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin
*/
fn count_change(amount: i128) -> i128 {
cc(amount, 6)
}
fn cc(amount: i128, coin_kind: i8) -> i128 {
if amount == 0 {
1
} else {
if amount < 0 || coin_kind == 0 {
0
} else {
cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind)
}
}
}
fn get_value(coin_kind: i8) -> i128 {
match coin_kind {
6 => 100,
5 => 50,
4 => 25,
3 => 10,
2 => 5,
1 => 1,
_ => 0,
}
}
/*
Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3.
Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f
by means of an iterative process.
*/
fn fn3(n: i128) -> i128 {
if n < 3 {
n
} else {
fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3)
}
}
fn fn3_iter(n: i128) -> i128 {
fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 {
if k == n {
p1
} else {
helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n)
}
}
return helper(0, 1, 2, 2, n);
}
// m >= n
pub fn pascal(m: i128, n: i128) -> i128 {
if n == 0 || m == n {
1
} else {
pascal(m - 1, n - 1) + pascal(m - 1, n)
}
}
// pascal triangle with iterative process
pub fn pascal_iter(m: usize, n: usize) -> i128 {
fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 {
if m == 0 || m == n {
1
} else {
if l == m {
pre_vec[n - 1] + pre_vec[n]
} else {
let mut new_vec = vec![];
for (i, _) in pre_vec.iter().enumerate() {
if i == 0 {
new_vec.push(1);
} else {
new_vec.push(pre_vec[i - 1] + pre_vec[i])
}
}
new_vec.push(1);
helper(m, n, l + 1, new_vec.to_vec())
}
}
}
helper(m, n, 2, vec![1, 1])
}
pub fn cube(x: f32) -> f32 {
x * x * x
}
fn p(x: f32) -> f32 {
3.0 * x - 4.0 * cube(x)
}
pub fn sine(angle: f32) -> f32 {
if f32::abs(angle) <= 0.1 {
angle
} else {
p(sine(angle / 3.0))
}
}
pub fn expt(b: i128, n: i128) -> i128 {
if n == 0 {
1
} else {
b * expt(b, n - 1)
}
}
pub fn expt_iter(b: i128, n: i128) -> i128 {
fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 {
if c == n {
p
} else {
helper(c + 1, b * p, b, n)
}
}
helper(0, 1, b, n)
}
pub fn is_even(n: i128) -> bool {
n % 2 == 0
}
pub fn square(i: i128) -> i128 {
i * i
}
pub fn half(i: i128) -> i128 {
i / 2
}
pub fn fast_expt(b: i128, n: i128) -> i128 {
if n == 1 {
b
} else {
if is_even(n) {
square(fast_expt(b, half(n)))
} else {
b * fast_expt(b, n - 1)
}
}
}
pub fn fast_expt_iter(b: i128, n: i128) -> i128 {
fn helper(p: i128, b: i128, n: i128) -> i128 {
if n == 0 {
p
} else {
if is_even(n) {
helper(p, square(b), half(n))
} else {
helper(b * p, b, n - 1)
}
}
}
helper(1, b, n)
}
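// The iterative version above maintains the invariant
//     p * b^n == (original b)^(original n)
// at every call: squaring b while halving an even n preserves it, and so
// does folding one factor of b into p for an odd n, so when n reaches 0 the
// accumulator p holds the final answer.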
pub fn double(x: i128) -> i128 {
x * 2
}
pub fn times(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
a + times(a, b - 1)
}
}
pub fn times_iter(a: i128, b: i128) -> i128 {
fn helper(s: i128, a: i128, b: i128) -> i128 {
if b == 0 {
s
} else {
if is_even(b) {
helper(s, double(a), half(b))
} else {
helper(s + a, a, b - 1)
}
}
}
helper(0, a, b)
}
pub fn fast_fib(n: i128) -> i128 {
fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 {
if count == 0 {
b
} else {
if is_even(count) {
helper(
a,
b,
square(p) + square(q),
2 * p * q + square(q),
half(count),
)
} else {
helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1)
}
}
}
helper(1, 0, 0, 1, n)
}
pub fn gcd(a: i128, b: i128) -> i128 {
if b == 0 {
a
} else {
gcd(b, a % b)
}
}
pub fn devides(test_divisor: i128, n: i128) -> bool {
n % test_divisor == 0
}
fn find_divisor(n: i128, test_divisor: i128) -> i128 {
if square(test_divisor) > n {
n
} else {
if devides(test_divisor, n) {
test_divisor
} else {
find_divisor(n, test_divisor + 1)
}
}
}
pub fn smallest_divisor(n: i128) -> i128 {
find_divisor(n, 2)
}
pub fn is_prime(n: i128) -> bool {
smallest_divisor(n) == n
}
pub fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// square after expmod, otherwise it will overflow easily
square(expmod(base, half(exp), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
// Fermat test
pub fn fermat_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n, n) == a
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n);
println!("fermat_test testing {}", a);
try_it(a, n)
}
pub fn fast_is_prime(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if fermat_test(n) {
fast_is_prime(n, times - 1)
} else {
false
}
}
}
// Exercise 1.22
fn timed_prime_test(n: i128) -> bool {
println!(" start testing: {}", n);
let now = SystemTime::now();
start_prime_test(n, now)
}
fn start_prime_test(n: i128, now: SystemTime) -> bool {
if is_prime(n) {
report_prime(now, n)
} else {
true
}
}
fn report_prime(now: SystemTime, n: i128) -> bool {
println!(" *** ");
println!(" prime number is: {}", n);
println!("Time used: {}", get_lapsed_time_millis(now));
/*
match now.elapsed() {
Ok(elapsed) => println!("Time used: {}", elapsed.as_millis()),
Err(e) => println!("Error: {:?}", e),
}
*/
false
}
fn get_lapsed_time_millis(then: SystemTime) -> u128 {
let new_now = SystemTime::now();
new_now
.duration_since(UNIX_EPOCH)
.expect("Time")
.as_millis()
- then.duration_since(UNIX_EPOCH).expect("Time").as_millis()
}
// `start` is assumed to be an odd number
fn search_for_prime(start: i128, count: i128) {
fn helper(start: i128, count: i128) {
if count == 0 {
return;
} else {
if timed_prime_test(start) {
helper(start + 2, count)
} else {
helper(start + 2, count - 1)
}
}
}
helper(start, count)
}
// Exercise 1.27
fn test_carmichael_number(n: i128) {
for i in 2..n {
if expmod(i, n, n) == i {
println!(" testing {}", i);
}
}
}
// Exercise 1.28 Miller-Rabin test
fn miller_rabin_test(n: i128, times: i128) -> bool {
fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// pass the intermediate result through trivial_test so that discovering a
// nontrivial square root of 1 mod m flags n as composite (the point of
// Miller-Rabin; trivial_test was previously defined but never called);
// square after expmod, otherwise it will overflow easily
square(trivial_test(expmod(base, half(exp), m), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
fn trivial_test(r: i128, m: i128) -> i128 {
if r == 1 || r == m - 1 {
r
} else if square(r) % m == 1 {
0
} else {
r
}
}
fn helper_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n - 1, n) == 1
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n - 1);
println!("miller_rabin testing {}", a);
try_it(a, n)
}
fn test_times(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if helper_test(n) {
test_times(n, times - 1)
} else {
false
}
}
}
test_times(n, times)
}
#[test]
fn functions_and_their_processes_tests() {
println!("{}", factorial(5));
println!("{}", fact_iter(5));
println!("{}", plus(5, 13));
println!("{}", ackermann(1, 10));
println!("{}", ackermann(2, 4));
println!("{}", ackermann(3, 3));
println!("{}", f(3));
println!("{}", g(3));
println!("{}", h(4));
println!("{}", fac(5));
println!("{}", fib(5));
println!("{}", fib_iter(5));
println!("{}", count_change(100));
println!("{}", fn3(3));
println!("{}", fn3(4));
println!("{}", fn3_iter(3));
println!("{}", fn3_iter(4));
println!("{}", pascal(3, 2));
println!("{}", pascal_iter(3, 2));
println!("{}", pascal(4, 2));
println!("{}", pascal_iter(4, 2));
println!("{}", expt(4, 2));
println!("{}", expt_iter(4, 2));
println!("{}", fast_expt(4, 2));
println!("{}", fast_expt_iter(4, 2));
println!("{}", times(4, 2));
println!("{}", times_iter(4, 2));
println!("{}", fast_fib(5));
println!("{}", gcd(2, 5));
println!("{}", smallest_divisor(45));
println!("{}", is_prime(5));
println!("{}", fermat_test(5));
println!("{}", timed_prime_test(16769023));
search_for_prime(1111, 3);
search_for_prime(11111, 3);
search_for_prime(111111, 3);
search_for_prime(1111111, 3);
search_for_prime(11111111, 3);
search_for_prime(111111111, 3);
// carmichael number
test_carmichael_number(2821);
println!("is 2821 prime by fermat_test? {}", fast_is_prime(2821, 100));
println!(
"is 2821 prime by miller_rabin test? {}",
miller_rabin_test(2821, 100)
);
}
functions_and_their_processes.rs

use rand::Rng;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn factorial(n: i128) -> i128 {
if n == 1 {
1
} else {
n * factorial(n - 1)
}
}
pub fn fact_iter(n: i128) -> i128 {
fn helper(p: i128, c: i128, max_count: i128) -> i128 {
if c > max_count {
p
} else {
helper(p * c, c + 1, max_count)
}
}
helper(1, 1, n)
}
pub fn inc(a: i128) -> i128 {
a + 1
}
pub fn dec(a: i128) -> i128 {
a - 1
}
pub fn plus(a: i128, b: i128) -> i128 {
if a == 0 {
b
} else {
plus(dec(a), inc(b))
}
}
pub fn ackermann(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
if a == 0 {
2 * b
} else {
if b == 1 {
2
} else {
ackermann(a - 1, ackermann(a, b - 1))
}
}
}
}
fn f(n: i128) -> i128 {
ackermann(0, n)
}
fn g(n: i128) -> i128 {
ackermann(1, n)
}
fn h(n: i128) -> i128 {
ackermann(2, n)
}
pub fn fac(n: i128) -> i128 {
if n == 1 {
1
} else {
n * fac(n - 1)
}
}
pub fn fib(n: i128) -> i128 {
if n < 2 {
n
} else {
fib(n - 2) + fib(n - 1)
}
}
// a nested Rust `fn` cannot capture local variables from the enclosing scope,
// so all state is threaded through the helper's parameters
pub fn fib_iter(n: i128) -> i128 {
fn helper(a: i128, b: i128, i: i128, n: i128) -> i128 {
if i == n {
b
} else {
helper(b, a + b, i + 1, n)
}
}
helper(0, 1, 1, n)
}
/*
The number of ways to change amount a using n kinds of coins equals
- the number of ways to change amount a using all but the first kind of coin, plus
- the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin
*/
fn count_change(amount: i128) -> i128 {
cc(amount, 6)
}
fn cc(amount: i128, coin_kind: i8) -> i128 {
if amount == 0 {
1
} else {
if amount < 0 || coin_kind == 0 {
0
} else {
cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind)
}
}
}
fn get_value(coin_kind: i8) -> i128 {
match coin_kind {
6 => 100,
5 => 50,
4 => 25,
3 => 10,
2 => 5,
1 => 1,
_ => 0,
}
}
/*
Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3.
Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f
by means of an iterative process.
*/
fn fn3(n: i128) -> i128 {
if n < 3 {
n
} else {
fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3)
}
}
fn fn3_iter(n: i128) -> i128 {
fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 {
if k == n {
p1
} else {
helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n)
}
}
return helper(0, 1, 2, 2, n);
}
// m >= n
pub fn pascal(m: i128, n: i128) -> i128 {
if n == 0 || m == n {
1
} else {
pascal(m - 1, n - 1) + pascal(m - 1, n)
}
}
// pascal triangle with iterative process
pub fn pascal_iter(m: usize, n: usize) -> i128 {
fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 {
if m == 0 || m == n {
1
} else {
if l == m {
pre_vec[n - 1] + pre_vec[n]
} else {
let mut new_vec = vec![];
for (i, _) in pre_vec.iter().enumerate() {
if i == 0 {
new_vec.push(1);
} else {
new_vec.push(pre_vec[i - 1] + pre_vec[i])
}
}
new_vec.push(1);
helper(m, n, l + 1, new_vec.to_vec())
}
}
}
helper(m, n, 2, vec![1, 1])
}
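// pascal_iter builds the triangle one row at a time: starting from [1, 1],
// each pass sums adjacent pairs and appends the trailing 1, e.g.
// [1, 1] -> [1, 2, 1] -> [1, 3, 3, 1], so reaching row m needs only O(m)
// space, unlike the tree recursion in `pascal` above.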
pub fn cube(x: f32) -> f32 {
x * x * x
}
fn p(x: f32) -> f32 {
3.0 * x - 4.0 * cube(x)
}
pub fn sine(angle: f32) -> f32 {
if f32::abs(angle) <= 0.1 {
angle
} else {
p(sine(angle / 3.0))
}
}
pub fn expt(b: i128, n: i128) -> i128 {
if n == 0 {
1
} else {
b * expt(b, n - 1)
}
}
pub fn expt_iter(b: i128, n: i128) -> i128 {
fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 {
if c == n {
p
} else {
helper(c + 1, b * p, b, n)
}
}
helper(0, 1, b, n)
}
pub fn is_even(n: i128) -> bool {
n % 2 == 0
}
pub fn square(i: i128) -> i128 {
i * i
}
pub fn half(i: i128) -> i128 {
i / 2
}
pub fn fast_expt(b: i128, n: i128) -> i128 {
if n == 1 {
b
} else {
if is_even(n) {
square(fast_expt(b, half(n)))
} else {
b * fast_expt(b, n - 1)
}
}
}
pub fn fast_expt_iter(b: i128, n: i128) -> i128 {
fn helper(p: i128, b: i128, n: i128) -> i128 {
if n == 0 {
p
} else {
if is_even(n) {
helper(p, square(b), half(n))
} else {
helper(b * p, b, n - 1)
}
}
}
helper(1, b, n)
}
pub fn double(x: i128) -> i128 {
x * 2
}
pub fn times(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
a + times(a, b - 1)
}
}
pub fn times_iter(a: i128, b: i128) -> i128 {
fn helper(s: i128, a: i128, b: i128) -> i128 {
if b == 0 {
s
} else {
if is_even(b) {
helper(s, double(a), half(b))
} else {
helper(s + a, a, b - 1)
}
}
}
helper(0, a, b)
}
pub fn fast_fib(n: i128) -> i128 {
fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 {
if count == 0 {
b
} else {
if is_even(count) {
helper(
a,
b,
square(p) + square(q),
2 * p * q + square(q),
half(count),
)
} else {
helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1)
}
}
}
helper(1, 0, 0, 1, n)
}
pub fn gcd(a: i128, b: i128) -> i128 {
if b == 0 {
a
} else {
gcd(b, a % b)
}
}
pub fn devides(test_divisor: i128, n: i128) -> bool {
n % test_divisor == 0
}
fn find_divisor(n: i128, test_divisor: i128) -> i128 {
if square(test_divisor) > n {
n
} else {
if devides(test_divisor, n) {
test_divisor
} else {
find_divisor(n, test_divisor + 1)
}
}
}
pub fn smallest_divisor(n: i128) -> i128 {
find_divisor(n, 2)
}
pub fn is_prime(n: i128) -> bool {
smallest_divisor(n) == n
}
pub fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// square after expmod, otherwise it will overflow easily
square(expmod(base, half(exp), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
// Fermat test
pub fn fermat_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n, n) == a
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n);
println!("fermat_test testing {}", a);
try_it(a, n)
}
pub fn fast_is_prime(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if fermat_test(n) {
fast_is_prime(n, times - 1)
} else {
false
}
}
}
// Exercise 1.22
fn timed_prime_test(n: i128) -> bool {
println!(" start testing: {}", n);
let now = SystemTime::now();
start_prime_test(n, now)
}
fn start_prime_test(n: i128, now: SystemTime) -> bool {
if is_prime(n) {
report_prime(now, n)
} else {
true
}
}
fn report_prime(now: SystemTime, n: i128) -> bool {
println!(" *** ");
println!(" prime number is: {}", n);
println!("Time used: {}", get_lapsed_time_millis(now));
/*
match now.elapsed() {
Ok(elapsed) => println!("Time used: {}", elapsed.as_millis()),
Err(e) => println!("Error: {:?}", e),
}
*/
false
}
fn get_lapsed_time_millis(then: SystemTime) -> u128 {
let new_now = SystemTime::now();
new_now
.duration_since(UNIX_EPOCH)
.expect("Time")
.as_millis()
- then.duration_since(UNIX_EPOCH).expect("Time").as_millis()
}
// `start` is assumed to be an odd number
fn search_for_prime(start: i128, count: i128) {
fn helper(start: i128, count: i128) {
if count == 0 {
return;
} else {
if timed_prime_test(start) {
helper(start + 2, count)
} else {
helper(start + 2, count - 1)
}
}
}
helper(start, count)
}
// Exercise 1.27
fn test_carmichael_number(n: i128) {
for i in 2..n {
if expmod(i, n, n) == i {
println!(" testing {}", i);
}
}
}
// Exercise 1.28 Miller-Rabin test
fn miller_rabin_test(n: i128, times: i128) -> bool {
fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// pass the intermediate result through trivial_test so that discovering a
// nontrivial square root of 1 mod m flags n as composite (the point of
// Miller-Rabin; trivial_test was previously defined but never called);
// square after expmod, otherwise it will overflow easily
square(trivial_test(expmod(base, half(exp), m), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
fn trivial_test(r: i128, m: i128) -> i128 {
if r == 1 || r == m - 1 {
r
} else if square(r) % m == 1 {
0
} else {
r
}
}
fn helper_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n - 1, n) == 1
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n - 1);
println!("miller_rabin testing {}", a);
try_it(a, n)
}
fn test_times(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if helper_test(n) {
test_times(n, times - 1)
} else {
false
}
}
}
test_times(n, times)
}
#[test]
fn functions_and_their_processes_tests() {
println!("{}", factorial(5));
println!("{}", fact_iter(5));
println!("{}", plus(5, 13));
println!("{}", ackermann(1, 10));
println!("{}", ackermann(2, 4));
println!("{}", ackermann(3, 3));
println!("{}", f(3));
println!("{}", g(3));
println!("{}", h(4));
println!("{}", fac(5));
println!("{}", fib(5));
println!("{}", fib_iter(5));
println!("{}", count_change(100));
println!("{}", fn3(3));
println!("{}", fn3(4));
println!("{}", fn3_iter(3));
println!("{}", fn3_iter(4));
println!("{}", pascal(3, 2));
println!("{}", pascal_iter(3, 2));
println!("{}", pascal(4, 2));
println!("{}", pascal_iter(4, 2));
println!("{}", expt(4, 2));
println!("{}", expt_iter(4, 2));
println!("{}", fast_expt(4, 2));
println!("{}", fast_expt_iter(4, 2));
println!("{}", times(4, 2));
println!("{}", times_iter(4, 2));
println!("{}", fast_fib(5));
println!("{}", gcd(2, 5));
println!("{}", smallest_divisor(45));
println!("{}", is_prime(5));
println!("{}", fermat_test(5));
println!("{}", timed_prime_test(16769023));
search_for_prime(1111, 3);
search_for_prime(11111, 3);
search_for_prime(111111, 3);
search_for_prime(1111111, 3);
search_for_prime(11111111, 3);
search_for_prime(111111111, 3);
// carmichael number
test_carmichael_number(2821);
println!("is 2821 prime by fermat_test? {}", fast_is_prime(2821, 100));
println!(
"is 2821 prime by miller_rabin test? {}",
miller_rabin_test(2821, 100)
);
}
functions_and_their_processes.rs

use rand::Rng;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn factorial(n: i128) -> i128 {
if n == 1 {
1
} else {
n * factorial(n - 1)
}
}
pub fn fact_iter(n: i128) -> i128 {
fn helper(p: i128, c: i128, max_count: i128) -> i128 {
if c > max_count {
p
} else {
helper(p * c, c + 1, max_count)
}
}
helper(1, 1, n)
}
pub fn inc(a: i128) -> i128 {
a + 1
}
pub fn dec(a: i128) -> i128 {
a - 1
}
pub fn plus(a: i128, b: i128) -> i128 {
if a == 0 {
b
} else {
plus(dec(a), inc(b))
}
}
pub fn ackermann(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
if a == 0 {
2 * b
} else {
if b == 1 {
2
} else {
ackermann(a - 1, ackermann(a, b - 1))
}
}
}
}
fn f(n: i128) -> i128 {
ackermann(0, n)
}
fn g(n: i128) -> i128 {
ackermann(1, n)
}
fn h(n: i128) -> i128 {
ackermann(2, n)
}
pub fn fac(n: i128) -> i128 {
if n == 1 {
1
} else {
n * fac(n - 1)
}
}
pub fn fib(n: i128) -> i128 {
if n < 2 {
n
} else {
fib(n - 2) + fib(n - 1)
}
}
// a nested Rust `fn` cannot capture local variables from the enclosing scope,
// so all state is threaded through the helper's parameters
pub fn fib_iter(n: i128) -> i128 {
fn helper(a: i128, b: i128, i: i128, n: i128) -> i128 {
if i == n {
b
} else {
helper(b, a + b, i + 1, n)
}
}
helper(0, 1, 1, n)
}
/*
The number of ways to change amount a using n kinds of coins equals
- the number of ways to change amount a using all but the first kind of coin, plus
- the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin
*/
fn count_change(amount: i128) -> i128 {
cc(amount, 6)
}
fn cc(amount: i128, coin_kind: i8) -> i128 {
if amount == 0 {
1
} else {
if amount < 0 || coin_kind == 0 {
0
} else {
cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind)
}
}
}
fn get_value(coin_kind: i8) -> i128 {
match coin_kind {
6 => 100,
5 => 50,
4 => 25,
3 => 10,
2 => 5,
1 => 1,
_ => 0,
}
}
/*
Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3.
Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f
by means of an iterative process.
*/
fn fn3(n: i128) -> i128 {
if n < 3 {
n
} else {
fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3)
}
}
fn fn3_iter(n: i128) -> i128 {
fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 {
if k == n {
p1
} else {
helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n)
}
}
return helper(0, 1, 2, 2, n);
}
// m >= n
pub fn pascal(m: i128, n: i128) -> i128 {
if n == 0 || m == n {
1
} else {
pascal(m - 1, n - 1) + pascal(m - 1, n)
}
}
// pascal triangle with iterative process
pub fn pascal_iter(m: usize, n: usize) -> i128 {
fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 {
if m == 0 || m == n {
1
} else {
if l == m {
pre_vec[n - 1] + pre_vec[n]
} else {
let mut new_vec = vec![];
for (i, _) in pre_vec.iter().enumerate() {
if i == 0 {
new_vec.push(1);
} else {
new_vec.push(pre_vec[i - 1] + pre_vec[i])
}
}
new_vec.push(1);
helper(m, n, l + 1, new_vec.to_vec())
}
}
}
helper(m, n, 2, vec![1, 1])
}
pub fn cube(x: f32) -> f32 {
x * x * x
}
fn p(x: f32) -> f32 {
3.0 * x - 4.0 * cube(x)
}
pub fn sine(angle: f32) -> f32 {
if f32::abs(angle) <= 0.1 {
angle
} else {
p(sine(angle / 3.0))
}
}
pub fn expt(b: i128, n: i128) -> i128 {
if n == 0 {
1
} else {
b * expt(b, n - 1)
}
}
pub fn expt_iter(b: i128, n: i128) -> i128 {
fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 {
if c == n {
p
} else {
helper(c + 1, b * p, b, n)
}
}
helper(0, 1, b, n)
}
pub fn is_even(n: i128) -> bool {
n % 2 == 0
}
pub fn square(i: i128) -> i128 {
i * i
}
pub fn half(i: i128) -> i128 {
i / 2
}
pub fn fast_expt(b: i128, n: i128) -> i128 {
if n == 1 {
b
} else {
if is_even(n) {
square(fast_expt(b, half(n)))
} else {
b * fast_expt(b, n - 1)
}
}
}
pub fn fast_expt_iter(b: i128, n: i128) -> i128 {
fn helper(p: i128, b: i128, n: i128) -> i128 {
if n == 0 {
p
} else {
if is_even(n) {
helper(p, square(b), half(n))
} else {
helper(b * p, b, n - 1)
}
}
}
helper(1, b, n)
}
pub fn double(x: i128) -> i128 {
x * 2
}
pub fn times(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
a + times(a, b - 1)
}
}
pub fn times_iter(a: i128, b: i128) -> i128 {
fn helper(s: i128, a: i128, b: i128) -> i128 {
if b == 0 {
s
} else {
if is_even(b) {
helper(s, double(a), half(b))
} else {
helper(s + a, a, b - 1)
}
}
}
helper(0, a, b)
}
pub fn fast_fib(n: i128) -> i128 {
fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 {
if count == 0 {
b
} else {
if is_even(count) {
helper(
a,
b,
square(p) + square(q),
2 * p * q + square(q),
half(count),
)
} else {
helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1)
}
}
}
helper(1, 0, 0, 1, n)
}
pub fn gcd(a: i128, b: i128) -> i128 {
if b == 0 {
a
} else {
gcd(b, a % b)
}
}
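// Euclid's algorithm traced on SICP's example: gcd(206, 40) -> gcd(40, 6)
// -> gcd(6, 4) -> gcd(4, 2) -> gcd(2, 0) = 2. Each step replaces the pair
// with (b, a % b), and the second component strictly decreases, so the
// recursion terminates.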
pub fn devides(test_divisor: i128, n: i128) -> bool {
n % test_divisor == 0
}
fn find_divisor(n: i128, test_divisor: i128) -> i128 {
if square(test_divisor) > n {
n
} else {
if devides(test_divisor, n) {
test_divisor
} else {
find_divisor(n, test_divisor + 1)
}
}
}
pub fn smallest_divisor(n: i128) -> i128 {
find_divisor(n, 2)
}
pub fn is_prime(n: i128) -> bool {
smallest_divisor(n) == n
}
pub fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// square after expmod, otherwise it will overflow easily
square(expmod(base, half(exp), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
// Fermat test
pub fn fermat_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n, n) == a
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n);
println!("fermat_test testing {}", a);
try_it(a, n)
}
pub fn fast_is_prime(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if fermat_test(n) {
fast_is_prime(n, times - 1)
} else {
false
}
}
}
// Exercise 1.22
fn timed_prime_test(n: i128) -> bool {
println!(" start testing: {}", n);
let now = SystemTime::now();
start_prime_test(n, now)
}
fn start_prime_test(n: i128, now: SystemTime) -> bool {
if is_prime(n) {
report_prime(now, n)
} else {
true
}
}
fn report_prime(now: SystemTime, n: i128) -> bool {
println!(" *** ");
println!(" prime number is: {}", n);
println!("Time used: {}", get_lapsed_time_millis(now));
/*
match now.elapsed() {
Ok(elapsed) => println!("Time used: {}", elapsed.as_millis()),
Err(e) => println!("Error: {:?}", e),
}
*/
false
}
fn get_lapsed_time_millis(then: SystemTime) -> u128 {
let new_now = SystemTime::now();
new_now
.duration_since(UNIX_EPOCH)
.expect("Time")
.as_millis()
- then.duration_since(UNIX_EPOCH).expect("Time").as_millis()
}
// `start` is assumed to be an odd number
fn search_for_prime(start: i128, count: i128) {
fn helper(start: i128, count: i128) {
if count == 0 {
return;
} else {
if timed_prime_test(start) {
helper(start + 2, count)
} else {
helper(start + 2, count - 1)
}
}
}
helper(start, count)
}
// Exercise 1.27
fn test_carmichael_number(n: i128) {
for i in 2..n {
if expmod(i, n, n) == i {
println!(" testing {}", i);
}
}
}
// Exercise 1.28 Miller-Rabin test
fn miller_rabin_test(n: i128, times: i128) -> bool {
fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// pass the intermediate result through trivial_test so that discovering a
// nontrivial square root of 1 mod m flags n as composite (the point of
// Miller-Rabin; trivial_test was previously defined but never called);
// square after expmod, otherwise it will overflow easily
square(trivial_test(expmod(base, half(exp), m), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
fn trivial_test(r: i128, m: i128) -> i128 {
if r == 1 || r == m - 1 {
r
} else if square(r) % m == 1 {
0
} else {
r
}
}
fn helper_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n - 1, n) == 1
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n - 1);
println!("miller_rabin testing {}", a);
try_it(a, n)
}
fn test_times(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if helper_test(n) {
test_times(n, times - 1)
} else {
false
}
}
}
test_times(n, times)
}
#[test]
fn functions_and_their_processes_tests() {
println!("{}", factorial(5));
println!("{}", fact_iter(5));
println!("{}", plus(5, 13));
println!("{}", ackermann(1, 10));
println!("{}", ackermann(2, 4));
println!("{}", ackermann(3, 3));
println!("{}", f(3));
println!("{}", g(3));
println!("{}", h(4));
println!("{}", fac(5));
println!("{}", fib(5));
println!("{}", fib_iter(5));
println!("{}", count_change(100));
println!("{}", fn3(3));
println!("{}", fn3(4));
println!("{}", fn3_iter(3));
println!("{}", fn3_iter(4));
println!("{}", pascal(3, 2));
println!("{}", pascal_iter(3, 2));
println!("{}", pascal(4, 2));
println!("{}", pascal_iter(4, 2));
println!("{}", expt(4, 2));
println!("{}", expt_iter(4, 2));
println!("{}", fast_expt(4, 2));
println!("{}", fast_expt_iter(4, 2));
println!("{}", times(4, 2));
println!("{}", times_iter(4, 2));
println!("{}", fast_fib(5));
println!("{}", gcd(2, 5));
println!("{}", smallest_divisor(45));
println!("{}", is_prime(5));
println!("{}", fermat_test(5));
println!("{}", timed_prime_test(16769023));
search_for_prime(1111, 3);
search_for_prime(11111, 3);
search_for_prime(111111, 3);
search_for_prime(1111111, 3);
search_for_prime(11111111, 3);
search_for_prime(111111111, 3);
// carmichael number
test_carmichael_number(2821);
println!("is 2821 prime by fermat_test? {}", fast_is_prime(2821, 100));
println!(
"is 2821 prime by miller_rabin test? {}",
miller_rabin_test(2821, 100)
);
}
lib.rs

pub use crate::intern_id::InternId;
pub use crate::interned::InternKey;
pub use crate::runtime::Runtime;
pub use crate::runtime::RuntimeId;
pub use crate::storage::Storage;
/// The base trait which your "query context" must implement. Gives
/// access to the salsa runtime, which you must embed into your query
/// context (along with whatever other state you may require).
pub trait Database: plumbing::DatabaseOps {
/// This function is invoked at key points in the salsa
/// runtime. It permits the database to be customized and to
/// inject logging or other custom behavior.
fn salsa_event(&self, event_fn: Event) {
#![allow(unused_variables)]
}
/// Starts unwinding the stack if the current revision is cancelled.
///
/// This method can be called by query implementations that perform
/// potentially expensive computations, in order to speed up propagation of
/// cancellation.
///
/// Cancellation will automatically be triggered by salsa on any query
/// invocation.
///
/// This method should not be overridden by `Database` implementors. A
/// `salsa_event` is emitted when this method is called, so that should be
/// used instead.
#[inline]
fn unwind_if_cancelled(&self) {
let runtime = self.salsa_runtime();
self.salsa_event(Event {
runtime_id: runtime.id(),
kind: EventKind::WillCheckCancellation,
});
let current_revision = runtime.current_revision();
let pending_revision = runtime.pending_revision();
log::debug!(
"unwind_if_cancelled: current_revision={:?}, pending_revision={:?}",
current_revision,
pending_revision
);
if pending_revision > current_revision {
runtime.unwind_cancelled();
}
}
/// Gives access to the underlying salsa runtime.
///
/// This method should not be overridden by `Database` implementors.
fn salsa_runtime(&self) -> &Runtime {
self.ops_salsa_runtime()
}
/// Gives access to the underlying salsa runtime.
///
/// This method should not be overridden by `Database` implementors.
fn salsa_runtime_mut(&mut self) -> &mut Runtime {
self.ops_salsa_runtime_mut()
}
}
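// Illustrative only (this helper is not part of salsa's API): a long-running
// computation can call `unwind_if_cancelled` periodically so that a pending
// newer revision cancels it promptly instead of waiting for it to finish.
#[allow(dead_code)]
fn sum_squares_cancellable<DB: Database + ?Sized>(db: &DB, inputs: &[u64]) -> u64 {
    let mut total = 0;
    for &x in inputs {
        db.unwind_if_cancelled(); // unwinds if a newer revision is pending
        total += x * x;
    }
    total
}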
/// The `Event` struct identifies various notable things that can
/// occur during salsa execution. Instances of this struct are given
/// to `salsa_event`.
pub struct Event {
/// The id of the snapshot that triggered the event. Usually
/// 1-to-1 with a thread, as well.
pub runtime_id: RuntimeId,
/// What sort of event was it.
pub kind: EventKind,
}
impl Event {
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me
where
D: plumbing::DatabaseOps,
{
EventDebug { event: self, db }
}
}
impl fmt::Debug for Event {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Event")
.field("runtime_id", &self.runtime_id)
.field("kind", &self.kind)
.finish()
}
}
struct EventDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
event: &'me Event,
db: &'me D,
}
impl<'me, D: ?Sized> fmt::Debug for EventDebug<'me, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Event")
.field("runtime_id", &self.event.runtime_id)
.field("kind", &self.event.kind.debug(self.db))
.finish()
}
}
/// An enum identifying the various kinds of events that can occur.
pub enum EventKind {
/// Occurs when we found that all inputs to a memoized value are
/// up-to-date and hence the value can be re-used without
/// executing the closure.
///
/// Executes before the "re-used" value is returned.
DidValidateMemoizedValue {
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that another thread (with id `other_runtime_id`) is processing the
/// given query (`database_key`), so we will block until they
/// finish.
///
/// Executes after we have registered with the other thread but
/// before they have answered us.
///
/// (NB: you can find the `id` of the current thread via the
/// `salsa_runtime`)
WillBlockOn {
/// The id of the runtime we will block on.
other_runtime_id: RuntimeId,
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that the function for this query will be executed.
/// This is either because it has never executed before or because
/// its inputs may be out of date.
WillExecute {
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that `unwind_if_cancelled` was called and salsa will check if
/// the current revision has been cancelled.
WillCheckCancellation,
}
impl EventKind {
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me
where
D: plumbing::DatabaseOps,
{
EventKindDebug { kind: self, db }
}
}
impl fmt::Debug for EventKind {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
EventKind::DidValidateMemoizedValue { database_key } => fmt
.debug_struct("DidValidateMemoizedValue")
.field("database_key", database_key)
.finish(),
EventKind::WillBlockOn {
other_runtime_id,
database_key,
} => fmt
.debug_struct("WillBlockOn")
.field("other_runtime_id", other_runtime_id)
.field("database_key", database_key)
.finish(),
EventKind::WillExecute { database_key } => fmt
.debug_struct("WillExecute")
.field("database_key", database_key)
.finish(),
EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(),
}
}
}
struct EventKindDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
kind: &'me EventKind,
db: &'me D,
}
impl<'me, D: ?Sized> fmt::Debug for EventKindDebug<'me, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
EventKind::DidValidateMemoizedValue { database_key } => fmt
.debug_struct("DidValidateMemoizedValue")
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillBlockOn {
other_runtime_id,
database_key,
} => fmt
.debug_struct("WillBlockOn")
.field("other_runtime_id", &other_runtime_id)
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillExecute { database_key } => fmt
.debug_struct("WillExecute")
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(),
}
}
}
/// Indicates a database that also supports parallel query
/// evaluation. All of Salsa's base query support is capable of
/// parallel execution, but for it to work, your query key/value types
/// must also be `Send`, as must any additional data in your database.
pub trait ParallelDatabase: Database + Send {
/// Creates a second handle to the database that holds the
/// database fixed at a particular revision. So long as this
/// "frozen" handle exists, any attempt to [`set`] an input will
/// block.
///
/// [`set`]: struct.QueryTable.html#method.set
///
/// This is the method you are meant to use most of the time in a
/// parallel setting where modifications may arise asynchronously
/// (e.g., a language server). In this context, it is common to
/// wish to "fork off" a snapshot of the database performing some
/// series of queries in parallel and arranging the results. Using
/// this method for that purpose ensures that those queries will
/// see a consistent view of the database (it is also advisable
/// for those queries to use the [`Runtime::unwind_if_cancelled`]
/// method to check for cancellation).
///
/// # Panics
///
/// It is not permitted to create a snapshot from inside of a
/// query. Attempting to do so will panic.
///
/// # Deadlock warning
///
/// The intended pattern for snapshots is that, once created, they
/// are sent to another thread and used from there. As such, the
/// `snapshot` acquires a "read lock" on the database --
/// therefore, so long as the `snapshot` is not dropped, any
/// attempt to `set` a value in the database will block. If the
/// `snapshot` is owned by the same thread that is attempting to
/// `set`, this will cause a problem.
///
/// # How to implement this
///
/// Typically, this method will create a second copy of your
/// database type (`MyDatabaseType`, in the example below),
/// cloning over each of the fields from `self` into this new
/// copy. For the field that stores the salsa runtime, you should
/// use [the `Runtime::snapshot` method][rfm] to create a snapshot of the
/// runtime. Finally, package up the result using `Snapshot::new`,
/// which is a simple wrapper type that only gives `&self` access
/// to the database within (thus preventing the use of methods
/// that may mutate the inputs):
///
/// [rfm]: struct.Runtime.html#method.snapshot
///
/// ```rust,ignore
/// impl ParallelDatabase for MyDatabaseType {
/// fn snapshot(&self) -> Snapshot<Self> {
/// Snapshot::new(
/// MyDatabaseType {
/// runtime: self.runtime.snapshot(self),
/// other_field: self.other_field.clone(),
/// }
/// )
/// }
/// }
/// ```
fn snapshot(&self) -> Snapshot<Self>;
}
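// Example (sketch): the typical snapshot workflow -- fork a frozen handle,
// move it to a worker thread, query there. `MyDatabaseType` and
// `all_diagnostics` are hypothetical; a `set` on the main thread while the
// worker runs will trigger cancellation in the worker's queries.
//
// ```rust,ignore
// let snapshot = db.snapshot();
// let worker = std::thread::spawn(move || {
//     // Sees one consistent revision for its whole lifetime.
//     snapshot.all_diagnostics()
// });
// let diagnostics = worker.join().unwrap();
// ```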
/// Simple wrapper struct that takes ownership of a database `DB` and
/// only gives `&self` access to it. See [the `snapshot` method][fm]
/// for more details.
///
/// [fm]: trait.ParallelDatabase.html#method.snapshot
#[derive(Debug)]
pub struct Snapshot<DB: ?Sized>
where
DB: ParallelDatabase,
{
db: DB,
}
impl<DB> Snapshot<DB>
where
DB: ParallelDatabase,
{
/// Creates a `Snapshot` that wraps the given database handle
/// `db`. From this point forward, only shared references to `db`
/// will be possible.
pub fn new(db: DB) -> Self {
Snapshot { db }
}
}
impl<DB> std::ops::Deref for Snapshot<DB>
where
DB: ParallelDatabase,
{
type Target = DB;
fn deref(&self) -> &DB {
&self.db
}
}
/// An integer that uniquely identifies a particular query instance within the
/// database. Used to track dependencies between queries. Fully ordered and
/// equatable but those orderings are arbitrary, and meant to be used only for
/// inserting into maps and the like.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DatabaseKeyIndex {
group_index: u16,
query_index: u16,
key_index: u32,
}
impl DatabaseKeyIndex {
/// Returns the index of the query group containing this key.
#[inline]
pub fn group_index(self) -> u16 {
self.group_index
}
/// Returns the index of the query within its query group.
#[inline]
pub fn query_index(self) -> u16 {
self.query_index
}
/// Returns the index of this particular query key within the query.
#[inline]
pub fn key_index(self) -> u32 {
self.key_index
}
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<D: ?Sized>(self, db: &D) -> impl std::fmt::Debug + '_
where
D: plumbing::DatabaseOps,
{
DatabaseKeyIndexDebug { index: self, db }
}
}
/// Helper type for `DatabaseKeyIndex::debug`
struct DatabaseKeyIndexDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
index: DatabaseKeyIndex,
db: &'me D,
}
impl<D: ?Sized> std::fmt::Debug for DatabaseKeyIndexDebug<'_, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.db.fmt_index(self.index, fmt)
}
}
/// Trait implemented by all of the "special types" associated with
/// each of your queries.
///
/// Base trait of `Query` that has a lifetime parameter to allow the `DynDb` to be non-'static.
pub trait QueryDb<'d>: Sized {
/// Dyn version of the associated trait for this query group.
type DynDb: ?Sized + Database + HasQueryGroup<Self::Group> + 'd;
/// Associated query group struct.
type Group: plumbing::QueryGroup<GroupStorage = Self::GroupStorage>;
/// Generated struct that contains storage for all queries in a group.
type GroupStorage;
}
/// Trait implemented by all of the "special types" associated with
/// each of your queries.
pub trait Query: Debug + Default + Sized + for<'d> QueryDb<'d> {
/// Type that you give as a parameter -- for queries with zero
/// or more than one input, this will be a tuple.
type Key: Clone + Debug + Hash + Eq;
/// What value does the query return?
type Value: Clone + Debug;
/// Internal struct storing the values for the query.
// type Storage: plumbing::QueryStorageOps<Self>;
type Storage;
/// A unique index identifying this query within the group.
const QUERY_INDEX: u16;
/// Name of the query method (e.g., `foo`)
const QUERY_NAME: &'static str;
/// Exact storage for this query from the storage for its group.
fn query_storage<'a>(
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
) -> &'a Arc<Self::Storage>;
/// Exact storage for this query from the storage for its group.
fn query_storage_mut<'a>(
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
) -> &'a Arc<Self::Storage>;
}
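// Example (sketch): these `Query` impls are normally generated by the
// `#[salsa::query_group]` macro rather than written by hand. A minimal
// group (names hypothetical):
//
// ```rust,ignore
// #[salsa::query_group(HelloWorldStorage)]
// trait HelloWorld {
//     #[salsa::input]
//     fn input_string(&self, key: ()) -> Arc<String>;
//
//     fn length(&self, key: ()) -> usize;
// }
//
// fn length(db: &dyn HelloWorld, key: ()) -> usize {
//     db.input_string(key).len() // memoized; re-runs only when the input changes
// }
// ```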
/// Return value from [the `query` method] on `Database`.
/// Gives access to various less common operations on queries.
///
/// [the `query` method]: trait.Database.html#method.query
pub struct QueryTable<'me, Q>
where
Q: Query,
{
db: &'me <Q as QueryDb<'me>>::DynDb,
storage: &'me Q::Storage,
}
impl<'me, Q> QueryTable<'me, Q>
where
Q: Query,
Q::Storage: QueryStorageOps<Q>,
{
/// Constructs a new `QueryTable`.
pub fn new(db: &'me <Q as QueryDb<'me>>::DynDb, storage: &'me Q::Storage) -> Self {
Self { db, storage }
}
/// Execute the query on a given input. Usually it's easier to
/// invoke the trait method directly. Note that for variadic
/// queries (those with no inputs, or those with more than one
/// input) the key will be a tuple.
pub fn get(&self, key: Q::Key) -> Q::Value {
self.storage.fetch(self.db, &key)
}
/// Completely clears the storage for this query.
///
/// This method breaks internal invariants of salsa, so any further queries
/// might return nonsense results. It is useful only in very specific
/// circumstances -- for example, when one wants to observe which values
/// are dropped together with the table.
pub fn purge(&self)
where
Q::Storage: plumbing::QueryStorageMassOps,
{
self.storage.purge();
}
}
/// Return value from [the `query_mut` method] on `Database`.
/// Gives access to the `set` method, notably, that is used to
/// set the value of an input query.
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub struct QueryTableMut<'me, Q>
where
Q: Query + 'me,
{
runtime: &'me mut Runtime,
storage: &'me Q::Storage,
}
impl<'me, Q> QueryTableMut<'me, Q>
where
Q: Query,
{
/// Constructs a new `QueryTableMut`.
pub fn new(runtime: &'me mut Runtime, storage: &'me Q::Storage) -> Self {
Self { runtime, storage }
}
/// Assign a value to an "input query". Must be used outside of
/// an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn | (&mut self, key: Q::Key, value: Q::Value)
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.set_with_durability(key, value, Durability::LOW);
}
/// Assign a value to an "input query", with the additional
/// promise that this value will **never change**. Must be used
/// outside of an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn set_with_durability(&mut self, key: Q::Key, value: Q::Value, durability: Durability)
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.storage.set(self.runtime, &key, value, durability);
}
/// Removes a value from an "input query". Must be used outside of
/// an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// # Panics
/// Panics if the value was not previously set by `set` or
/// `set_with_durability`.
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn remove(&mut self, key: Q::Key) -> Q::Value
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.storage.remove(self.runtime, &key)
}
/// Sets the size of LRU cache of values for this query table.
///
/// That is, at most `cap` values will be present in the table at the same
/// time. This helps with keeping maximum memory usage under control, at the
/// cost of potential extra recalculations of evicted values.
///
/// If `cap` is zero, all values are preserved; this is the default.
pub fn set_lru_capacity(&self, cap: usize)
where
Q::Storage: plumbing::LruQueryStorageOps,
{
self.storage.set_lru_capacity(cap);
}
/// Marks the computed value as outdated.
///
/// This causes salsa to re-execute the query function on the next access to
/// the query, even if all dependencies are up to date.
///
/// This is most commonly used as part of the [on-demand input
/// pattern](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html).
pub fn invalidate(&mut self, key: &Q::Key)
where
Q::Storage: plumbing::DerivedQueryStorageOps<Q>,
{
self.storage.invalidate(self.runtime, key)
}
}
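// Example (sketch): typical use of the mutable table, assuming
// macro-generated `InputStringQuery`/`LengthQuery` types and the `query_mut`
// method on the database (all names hypothetical):
//
// ```rust,ignore
// db.query_mut(InputStringQuery).set((), Arc::new("hello".into()));
// db.query_mut(LengthQuery).set_lru_capacity(128);
// db.query_mut(LengthQuery).invalidate(&()); // force re-execution on next read
// ```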
/// A panic payload indicating that execution of a salsa query was cancelled.
///
/// This can occur for a few reasons:
/// * the query was operating on revision R, and a pending write wants to move to revision R+1
/// * the query was blocked on another thread, and that thread panicked
#[derive(Debug)]
#[non_exhaustive]
pub enum Cancelled {
/// The query was operating on revision R, but there is a pending write to move to revision R+1.
#[non_exhaustive]
PendingWrite,
/// The query was blocked on another thread, and that thread panicked.
#[non_exhaustive]
PropagatedPanic,
}
impl Cancelled {
fn throw(self) -> ! {
// We use resume and not panic here to avoid running the panic
// hook (that is, to avoid collecting and printing backtrace).
std::panic::resume_unwind(Box::new(self));
}
/// Runs `f`, and catches any salsa cancellation.
pub fn catch<F, T>(f: F) -> Result<T, Cancelled>
where
F: FnOnce() -> T + UnwindSafe,
{
match panic::catch_unwind(f) {
Ok(t) => Ok(t),
Err(payload) => match payload.downcast() {
Ok(cancelled) => Err(*cancelled),
Err(payload) => panic::resume_unwind(payload),
},
}
}
}
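// Example (sketch): catching cancellation around a long-running query.
// `AssertUnwindSafe` is needed because the closure borrows the snapshot;
// `do_expensive_query` is hypothetical.
//
// ```rust,ignore
// use std::panic::AssertUnwindSafe;
//
// let snapshot = db.snapshot();
// match Cancelled::catch(AssertUnwindSafe(|| do_expensive_query(&snapshot))) {
//     Ok(value) => println!("result: {:?}", value),
//     Err(cancelled) => eprintln!("query was cancelled: {}", cancelled),
// }
// ```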
impl std::fmt::Display for Cancelled {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let why = match self {
Cancelled::PendingWrite => "pending write",
Cancelled::PropagatedPanic => "propagated panic",
};
f.write_str("cancelled because of ")?;
f.write_str(why)
}
}
impl std::error::Error for Cancelled {}
/// Captures the participants of a cycle that occurred when executing a query.
///
/// This type is meant to be used to help give meaningful error messages to the
/// user or to help salsa developers figure out why their program is resulting
/// in a computation cycle.
///
/// It is used in a few ways:
///
/// * During [cycle recovery](https://salsa-rs.github.io/salsa/cycles/fallback.html),
/// where it is given to the fallback function.
/// * As the panic value when an unexpected cycle (i.e., a cycle where one or more participants
/// lacks cycle recovery information) occurs.
///
/// You can read more about cycle handling in
/// the [salsa book](https://salsa-rs.github.io/salsa/cycles.html).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Cycle {
participants: plumbing::CycleParticipants,
}
impl Cycle {
pub(crate) fn new(participants: plumbing::CycleParticipants) -> Self {
Self { participants }
}
/// True if two `Cycle` values represent the same cycle.
pub(crate) fn is(&self, cycle: &Cycle) -> bool {
Arc::ptr_eq(&self.participants, &cycle.participants)
}
pub(crate) fn throw(self) -> ! {
log::debug!("throwing cycle {:?}", self);
std::panic::resume_unwind(Box::new(self))
}
pub(crate) fn catch<T>(execute: impl FnOnce() -> T) -> Result<T, Cycle> {
match std::panic::catch_unwind(AssertUnwindSafe(execute)) {
Ok(v) => Ok(v),
Err(err) => match err.downcast::<Cycle>() {
Ok(cycle) => Err(*cycle),
Err(other) => std::panic::resume_unwind(other),
},
}
}
/// Iterate over the [`DatabaseKeyIndex`] for each query participating
/// in the cycle. The start point of this iteration within the cycle
/// is arbitrary but deterministic, but the ordering is otherwise determined
/// by the execution.
pub fn participant_keys(&self) -> impl Iterator<Item = DatabaseKeyIndex> + '_ {
| set | identifier_name |
lib.rs | InternId;
pub use crate::interned::InternKey;
pub use crate::runtime::Runtime;
pub use crate::runtime::RuntimeId;
pub use crate::storage::Storage;
/// The base trait which your "query context" must implement. Gives
/// access to the salsa runtime, which you must embed into your query
/// context (along with whatever other state you may require).
pub trait Database: plumbing::DatabaseOps {
/// This function is invoked at key points in the salsa
/// runtime. It permits the database to be customized and to
/// inject logging or other custom behavior.
fn salsa_event(&self, event_fn: Event) {
#![allow(unused_variables)]
}
/// Starts unwinding the stack if the current revision is cancelled.
///
/// This method can be called by query implementations that perform
/// potentially expensive computations, in order to speed up propagation of
/// cancellation.
///
/// Cancellation will automatically be triggered by salsa on any query
/// invocation.
///
/// This method should not be overridden by `Database` implementors. A
/// `salsa_event` is emitted when this method is called, so that should be
/// used instead.
#[inline]
fn unwind_if_cancelled(&self) {
let runtime = self.salsa_runtime();
self.salsa_event(Event {
runtime_id: runtime.id(),
kind: EventKind::WillCheckCancellation,
});
let current_revision = runtime.current_revision();
let pending_revision = runtime.pending_revision();
log::debug!(
"unwind_if_cancelled: current_revision={:?}, pending_revision={:?}",
current_revision,
pending_revision
); |
/// Gives access to the underlying salsa runtime.
///
/// This method should not be overridden by `Database` implementors.
fn salsa_runtime(&self) -> &Runtime {
self.ops_salsa_runtime()
}
/// Gives access to the underlying salsa runtime.
///
/// This method should not be overridden by `Database` implementors.
fn salsa_runtime_mut(&mut self) -> &mut Runtime {
self.ops_salsa_runtime_mut()
}
}
/// The `Event` struct identifies various notable things that can
/// occur during salsa execution. Instances of this struct are given
/// to `salsa_event`.
pub struct Event {
/// The id of the snapshot that triggered the event. Usually
/// 1-to-1 with a thread, as well.
pub runtime_id: RuntimeId,
/// What sort of event was it.
pub kind: EventKind,
}
impl Event {
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me
where
D: plumbing::DatabaseOps,
{
EventDebug { event: self, db }
}
}
impl fmt::Debug for Event {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Event")
.field("runtime_id", &self.runtime_id)
.field("kind", &self.kind)
.finish()
}
}
struct EventDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
event: &'me Event,
db: &'me D,
}
impl<'me, D: ?Sized> fmt::Debug for EventDebug<'me, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Event")
.field("runtime_id", &self.event.runtime_id)
.field("kind", &self.event.kind.debug(self.db))
.finish()
}
}
/// An enum identifying the various kinds of events that can occur.
pub enum EventKind {
/// Occurs when we found that all inputs to a memoized value are
/// up-to-date and hence the value can be re-used without
/// executing the closure.
///
/// Executes before the "re-used" value is returned.
DidValidateMemoizedValue {
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that another thread (with id `other_runtime_id`) is processing the
/// given query (`database_key`), so we will block until they
/// finish.
///
/// Executes after we have registered with the other thread but
/// before they have answered us.
///
/// (NB: you can find the `id` of the current thread via the
/// `salsa_runtime`)
WillBlockOn {
/// The id of the runtime we will block on.
other_runtime_id: RuntimeId,
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that the function for this query will be executed.
/// This is either because it has never executed before or because
/// its inputs may be out of date.
WillExecute {
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that `unwind_if_cancelled` was called and salsa will check if
/// the current revision has been cancelled.
WillCheckCancellation,
}
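// Example (sketch): a `salsa_event` override that counts query executions
// by matching on `EventKind` (the atomic `executions` counter field on the
// database is an assumption):
//
// ```rust,ignore
// fn salsa_event(&self, event: Event) {
//     if let EventKind::WillExecute { database_key } = event.kind {
//         self.executions.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
//         log::debug!("executing {:?}", database_key.debug(self));
//     }
// }
// ```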
impl EventKind {
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me
where
D: plumbing::DatabaseOps,
{
EventKindDebug { kind: self, db }
}
}
impl fmt::Debug for EventKind {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
EventKind::DidValidateMemoizedValue { database_key } => fmt
.debug_struct("DidValidateMemoizedValue")
.field("database_key", database_key)
.finish(),
EventKind::WillBlockOn {
other_runtime_id,
database_key,
} => fmt
.debug_struct("WillBlockOn")
.field("other_runtime_id", other_runtime_id)
.field("database_key", database_key)
.finish(),
EventKind::WillExecute { database_key } => fmt
.debug_struct("WillExecute")
.field("database_key", database_key)
.finish(),
EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(),
}
}
}
struct EventKindDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
kind: &'me EventKind,
db: &'me D,
}
impl<'me, D: ?Sized> fmt::Debug for EventKindDebug<'me, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
EventKind::DidValidateMemoizedValue { database_key } => fmt
.debug_struct("DidValidateMemoizedValue")
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillBlockOn {
other_runtime_id,
database_key,
} => fmt
.debug_struct("WillBlockOn")
.field("other_runtime_id", &other_runtime_id)
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillExecute { database_key } => fmt
.debug_struct("WillExecute")
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(),
}
}
}
/// Indicates a database that also supports parallel query
/// evaluation. All of Salsa's base query support is capable of
/// parallel execution, but for it to work, your query key/value types
/// must also be `Send`, as must any additional data in your database.
pub trait ParallelDatabase: Database + Send {
/// Creates a second handle to the database that holds the
/// database fixed at a particular revision. So long as this
/// "frozen" handle exists, any attempt to [`set`] an input will
/// block.
///
/// [`set`]: struct.QueryTable.html#method.set
///
/// This is the method you are meant to use most of the time in a
/// parallel setting where modifications may arise asynchronously
/// (e.g., a language server). In this context, it is common to
/// wish to "fork off" a snapshot of the database performing some
/// series of queries in parallel and arranging the results. Using
/// this method for that purpose ensures that those queries will
/// see a consistent view of the database (it is also advisable
/// for those queries to use the [`Runtime::unwind_if_cancelled`]
/// method to check for cancellation).
///
/// # Panics
///
/// It is not permitted to create a snapshot from inside of a
/// query. Attempting to do so will panic.
///
/// # Deadlock warning
///
/// The intended pattern for snapshots is that, once created, they
/// are sent to another thread and used from there. As such, the
/// `snapshot` acquires a "read lock" on the database --
/// therefore, so long as the `snapshot` is not dropped, any
/// attempt to `set` a value in the database will block. If the
/// `snapshot` is owned by the same thread that is attempting to
/// `set`, this will cause a problem.
///
/// # How to implement this
///
/// Typically, this method will create a second copy of your
/// database type (`MyDatabaseType`, in the example below),
/// cloning over each of the fields from `self` into this new
/// copy. For the field that stores the salsa runtime, you should
/// use [the `Runtime::snapshot` method][rfm] to create a snapshot of the
/// runtime. Finally, package up the result using `Snapshot::new`,
/// which is a simple wrapper type that only gives `&self` access
/// to the database within (thus preventing the use of methods
/// that may mutate the inputs):
///
/// [rfm]: struct.Runtime.html#method.snapshot
///
/// ```rust,ignore
/// impl ParallelDatabase for MyDatabaseType {
/// fn snapshot(&self) -> Snapshot<Self> {
/// Snapshot::new(
/// MyDatabaseType {
/// runtime: self.runtime.snapshot(self),
/// other_field: self.other_field.clone(),
/// }
/// )
/// }
/// }
/// ```
fn snapshot(&self) -> Snapshot<Self>;
}
/// Simple wrapper struct that takes ownership of a database `DB` and
/// only gives `&self` access to it. See [the `snapshot` method][fm]
/// for more details.
///
/// [fm]: trait.ParallelDatabase.html#method.snapshot
#[derive(Debug)]
pub struct Snapshot<DB: ?Sized>
where
DB: ParallelDatabase,
{
db: DB,
}
impl<DB> Snapshot<DB>
where
DB: ParallelDatabase,
{
/// Creates a `Snapshot` that wraps the given database handle
/// `db`. From this point forward, only shared references to `db`
/// will be possible.
pub fn new(db: DB) -> Self {
Snapshot { db }
}
}
impl<DB> std::ops::Deref for Snapshot<DB>
where
DB: ParallelDatabase,
{
type Target = DB;
fn deref(&self) -> &DB {
&self.db
}
}
/// An integer that uniquely identifies a particular query instance within the
/// database. Used to track dependencies between queries. Fully ordered and
/// equatable but those orderings are arbitrary, and meant to be used only for
/// inserting into maps and the like.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DatabaseKeyIndex {
group_index: u16,
query_index: u16,
key_index: u32,
}
impl DatabaseKeyIndex {
/// Returns the index of the query group containing this key.
#[inline]
pub fn group_index(self) -> u16 {
self.group_index
}
/// Returns the index of the query within its query group.
#[inline]
pub fn query_index(self) -> u16 {
self.query_index
}
/// Returns the index of this particular query key within the query.
#[inline]
pub fn key_index(self) -> u32 {
self.key_index
}
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<D: ?Sized>(self, db: &D) -> impl std::fmt::Debug + '_
where
D: plumbing::DatabaseOps,
{
DatabaseKeyIndexDebug { index: self, db }
}
}
/// Helper type for `DatabaseKeyIndex::debug`
struct DatabaseKeyIndexDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
index: DatabaseKeyIndex,
db: &'me D,
}
impl<D: ?Sized> std::fmt::Debug for DatabaseKeyIndexDebug<'_, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.db.fmt_index(self.index, fmt)
}
}
/// Trait implemented by all of the "special types" associated with
/// each of your queries.
///
/// Base trait of `Query` that has a lifetime parameter to allow the `DynDb` to be non-'static.
pub trait QueryDb<'d>: Sized {
/// Dyn version of the associated trait for this query group.
type DynDb: ?Sized + Database + HasQueryGroup<Self::Group> + 'd;
/// Associated query group struct.
type Group: plumbing::QueryGroup<GroupStorage = Self::GroupStorage>;
/// Generated struct that contains storage for all queries in a group.
type GroupStorage;
}
/// Trait implemented by all of the "special types" associated with
/// each of your queries.
pub trait Query: Debug + Default + Sized + for<'d> QueryDb<'d> {
/// Type that you give as a parameter -- for queries with zero
/// or more than one input, this will be a tuple.
type Key: Clone + Debug + Hash + Eq;
/// What value does the query return?
type Value: Clone + Debug;
/// Internal struct storing the values for the query.
// type Storage: plumbing::QueryStorageOps<Self>;
type Storage;
/// A unique index identifying this query within the group.
const QUERY_INDEX: u16;
/// Name of the query method (e.g., `foo`)
const QUERY_NAME: &'static str;
/// Exact storage for this query from the storage for its group.
fn query_storage<'a>(
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
) -> &'a Arc<Self::Storage>;
/// Exact storage for this query from the storage for its group.
fn query_storage_mut<'a>(
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
) -> &'a Arc<Self::Storage>;
}
/// Return value from [the `query` method] on `Database`.
/// Gives access to various less common operations on queries.
///
/// [the `query` method]: trait.Database.html#method.query
pub struct QueryTable<'me, Q>
where
Q: Query,
{
db: &'me <Q as QueryDb<'me>>::DynDb,
storage: &'me Q::Storage,
}
impl<'me, Q> QueryTable<'me, Q>
where
Q: Query,
Q::Storage: QueryStorageOps<Q>,
{
/// Constructs a new `QueryTable`.
pub fn new(db: &'me <Q as QueryDb<'me>>::DynDb, storage: &'me Q::Storage) -> Self {
Self { db, storage }
}
/// Execute the query on a given input. Usually it's easier to
/// invoke the trait method directly. Note that for variadic
/// queries (those with no inputs, or those with more than one
/// input) the key will be a tuple.
pub fn get(&self, key: Q::Key) -> Q::Value {
self.storage.fetch(self.db, &key)
}
/// Completely clears the storage for this query.
///
/// This method breaks internal invariants of salsa, so any further queries
/// might return nonsense results. It is useful only in very specific
/// circumstances -- for example, when one wants to observe which values
/// are dropped together with the table.
pub fn purge(&self)
where
Q::Storage: plumbing::QueryStorageMassOps,
{
self.storage.purge();
}
}
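// Example (sketch): `get` via the query table is equivalent to invoking the
// generated trait method directly (`length`/`LengthQuery` are hypothetical
// macro-generated names):
//
// ```rust,ignore
// let v1 = db.length(());                 // generated trait method
// let v2 = db.query(LengthQuery).get(()); // explicit table access
// assert_eq!(v1, v2);
// ```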
/// Return value from [the `query_mut` method] on `Database`.
/// Gives access to the `set` method, notably, that is used to
/// set the value of an input query.
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub struct QueryTableMut<'me, Q>
where
Q: Query + 'me,
{
runtime: &'me mut Runtime,
storage: &'me Q::Storage,
}
impl<'me, Q> QueryTableMut<'me, Q>
where
Q: Query,
{
/// Constructs a new `QueryTableMut`.
pub fn new(runtime: &'me mut Runtime, storage: &'me Q::Storage) -> Self {
Self { runtime, storage }
}
/// Assign a value to an "input query". Must be used outside of
/// an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn set(&mut self, key: Q::Key, value: Q::Value)
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.set_with_durability(key, value, Durability::LOW);
}
/// Assign a value to an "input query", with the additional
/// promise that this value will **never change**. Must be used
/// outside of an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn set_with_durability(&mut self, key: Q::Key, value: Q::Value, durability: Durability)
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.storage.set(self.runtime, &key, value, durability);
}
/// Removes a value from an "input query". Must be used outside of
/// an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// # Panics
/// Panics if the value was not previously set by `set` or
/// `set_with_durability`.
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn remove(&mut self, key: Q::Key) -> Q::Value
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.storage.remove(self.runtime, &key)
}
/// Sets the size of LRU cache of values for this query table.
///
/// That is, at most `cap` values will be present in the table at the same
/// time. This helps with keeping maximum memory usage under control, at the
/// cost of potential extra recalculations of evicted values.
///
/// If `cap` is zero, all values are preserved; this is the default.
pub fn set_lru_capacity(&self, cap: usize)
where
Q::Storage: plumbing::LruQueryStorageOps,
{
self.storage.set_lru_capacity(cap);
}
/// Marks the computed value as outdated.
///
/// This causes salsa to re-execute the query function on the next access to
/// the query, even if all dependencies are up to date.
///
/// This is most commonly used as part of the [on-demand input
/// pattern](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html).
pub fn invalidate(&mut self, key: &Q::Key)
where
Q::Storage: plumbing::DerivedQueryStorageOps<Q>,
{
self.storage.invalidate(self.runtime, key)
}
}
/// A panic payload indicating that execution of a salsa query was cancelled.
///
/// This can occur for a few reasons:
/// * the query was operating on revision R, and a pending write wants to move to revision R+1
/// * the query was blocked on another thread, and that thread panicked
#[derive(Debug)]
#[non_exhaustive]
pub enum Cancelled {
/// The query was operating on revision R, but there is a pending write to move to revision R+1.
#[non_exhaustive]
PendingWrite,
/// The query was blocked on another thread, and that thread panicked.
#[non_exhaustive]
PropagatedPanic,
}
impl Cancelled {
fn throw(self) -> ! {
// We use resume and not panic here to avoid running the panic
// hook (that is, to avoid collecting and printing backtrace).
std::panic::resume_unwind(Box::new(self));
}
/// Runs `f`, and catches any salsa cancellation.
pub fn catch<F, T>(f: F) -> Result<T, Cancelled>
where
F: FnOnce() -> T + UnwindSafe,
{
match panic::catch_unwind(f) {
Ok(t) => Ok(t),
Err(payload) => match payload.downcast() {
Ok(cancelled) => Err(*cancelled),
Err(payload) => panic::resume_unwind(payload),
},
}
}
}
impl std::fmt::Display for Cancelled {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let why = match self {
Cancelled::PendingWrite => "pending write",
Cancelled::PropagatedPanic => "propagated panic",
};
f.write_str("cancelled because of ")?;
f.write_str(why)
}
}
impl std::error::Error for Cancelled {}
/// Captures the participants of a cycle that occurred when executing a query.
///
/// This type is meant to be used to help give meaningful error messages to the
/// user or to help salsa developers figure out why their program is resulting
/// in a computation cycle.
///
/// It is used in a few ways:
///
/// * During [cycle recovery](https://salsa-rs.github.io/salsa/cycles/fallback.html),
/// where it is given to the fallback function.
/// * As the panic value when an unexpected cycle (i.e., a cycle where one or more participants
/// lacks cycle recovery information) occurs.
///
/// You can read more about cycle handling in
/// the [salsa book](https://salsa-rs.github.io/salsa/cycles.html).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Cycle {
participants: plumbing::CycleParticipants,
}
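// Example (sketch): opting a derived query into cycle recovery with the
// `#[salsa::cycle]` attribute; the fallback function receives this `Cycle`
// value. All names are hypothetical, and the exact recovery-function
// signature is an assumption.
//
// ```rust,ignore
// #[salsa::query_group(AstStorage)]
// trait Ast {
//     #[salsa::cycle(recover_expand)]
//     fn expand(&self, id: u32) -> u32;
// }
//
// fn recover_expand(_db: &dyn Ast, cycle: &salsa::Cycle, _id: &u32) -> u32 {
//     log::warn!("cycle detected: {:?}", cycle);
//     0 // fallback value
// }
// ```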
impl Cycle {
pub(crate) fn new(participants: plumbing::CycleParticipants) -> Self {
Self { participants }
}
/// True if two `Cycle` values represent the same cycle.
pub(crate) fn is(&self, cycle: &Cycle) -> bool {
Arc::ptr_eq(&self.participants, &cycle.participants)
}
pub(crate) fn throw(self) -> ! {
log::debug!("throwing cycle {:?}", self);
std::panic::resume_unwind(Box::new(self))
}
pub(crate) fn catch<T>(execute: impl FnOnce() -> T) -> Result<T, Cycle> {
match std::panic::catch_unwind(AssertUnwindSafe(execute)) {
Ok(v) => Ok(v),
Err(err) => match err.downcast::<Cycle>() {
Ok(cycle) => Err(*cycle),
Err(other) => std::panic::resume_unwind(other),
},
}
}
/// Iterate over the [`DatabaseKeyIndex`] for each query participating
/// in the cycle. The start point of this iteration within the cycle
/// is arbitrary but deterministic, but the ordering is otherwise determined
/// by the execution.
pub fn participant_keys(&self) -> impl Iterator<Item = DatabaseKeyIndex> + '_ {
| if pending_revision > current_revision {
runtime.unwind_cancelled();
}
} | random_line_split |
pwm.rs | use std::{
fmt::Display,
fs,
path::{Path, PathBuf},
str::FromStr,
time::Duration,
};
use thiserror::Error;
use tracing::{debug, instrument};
/// Everything that can go wrong.
#[derive(Error, Debug)]
pub enum PwmError {
#[error("{0:?} not found")]
ControllerNotFound(Controller),
#[error("{0:?}/{1:?} not found")]
ChannelNotFound(Controller, Channel),
#[error("{0:?} not exported")]
NotExported(Controller),
#[error("failed to {0:?}: {1}")]
Sysfs(Access, #[source] std::io::Error),
#[error("duty cycle value must not be greater than the period value")]
DutyCycleGreaterThanPeriod,
#[error("legal polarity values: 'normal', 'inversed'")]
InvalidPolarity,
#[error("{0} cannot be changed while channel is enabled")]
IllegalChangeWhileEnabled(&'static str),
#[error("expected boolean value, got {0:?}")]
NotBoolean(String),
#[error("expected a duration in nanoseconds, got {0:?}: {1}")]
NotADuration(String, #[source] std::num::ParseIntError),
}
/// Used in PwmError to format sysfs related errors.
#[derive(Debug)]
pub enum Access {
Read(PathBuf),
Write(PathBuf),
}
/// Exposes PWM functionality.
///
/// Since the Linux kernel exposes PWM controllers and their settings through
/// sysfs, PWM operations are just file reads and writes. To allow testing with
/// a real file system but outside of sysfs, the `sysfs_root` property may be
/// used to "offset" those operations to an alternative directory.
///
/// Documentation on Linux PWM sysfs:
/// <https://www.kernel.org/doc/html/latest/driver-api/pwm.html>
#[derive(Debug)]
pub struct Pwm {
sysfs_root: PathBuf,
}
/// A PWM controller (a.k.a. PWM chip) is identified by a non-negative number.
#[derive(Debug, Clone)]
pub struct Controller(pub u32);
/// PWM controllers expose channels, which are also identified by non-negative numbers.
#[derive(Debug, Clone)]
pub struct Channel(pub u32);
type Result<T> = std::result::Result<T, PwmError>;
impl Pwm {
/// Initialize PWM.
pub fn new() -> Self {
Self::with_sysfs_root(PathBuf::from("/sys/class/pwm"))
}
/// Initialize PWM with an alternative sysfs directory, for testing.
pub fn with_sysfs_root(sysfs_root: PathBuf) -> Self {
if !sysfs_root.exists() {
panic!("sysfs root does not exist: {:?}", sysfs_root);
}
Self { sysfs_root }
}
/// Returns the number of channels for the given controller.
#[instrument]
pub fn npwm(&self, controller: &Controller) -> Result<u32> {
self.controller_file(controller, "npwm")
.and_then(|path| read(&path))
.map(|s| {
s.trim()
.parse::<u32>()
.expect("npwm expected to contain the number of channels")
})
}
/// Returns whether a controller's channels are ready to be used.
#[instrument]
pub fn is_exported(&self, controller: &Controller) -> Result<bool> {
// A controller is exported if the channel subdirectories are there.
// Since a controller without any channel doesn't make sense, it's
// enough to check for the existence of the first channel's directory.
match self.channel_dir(controller, &Channel(0)) {
Ok(_) => Ok(true),
Err(PwmError::NotExported(_)) => Ok(false),
Err(e) => Err(e),
}
}
/// Export a PWM controller, which enables access to its channels.
#[instrument]
pub fn export(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "export")
.and_then(|path| write(&path, "1"))
}
/// Unexport a PWM controller, which disables access to its channels.
#[instrument]
pub fn unexport(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "unexport")
.and_then(|path| write(&path, "1"))
}
/// Returns whether a controller's channel is enabled.
#[instrument]
pub fn is_enabled(&self, controller: &Controller, channel: &Channel) -> Result<bool> {
self.channel_file(controller, channel, "enable")
.and_then(|path| read(&path))
.and_then(parse_bool)
}
/// Enable a channel.
#[instrument]
pub fn enable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "1"))
}
/// Disable a channel.
#[instrument]
pub fn disable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "0"))
}
/// The total period of the PWM signal (read/write). Value is in nanoseconds
/// and is the sum of the active and inactive time of the PWM.
#[instrument]
pub fn set_period(
&mut self,
controller: Controller,
channel: Channel,
period: Duration,
) -> Result<()> {
let duty_cycle = self
.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "period")
.and_then(|path| write(&path, &period.as_nanos().to_string()))
}
/// The active time of the PWM signal (read/write). Value is in nanoseconds
/// and must be less than the period.
#[instrument]
pub fn set_duty_cycle(
&mut self,
controller: Controller,
channel: Channel,
duty_cycle: Duration,
) -> Result<()> {
let period = self
.channel_file(&controller, &channel, "period")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| write(&path, &duty_cycle.as_nanos().to_string()))
}
/// Changes the polarity of the PWM signal (read/write). Writes to this
/// property only work if the PWM chip supports changing the polarity. The
/// polarity can only be changed if the PWM is not enabled. Value is the
/// string “normal” or “inversed”.
#[instrument]
pub fn set_polarity(
&mut self,
controller: Controller,
channel: Channel,
polarity: Polarity,
) -> Result<()> {
// setting polarity is only allowed if channel is disabled:
if self.is_enabled(&controller, &channel)? {
return Err(PwmError::IllegalChangeWhileEnabled("polarity"));
}
self.channel_file(&controller, &channel, "polarity")
.and_then(|path| write(&path, &polarity.to_string()))
}
fn controller_dir(&self, controller: &Controller) -> Result<PathBuf> {
let path = self.sysfs_root.join(format!("pwmchip{}", controller.0));
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn controller_file(&self, controller: &Controller, fname: &str) -> Result<PathBuf> { | if path.is_file() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn channel_dir(&self, controller: &Controller, channel: &Channel) -> Result<PathBuf> {
let n_pwm = self.npwm(controller)?;
if channel.0 >= n_pwm {
return Err(PwmError::ChannelNotFound(
controller.clone(),
channel.clone(),
));
}
let path = self
.controller_dir(controller)
.map(|controller| controller.join(format!("pwm{}", channel.0)))?;
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
fn channel_file(
&self,
controller: &Controller,
channel: &Channel,
fname: &str,
) -> Result<PathBuf> {
let path = self
.channel_dir(controller, channel)
.map(|channel| channel.join(fname))?;
if path.is_file() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
}
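// Example (sketch): generating a 50 Hz signal with a 1.5 ms pulse on
// controller 0, channel 0. The values are illustrative; running it for real
// requires a PWM-capable board and permission to write to sysfs.
//
// ```rust,ignore
// let mut pwm = Pwm::new();
// if !pwm.is_exported(&Controller(0))? {
//     pwm.export(Controller(0))?;
// }
// pwm.set_period(Controller(0), Channel(0), Duration::from_millis(20))?;
// pwm.set_duty_cycle(Controller(0), Channel(0), Duration::from_micros(1500))?;
// pwm.enable(Controller(0), Channel(0))?;
// ```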
fn read(path: &Path) -> Result<String> {
fs::read_to_string(path).map_err(|e| PwmError::Sysfs(Access::Read(path.to_owned()), e))
}
fn write(path: &Path, contents: &str) -> Result<()> {
debug!("writing to {:?}", path);
fs::write(path, contents).map_err(|e| PwmError::Sysfs(Access::Write(path.to_owned()), e))
}
fn parse_bool(s: String) -> Result<bool> {
// sysfs compatible according to http://lkml.iu.edu/hypermail/linux/kernel/1103.2/02488.html
match s.trim_end().to_lowercase().as_ref() {
"1" | "y" | "yes" | "true" => Ok(true),
"0" | "n" | "no" | "false" | "" => Ok(false),
_ => Err(PwmError::NotBoolean(s)),
}
}
fn parse_duration(s: String) -> Result<Duration> {
s.trim_end()
.parse::<u64>()
.map_err(|e| PwmError::NotADuration(s, e))
.map(Duration::from_nanos)
}
#[derive(Debug)]
pub enum Polarity {
Normal,
Inversed,
}
impl Display for Polarity {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Polarity::*;
match *self {
Normal => write!(f, "normal"),
Inversed => write!(f, "inversed"),
}
}
}
impl FromStr for Polarity {
type Err = PwmError;
fn from_str(s: &str) -> Result<Self> {
use Polarity::*;
match s {
"normal" => Ok(Normal),
"inversed" => Ok(Inversed),
_ => Err(PwmError::InvalidPolarity),
}
}
}
#[cfg(test)]
mod should {
use super::*;
use temp_dir::TempDir;
#[test]
fn fail_if_controller_not_found() {
let tmp = TempDir::new().unwrap();
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
assert!(matches!(
pwm.export(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
assert!(matches!(
pwm.unexport(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
}
#[test]
fn export_and_unexport_a_controller() {
let tmp = TempDir::new().unwrap();
let chip = tmp.child("pwmchip0");
fs::create_dir(&chip).unwrap();
let export = touch(chip.join("export"));
let unexport = touch(chip.join("unexport"));
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
pwm.export(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&export).unwrap(), "1");
pwm.unexport(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&unexport).unwrap(), "1");
}
fn touch(path: PathBuf) -> PathBuf {
fs::write(&path, b"").unwrap();
path
}
} | let path = self
.sysfs_root
.join(format!("pwmchip{}/{}", controller.0, fname)); | random_line_split |
pwm.rs | use std::{
fmt::Display,
fs,
path::{Path, PathBuf},
str::FromStr,
time::Duration,
};
use thiserror::Error;
use tracing::{debug, instrument};
/// Everything that can go wrong.
#[derive(Error, Debug)]
pub enum PwmError {
#[error("{0:?} not found")]
ControllerNotFound(Controller),
#[error("{0:?}/{1:?} not found")]
ChannelNotFound(Controller, Channel),
#[error("{0:?} not exported")]
NotExported(Controller),
#[error("failed to {0:?}: {1}")]
Sysfs(Access, #[source] std::io::Error),
#[error("duty cycle value must not be greater than the period value")]
DutyCycleGreaterThanPeriod,
#[error("legal polarity values: 'normal', 'inversed'")]
InvalidPolarity,
#[error("{0} cannot be changed while channel is enabled")]
IllegalChangeWhileEnabled(&'static str),
#[error("expected boolean value, got {0:?}")]
NotBoolean(String),
#[error("expected a duration in nanoseconds, got {0:?}: {1}")]
NotADuration(String, #[source] std::num::ParseIntError),
}
/// Used in PwmError to format sysfs related errors.
#[derive(Debug)]
pub enum Access {
Read(PathBuf),
Write(PathBuf),
}
/// Exposes PWM functionality.
///
/// Since the Linux kernel exposes PWM controllers and their settings through
/// sysfs, PWM operations are just file reads and writes. To allow testing with
/// a real file system but outside of sysfs, the `sysfs_root` property may be
/// used to "offset" those operations to an alternative directory.
///
/// Documentation on Linux PWM sysfs:
/// <https://www.kernel.org/doc/html/latest/driver-api/pwm.html>
#[derive(Debug)]
pub struct Pwm {
sysfs_root: PathBuf,
}
/// A PWM controller (a.k.a. PWM chip) is identified by a non-negative number.
#[derive(Debug, Clone)]
pub struct Controller(pub u32);
/// PWM controllers expose channels, which are also identified by non-negative numbers.
#[derive(Debug, Clone)]
pub struct Channel(pub u32);
type Result<T> = std::result::Result<T, PwmError>;
impl Pwm {
/// Initialize PWM.
pub fn new() -> Self {
Self::with_sysfs_root(PathBuf::from("/sys/class/pwm"))
}
/// Initialize PWM with an alternative sysfs directory, for testing.
pub fn with_sysfs_root(sysfs_root: PathBuf) -> Self {
if !sysfs_root.exists() {
panic!("sysfs root does not exist: {:?}", sysfs_root);
}
Self { sysfs_root }
}
/// Returns the number of channels for the given controller.
#[instrument]
pub fn npwm(&self, controller: &Controller) -> Result<u32> {
self.controller_file(controller, "npwm")
.and_then(|path| read(&path))
.map(|s| {
s.trim()
.parse::<u32>()
.expect("npwm expected to contain the number of channels")
})
}
/// Returns whether a controller's channels are ready to be used.
#[instrument]
pub fn is_exported(&self, controller: &Controller) -> Result<bool> {
// A controller is exported if the channel subdirectories are there.
// Since a controller without any channel doesn't make sense, it's
// enough to check for the existence of the first channel's directory.
match self.channel_dir(controller, &Channel(0)) {
Ok(_) => Ok(true),
Err(PwmError::NotExported(_)) => Ok(false),
Err(e) => Err(e),
}
}
/// Export a PWM controller, which enables access to its channels.
#[instrument]
pub fn export(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "export")
.and_then(|path| write(&path, "1"))
}
/// Unexport a PWM controller, which disables access to its channels.
#[instrument]
pub fn unexport(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "unexport")
.and_then(|path| write(&path, "1"))
}
/// Returns whether a controller's channel is enabled.
#[instrument]
pub fn is_enabled(&self, controller: &Controller, channel: &Channel) -> Result<bool> {
self.channel_file(controller, channel, "enable")
.and_then(|path| read(&path))
.and_then(parse_bool)
}
/// Enable a channel.
#[instrument]
pub fn enable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "1"))
}
/// Disable a channel.
#[instrument]
pub fn disable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "0"))
}
/// The total period of the PWM signal (read/write). Value is in nanoseconds
/// and is the sum of the active and inactive time of the PWM.
#[instrument]
pub fn set_period(
&mut self,
controller: Controller,
channel: Channel,
period: Duration,
) -> Result<()> {
let duty_cycle = self
.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "period")
.and_then(|path| write(&path, &period.as_nanos().to_string()))
}
/// The active time of the PWM signal (read/write). Value is in nanoseconds
/// and must be less than the period.
#[instrument]
pub fn set_duty_cycle(
&mut self,
controller: Controller,
channel: Channel,
duty_cycle: Duration,
) -> Result<()> {
let period = self
.channel_file(&controller, &channel, "period")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| write(&path, &duty_cycle.as_nanos().to_string()))
}
/// Changes the polarity of the PWM signal (read/write). Writes to this
/// property only work if the PWM chip supports changing the polarity. The
/// polarity can only be changed if the PWM is not enabled. Value is the
/// string “normal” or “inversed”.
#[instrument]
pub fn set_polarity(
&mut self,
controller: Controller,
channel: Channel,
polarity: Polarity,
) -> Result<()> {
// setting polarity is only allowed if channel is disabled:
if self.is_enabled(&controller, &channel)? {
return Err(PwmError::IllegalChangeWhileEnabled("polarity"));
}
self.channel_file(&controller, &channel, "polarity")
.and_then(|path| write(&path, &polarity.to_string()))
}
fn controller_dir(&self, controller: &Controller) -> Result<PathBuf> {
let path = self.sysfs_root.join(format!("pwmchip{}", controller.0));
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn controller_file(&self, controller: &Controller, fname: &str) -> Result<PathBuf> {
let path = self
.sysfs_root
.join(format!("pwmchip{}/{}", controller.0, fname));
if path.is_file() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn channel_dir(&self, controller: &Controller, channel: &Channel) -> Result<PathBuf> {
let n_pwm = self.npwm(controller)?;
if channel.0 >= n_pwm {
return Err(PwmError::ChannelNotFound(
controller.clone(),
channel.clone(),
));
}
let path = self
.controller_dir(controller)
.map(|controller| controller.join(format!("pwm{}", channel.0)))?;
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
fn channel_file(
&self,
controller: &Controller,
channel: &Channel,
fname: &str,
) -> Result<PathBuf> {
let path = self
.channel_dir(controller, channel)
.map(|channel| channel.join(fname))?;
if path.is_file() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
}
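// Example (sketch): polarity can only change while the channel is disabled
// (see `set_polarity` above), so a polarity flip is a disable/set/enable
// sequence:
//
// ```rust,ignore
// pwm.disable(Controller(0), Channel(0))?;
// pwm.set_polarity(Controller(0), Channel(0), Polarity::Inversed)?;
// pwm.enable(Controller(0), Channel(0))?;
// ```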
fn read(path: &Path) -> Result<String> {
fs::read_to_string(path).map_err(|e| PwmError::Sysfs(Access::Read(path.to_owned()), e))
}
fn write(path: &Path, contents: &str) -> Result<()> {
debug!("writing to {:?}", path);
fs::write(path, contents).map_err(|e| PwmError::Sysfs(Access::Write(path.to_owned()), e))
}
fn parse_bool(s: String) -> Result<bool> {
// sysfs compatible according to http://lkml.iu.edu/hypermail/linux/kernel/1103.2/02488.html
match s.trim_end().to_lowercase().as_ref() {
"1" | "y" | "yes" | "true" => Ok(true),
"0" | "n" | "no" | "false" | "" => Ok(false),
_ => Err(PwmError::NotBoolean(s)),
}
}
fn parse_duration(s: String) -> Result<Duration> {
s.trim_end()
.parse::<u64>()
.map_err(|e| PwmError::NotADuration(s, e))
.map(Duration::from_nanos)
}
#[derive(Debug)]
pub enum Polarity | ormal,
Inversed,
}
impl Display for Polarity {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Polarity::*;
match *self {
Normal => write!(f, "normal"),
Inversed => write!(f, "inversed"),
}
}
}
impl FromStr for Polarity {
type Err = PwmError;
fn from_str(s: &str) -> Result<Self> {
use Polarity::*;
match s {
"normal" => Ok(Normal),
"inversed" => Ok(Inversed),
_ => Err(PwmError::InvalidPolarity),
}
}
}
#[cfg(test)]
mod should {
use super::*;
use temp_dir::TempDir;
#[test]
fn fail_if_controller_not_found() {
let tmp = TempDir::new().unwrap();
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
assert!(matches!(
pwm.export(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
assert!(matches!(
pwm.unexport(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
}
#[test]
fn export_and_unexport_a_controller() {
let tmp = TempDir::new().unwrap();
let chip = tmp.child("pwmchip0");
fs::create_dir(&chip).unwrap();
let export = touch(chip.join("export"));
let unexport = touch(chip.join("unexport"));
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
pwm.export(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&export).unwrap(), "1");
pwm.unexport(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&unexport).unwrap(), "1");
}
fn touch(path: PathBuf) -> PathBuf {
fs::write(&path, b"").unwrap();
path
}
}
// xcb.rs
pub type AtomID = xcb::Atom;
pub type Color = u32;
pub type ScreenID = i32;
pub type WindowID = xcb::Window;
pub type DrawableID = xcb::Window;
pub type GraphicsContextID = xcb::Atom;
pub type EventKeyID = xcb::EventMask;
pub type ColorMapID = xcb::Atom;
pub type PixMapID = xcb::Atom;
pub type VisualID = xcb::Atom;
#[derive(Debug)]
pub struct Error
{
pub error_code: u8
}
pub enum Event
{
KeyEvent(KeyEvent),
ExposedEvent,
//TODO(fpalacios): Move the ClientMessageEvent data into its own struct (as with KeyEvent)
ClientMessageEvent
{
window : WindowID,
event_type : AtomID,
data : [u32; 5],
},
UnknownEvent(xcb::EventMask, xcb::Event<xcb::ffi::xcb_generic_event_t>)
}
#[derive(Debug)]
pub enum KeyEvent
{
KeyPress,
KeyReleased,
}
#[derive(Debug)]
pub struct Property
{
pub key : AtomID,
pub value: PropertyValue,
}
#[derive(Debug)]
pub enum PropertyValue
{
String(String),
I32(i32),
U32(u32),
None,
Atom(AtomID),
UnknownAtom(AtomID),
}
impl PropertyValue
{
pub fn get_type_atom_id(&self) -> AtomID
{
return match self
{
PropertyValue::String(_) => xcb::ATOM_STRING,
PropertyValue::I32(_) => xcb::ATOM_INTEGER,
PropertyValue::U32(_) => xcb::ATOM_CARDINAL,
PropertyValue::Atom(_) => xcb::ATOM_ATOM,
PropertyValue::UnknownAtom(atom_id) => atom_id.clone(),
PropertyValue::None => xcb::ATOM_NONE
};
}
}
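// Illustrative sketch (an assumption, not part of the original file): building
// a Property that sets a window's WM_NAME to a string, using the types above.
#[allow(dead_code)]
fn example_name_property() -> Property {
    Property {
        key: xcb::ATOM_WM_NAME,
        value: PropertyValue::String("my window".to_string()),
    }
}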
pub struct Client<'conn>
{
pub conn : &'conn xcb::Connection,
}
impl<'conn> Client<'conn>
{
pub fn new(conn: &'conn xcb::Connection) -> Client
{
return Client
{
conn,
};
}
pub fn find_atom_id_by_name(&self, name: &str) -> Option<AtomID>
{
let atom_id = xcb::intern_atom(&self.conn, false, name).get_reply().unwrap().atom();
return if atom_id == xcb::ATOM_NONE { None } else { Some(atom_id) };
}
pub fn find_atom_name(&self, atom_id: AtomID) -> String
{
return xcb::get_atom_name(&self.conn, atom_id).get_reply().unwrap().name().to_owned();
}
pub fn poll_events(&self) -> Option<Event>
{
let event = match self.conn.poll_for_event()
{
Some(event) => event,
None => return None,
};
match event.response_type() & !0x80
{
xcb::EXPOSE => return Some(Event::ExposedEvent),
xcb::KEY_PRESS => return Some(Event::KeyEvent(KeyEvent::KeyPress)),
xcb::KEY_RELEASE => return Some(Event::KeyEvent(KeyEvent::KeyReleased)),
event =>
{
println!("UNKOWN EVENT {:?}", event);
return None;
}
};
}
pub fn send_message(&self, destination: &Window, event: Event)
{
match event
{
Event::ClientMessageEvent {window, event_type, data,..} =>
{
let message_data = xcb::ffi::xproto::xcb_client_message_data_t::from_data32(data);
let event = xcb::Event::<xcb::ffi::xproto::xcb_client_message_event_t>::new(
32,
window,
event_type,
message_data
);
xcb::send_event_checked(
&self.conn,
false,
destination.id,
xcb::EVENT_MASK_SUBSTRUCTURE_REDIRECT,
&event
).request_check().unwrap();
}
_ =>
{
//TODO(fpalacios): Decide what to do here
}
};
self.flush().unwrap();
}
pub fn flush(&self) -> Result<(), ()>
{
return if self.conn.flush() { Ok(()) } else { Err(()) };
}
fn generate_id(&self) -> u32
{
return self.conn.generate_id();
}
}
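// Sketch of a polling loop built on Client::poll_events (an assumed usage
// pattern, not part of the original file; a real client would likely block
// on the connection instead of spinning).
#[allow(dead_code)]
fn example_event_loop(client: &Client) {
    loop {
        match client.poll_events() {
            Some(Event::KeyEvent(KeyEvent::KeyPress)) => break,
            Some(Event::ExposedEvent) => { /* redraw here */ }
            _ => {}
        }
    }
}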
pub struct Screen<'client, 'conn>
{
pub id : ScreenID,
pub client : &'client Client<'conn>,
pub xcb_screen: xcb::Screen<'client>,
}
impl<'client, 'conn> Screen<'client, 'conn>
{
pub fn from_id(client: &'client Client<'conn>, id: ScreenID) -> Option<Screen<'client, 'conn>>
{
let xcb_screen = client.conn.get_setup().roots().nth(std::convert::TryInto::try_into(id).unwrap())?;
return Some(
Screen
{
id,
client,
xcb_screen,
}
);
}
pub fn root_window(&self) -> Window
{
return Window
{
screen: self,
id: self.xcb_screen.root()
};
}
pub fn get_black_pixel(&self) -> Color
{
return self.xcb_screen.black_pixel();
}
pub fn get_white_pixel(&self) -> Color
{
return self.xcb_screen.white_pixel();
}
}
pub struct Window<'screen, 'client, 'conn>
{
pub screen: &'screen Screen<'client, 'conn>,
pub id: WindowID,
}
impl<'screen, 'client, 'conn> Window<'screen, 'client, 'conn>
{
pub fn children(&self) -> Vec<Window<'screen, 'client, 'conn>>
{
let tree = xcb::query_tree(&self.screen.client.conn, self.id).get_reply().unwrap();
let children = tree.children();
let mut result = Vec::with_capacity(children.len());
for child in children
{
result.push(Window { screen: self.screen, id: child.clone()});
}
return result;
}
pub fn get_property(&self, atom: AtomID) -> Result<Property, Error>
{
let property = match xcb::get_property(
&self.screen.client.conn,
false,
self.id,
atom,
xcb::ATOM_ANY,
0,
1024
).get_reply()
{
Ok(property) => property,
Err(err) =>
{
return Err(
Error
{
error_code: err.error_code()
}
);
}
};
let value = match property.type_()
{
xcb::ATOM_STRING => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
xcb::ATOM_INTEGER => PropertyValue::I32(property.value::<i32>()[0]),
xcb::ATOM_NONE => PropertyValue::None,
xcb::ATOM_ATOM => PropertyValue::Atom(property.value::<u32>()[0]),
xcb::ATOM_CARDINAL => PropertyValue::U32(property.value::<u32>()[0]),
unknown_atom =>
{
match self.screen.client.find_atom_name(unknown_atom).as_ref()
{
"UTF8_STRING" => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
_ => PropertyValue::UnknownAtom(unknown_atom)
}
}
};
return Ok(Property{ key: atom, value });
}
pub fn set_property(&self, property: &Property)
{
let atom_type = property.value.get_type_atom_id();
match &property.value
{
PropertyValue::String(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
8,
val.as_bytes()
);
},
PropertyValue::Atom(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::I32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::U32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::None =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[xcb::ATOM_NONE]
);
},
PropertyValue::UnknownAtom(_) =>
{
//TODO(fpalacios): What should we do here?
panic!("What should we do here?");
},
};
}
pub fn geometry(&self) -> (i16, i16, u16, u16)
{
let geometry = match xcb::get_geometry(&self.screen.client.conn, self.id).get_reply()
{
Ok(geometry) => geometry,
Err(error) =>
{
println!("Failed to get the window geometry. Error code [{}]", error.error_code());
panic!();
}
}
};
return (geometry.x(), geometry.y(), geometry.width(), geometry.height());
}
pub fn map(&self)
{
xcb::map_window(&self.screen.client.conn, self.id);
self.screen.client.flush().unwrap();
}
pub fn create_child_window(
&self,
(x, y, width, height): (i16, i16, u16, u16),
depth : u8,
colormap : Option<&ColorMap>,
background_pixel : Option<u32>,
border_pixel : Option<u32>,
visual_id : Option<VisualID>
)
-> Result<Window<'screen, 'client, 'conn>, Error>
{
let child_id = self.screen.client.generate_id();
let mut window_attributes = vec![
(
xcb::CW_EVENT_MASK,
xcb::GC_GRAPHICS_EXPOSURES | xcb::EVENT_MASK_KEY_PRESS
)
];
if let Some(colormap) = colormap
{
window_attributes.push((xcb::CW_COLORMAP, colormap.id));
}
if let Some(background_pixel) = background_pixel
{
window_attributes.push((xcb::CW_BACK_PIXEL, background_pixel))
}
if let Some(border_pixel) = border_pixel
{
window_attributes.push((xcb::CW_BORDER_PIXEL, border_pixel));
}
let visual_id = match visual_id
{
Some(visual_id) => visual_id,
None => self.screen.xcb_screen.root_visual()
};
if let Err(e) = xcb::create_window_checked(
&self.screen.client.conn,
depth,
child_id,
self.id,
x,
y,
width,
height,
1,
xcb::WINDOW_CLASS_INPUT_OUTPUT as u16,
visual_id,
&window_attributes
).request_check()
{
return Err(Error{error_code: e.error_code()})
};
self.screen.client.flush().unwrap();
let window = Window
{
screen: self.screen,
id : child_id,
};
window.map();
return Ok(window);
}
}
pub struct GraphicsContext<'client, 'conn>
{
id : GraphicsContextID,
client: &'client Client<'conn>
}
impl<'client, 'conn> GraphicsContext<'client, 'conn>
{
pub fn generate(window: &Window<'_, 'client, 'conn>, foreground: Color, background: Color) -> GraphicsContext<'client, 'conn>
{
let id = window.screen.client.generate_id();
xcb::create_gc_checked(
&window.screen.client.conn,
id,
window.id,
&[
(xcb::GC_FOREGROUND, foreground),
(xcb::GC_BACKGROUND, background),
(xcb::GC_LINE_WIDTH, 1),
(xcb::GC_LINE_STYLE, xcb::LINE_STYLE_SOLID),
(xcb::GC_GRAPHICS_EXPOSURES, 0)
]
).request_check().unwrap();
return GraphicsContext
{
client: window.screen.client,
id,
};
}
pub fn draw_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
pub fn fill_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_fill_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
//TODO(fpalacios): Figure out how to draw an image in pure xcb
pub fn draw_image(
&self,
drawable: DrawableID,
_image: &image::RgbaImage,
(x, y): (i16, i16),
)
{
//NOTE(fpalacios): Generating and filling this array is just a temporary test for now
let mut a = [0_u8; 100 * 100 * 4];
// fill the whole 100x100 RGBA buffer, one pixel (4 bytes) at a time
for i in (0..a.len()).step_by(4)
{
a[i + 0] = 0x00;
a[i + 1] = 0xFF;
a[i + 2] = 0xFF;
a[i + 3] = 0x00;
}
if let Err(e) = xcb::put_image_checked(
&self.client.conn,
xcb::IMAGE_FORMAT_Z_PIXMAP as u8,
drawable,
self.id,
100,
100,
x,
y,
0,
32,
&a,
).request_check()
{
println!("Error al pintar la imagen {:?}", e);
}
}
pub fn clear_window(&self, window: &'conn Window)
{
let (x, y, width, height) = window.geometry();
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
pub fn clear_area(&self, window: &'conn Window, (x, y, width, height): (i16, i16, u16, u16))
{
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
}
pub struct ColorMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : ColorMapID
}
impl<'client, 'conn> ColorMap<'client, 'conn>
{
pub fn create(window: &'conn Window, visual: VisualID) -> ColorMap<'client, 'conn>
{
let client = window.screen.client;
let id = client.generate_id();
xcb::create_colormap_checked(
&client.conn,
xcb::COLORMAP_ALLOC_NONE as u8,
id,
window.id,
visual
).request_check().unwrap();
return ColorMap
{
client,
id
};
}
}
pub struct PixMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : PixMapID,
pub width : u16,
pub height: u16
}
impl<'client, 'conn> PixMap<'client, 'conn>
{
pub fn create(screen: &Screen<'client, 'conn>, drawable: DrawableID, width: u16, height: u16) -> PixMap<'client, 'conn>
{
let client = screen.client;
let id = client.generate_id();
xcb::create_pixmap_checked(
&client.conn,
32,
id,
drawable,
width,
height
).request_check().unwrap();
return PixMap
{
client,
id,
width,
height
};
}
}
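// A minimal end-to-end sketch (an assumption, not part of the original file):
// connect to the X server, wrap the connection, and print the WM_NAME
// property of every child of the root window.
#[allow(dead_code)]
fn example_list_window_names() {
    // connect(None) picks the display from the DISPLAY environment variable
    let (conn, screen_num) = xcb::Connection::connect(None).unwrap();
    let client = Client::new(&conn);
    let screen = Screen::from_id(&client, screen_num).expect("screen not found");
    for child in screen.root_window().children() {
        if let Ok(prop) = child.get_property(xcb::ATOM_WM_NAME) {
            println!("window {}: {:?}", child.id, prop.value);
        }
    }
}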
// api.rs
use std::io::{self, Read, Error, ErrorKind};
use std::borrow::Cow;
use hyper;
use hyper::{client, Client, Url };
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use std::time::Duration;
use serde_json;
use product;
use configs::{Configs, ApiConfigs, ProxyConfigs};
const HOST_URL: &'static str = "https://www.versioneye.com";
//builds the URL to the product page (SaaS or Enterprise)
pub fn to_product_url(api_confs: &ApiConfigs, lang: &str, prod_key: &str, version: &str) -> String {
let scheme = match api_confs.scheme.clone() {
Some(val) => val,
None => "http".to_string()
};
let host = match api_confs.host.clone() {
Some(val) => val,
None => HOST_URL.to_string()
};
let host_url = match api_confs.port.clone() {
Some(port) => format!("{}://{}:{}", scheme, host, port),
None => format!("{}://{}", scheme, host )
};
format!("{}/{}/{}/{}", host_url, lang, prod_key, version)
}
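//e.g. (illustrative values): with scheme "https", host "www.versioneye.com"
//and no port, to_product_url(&confs.api, "rust", "serde", "1.0.0") yields
//"https://www.versioneye.com/rust/serde/1.0.0"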
//builds the API resource URL from the API configs
fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str)
-> Result<hyper::Url, hyper::error::ParseError> {
let url_str = match api_confs.port {
None => {
format!(
"{}://{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
api_confs.path.clone().unwrap(), resource_path,
)
},
Some(port) => format!(
"{}://{}:{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
port, api_confs.path.clone().unwrap(), resource_path
)
};
Url::parse(url_str.as_str())
}
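//e.g. (illustrative values): scheme "https", host "www.versioneye.com",
//no port, path "api/v2" and resource_path "products/sha/abc" parse to
//"https://www.versioneye.com/api/v2/products/sha/abc"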
fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> {
let ssl = NativeTlsClient::new().unwrap();
let connector = HttpsConnector::new(ssl);
//use a proxy only if the user has defined the proxy host and port
let mut client = if proxy_confs.is_complete() {
let host = Cow::from(proxy_confs.host.clone().unwrap());
let port = proxy_confs.port.clone().unwrap();
let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string());
let ssl_proxy = NativeTlsClient::new().unwrap();
let proxy = client::ProxyConfig::new (
scheme.as_str(), host, port, connector, ssl_proxy
);
Client::with_proxy_config(proxy)
} else {
Client::with_connector(connector)
};
client.set_read_timeout(Some(Duration::new(5,0)));
let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url");
let mut body = String::new();
res.read_to_string(&mut body).expect("Failed to read response body");
Some(body)
}
pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str)
-> Result<product::ProductMatch, Error> {
let sha_res = fetch_product_by_sha(&confs, file_sha);
match sha_res {
Ok(m) => {
let sha = m.sha.expect("No product sha from SHA result");
let product = m.product.expect("No product info from SHA result");
match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) {
Ok(mut m) => {
m.sha = Some(sha);
Ok(m)
},
Err(e) => {
println!("Failed to fetch product details for sha: {}", file_sha);
Err(e)
}
}
},
Err(e) => Err(e)
}
}
pub fn fetch_product_by_sha(confs: &Configs, sha: &str)
-> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let resource_path = format!("products/sha/{}", encode_sha(sha) );
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_sha_response(json_txt)
}
//replaces base64 special characters with URL-safe percent encoding
//source: https://en.wikipedia.org/wiki/Base64#URL_applications
pub fn encode_sha<'a>(sha: &'a str) -> String {
let encoded_sha = sha.to_string();
encoded_sha.replace("+", "%2B")
.replace("/", "%2F")
.replace("=", "%3D")
.trim().to_string()
}
pub fn encode_prod_key<'b>(prod_key: &'b str) -> String {
let encoded_prod_key = prod_key.to_string();
encoded_prod_key
.replace(".", "~")
.replace("/", ":")
.trim().to_string()
}
pub fn encode_language<'b>(lang: &'b str) -> String {
let encoded_lang = lang.to_string();
encoded_lang.replace(".", "").trim().to_lowercase().to_string()
}
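// Quick checks of the escaping rules above (a sketch, not part of the
// original file; the inputs are illustrative):
#[test]
fn encodes_api_path_segments() {
    assert_eq!(encode_sha("ab+cd/ef=="), "ab%2Bcd%2Fef%3D%3D");
    assert_eq!(encode_prod_key("junit/junit"), "junit:junit");
    assert_eq!(encode_prod_key("com.google.guava"), "com~google~guava");
    assert_eq!(encode_language("Node.JS"), "nodejs");
}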
pub fn fetch_product<'a>(
confs: &Configs, lang: &str, prod_key: &str, version: &str
) -> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let encoded_prod_key = encode_prod_key(&prod_key);
let encoded_lang = encode_language(lang);
let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone());
let prod_url = to_product_url(
&confs.api,
encoded_lang.clone().as_str(),
prod_key,
version
);
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("prod_version", version)
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_product_response(json_txt, Some(prod_url))
}
#[derive(Serialize, Deserialize, Debug)]
struct | {
error: String
}
#[derive(Serialize, Deserialize, Debug)]
struct ShaItem {
language: String,
prod_key: String,
version: String,
sha_value: String,
sha_method: String,
prod_type: Option<String>,
group_id: Option<String>,
artifact_id: Option<String>,
classifier: Option<String>,
packaging: Option<String>
}
//-- helper functions
pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new(ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
    if !res.is_array() {
        let e = Error::new(ErrorKind::Other, "Unsupported SHA response - expected array");
        return Err(e);
    }
    let shas = res.as_array().unwrap();
    if shas.is_empty() {
        let e = Error::new(ErrorKind::Other, "No match for the SHA");
        return Err(e);
    }
    let doc: ShaItem = serde_json::from_value(shas[0].clone()).unwrap();
let the_prod = product::Product {
name: "".to_string(),
language: doc.language,
prod_key: doc.prod_key,
version: doc.version,
prod_type: doc.prod_type
};
let the_sha = product::ProductSHA {
packaging: doc.packaging.unwrap_or("unknown".to_string()),
method: doc.sha_method,
value: doc.sha_value,
filepath: None
};
Ok(product::ProductMatch::new(the_prod, the_sha))
}
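// Editor's hedged sketch: an invented payload in the array-of-`ShaItem` shape
// deserialized above; the optional fields are simply omitted, and the
// `ProductMatch`/`ProductSHA` field names are assumed from their usage here.
#[cfg(test)]
mod sha_response_sketch {
    use super::process_sha_response;

    #[test]
    fn parses_first_sha_item() {
        let body = r#"[{
            "language": "java",
            "prod_key": "junit/junit",
            "version": "4.12",
            "sha_value": "abc123",
            "sha_method": "sha1"
        }]"#;
        let m = process_sha_response(Some(body.to_string())).expect("should parse");
        assert_eq!(m.sha.expect("sha should be set").value, "abc123");
    }
}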
// converts the response of product endpoint into ProductMatch struct
#[derive(Serialize, Deserialize, Debug)]
struct ProductItem {
name: String,
language: String,
prod_key: String,
version: String,
prod_type: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct LicenseItem {
name: String,
url: Option<String>
}
pub fn process_product_response(
json_text: Option<String>, prod_url: Option<String>
) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new( ErrorKind::Other, "No response from API")
)
}
    let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
    if !res.is_object() {
return Err(Error::new(ErrorKind::Other, "No product details"));
}
//if response includes error field in HTTP200 response
    // NB! it may include errors other than the rate limit, but @Rob asked to show the custom limit error message
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
    let product_doc: ProductItem = serde_json::from_value(res.clone())?;
let the_prod = product::Product {
name: product_doc.name,
language: product_doc.language,
prod_key: product_doc.prod_key,
version: product_doc.version,
prod_type: Some( product_doc.prod_type )
};
//extract license details
let licenses = match res["licenses"].as_array() {
Some(arr) => arr.iter().fold(vec![], |mut acc, ref x| {
let lic_doc = x.as_object().unwrap();
acc.push(product::ProductLicense {
name: lic_doc["name"].as_str().unwrap_or("unknown").to_string(),
url: lic_doc["url"].as_str().unwrap_or("").to_string()
});
acc
}),
None => vec![]
};
//count number of vulnerabilities
let n_vulns = match res["security_vulnerabilities"].as_array() {
Some(arr) => arr.len() as u32,
        None => 0
};
let the_match = product::ProductMatch {
sha: None,
product: Some(the_prod),
url: prod_url,
        licenses,
        n_vulns,
error: None
};
Ok(the_match)
}
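// Editor's hedged sketch: a minimal invented payload in the `ProductItem`
// shape; `licenses` is omitted (falling back to an empty vec) and the two
// empty vulnerability entries are counted by the arm above.
#[cfg(test)]
mod product_response_sketch {
    use super::process_product_response;

    #[test]
    fn parses_product_and_counts_vulns() {
        let body = r#"{
            "name": "junit",
            "language": "java",
            "prod_key": "junit/junit",
            "version": "4.12",
            "prod_type": "maven",
            "security_vulnerabilities": [{}, {}]
        }"#;
        let m = process_product_response(Some(body.to_string()), None).expect("should parse");
        assert_eq!(m.n_vulns, 2);
    }
}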
| ApiError | identifier_name |
api.rs | use std::io::{self, Read, Error, ErrorKind};
use std::borrow::Cow;
use hyper;
use hyper::{client, Client, Url };
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use std::time::Duration;
use serde_json;
use product;
use configs::{Configs, ApiConfigs, ProxyConfigs};
const HOST_URL: &'static str = "https://www.versioneye.com";
//it is used to build the url of the product page (SaaS or Enterprise)
pub fn to_product_url(api_confs: &ApiConfigs, lang: &str, prod_key: &str, version: &str) -> String {
let scheme = match api_confs.scheme.clone() {
Some(val) => val,
None => "http".to_string()
};
let host = match api_confs.host.clone() {
Some(val) => val,
None => HOST_URL.to_string()
};
let host_url = match api_confs.port.clone() {
Some(port) => format!("{}://{}:{}", scheme, host, port),
None => format!("{}://{}", scheme, host )
};
format!("{}/{}/{}/{}", host_url, lang, prod_key, version)
}
//it's used to build the API url
fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str)
-> Result<hyper::Url, hyper::error::ParseError> {
let url_str = match api_confs.port {
None => {
format!(
"{}://{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
api_confs.path.clone().unwrap(), resource_path,
)
},
Some(port) => format!(
"{}://{}:{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
port, api_confs.path.clone().unwrap(), resource_path
)
};
Url::parse(url_str.as_str())
}
fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> {
let ssl = NativeTlsClient::new().unwrap();
let connector = HttpsConnector::new(ssl);
    //use proxy only if the user has defined both proxy host and port
let mut client = if proxy_confs.is_complete() {
let host = Cow::from(proxy_confs.host.clone().unwrap());
let port = proxy_confs.port.clone().unwrap();
let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string());
let ssl_proxy = NativeTlsClient::new().unwrap();
let proxy = client::ProxyConfig::new (
scheme.as_str(), host, port, connector, ssl_proxy
);
Client::with_proxy_config(proxy)
} else {
Client::with_connector(connector)
};
client.set_read_timeout(Some(Duration::new(5,0)));
let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url");
let mut body = String::new();
res.read_to_string(&mut body).expect("Failed to read response body");
Some(body)
}
pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str)
-> Result<product::ProductMatch, Error> {
let sha_res = fetch_product_by_sha(&confs, file_sha);
match sha_res {
Ok(m) => {
let sha = m.sha.expect("No product sha from SHA result");
let product = m.product.expect("No product info from SHA result");
match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) {
Ok(mut m) => {
m.sha = Some(sha);
Ok(m)
},
Err(e) => {
println!("Failed to fetch product details for sha: {}", file_sha);
Err(e)
}
}
},
Err(e) => Err(e)
}
}
pub fn fetch_product_by_sha(confs: &Configs, sha: &str)
-> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let resource_path = format!("products/sha/{}", encode_sha(sha) );
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_sha_response(json_txt)
}
//replaces base64 special characters with URL-safe percent encoding
//source: https://en.wikipedia.org/wiki/Base64#URL_applications
pub fn encode_sha<'a>(sha: &'a str) -> String {
let encoded_sha = sha.to_string();
encoded_sha.replace("+", "%2B")
.replace("/", "%2F")
.replace("=", "%3D")
.trim().to_string()
}
pub fn encode_prod_key<'b>(prod_key: &'b str) -> String {
let encoded_prod_key = prod_key.to_string();
encoded_prod_key
.replace(".", "~")
.replace("/", ":")
.trim().to_string()
}
pub fn encode_language<'b>(lang: &'b str) -> String {
let encoded_lang = lang.to_string();
    encoded_lang.replace(".", "").trim().to_lowercase()
}
pub fn fetch_product<'a>(
confs: &Configs, lang: &str, prod_key: &str, version: &str
) -> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let encoded_prod_key = encode_prod_key(&prod_key);
let encoded_lang = encode_language(lang);
let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone());
let prod_url = to_product_url(
&confs.api,
encoded_lang.clone().as_str(),
prod_key,
version
);
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("prod_version", version)
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_product_response(json_txt, Some(prod_url))
}
#[derive(Serialize, Deserialize, Debug)]
struct ApiError {
error: String
}
#[derive(Serialize, Deserialize, Debug)]
struct ShaItem {
language: String,
prod_key: String,
version: String,
sha_value: String,
sha_method: String,
prod_type: Option<String>,
group_id: Option<String>,
artifact_id: Option<String>,
classifier: Option<String>,
packaging: Option<String>
}
//-- helper functions
pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> | let e = Error::new( ErrorKind::Other, "Unsupported SHA response - expected array");
return Err(e);
}
let shas = res.as_array().unwrap();
    if shas.is_empty() {
        let e = Error::new(ErrorKind::Other, "No match for the SHA");
        return Err(e);
    }
    let doc: ShaItem = serde_json::from_value(shas[0].clone()).unwrap();
let the_prod = product::Product {
name: "".to_string(),
language: doc.language,
prod_key: doc.prod_key,
version: doc.version,
prod_type: doc.prod_type
};
let the_sha = product::ProductSHA {
packaging: doc.packaging.unwrap_or("unknown".to_string()),
method: doc.sha_method,
value: doc.sha_value,
filepath: None
};
Ok(product::ProductMatch::new(the_prod, the_sha))
}
// converts the response of product endpoint into ProductMatch struct
#[derive(Serialize, Deserialize, Debug)]
struct ProductItem {
name: String,
language: String,
prod_key: String,
version: String,
prod_type: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct LicenseItem {
name: String,
url: Option<String>
}
pub fn process_product_response(
json_text: Option<String>, prod_url: Option<String>
) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new( ErrorKind::Other, "No response from API")
)
}
    let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
    if !res.is_object() {
return Err(Error::new(ErrorKind::Other, "No product details"));
}
//if response includes error field in HTTP200 response
    // NB! it may include errors other than the rate limit, but @Rob asked to show the custom limit error message
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
    let product_doc: ProductItem = serde_json::from_value(res.clone())?;
let the_prod = product::Product {
name: product_doc.name,
language: product_doc.language,
prod_key: product_doc.prod_key,
version: product_doc.version,
prod_type: Some( product_doc.prod_type )
};
//extract license details
let licenses = match res["licenses"].as_array() {
Some(arr) => arr.iter().fold(vec![], |mut acc, ref x| {
let lic_doc = x.as_object().unwrap();
acc.push(product::ProductLicense {
name: lic_doc["name"].as_str().unwrap_or("unknown").to_string(),
url: lic_doc["url"].as_str().unwrap_or("").to_string()
});
acc
}),
None => vec![]
};
//count number of vulnerabilities
let n_vulns = match res["security_vulnerabilities"].as_array() {
Some(arr) => arr.len() as u32,
        None => 0
};
let the_match = product::ProductMatch {
sha: None,
product: Some(the_prod),
url: prod_url,
        licenses,
        n_vulns,
error: None
};
Ok(the_match)
}
| {
if json_text.is_none() {
return Err(
Error::new(ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
if !res.is_array() { | identifier_body |
api.rs | use std::io::{self, Read, Error, ErrorKind};
use std::borrow::Cow;
use hyper;
use hyper::{client, Client, Url };
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use std::time::Duration;
use serde_json;
use product;
use configs::{Configs, ApiConfigs, ProxyConfigs};
const HOST_URL: &'static str = "https://www.versioneye.com";
//it is used to build the url of the product page (SaaS or Enterprise)
pub fn to_product_url(api_confs: &ApiConfigs, lang: &str, prod_key: &str, version: &str) -> String {
let scheme = match api_confs.scheme.clone() {
Some(val) => val,
None => "http".to_string()
};
let host = match api_confs.host.clone() {
Some(val) => val,
None => HOST_URL.to_string()
};
let host_url = match api_confs.port.clone() {
Some(port) => format!("{}://{}:{}", scheme, host, port),
None => format!("{}://{}", scheme, host )
};
format!("{}/{}/{}/{}", host_url, lang, prod_key, version)
}
//it's used to build the API url
fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str)
-> Result<hyper::Url, hyper::error::ParseError> {
let url_str = match api_confs.port {
None => {
format!(
"{}://{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
api_confs.path.clone().unwrap(), resource_path,
)
},
Some(port) => format!(
"{}://{}:{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
port, api_confs.path.clone().unwrap(), resource_path
)
};
Url::parse(url_str.as_str())
}
fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> {
let ssl = NativeTlsClient::new().unwrap();
let connector = HttpsConnector::new(ssl);
    //use proxy only if the user has defined both proxy host and port
let mut client = if proxy_confs.is_complete() {
let host = Cow::from(proxy_confs.host.clone().unwrap());
let port = proxy_confs.port.clone().unwrap();
let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string());
let ssl_proxy = NativeTlsClient::new().unwrap();
let proxy = client::ProxyConfig::new (
scheme.as_str(), host, port, connector, ssl_proxy
);
Client::with_proxy_config(proxy)
} else {
Client::with_connector(connector)
};
client.set_read_timeout(Some(Duration::new(5,0)));
let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url");
let mut body = String::new();
res.read_to_string(&mut body).expect("Failed to read response body");
Some(body)
}
pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str)
-> Result<product::ProductMatch, Error> {
let sha_res = fetch_product_by_sha(&confs, file_sha);
match sha_res {
Ok(m) => {
let sha = m.sha.expect("No product sha from SHA result");
let product = m.product.expect("No product info from SHA result");
match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) {
Ok(mut m) => {
m.sha = Some(sha);
Ok(m)
},
Err(e) => {
println!("Failed to fetch product details for sha: {}", file_sha);
Err(e)
}
}
},
Err(e) => Err(e)
}
}
pub fn fetch_product_by_sha(confs: &Configs, sha: &str)
-> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let resource_path = format!("products/sha/{}", encode_sha(sha) );
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
}; | .clear()
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_sha_response(json_txt)
}
//replaces base64 special characters with URL-safe percent encoding
//source: https://en.wikipedia.org/wiki/Base64#URL_applications
pub fn encode_sha<'a>(sha: &'a str) -> String {
let encoded_sha = sha.to_string();
encoded_sha.replace("+", "%2B")
.replace("/", "%2F")
.replace("=", "%3D")
.trim().to_string()
}
pub fn encode_prod_key<'b>(prod_key: &'b str) -> String {
let encoded_prod_key = prod_key.to_string();
encoded_prod_key
.replace(".", "~")
.replace("/", ":")
.trim().to_string()
}
pub fn encode_language<'b>(lang: &'b str) -> String {
let encoded_lang = lang.to_string();
    encoded_lang.replace(".", "").trim().to_lowercase()
}
pub fn fetch_product<'a>(
confs: &Configs, lang: &str, prod_key: &str, version: &str
) -> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let encoded_prod_key = encode_prod_key(&prod_key);
let encoded_lang = encode_language(lang);
let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone());
let prod_url = to_product_url(
&confs.api,
encoded_lang.clone().as_str(),
prod_key,
version
);
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("prod_version", version)
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_product_response(json_txt, Some(prod_url))
}
#[derive(Serialize, Deserialize, Debug)]
struct ApiError {
error: String
}
#[derive(Serialize, Deserialize, Debug)]
struct ShaItem {
language: String,
prod_key: String,
version: String,
sha_value: String,
sha_method: String,
prod_type: Option<String>,
group_id: Option<String>,
artifact_id: Option<String>,
classifier: Option<String>,
packaging: Option<String>
}
//-- helper functions
pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new(ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
    if !res.is_array() {
        let e = Error::new(ErrorKind::Other, "Unsupported SHA response - expected array");
        return Err(e);
    }
    let shas = res.as_array().unwrap();
    if shas.is_empty() {
        let e = Error::new(ErrorKind::Other, "No match for the SHA");
        return Err(e);
    }
    let doc: ShaItem = serde_json::from_value(shas[0].clone()).unwrap();
let the_prod = product::Product {
name: "".to_string(),
language: doc.language,
prod_key: doc.prod_key,
version: doc.version,
prod_type: doc.prod_type
};
let the_sha = product::ProductSHA {
packaging: doc.packaging.unwrap_or("unknown".to_string()),
method: doc.sha_method,
value: doc.sha_value,
filepath: None
};
Ok(product::ProductMatch::new(the_prod, the_sha))
}
// converts the response of product endpoint into ProductMatch struct
#[derive(Serialize, Deserialize, Debug)]
struct ProductItem {
name: String,
language: String,
prod_key: String,
version: String,
prod_type: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct LicenseItem {
name: String,
url: Option<String>
}
pub fn process_product_response(
json_text: Option<String>, prod_url: Option<String>
) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new( ErrorKind::Other, "No response from API")
)
}
    let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
    if !res.is_object() {
return Err(Error::new(ErrorKind::Other, "No product details"));
}
//if response includes error field in HTTP200 response
    // NB! it may include errors other than the rate limit, but @Rob asked to show the custom limit error message
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
    let product_doc: ProductItem = serde_json::from_value(res.clone())?;
let the_prod = product::Product {
name: product_doc.name,
language: product_doc.language,
prod_key: product_doc.prod_key,
version: product_doc.version,
prod_type: Some( product_doc.prod_type )
};
//extract license details
let licenses = match res["licenses"].as_array() {
Some(arr) => arr.iter().fold(vec![], |mut acc, ref x| {
let lic_doc = x.as_object().unwrap();
acc.push(product::ProductLicense {
name: lic_doc["name"].as_str().unwrap_or("unknown").to_string(),
url: lic_doc["url"].as_str().unwrap_or("").to_string()
});
acc
}),
None => vec![]
};
//count number of vulnerabilities
let n_vulns = match res["security_vulnerabilities"].as_array() {
Some(arr) => arr.len() as u32,
None => 0 as u32
};
let the_match = product::ProductMatch {
sha: None,
product: Some(the_prod),
url: prod_url,
        licenses,
        n_vulns,
error: None
};
Ok(the_match)
} |
//attach query params
resource_url
.query_pairs_mut() | random_line_split |
api.rs | use std::io::{self, Read, Error, ErrorKind};
use std::borrow::Cow;
use hyper;
use hyper::{client, Client, Url };
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use std::time::Duration;
use serde_json;
use product;
use configs::{Configs, ApiConfigs, ProxyConfigs};
const HOST_URL: &'static str = "https://www.versioneye.com";
//it is used to build the url of the product page (SaaS or Enterprise)
pub fn to_product_url(api_confs: &ApiConfigs, lang: &str, prod_key: &str, version: &str) -> String {
let scheme = match api_confs.scheme.clone() {
Some(val) => val,
None => "http".to_string()
};
let host = match api_confs.host.clone() {
Some(val) => val,
None => HOST_URL.to_string()
};
let host_url = match api_confs.port.clone() {
Some(port) => format!("{}://{}:{}", scheme, host, port),
None => format!("{}://{}", scheme, host )
};
format!("{}/{}/{}/{}", host_url, lang, prod_key, version)
}
//it's used to build the API url
fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str)
-> Result<hyper::Url, hyper::error::ParseError> {
let url_str = match api_confs.port {
None => {
format!(
"{}://{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
api_confs.path.clone().unwrap(), resource_path,
)
},
Some(port) => format!(
"{}://{}:{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
port, api_confs.path.clone().unwrap(), resource_path
)
};
Url::parse(url_str.as_str())
}
fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> {
let ssl = NativeTlsClient::new().unwrap();
let connector = HttpsConnector::new(ssl);
    //use proxy only if the user has defined both proxy host and port
let mut client = if proxy_confs.is_complete() {
let host = Cow::from(proxy_confs.host.clone().unwrap());
let port = proxy_confs.port.clone().unwrap();
let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string());
let ssl_proxy = NativeTlsClient::new().unwrap();
let proxy = client::ProxyConfig::new (
scheme.as_str(), host, port, connector, ssl_proxy
);
Client::with_proxy_config(proxy)
} else {
Client::with_connector(connector)
};
client.set_read_timeout(Some(Duration::new(5,0)));
let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url");
let mut body = String::new();
res.read_to_string(&mut body).expect("Failed to read response body");
Some(body)
}
pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str)
-> Result<product::ProductMatch, Error> {
let sha_res = fetch_product_by_sha(&confs, file_sha);
match sha_res {
Ok(m) => | ,
Err(e) => Err(e)
}
}
pub fn fetch_product_by_sha(confs: &Configs, sha: &str)
-> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let resource_path = format!("products/sha/{}", encode_sha(sha) );
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_sha_response(json_txt)
}
//replaces base64 special characters with URL-safe percent encoding
//source: https://en.wikipedia.org/wiki/Base64#URL_applications
pub fn encode_sha<'a>(sha: &'a str) -> String {
let encoded_sha = sha.to_string();
encoded_sha.replace("+", "%2B")
.replace("/", "%2F")
.replace("=", "%3D")
.trim().to_string()
}
pub fn encode_prod_key<'b>(prod_key: &'b str) -> String {
let encoded_prod_key = prod_key.to_string();
encoded_prod_key
.replace(".", "~")
.replace("/", ":")
.trim().to_string()
}
pub fn encode_language<'b>(lang: &'b str) -> String {
let encoded_lang = lang.to_string();
    encoded_lang.replace(".", "").trim().to_lowercase()
}
pub fn fetch_product<'a>(
confs: &Configs, lang: &str, prod_key: &str, version: &str
) -> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let encoded_prod_key = encode_prod_key(&prod_key);
let encoded_lang = encode_language(lang);
let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone());
let prod_url = to_product_url(
&confs.api,
encoded_lang.clone().as_str(),
prod_key,
version
);
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("prod_version", version)
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_product_response(json_txt, Some(prod_url))
}
#[derive(Serialize, Deserialize, Debug)]
struct ApiError {
error: String
}
#[derive(Serialize, Deserialize, Debug)]
struct ShaItem {
language: String,
prod_key: String,
version: String,
sha_value: String,
sha_method: String,
prod_type: Option<String>,
group_id: Option<String>,
artifact_id: Option<String>,
classifier: Option<String>,
packaging: Option<String>
}
//-- helper functions
pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new(ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
    if !res.is_array() {
        let e = Error::new(ErrorKind::Other, "Unsupported SHA response - expected array");
        return Err(e);
    }
    let shas = res.as_array().unwrap();
    if shas.is_empty() {
        let e = Error::new(ErrorKind::Other, "No match for the SHA");
        return Err(e);
    }
    let doc: ShaItem = serde_json::from_value(shas[0].clone()).unwrap();
let the_prod = product::Product {
name: "".to_string(),
language: doc.language,
prod_key: doc.prod_key,
version: doc.version,
prod_type: doc.prod_type
};
let the_sha = product::ProductSHA {
packaging: doc.packaging.unwrap_or("unknown".to_string()),
method: doc.sha_method,
value: doc.sha_value,
filepath: None
};
Ok(product::ProductMatch::new(the_prod, the_sha))
}
// converts the response of product endpoint into ProductMatch struct
#[derive(Serialize, Deserialize, Debug)]
struct ProductItem {
name: String,
language: String,
prod_key: String,
version: String,
prod_type: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct LicenseItem {
name: String,
url: Option<String>
}
pub fn process_product_response(
json_text: Option<String>, prod_url: Option<String>
) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new( ErrorKind::Other, "No response from API")
)
}
    let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
    if !res.is_object() {
return Err(Error::new(ErrorKind::Other, "No product details"));
}
//if response includes error field in HTTP200 response
    // NB! it may include errors other than the rate limit, but @Rob asked to show the custom limit error message
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
    let product_doc: ProductItem = serde_json::from_value(res.clone())?;
let the_prod = product::Product {
name: product_doc.name,
language: product_doc.language,
prod_key: product_doc.prod_key,
version: product_doc.version,
prod_type: Some( product_doc.prod_type )
};
//extract license details
let licenses = match res["licenses"].as_array() {
Some(arr) => arr.iter().fold(vec![], |mut acc, ref x| {
let lic_doc = x.as_object().unwrap();
acc.push(product::ProductLicense {
name: lic_doc["name"].as_str().unwrap_or("unknown").to_string(),
url: lic_doc["url"].as_str().unwrap_or("").to_string()
});
acc
}),
None => vec![]
};
//count number of vulnerabilities
let n_vulns = match res["security_vulnerabilities"].as_array() {
Some(arr) => arr.len() as u32,
        None => 0
};
let the_match = product::ProductMatch {
sha: None,
product: Some(the_prod),
url: prod_url,
        licenses,
        n_vulns,
error: None
};
Ok(the_match)
}
| {
let sha = m.sha.expect("No product sha from SHA result");
let product = m.product.expect("No product info from SHA result");
match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) {
Ok(mut m) => {
m.sha = Some(sha);
Ok(m)
},
Err(e) => {
println!("Failed to fetch product details for sha: {}", file_sha);
Err(e)
}
}
} | conditional_block |
txn_ext.rs | // Copyright 2023 TiKV Project Authors. Licensed under Apache-2.0.
//! This module contains everything related to the transaction hook.
//!
//! This is the temporary (efficient) solution; it should be implemented as one
//! type of coprocessor.
use std::sync::{atomic::Ordering, Arc};
use crossbeam::atomic::AtomicCell;
use engine_traits::{KvEngine, RaftEngine, CF_LOCK};
use kvproto::{kvrpcpb::ExtraOp, metapb::Region, raft_cmdpb::RaftRequestHeader};
use parking_lot::RwLockWriteGuard;
use raft::eraftpb;
use raftstore::store::{
LocksStatus, PeerPessimisticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX,
};
use slog::{error, info, Logger};
use crate::{
batch::StoreContext,
raft::Peer,
router::{PeerMsg, PeerTick},
worker::pd,
SimpleWriteEncoder,
};
pub struct TxnContext {
ext: Arc<TxnExt>,
extra_op: Arc<AtomicCell<ExtraOp>>,
reactivate_memory_lock_ticks: usize,
}
impl Default for TxnContext {
#[inline]
fn default() -> Self {
Self {
ext: Arc::default(),
extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)),
reactivate_memory_lock_ticks: 0,
}
}
}
impl TxnContext {
#[inline]
pub fn on_region_changed(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &mut StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// A more recent read may happen on the old leader. So max ts should
// be updated after a peer becomes leader.
self.require_updating_max_ts(ctx, term, region, logger);
// Init the in-memory pessimistic lock table when the peer becomes leader.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::Normal;
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// If a follower merges into a leader, a more recent read may happen
// on the leader of the follower. So max ts should be updated after
// a region merge.
self.require_updating_max_ts(ctx, term, region, logger);
}
#[inline]
pub fn on_became_follower(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::NotLeader;
pessimistic_locks.clear();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn ext(&self) -> &Arc<TxnExt> {
&self.ext
}
#[inline]
pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> {
&self.extra_op
}
fn require_updating_max_ts<EK, ER, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) where
EK: KvEngine,
ER: RaftEngine,
{
let epoch = region.get_region_epoch();
let term_low_bits = term & ((1 << 32) - 1); // 32 bits
        let version_low_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits
        let initial_status = (term_low_bits << 32) | (version_low_bits << 1);
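        // Worked example (editor note): with term = 5 and version = 3 this packs to
        // (5 << 32) | (3 << 1) = 0x0000_0005_0000_0006. The lowest bit is left as 0,
        // presumably so it can be flipped once the max ts update completes.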
self.ext
.max_ts_sync_status
.store(initial_status, Ordering::SeqCst);
info!(
logger,
"require updating max ts";
"initial_status" => initial_status,
);
let task = pd::Task::UpdateMaxTimestamp {
region_id: region.get_id(),
initial_status,
txn_ext: self.ext.clone(),
};
if let Err(e) = ctx.schedulers.pd.schedule(task) {
error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" =>?e);
}
}
pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> {
// Group in-memory pessimistic locks in the original region into new regions.
// The locks of new regions will be put into the corresponding new regions
// later. And the locks belonging to the old region will stay in the original
// map.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
// Update the version so the concurrent reader will fail due to EpochNotMatch
// instead of PessimisticLockNotFound.
pessimistic_locks.version = derived.get_region_epoch().get_version();
pessimistic_locks.group_by_regions(regions, derived)
}
pub fn init_with_lock(&self, locks: PeerPessimisticLocks) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
*pessimistic_locks = locks;
}
}
impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> {
    /// Returns true if the tick is consumed; otherwise the tick should be
    /// rescheduled.
pub fn on_reactivate_memory_lock_tick<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) | && txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick
{
pessimistic_locks.status = LocksStatus::Normal;
txn_context.reactivate_memory_lock_ticks = 0;
} else {
drop(pessimistic_locks);
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
}
}
// Returns whether we should propose another TransferLeader command. This is
// for:
// - Considering the amount of pessimistic locks can be big, it can reduce
// unavailable time caused by waiting for the transferee catching up logs.
// - Make transferring leader strictly after write commands that executes before
// proposing the locks, preventing unexpected lock loss.
pub fn propose_locks_before_transfer_leader<T>(
&mut self,
ctx: &mut StoreContext<EK, ER, T>,
msg: &eraftpb::Message,
) -> bool {
// 1. Disable in-memory pessimistic locks.
// Clone to make borrow checker happy when registering ticks.
let txn_ext = self.txn_context().ext.clone();
let mut pessimistic_locks = txn_ext.pessimistic_locks.write();
        // If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message
        // is a reply to a previous transfer leader command. If the locks status remains
        // in the TransferringLeader state, we can safely initiate transferring leader
        // now.
        // If it's not in TransferringLeader status now, it is probably because several
        // ticks have passed since we last proposed the locks and the memory locks have
        // been reactivated. In that case, we should propose the locks again.
if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX
&& pessimistic_locks.status == LocksStatus::TransferringLeader
{
return false;
}
// If it is not writable, it's probably because it's a retried TransferLeader
// and the locks have been proposed. But we still need to return true to
// propose another TransferLeader command. Otherwise, some write requests that
// have marked some locks as deleted will fail because raft rejects more
// proposals.
// It is OK to return true here if it's in other states like MergingRegion or
// NotLeader. In those cases, the locks will fail to propose and nothing will
// happen.
        if !pessimistic_locks.is_writable() {
return true;
}
pessimistic_locks.status = LocksStatus::TransferringLeader;
self.txn_context_mut().reactivate_memory_lock_ticks = 0;
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
// 2. Propose pessimistic locks
if pessimistic_locks.is_empty() {
return false;
}
// FIXME: Raft command has size limit. Either limit the total size of
// pessimistic locks in a region, or split commands here.
let mut encoder = SimpleWriteEncoder::with_capacity(512);
let mut lock_count = 0;
{
// Downgrade to a read guard, do not block readers in the scheduler as far as
// possible.
let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks);
fail::fail_point!("invalidate_locks_before_transfer_leader");
for (key, (lock, deleted)) in &*pessimistic_locks {
if *deleted {
continue;
}
lock_count += 1;
encoder.put(CF_LOCK, key.as_encoded(), &lock.to_lock().to_bytes());
}
}
if lock_count == 0 {
// If the map is not empty but all locks are deleted, it is possible that a
// write command has just marked locks deleted but not proposed yet.
// It might cause that command to fail if we skip proposing the
// extra TransferLeader command here.
return true;
}
let mut header = Box::<RaftRequestHeader>::default();
header.set_region_id(self.region_id());
header.set_region_epoch(self.region().get_region_epoch().clone());
header.set_peer(self.peer().clone());
info!(
self.logger,
"propose {} locks before transferring leader", lock_count;
);
        let PeerMsg::SimpleWrite(write) = PeerMsg::simple_write(header, encoder.encode()).0 else { unreachable!() };
self.on_simple_write(ctx, write.header, write.data, write.ch);
true
}
}
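// Editor's hedged sketch: the minimal encoder round used above, that is, buffer
// a LOCK-CF put, then freeze the batch into proposal data. The key and value
// bytes are placeholders, not a real encoded pessimistic lock.
#[cfg(test)]
mod lock_write_shape_sketch {
    use super::*;

    #[test]
    fn buffers_a_lock_cf_put() {
        let mut encoder = SimpleWriteEncoder::with_capacity(512);
        encoder.put(CF_LOCK, b"placeholder-key", b"placeholder-lock-bytes");
        let _data = encoder.encode(); // this is what gets proposed through raft
    }
}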
| {
// If it is not leader, we needn't reactivate by tick. In-memory pessimistic
// lock will be enabled when this region becomes leader again.
if !self.is_leader() {
return;
}
let transferring_leader = self.raft_group().raft.lead_transferee.is_some();
let txn_context = self.txn_context_mut();
let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write();
// And this tick is currently only used for the leader transfer failure case.
if pessimistic_locks.status != LocksStatus::TransferringLeader {
return;
}
txn_context.reactivate_memory_lock_ticks += 1;
// `lead_transferee` is not set immediately after the lock status changes. So,
// we need the tick count condition to avoid reactivating too early.
if !transferring_leader | identifier_body |
txn_ext.rs | // Copyright 2023 TiKV Project Authors. Licensed under Apache-2.0.
//! This module contains everything related to the transaction hook.
//!
//! This is the temporary (efficient) solution; it should be implemented as one
//! type of coprocessor.
use std::sync::{atomic::Ordering, Arc};
use crossbeam::atomic::AtomicCell;
use engine_traits::{KvEngine, RaftEngine, CF_LOCK};
use kvproto::{kvrpcpb::ExtraOp, metapb::Region, raft_cmdpb::RaftRequestHeader};
use parking_lot::RwLockWriteGuard;
use raft::eraftpb;
use raftstore::store::{
LocksStatus, PeerPessimisticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX,
};
use slog::{error, info, Logger};
use crate::{
batch::StoreContext,
raft::Peer,
router::{PeerMsg, PeerTick},
worker::pd,
SimpleWriteEncoder,
};
pub struct TxnContext {
ext: Arc<TxnExt>,
extra_op: Arc<AtomicCell<ExtraOp>>,
reactivate_memory_lock_ticks: usize,
}
impl Default for TxnContext {
#[inline]
fn default() -> Self {
Self {
ext: Arc::default(),
extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)),
reactivate_memory_lock_ticks: 0,
}
}
}
impl TxnContext {
#[inline]
pub fn on_region_changed(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &mut StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// A more recent read may happen on the old leader. So max ts should
// be updated after a peer becomes leader.
self.require_updating_max_ts(ctx, term, region, logger);
// Init the in-memory pessimistic lock table when the peer becomes leader.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::Normal;
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// If a follower merges into a leader, a more recent read may happen
// on the leader of the follower. So max ts should be updated after
// a region merge.
self.require_updating_max_ts(ctx, term, region, logger);
}
#[inline]
pub fn on_became_follower(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::NotLeader;
pessimistic_locks.clear();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn ext(&self) -> &Arc<TxnExt> {
&self.ext
}
#[inline]
pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> {
&self.extra_op
}
fn require_updating_max_ts<EK, ER, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) where
EK: KvEngine,
ER: RaftEngine,
{
let epoch = region.get_region_epoch();
let term_low_bits = term & ((1 << 32) - 1); // 32 bits
        let version_low_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits
        let initial_status = (term_low_bits << 32) | (version_low_bits << 1);
self.ext
.max_ts_sync_status
.store(initial_status, Ordering::SeqCst);
info!(
logger,
"require updating max ts";
"initial_status" => initial_status,
);
let task = pd::Task::UpdateMaxTimestamp {
region_id: region.get_id(),
initial_status,
txn_ext: self.ext.clone(),
};
if let Err(e) = ctx.schedulers.pd.schedule(task) {
error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" =>?e);
}
}
pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> {
// Group in-memory pessimistic locks in the original region into new regions.
// The locks of new regions will be put into the corresponding new regions
// later. And the locks belonging to the old region will stay in the original
// map.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
// Update the version so the concurrent reader will fail due to EpochNotMatch
// instead of PessimisticLockNotFound.
pessimistic_locks.version = derived.get_region_epoch().get_version();
pessimistic_locks.group_by_regions(regions, derived)
}
pub fn init_with_lock(&self, locks: PeerPessimisticLocks) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
*pessimistic_locks = locks;
}
}
impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> {
    /// Returns true if the tick is consumed; otherwise the tick should be
    /// rescheduled.
pub fn | <T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) {
// If it is not leader, we needn't reactivate by tick. In-memory pessimistic
// lock will be enabled when this region becomes leader again.
        if !self.is_leader() {
return;
}
let transferring_leader = self.raft_group().raft.lead_transferee.is_some();
let txn_context = self.txn_context_mut();
let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write();
// And this tick is currently only used for the leader transfer failure case.
        if pessimistic_locks.status != LocksStatus::TransferringLeader {
return;
}
txn_context.reactivate_memory_lock_ticks += 1;
// `lead_transferee` is not set immediately after the lock status changes. So,
// we need the tick count condition to avoid reactivating too early.
        if !transferring_leader
&& txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick
{
pessimistic_locks.status = LocksStatus::Normal;
txn_context.reactivate_memory_lock_ticks = 0;
} else {
drop(pessimistic_locks);
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
}
}
// Returns whether we should propose another TransferLeader command. This is
// for:
// - Considering the amount of pessimistic locks can be big, it can reduce
// unavailable time caused by waiting for the transferee catching up logs.
// - Make transferring leader strictly after write commands that executes before
// proposing the locks, preventing unexpected lock loss.
pub fn propose_locks_before_transfer_leader<T>(
&mut self,
ctx: &mut StoreContext<EK, ER, T>,
msg: &eraftpb::Message,
) -> bool {
// 1. Disable in-memory pessimistic locks.
// Clone to make borrow checker happy when registering ticks.
let txn_ext = self.txn_context().ext.clone();
let mut pessimistic_locks = txn_ext.pessimistic_locks.write();
        // If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message
        // is a reply to a previous transfer leader command. If the locks status remains
        // in the TransferringLeader state, we can safely initiate transferring leader
        // now.
        // If it's not in TransferringLeader status now, it is probably because several
        // ticks have passed since we last proposed the locks and the memory locks have
        // been reactivated. In that case, we should propose the locks again.
if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX
&& pessimistic_locks.status == LocksStatus::TransferringLeader
{
return false;
}
// If it is not writable, it's probably because it's a retried TransferLeader
// and the locks have been proposed. But we still need to return true to
// propose another TransferLeader command. Otherwise, some write requests that
// have marked some locks as deleted will fail because raft rejects more
// proposals.
// It is OK to return true here if it's in other states like MergingRegion or
// NotLeader. In those cases, the locks will fail to propose and nothing will
// happen.
        if !pessimistic_locks.is_writable() {
return true;
}
pessimistic_locks.status = LocksStatus::TransferringLeader;
self.txn_context_mut().reactivate_memory_lock_ticks = 0;
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
// 2. Propose pessimistic locks
if pessimistic_locks.is_empty() {
return false;
}
// FIXME: Raft command has size limit. Either limit the total size of
// pessimistic locks in a region, or split commands here.
let mut encoder = SimpleWriteEncoder::with_capacity(512);
let mut lock_count = 0;
{
// Downgrade to a read guard, do not block readers in the scheduler as far as
// possible.
let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks);
fail::fail_point!("invalidate_locks_before_transfer_leader");
for (key, (lock, deleted)) in &*pessimistic_locks {
if *deleted {
continue;
}
lock_count += 1;
encoder.put(CF_LOCK, key.as_encoded(), &lock.to_lock().to_bytes());
}
}
if lock_count == 0 {
// If the map is not empty but all locks are deleted, it is possible that a
// write command has just marked locks deleted but not proposed yet.
// It might cause that command to fail if we skip proposing the
// extra TransferLeader command here.
return true;
}
let mut header = Box::<RaftRequestHeader>::default();
header.set_region_id(self.region_id());
header.set_region_epoch(self.region().get_region_epoch().clone());
header.set_peer(self.peer().clone());
info!(
self.logger,
"propose {} locks before transferring leader", lock_count;
);
        let PeerMsg::SimpleWrite(write) = PeerMsg::simple_write(header, encoder.encode()).0 else { unreachable!() };
self.on_simple_write(ctx, write.header, write.data, write.ch);
true
}
}
| on_reactivate_memory_lock_tick | identifier_name |
txn_ext.rs | // Copyright 2023 TiKV Project Authors. Licensed under Apache-2.0.
//! This module contains everything related to the transaction hook.
//!
//! This is the temporary (efficient) solution; it should be implemented as one
//! type of coprocessor.
use std::sync::{atomic::Ordering, Arc};
use crossbeam::atomic::AtomicCell;
use engine_traits::{KvEngine, RaftEngine, CF_LOCK};
use kvproto::{kvrpcpb::ExtraOp, metapb::Region, raft_cmdpb::RaftRequestHeader};
use parking_lot::RwLockWriteGuard;
use raft::eraftpb;
use raftstore::store::{
LocksStatus, PeerPessimisticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX,
};
use slog::{error, info, Logger};
use crate::{
batch::StoreContext,
raft::Peer,
router::{PeerMsg, PeerTick},
worker::pd,
SimpleWriteEncoder,
};
pub struct TxnContext {
ext: Arc<TxnExt>,
extra_op: Arc<AtomicCell<ExtraOp>>,
reactivate_memory_lock_ticks: usize,
}
impl Default for TxnContext {
#[inline]
fn default() -> Self {
Self {
ext: Arc::default(),
extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)),
reactivate_memory_lock_ticks: 0,
}
}
}
impl TxnContext {
#[inline]
pub fn on_region_changed(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &mut StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// A more recent read may happen on the old leader. So max ts should
// be updated after a peer becomes leader.
self.require_updating_max_ts(ctx, term, region, logger);
// Init the in-memory pessimistic lock table when the peer becomes leader.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::Normal;
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// If a follower merges into a leader, a more recent read may happen
// on the leader of the follower. So max ts should be updated after
// a region merge.
self.require_updating_max_ts(ctx, term, region, logger);
}
#[inline]
pub fn on_became_follower(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::NotLeader;
pessimistic_locks.clear();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn ext(&self) -> &Arc<TxnExt> {
&self.ext
}
#[inline]
pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> {
&self.extra_op
}
fn require_updating_max_ts<EK, ER, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) where
EK: KvEngine,
ER: RaftEngine,
{
let epoch = region.get_region_epoch();
let term_low_bits = term & ((1 << 32) - 1); // 32 bits
        let version_low_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits
        let initial_status = (term_low_bits << 32) | (version_low_bits << 1);
self.ext
.max_ts_sync_status
.store(initial_status, Ordering::SeqCst);
info!(
logger,
"require updating max ts";
"initial_status" => initial_status,
);
let task = pd::Task::UpdateMaxTimestamp {
region_id: region.get_id(),
initial_status,
txn_ext: self.ext.clone(),
};
if let Err(e) = ctx.schedulers.pd.schedule(task) {
error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" =>?e);
}
}
pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> {
// Group in-memory pessimistic locks in the original region into new regions.
// The locks of new regions will be put into the corresponding new regions
// later. And the locks belonging to the old region will stay in the original
// map.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
// Update the version so the concurrent reader will fail due to EpochNotMatch
// instead of PessimisticLockNotFound.
pessimistic_locks.version = derived.get_region_epoch().get_version();
pessimistic_locks.group_by_regions(regions, derived)
}
pub fn init_with_lock(&self, locks: PeerPessimisticLocks) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
*pessimistic_locks = locks;
}
}
impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> {
    /// Returns true if the tick is consumed; otherwise the tick should be
    /// rescheduled.
pub fn on_reactivate_memory_lock_tick<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) {
// If it is not leader, we needn't reactivate by tick. In-memory pessimistic
// lock will be enabled when this region becomes leader again.
        if !self.is_leader() {
return;
}
let transferring_leader = self.raft_group().raft.lead_transferee.is_some();
let txn_context = self.txn_context_mut();
let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write();
// And this tick is currently only used for the leader transfer failure case.
        if pessimistic_locks.status != LocksStatus::TransferringLeader {
return;
}
txn_context.reactivate_memory_lock_ticks += 1;
// `lead_transferee` is not set immediately after the lock status changes. So,
// we need the tick count condition to avoid reactivating too early.
        if !transferring_leader
&& txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick
{
pessimistic_locks.status = LocksStatus::Normal;
txn_context.reactivate_memory_lock_ticks = 0;
} else {
drop(pessimistic_locks);
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
}
}
// Returns whether we should propose another TransferLeader command. This is
// for:
// - Considering the amount of pessimistic locks can be big, it can reduce
// unavailable time caused by waiting for the transferee catching up logs.
// - Make transferring leader strictly after write commands that executes before
// proposing the locks, preventing unexpected lock loss.
pub fn propose_locks_before_transfer_leader<T>(
&mut self,
ctx: &mut StoreContext<EK, ER, T>,
msg: &eraftpb::Message,
) -> bool {
// 1. Disable in-memory pessimistic locks.
// Clone to make borrow checker happy when registering ticks.
let txn_ext = self.txn_context().ext.clone();
let mut pessimistic_locks = txn_ext.pessimistic_locks.write();
        // If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message
        // is a reply to a previous transfer leader command. If the locks status remains
        // in the TransferringLeader state, we can safely initiate transferring leader
        // now.
        // If it's not in TransferringLeader status now, it is probably because several
        // ticks have passed since we last proposed the locks and the memory locks have
        // been reactivated. In that case, we should propose the locks again.
if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX
&& pessimistic_locks.status == LocksStatus::TransferringLeader
{
return false;
}
// If it is not writable, it's probably because it's a retried TransferLeader
// and the locks have been proposed. But we still need to return true to
// propose another TransferLeader command. Otherwise, some write requests that
// have marked some locks as deleted will fail because raft rejects more
// proposals.
// It is OK to return true here if it's in other states like MergingRegion or
// NotLeader. In those cases, the locks will fail to propose and nothing will
// happen.
        if !pessimistic_locks.is_writable() {
return true;
}
pessimistic_locks.status = LocksStatus::TransferringLeader;
self.txn_context_mut().reactivate_memory_lock_ticks = 0;
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
// 2. Propose pessimistic locks
if pessimistic_locks.is_empty() {
return false;
}
// FIXME: Raft command has size limit. Either limit the total size of
// pessimistic locks in a region, or split commands here.
let mut encoder = SimpleWriteEncoder::with_capacity(512);
let mut lock_count = 0;
{
// Downgrade to a read guard, do not block readers in the scheduler as far as
// possible. | continue;
}
lock_count += 1;
encoder.put(CF_LOCK, key.as_encoded(), &lock.to_lock().to_bytes());
}
}
if lock_count == 0 {
// If the map is not empty but all locks are deleted, it is possible that a
// write command has just marked locks deleted but not proposed yet.
// It might cause that command to fail if we skip proposing the
// extra TransferLeader command here.
return true;
}
let mut header = Box::<RaftRequestHeader>::default();
header.set_region_id(self.region_id());
header.set_region_epoch(self.region().get_region_epoch().clone());
header.set_peer(self.peer().clone());
info!(
self.logger,
"propose {} locks before transferring leader", lock_count;
);
let PeerMsg::SimpleWrite(write) = PeerMsg::simple_write(header, encoder.encode()).0 else { unreachable!() };
self.on_simple_write(ctx, write.header, write.data, write.ch);
true
}
} | let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks);
fail::fail_point!("invalidate_locks_before_transfer_leader");
for (key, (lock, deleted)) in &*pessimistic_locks {
if *deleted { | random_line_split |
txn_ext.rs | // Copyright 2023 TiKV Project Authors. Licensed under Apache-2.0.
//! This module contains everything related to transaction hook.
//!
//! This is a temporary (but efficient) solution; it should eventually be
//! implemented as a type of coprocessor.
use std::sync::{atomic::Ordering, Arc};
use crossbeam::atomic::AtomicCell;
use engine_traits::{KvEngine, RaftEngine, CF_LOCK};
use kvproto::{kvrpcpb::ExtraOp, metapb::Region, raft_cmdpb::RaftRequestHeader};
use parking_lot::RwLockWriteGuard;
use raft::eraftpb;
use raftstore::store::{
LocksStatus, PeerPessimisticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX,
};
use slog::{error, info, Logger};
use crate::{
batch::StoreContext,
raft::Peer,
router::{PeerMsg, PeerTick},
worker::pd,
SimpleWriteEncoder,
};
pub struct TxnContext {
ext: Arc<TxnExt>,
extra_op: Arc<AtomicCell<ExtraOp>>,
reactivate_memory_lock_ticks: usize,
}
impl Default for TxnContext {
#[inline]
fn default() -> Self {
Self {
ext: Arc::default(),
extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)),
reactivate_memory_lock_ticks: 0,
}
}
}
impl TxnContext {
#[inline]
pub fn on_region_changed(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &mut StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// A more recent read may happen on the old leader. So max ts should
// be updated after a peer becomes leader.
self.require_updating_max_ts(ctx, term, region, logger);
// Init the in-memory pessimistic lock table when the peer becomes leader.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::Normal;
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// If a follower merges into a leader, a more recent read may happen
// on the leader of the follower. So max ts should be updated after
// a region merge.
self.require_updating_max_ts(ctx, term, region, logger);
}
#[inline]
pub fn on_became_follower(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::NotLeader;
pessimistic_locks.clear();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn ext(&self) -> &Arc<TxnExt> {
&self.ext
}
#[inline]
pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> {
&self.extra_op
}
fn require_updating_max_ts<EK, ER, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) where
EK: KvEngine,
ER: RaftEngine,
{
let epoch = region.get_region_epoch();
let term_low_bits = term & ((1 << 32) - 1); // 32 bits
let version_low_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits
let initial_status = (term_low_bits << 32) | (version_low_bits << 1);
self.ext
.max_ts_sync_status
.store(initial_status, Ordering::SeqCst);
info!(
logger,
"require updating max ts";
"initial_status" => initial_status,
);
let task = pd::Task::UpdateMaxTimestamp {
region_id: region.get_id(),
initial_status,
txn_ext: self.ext.clone(),
};
if let Err(e) = ctx.schedulers.pd.schedule(task) {
error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" => ?e);
}
}
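    // Hedged sketch (added for illustration; the function name and inputs are
    // hypothetical): the packing above stores the low 32 bits of the term in
    // bits 32..=63 and the low 31 bits of the epoch version in bits 1..=31,
    // leaving bit 0 clear.
    #[cfg(test)]
    fn _initial_status_packing_example() {
        let term: u64 = 5;
        let version: u64 = 3;
        let term_low_bits = term & ((1 << 32) - 1); // 5
        let version_low_bits = version & ((1 << 31) - 1); // 3
        let initial_status = (term_low_bits << 32) | (version_low_bits << 1);
        assert_eq!(initial_status, (5u64 << 32) | 6); // 21_474_836_486
    }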
pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> {
// Group in-memory pessimistic locks in the original region into new regions.
// The locks of new regions will be put into the corresponding new regions
// later. And the locks belonging to the old region will stay in the original
// map.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
// Update the version so the concurrent reader will fail due to EpochNotMatch
// instead of PessimisticLockNotFound.
pessimistic_locks.version = derived.get_region_epoch().get_version();
pessimistic_locks.group_by_regions(regions, derived)
}
pub fn init_with_lock(&self, locks: PeerPessimisticLocks) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
*pessimistic_locks = locks;
}
}
impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> {
/// Returning true means the tick is consumed; otherwise the tick should be
/// rescheduled.
pub fn on_reactivate_memory_lock_tick<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) {
// If it is not leader, we needn't reactivate by tick. In-memory pessimistic
// lock will be enabled when this region becomes leader again.
if !self.is_leader() {
return;
}
let transferring_leader = self.raft_group().raft.lead_transferee.is_some();
let txn_context = self.txn_context_mut();
let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write();
// And this tick is currently only used for the leader transfer failure case.
if pessimistic_locks.status != LocksStatus::TransferringLeader {
return;
}
txn_context.reactivate_memory_lock_ticks += 1;
// `lead_transferee` is not set immediately after the lock status changes. So,
// we need the tick count condition to avoid reactivating too early.
if !transferring_leader
&& txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick
{
pessimistic_locks.status = LocksStatus::Normal;
txn_context.reactivate_memory_lock_ticks = 0;
} else |
}
// Returns whether we should propose another TransferLeader command. This is
// for:
// - Since the number of pessimistic locks can be big, it reduces the
// unavailable time caused by waiting for the transferee to catch up on logs.
// - It makes the leader transfer happen strictly after the write commands that
// execute before proposing the locks, preventing unexpected lock loss.
pub fn propose_locks_before_transfer_leader<T>(
&mut self,
ctx: &mut StoreContext<EK, ER, T>,
msg: &eraftpb::Message,
) -> bool {
// 1. Disable in-memory pessimistic locks.
// Clone to make borrow checker happy when registering ticks.
let txn_ext = self.txn_context().ext.clone();
let mut pessimistic_locks = txn_ext.pessimistic_locks.write();
// If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message
// is a reply to an earlier transfer leader command. If the locks status is
// still TransferringLeader, we can safely initiate the leader transfer now.
// If it is no longer TransferringLeader, it is probably because several
// ticks have passed since the locks were proposed last time and we have
// reactivated the memory locks. In that case, we should propose the locks
// again.
if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX
&& pessimistic_locks.status == LocksStatus::TransferringLeader
{
return false;
}
// If it is not writable, it's probably because it's a retried TransferLeader
// and the locks have been proposed. But we still need to return true to
// propose another TransferLeader command. Otherwise, some write requests that
// have marked some locks as deleted will fail because raft rejects more
// proposals.
// It is OK to return true here if it's in other states like MergingRegion or
// NotLeader. In those cases, the locks will fail to propose and nothing will
// happen.
if !pessimistic_locks.is_writable() {
return true;
}
pessimistic_locks.status = LocksStatus::TransferringLeader;
self.txn_context_mut().reactivate_memory_lock_ticks = 0;
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
// 2. Propose pessimistic locks
if pessimistic_locks.is_empty() {
return false;
}
// FIXME: Raft command has size limit. Either limit the total size of
// pessimistic locks in a region, or split commands here.
let mut encoder = SimpleWriteEncoder::with_capacity(512);
let mut lock_count = 0;
{
// Downgrade to a read guard, do not block readers in the scheduler as far as
// possible.
let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks);
fail::fail_point!("invalidate_locks_before_transfer_leader");
for (key, (lock, deleted)) in &*pessimistic_locks {
if *deleted {
continue;
}
lock_count += 1;
encoder.put(CF_LOCK, key.as_encoded(), &lock.to_lock().to_bytes());
}
}
if lock_count == 0 {
// If the map is not empty but all locks are deleted, it is possible that a
// write command has just marked locks deleted but not proposed yet.
// It might cause that command to fail if we skip proposing the
// extra TransferLeader command here.
return true;
}
let mut header = Box::<RaftRequestHeader>::default();
header.set_region_id(self.region_id());
header.set_region_epoch(self.region().get_region_epoch().clone());
header.set_peer(self.peer().clone());
info!(
self.logger,
"propose {} locks before transferring leader", lock_count;
);
let PeerMsg::SimpleWrite(write) = PeerMsg::simple_write(header, encoder.encode()).0 else { unreachable!() };
self.on_simple_write(ctx, write.header, write.data, write.ch);
true
}
}
| {
drop(pessimistic_locks);
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
} | conditional_block |
lib.rs | use std::fmt;
use std::time::{Duration, SystemTime, SystemTimeError};
/// Enum with the seven days of the week.
#[derive(Debug, Clone, Copy)]
pub enum Day {
Sunday,
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
}
/// Maps the `Day` enum to a string representation, e.g. "Monday".
pub fn day_string(day: Day) -> &'static str {
match day {
Day::Sunday => "Sunday",
Day::Monday => "Monday",
Day::Tuesday => "Tuesday",
Day::Wednesday => "Wednesday",
Day::Thursday => "Thursday",
Day::Friday => "Friday",
Day::Saturday => "Saturday",
}
}
/// Maps the `Day` enum to a shortened string representation, e.g. "Mon".
pub fn day_abbrev_string(day: Day) -> &'static str {
&day_string(day)[0..3]
}
impl fmt::Display for Day {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", day_string(*self))
}
}
/// Enum with the months of the year.
#[derive(Debug, Clone, Copy)]
pub enum Month {
January,
February,
March,
April,
May,
June,
July,
August,
September,
October,
November,
December,
}
/// Maps the `Month` enum to a string representation, e.g. "January".
pub fn month_string(month: Month) -> &'static str {
match month {
Month::January => "January",
Month::February => "February",
Month::March => "March",
Month::April => "April",
Month::May => "May",
Month::June => "June",
Month::July => "July",
Month::August => "August",
Month::September => "September",
Month::October => "October",
Month::November => "November",
Month::December => "December",
}
}
/// Maps the `Month` enum to a shortened string representation, e.g. "Jan".
pub fn month_abbrev_string(month: Month) -> &'static str {
&month_string(month)[0..3]
}
impl fmt::Display for Month {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", month_string(*self))
}
}
| if year % 400 == 0 {
366
} else if year % 100 == 0 {
365
} else if year % 4 == 0 {
366
} else {
365
}
}
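// Hedged example (added for illustration; the test module name is
// hypothetical): the Gregorian leap-year rules above in action.
#[cfg(test)]
mod days_in_year_examples {
    use super::days_in_year;

    #[test]
    fn leap_year_rules() {
        assert_eq!(days_in_year(2000), 366); // divisible by 400
        assert_eq!(days_in_year(1900), 365); // divisible by 100 but not 400
        assert_eq!(days_in_year(2020), 366); // divisible by 4
        assert_eq!(days_in_year(2019), 365); // common year
    }
}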
/// Takes in a year and month (e.g. 2020, February) and returns the number of days in that month.
pub fn days_in_month(year: u64, month: Month) -> u64 {
match month {
Month::January => 31,
Month::February if days_in_year(year) == 366 => 29,
Month::February => 28,
Month::March => 31,
Month::April => 30,
Month::May => 31,
Month::June => 30,
Month::July => 31,
Month::August => 31,
Month::September => 30,
Month::October => 31,
Month::November => 30,
Month::December => 31,
}
}
/// Converts a `Month` enum to an integer in the range 1-12.
pub fn index_from_month(month: Month) -> u64 {
match month {
Month::January => 1,
Month::February => 2,
Month::March => 3,
Month::April => 4,
Month::May => 5,
Month::June => 6,
Month::July => 7,
Month::August => 8,
Month::September => 9,
Month::October => 10,
Month::November => 11,
Month::December => 12,
}
}
/// Converts an integer in the range 1-12 into the corresponding `Month` enum.
/// Values outside the 1-12 range are converted to `None`.
pub fn month_from_index(index: u64) -> Option<Month> {
match index {
1 => Some(Month::January),
2 => Some(Month::February),
3 => Some(Month::March),
4 => Some(Month::April),
5 => Some(Month::May),
6 => Some(Month::June),
7 => Some(Month::July),
8 => Some(Month::August),
9 => Some(Month::September),
10 => Some(Month::October),
11 => Some(Month::November),
12 => Some(Month::December),
_ => None,
}
}
/// Returns the number of seconds in a day.
pub fn seconds_in_day() -> u64 {
24 * 60 * 60
}
/// Returns the number of seconds in an hour.
pub fn seconds_in_hour() -> u64 {
60 * 60
}
/// Returns the number of seconds in a minute.
pub fn seconds_in_minute() -> u64 {
60
}
/// Conceptually this is a thin wrapper for `std::time::SystemTime`, but provides
/// more useful functions. The impl of this struct has functions that allow easily
/// extracting the year/month/date/etc. for the given point in time. In actual fact
/// the internal representation of this struct is a `Duration` since the unix epoch,
/// so that error-handling is only required once upon creating the instance, and
/// not for each attempt at extracting date/time fields.
pub struct PostEpochTime {
delta: Duration,
}
impl PostEpochTime {
/// Create a `PostEpochTime` from a `SystemTime`. The `SystemTime` must be temporally
/// in the future relative to the unix epoch, or an error will be returned.
pub fn from(st: &SystemTime) -> Result<Self, SystemTimeError> {
Ok(PostEpochTime {
delta: st.duration_since(SystemTime::UNIX_EPOCH)?,
})
}
/// Create a `PostEpochTime` for the current instant. The current instant must be
/// in the future relative to the unix epoch, or an error will be returned.
pub fn now() -> Result<Self, SystemTimeError> {
Self::from(&SystemTime::now())
}
/// Returns the number of milliseconds passed since the unix epoch.
pub fn milliseconds_since_epoch(&self) -> u128 {
self.delta.as_millis()
}
/// Returns the number of microseconds passed since the unix epoch.
pub fn microseconds_since_epoch(&self) -> u128 {
self.delta.as_micros()
}
/// Returns the number of nanoseconds passed since the unix epoch.
pub fn nanoseconds_since_epoch(&self) -> u128 {
self.delta.as_nanos()
}
/// Returns the number of complete seconds passed since the unix epoch.
pub fn seconds_since_epoch(&self) -> u64 {
self.delta.as_secs()
}
/// Returns the number of complete days passed since the unix epoch.
pub fn days_since_epoch(&self) -> u64 {
self.delta.as_secs() / seconds_in_day()
}
/// Returns the day of the week that this point in time falls on.
pub fn day_of_week(&self) -> Day {
match self.days_since_epoch() % 7 {
0 => Day::Thursday,
1 => Day::Friday,
2 => Day::Saturday,
3 => Day::Sunday,
4 => Day::Monday,
5 => Day::Tuesday,
6 => Day::Wednesday,
_ => panic!("Modulo operator is broken"),
}
}
fn year_split(&self) -> (u64, u64) {
let mut days = self.days_since_epoch();
let mut year = 1970;
loop {
let in_year = days_in_year(year);
if days < in_year {
break;
}
days -= in_year;
year += 1;
}
(year, days)
}
/// Returns the year (e.g. 2020) this point in time falls on.
pub fn year(&self) -> u64 {
self.year_split().0
}
/// Returns the day of the year for this point in time (1-indexed).
/// A return value of 1 indicates January 1, a value of 2 indicates January 2,
/// and so on. If the year is a leap year the largest returned value
/// would be 366, and for non-leap years it would be 365.
pub fn day_of_year(&self) -> u64 {
self.year_split().1 + 1
}
fn month_split(&self) -> (Month, u64) {
let (year, mut days) = self.year_split();
let mut month = Month::January;
loop {
let in_month = days_in_month(year, month);
if days < in_month {
break;
}
days -= in_month;
month =
month_from_index(index_from_month(month) + 1).expect("Month should never overflow");
}
(month, days)
}
/// Returns the month this point in time falls on.
pub fn month(&self) -> Month {
self.month_split().0
}
/// Returns the day of the month for this point in time (1-indexed).
/// A return value of 1 means it falls on the first of the month. The maximum
/// returned value will be 31.
pub fn day_of_month(&self) -> u64 {
self.month_split().1 + 1
}
/// Returns the second within the day (0-indexed). This will be in the range
/// 0..86399 (inclusive).
pub fn second_in_day(&self) -> u64 {
self.delta.as_secs() % seconds_in_day()
}
/// Returns the hour within the day (0-indexed). This will be in the range
/// 0..23 (inclusive).
pub fn hour(&self) -> u64 {
self.second_in_day() / seconds_in_hour()
}
/// Returns the second within the hour (0-indexed). This will be in the range
/// 0..3599 (inclusive).
pub fn second_in_hour(&self) -> u64 {
self.second_in_day() % seconds_in_hour()
}
/// Returns the minute within the hour (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn minute(&self) -> u64 {
self.second_in_hour() / seconds_in_minute()
}
/// Returns the second within the minute (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn second(&self) -> u64 {
self.delta.as_secs() % seconds_in_minute()
}
}
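// Hedged example (added for illustration; the module name is hypothetical):
// the unix epoch, 1970-01-01, fell on a Thursday, which anchors the modulo
// table in `day_of_week`; one day later maps to Friday.
#[cfg(test)]
mod day_of_week_examples {
    use super::*;
    use std::time::{Duration, SystemTime};

    #[test]
    fn epoch_is_thursday() {
        let epoch = PostEpochTime::from(&SystemTime::UNIX_EPOCH).unwrap();
        assert_eq!(epoch.days_since_epoch(), 0);
        assert!(matches!(epoch.day_of_week(), Day::Thursday));

        let next_day = SystemTime::UNIX_EPOCH + Duration::from_secs(seconds_in_day());
        let pet = PostEpochTime::from(&next_day).unwrap();
        assert!(matches!(pet.day_of_week(), Day::Friday));
    }
}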
impl fmt::Display for PostEpochTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}, {} {} {} {:02}:{:02}:{:02}",
day_abbrev_string(self.day_of_week()),
self.day_of_month(),
month_abbrev_string(self.month()),
self.year(),
self.hour(),
self.minute(),
self.second()
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke_test() {
let timestamp = SystemTime::UNIX_EPOCH + Duration::new(1580610340, 123);
let pet = PostEpochTime::from(×tamp).unwrap();
assert_eq!(format!("{}", pet), "Sun, 2 Feb 2020 02:25:40".to_string());
}
} | /// Takes in a year (e.g. 2019) and returns the number of days in that year.
pub fn days_in_year(year: u64) -> u64 { | random_line_split |
lib.rs | use std::fmt;
use std::time::{Duration, SystemTime, SystemTimeError};
/// Enum with the seven days of the week.
#[derive(Debug, Clone, Copy)]
pub enum Day {
Sunday,
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
}
/// Maps the `Day` enum to a string representation, e.g. "Monday".
pub fn day_string(day: Day) -> &'static str {
match day {
Day::Sunday => "Sunday",
Day::Monday => "Monday",
Day::Tuesday => "Tuesday",
Day::Wednesday => "Wednesday",
Day::Thursday => "Thursday",
Day::Friday => "Friday",
Day::Saturday => "Saturday",
}
}
/// Maps the `Day` enum to a shortened string representation, e.g. "Mon".
pub fn day_abbrev_string(day: Day) -> &'static str {
&day_string(day)[0..3]
}
impl fmt::Display for Day {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", day_string(*self))
}
}
/// Enum with the months of the year.
#[derive(Debug, Clone, Copy)]
pub enum Month {
January,
February,
March,
April,
May,
June,
July,
August,
September,
October,
November,
December,
}
/// Maps the `Month` enum to a string representation, e.g. "January".
pub fn month_string(month: Month) -> &'static str {
match month {
Month::January => "January",
Month::February => "February",
Month::March => "March",
Month::April => "April",
Month::May => "May",
Month::June => "June",
Month::July => "July",
Month::August => "August",
Month::September => "September",
Month::October => "October",
Month::November => "November",
Month::December => "December",
}
}
/// Maps the `Month` enum to a shortened string representation, e.g. "Jan".
pub fn month_abbrev_string(month: Month) -> &'static str {
&month_string(month)[0..3]
}
impl fmt::Display for Month {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", month_string(*self))
}
}
/// Takes in a year (e.g. 2019) and returns the number of days in that year.
pub fn days_in_year(year: u64) -> u64 {
if year % 400 == 0 {
366
} else if year % 100 == 0 {
365
} else if year % 4 == 0 {
366
} else {
365
}
}
/// Takes in a year and month (e.g. 2020, February) and returns the number of days in that month.
pub fn days_in_month(year: u64, month: Month) -> u64 {
match month {
Month::January => 31,
Month::February if days_in_year(year) == 366 => 29,
Month::February => 28,
Month::March => 31,
Month::April => 30,
Month::May => 31,
Month::June => 30,
Month::July => 31,
Month::August => 31,
Month::September => 30,
Month::October => 31,
Month::November => 30,
Month::December => 31,
}
}
/// Converts a `Month` enum to an integer in the range 1-12.
pub fn index_from_month(month: Month) -> u64 {
match month {
Month::January => 1,
Month::February => 2,
Month::March => 3,
Month::April => 4,
Month::May => 5,
Month::June => 6,
Month::July => 7,
Month::August => 8,
Month::September => 9,
Month::October => 10,
Month::November => 11,
Month::December => 12,
}
}
/// Converts an integer in the range 1-12 into the corresponding `Month` enum.
/// Values outside the 1-12 range are converted to `None`.
pub fn month_from_index(index: u64) -> Option<Month> {
match index {
1 => Some(Month::January),
2 => Some(Month::February),
3 => Some(Month::March),
4 => Some(Month::April),
5 => Some(Month::May),
6 => Some(Month::June),
7 => Some(Month::July),
8 => Some(Month::August),
9 => Some(Month::September),
10 => Some(Month::October),
11 => Some(Month::November),
12 => Some(Month::December),
_ => None,
}
}
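// Hedged round-trip sketch (added for illustration; the module name is
// hypothetical): `month_from_index` inverts `index_from_month` on 1..=12 and
// yields `None` outside that range.
#[cfg(test)]
mod month_index_examples {
    use super::*;

    #[test]
    fn round_trip() {
        for i in 1..=12u64 {
            let month = month_from_index(i).unwrap();
            assert_eq!(index_from_month(month), i);
        }
        assert!(month_from_index(0).is_none());
        assert!(month_from_index(13).is_none());
    }
}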
/// Returns the number of seconds in a day.
pub fn seconds_in_day() -> u64 {
24 * 60 * 60
}
/// Returns the number of seconds in an hour.
pub fn seconds_in_hour() -> u64 {
60 * 60
}
/// Returns the number of seconds in a minute.
pub fn seconds_in_minute() -> u64 {
60
}
/// Conceptually this is a thin wrapper for `std::time::SystemTime`, but provides
/// more useful functions. The impl of this struct has functions that allow easily
/// extracting the year/month/date/etc. for the given point in time. In actual fact
/// the internal representation of this struct is a `Duration` since the unix epoch,
/// so that error-handling is only required once upon creating the instance, and
/// not for each attempt at extracting date/time fields.
pub struct PostEpochTime {
delta: Duration,
}
impl PostEpochTime {
/// Create a `PostEpochTime` from a `SystemTime`. The `SystemTime` must be temporally
/// in the future relative to the unix epoch, or an error will be returned.
pub fn from(st: &SystemTime) -> Result<Self, SystemTimeError> {
Ok(PostEpochTime {
delta: st.duration_since(SystemTime::UNIX_EPOCH)?,
})
}
/// Create a `PostEpochTime` for the current instant. The current instant must be
/// in the future relative to the unix epoch, or an error will be returned.
pub fn now() -> Result<Self, SystemTimeError> |
/// Returns the number of milliseconds passed since the unix epoch.
pub fn milliseconds_since_epoch(&self) -> u128 {
self.delta.as_millis()
}
/// Returns the number of microseconds passed since the unix epoch.
pub fn microseconds_since_epoch(&self) -> u128 {
self.delta.as_micros()
}
/// Returns the number of nanoseconds passed since the unix epoch.
pub fn nanoseconds_since_epoch(&self) -> u128 {
self.delta.as_nanos()
}
/// Returns the number of complete seconds passed since the unix epoch.
pub fn seconds_since_epoch(&self) -> u64 {
self.delta.as_secs()
}
/// Returns the number of complete days passed since the unix epoch.
pub fn days_since_epoch(&self) -> u64 {
self.delta.as_secs() / seconds_in_day()
}
/// Returns the day of the week that this point in time falls on.
pub fn day_of_week(&self) -> Day {
match self.days_since_epoch() % 7 {
0 => Day::Thursday,
1 => Day::Friday,
2 => Day::Saturday,
3 => Day::Sunday,
4 => Day::Monday,
5 => Day::Tuesday,
6 => Day::Wednesday,
_ => panic!("Modulo operator is broken"),
}
}
fn year_split(&self) -> (u64, u64) {
let mut days = self.days_since_epoch();
let mut year = 1970;
loop {
let in_year = days_in_year(year);
if days < in_year {
break;
}
days -= in_year;
year += 1;
}
(year, days)
}
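    // Worked example (added for illustration): 2020-02-02 is 18_294 days after
    // the epoch. The years 1970..=2019 contain 12 leap years, i.e.
    // 50 * 365 + 12 = 18_262 days, so `year_split` returns (2020, 32) and
    // `day_of_year` below reports 33, i.e. February 2nd.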
/// Returns the year (e.g. 2020) this point in time falls on.
pub fn year(&self) -> u64 {
self.year_split().0
}
/// Returns the day of the year for this point in time (1-indexed).
/// A return value of 1 indicates January 1, a value of 2 indicates January 2,
/// and so on. If the year is a leap year the largest returned value
/// would be 366, and for non-leap years it would be 365.
pub fn day_of_year(&self) -> u64 {
self.year_split().1 + 1
}
fn month_split(&self) -> (Month, u64) {
let (year, mut days) = self.year_split();
let mut month = Month::January;
loop {
let in_month = days_in_month(year, month);
if days < in_month {
break;
}
days -= in_month;
month =
month_from_index(index_from_month(month) + 1).expect("Month should never overflow");
}
(month, days)
}
/// Returns the month this point in time falls on.
pub fn month(&self) -> Month {
self.month_split().0
}
/// Returns the day of the month for this point in time (1-indexed).
/// A return value of 1 means it falls on the first of the month. The maximum
/// returned value will be 31.
pub fn day_of_month(&self) -> u64 {
self.month_split().1 + 1
}
/// Returns the second within the day (0-indexed). This will be in the range
/// 0..86399 (inclusive).
pub fn second_in_day(&self) -> u64 {
self.delta.as_secs() % seconds_in_day()
}
/// Returns the hour within the day (0-indexed). This will be in the range
/// 0..23 (inclusive).
pub fn hour(&self) -> u64 {
self.second_in_day() / seconds_in_hour()
}
/// Returns the second within the hour (0-indexed). This will be in the range
/// 0..3599 (inclusive).
pub fn second_in_hour(&self) -> u64 {
self.second_in_day() % seconds_in_hour()
}
/// Returns the minute within the hour (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn minute(&self) -> u64 {
self.second_in_hour() / seconds_in_minute()
}
/// Returns the second within the minute (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn second(&self) -> u64 {
self.delta.as_secs() % seconds_in_minute()
}
}
impl fmt::Display for PostEpochTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}, {} {} {} {:02}:{:02}:{:02}",
day_abbrev_string(self.day_of_week()),
self.day_of_month(),
month_abbrev_string(self.month()),
self.year(),
self.hour(),
self.minute(),
self.second()
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke_test() {
let timestamp = SystemTime::UNIX_EPOCH + Duration::new(1580610340, 123);
let pet = PostEpochTime::from(×tamp).unwrap();
assert_eq!(format!("{}", pet), "Sun, 2 Feb 2020 02:25:40".to_string());
}
}
| {
Self::from(&SystemTime::now())
} | identifier_body |
lib.rs | use std::fmt;
use std::time::{Duration, SystemTime, SystemTimeError};
/// Enum with the seven days of the week.
#[derive(Debug, Clone, Copy)]
pub enum Day {
Sunday,
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
}
/// Maps the `Day` enum to a string representation, e.g. "Monday".
pub fn day_string(day: Day) -> &'static str {
match day {
Day::Sunday => "Sunday",
Day::Monday => "Monday",
Day::Tuesday => "Tuesday",
Day::Wednesday => "Wednesday",
Day::Thursday => "Thursday",
Day::Friday => "Friday",
Day::Saturday => "Saturday",
}
}
/// Maps the `Day` enum to a shortened string representation, e.g. "Mon".
pub fn day_abbrev_string(day: Day) -> &'static str {
&day_string(day)[0..3]
}
impl fmt::Display for Day {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", day_string(*self))
}
}
/// Enum with the months of the year.
#[derive(Debug, Clone, Copy)]
pub enum Month {
January,
February,
March,
April,
May,
June,
July,
August,
September,
October,
November,
December,
}
/// Maps the `Month` enum to a string representation, e.g. "January".
pub fn month_string(month: Month) -> &'static str {
match month {
Month::January => "January",
Month::February => "February",
Month::March => "March",
Month::April => "April",
Month::May => "May",
Month::June => "June",
Month::July => "July",
Month::August => "August",
Month::September => "September",
Month::October => "October",
Month::November => "November",
Month::December => "December",
}
}
/// Maps the `Month` enum to a shortened string representation, e.g. "Jan".
pub fn | (month: Month) -> &'static str {
&month_string(month)[0..3]
}
impl fmt::Display for Month {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", month_string(*self))
}
}
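// Minimal usage sketch (added for illustration; the module name is
// hypothetical): the `Display` impls defer to `day_string`/`month_string`, so
// formatting an enum value yields its full English name, while the `_abbrev_`
// helpers slice off the first three characters.
#[cfg(test)]
mod display_examples {
    use super::*;

    #[test]
    fn formats_names() {
        assert_eq!(format!("{}", Day::Monday), "Monday");
        assert_eq!(format!("{}", Month::February), "February");
        assert_eq!(day_abbrev_string(Day::Monday), "Mon");
        assert_eq!(month_abbrev_string(Month::February), "Feb");
    }
}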
/// Takes in a year (e.g. 2019) and returns the number of days in that year.
pub fn days_in_year(year: u64) -> u64 {
if year % 400 == 0 {
366
} else if year % 100 == 0 {
365
} else if year % 4 == 0 {
366
} else {
365
}
}
/// Takes in a year and month (e.g. 2020, February) and returns the number of days in that month.
pub fn days_in_month(year: u64, month: Month) -> u64 {
match month {
Month::January => 31,
Month::February if days_in_year(year) == 366 => 29,
Month::February => 28,
Month::March => 31,
Month::April => 30,
Month::May => 31,
Month::June => 30,
Month::July => 31,
Month::August => 31,
Month::September => 30,
Month::October => 31,
Month::November => 30,
Month::December => 31,
}
}
/// Converts a `Month` enum to an integer in the range 1-12.
pub fn index_from_month(month: Month) -> u64 {
match month {
Month::January => 1,
Month::February => 2,
Month::March => 3,
Month::April => 4,
Month::May => 5,
Month::June => 6,
Month::July => 7,
Month::August => 8,
Month::September => 9,
Month::October => 10,
Month::November => 11,
Month::December => 12,
}
}
/// Converts an integer in the range 1-12 into the corresponding `Month` enum.
/// Values outside the 1-12 range are converted to `None`.
pub fn month_from_index(index: u64) -> Option<Month> {
match index {
1 => Some(Month::January),
2 => Some(Month::February),
3 => Some(Month::March),
4 => Some(Month::April),
5 => Some(Month::May),
6 => Some(Month::June),
7 => Some(Month::July),
8 => Some(Month::August),
9 => Some(Month::September),
10 => Some(Month::October),
11 => Some(Month::November),
12 => Some(Month::December),
_ => None,
}
}
/// Returns the number of seconds in a day.
pub fn seconds_in_day() -> u64 {
24 * 60 * 60
}
/// Returns the number of seconds in an hour.
pub fn seconds_in_hour() -> u64 {
60 * 60
}
/// Returns the number of seconds in a minute.
pub fn seconds_in_minute() -> u64 {
60
}
/// Conceptually this is a thin wrapper for `std::time::SystemTime`, but provides
/// more useful functions. The impl of this struct has functions that allow easily
/// extracting the year/month/date/etc. for the given point in time. In actual fact
/// the internal representation of this struct is a `Duration` since the unix epoch,
/// so that error-handling is only required once upon creating the instance, and
/// not for each attempt at extracting date/time fields.
pub struct PostEpochTime {
delta: Duration,
}
impl PostEpochTime {
/// Create a `PostEpochTime` from a `SystemTime`. The `SystemTime` must be temporally
/// in the future relative to the unix epoch, or an error will be returned.
pub fn from(st: &SystemTime) -> Result<Self, SystemTimeError> {
Ok(PostEpochTime {
delta: st.duration_since(SystemTime::UNIX_EPOCH)?,
})
}
/// Create a `PostEpochTime` for the current instant. The current instant must be
/// in the future relative to the unix epoch, or an error will be returned.
pub fn now() -> Result<Self, SystemTimeError> {
Self::from(&SystemTime::now())
}
/// Returns the number of milliseconds passed since the unix epoch.
pub fn milliseconds_since_epoch(&self) -> u128 {
self.delta.as_millis()
}
/// Returns the number of microseconds passed since the unix epoch.
pub fn microseconds_since_epoch(&self) -> u128 {
self.delta.as_micros()
}
/// Returns the number of nanoseconds passed since the unix epoch.
pub fn nanoseconds_since_epoch(&self) -> u128 {
self.delta.as_nanos()
}
/// Returns the number of complete seconds passed since the unix epoch.
pub fn seconds_since_epoch(&self) -> u64 {
self.delta.as_secs()
}
/// Returns the number of complete days passed since the unix epoch.
pub fn days_since_epoch(&self) -> u64 {
self.delta.as_secs() / seconds_in_day()
}
/// Returns the day of the week that this point in time falls on.
pub fn day_of_week(&self) -> Day {
match self.days_since_epoch() % 7 {
0 => Day::Thursday,
1 => Day::Friday,
2 => Day::Saturday,
3 => Day::Sunday,
4 => Day::Monday,
5 => Day::Tuesday,
6 => Day::Wednesday,
_ => panic!("Modulo operator is broken"),
}
}
fn year_split(&self) -> (u64, u64) {
let mut days = self.days_since_epoch();
let mut year = 1970;
loop {
let in_year = days_in_year(year);
if days < in_year {
break;
}
days -= in_year;
year += 1;
}
(year, days)
}
/// Returns the year (e.g. 2020) this point in time falls on.
pub fn year(&self) -> u64 {
self.year_split().0
}
/// Returns the day of the year for this point in time (1-indexed).
/// A return value of 1 indicates January 1, a value of 2 indicates January 2,
/// and so on. If the year is a leap year the largest returned value
/// would be 366, and for non-leap years it would be 365.
pub fn day_of_year(&self) -> u64 {
self.year_split().1 + 1
}
fn month_split(&self) -> (Month, u64) {
let (year, mut days) = self.year_split();
let mut month = Month::January;
loop {
let in_month = days_in_month(year, month);
if days < in_month {
break;
}
days -= in_month;
month =
month_from_index(index_from_month(month) + 1).expect("Month should never overflow");
}
(month, days)
}
/// Returns the month this point in time falls on.
pub fn month(&self) -> Month {
self.month_split().0
}
/// Returns the day of the month for this point in time (1-indexed).
/// A return value of 1 means it falls on the first of the month. The maximum
/// returned value will be 31.
pub fn day_of_month(&self) -> u64 {
self.month_split().1 + 1
}
/// Returns the second within the day (0-indexed). This will be in the range
/// 0..86399 (inclusive).
pub fn second_in_day(&self) -> u64 {
self.delta.as_secs() % seconds_in_day()
}
/// Returns the hour within the day (0-indexed). This will be in the range
/// 0..23 (inclusive).
pub fn hour(&self) -> u64 {
self.second_in_day() / seconds_in_hour()
}
/// Returns the second within the hour (0-indexed). This will be in the range
/// 0..3599 (inclusive).
pub fn second_in_hour(&self) -> u64 {
self.second_in_day() % seconds_in_hour()
}
/// Returns the minute within the hour (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn minute(&self) -> u64 {
self.second_in_hour() / seconds_in_minute()
}
/// Returns the second within the minute (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn second(&self) -> u64 {
self.delta.as_secs() % seconds_in_minute()
}
}
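// Worked breakdown (added for illustration; matches the smoke test below): for
// the timestamp 1_580_610_340 s, `days_since_epoch` is 18_294 and
// `second_in_day` is 8_740, which splits into hour 2, minute 25, second 40.
#[cfg(test)]
mod field_breakdown_examples {
    use super::*;
    use std::time::{Duration, SystemTime};

    #[test]
    fn splits_seconds_into_fields() {
        let ts = SystemTime::UNIX_EPOCH + Duration::from_secs(1_580_610_340);
        let pet = PostEpochTime::from(&ts).unwrap();
        assert_eq!(pet.days_since_epoch(), 18_294);
        assert_eq!(pet.second_in_day(), 8_740);
        assert_eq!((pet.hour(), pet.minute(), pet.second()), (2, 25, 40));
    }
}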
impl fmt::Display for PostEpochTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}, {} {} {} {:02}:{:02}:{:02}",
day_abbrev_string(self.day_of_week()),
self.day_of_month(),
month_abbrev_string(self.month()),
self.year(),
self.hour(),
self.minute(),
self.second()
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke_test() {
let timestamp = SystemTime::UNIX_EPOCH + Duration::new(1580610340, 123);
let pet = PostEpochTime::from(×tamp).unwrap();
assert_eq!(format!("{}", pet), "Sun, 2 Feb 2020 02:25:40".to_string());
}
}
| month_abbrev_string | identifier_name |
mysql.rs | used by quaint, including default values.
#[derive(Debug, Clone)]
pub struct MysqlUrl {
url: Url,
query_params: MysqlUrlQueryParams,
}
impl MysqlUrl {
/// Parse `Url` to `MysqlUrl`. Returns error for mistyped connection
/// parameters.
pub fn new(url: Url) -> Result<Self, Error> {
let query_params = Self::parse_query_params(&url)?;
Ok(Self { url, query_params })
}
/// The bare `Url` to the database.
pub fn url(&self) -> &Url {
&self.url
}
/// The percent-decoded database username.
pub fn username(&self) -> Cow<str> {
match percent_decode(self.url.username().as_bytes()).decode_utf8() {
Ok(username) => username,
Err(_) => {
#[cfg(not(feature = "tracing-log"))]
warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
#[cfg(feature = "tracing-log")]
tracing::warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
self.url.username().into()
}
}
}
/// The percent-decoded database password.
pub fn password(&self) -> Option<Cow<str>> {
match self
.url
.password()
.and_then(|pw| percent_decode(pw.as_bytes()).decode_utf8().ok())
{
Some(password) => Some(password),
None => self.url.password().map(|s| s.into()),
}
}
/// Name of the connected database. Defaults to `mysql`.
pub fn dbname(&self) -> &str {
match self.url.path_segments() {
Some(mut segments) => segments.next().unwrap_or("mysql"),
None => "mysql",
}
}
/// The database host. If `socket` and `host` are not set, defaults to `localhost`.
pub fn host(&self) -> &str {
self.url.host_str().unwrap_or("localhost")
}
/// If set, the connection is made to the database through a Unix socket.
pub fn socket(&self) -> &Option<String> {
&self.query_params.socket
}
/// The database port, defaults to `3306`.
pub fn port(&self) -> u16 {
self.url.port().unwrap_or(3306)
}
fn default_connection_limit() -> usize {
num_cpus::get_physical() * 2 + 1
}
fn parse_query_params(url: &Url) -> Result<MysqlUrlQueryParams, Error> {
let mut connection_limit = Self::default_connection_limit();
let mut ssl_opts = my::SslOpts::default();
let mut use_ssl = false;
let mut socket = None;
let mut socket_timeout = None;
let mut connect_timeout = Duration::from_secs(5);
for (k, v) in url.query_pairs() {
match k.as_ref() {
"connection_limit" => {
let as_int: usize = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connection_limit = as_int;
}
"sslcert" => {
use_ssl = true;
ssl_opts.set_root_cert_path(Some(Path::new(&*v).to_path_buf()));
}
"sslidentity" => {
use_ssl = true;
ssl_opts.set_pkcs12_path(Some(Path::new(&*v).to_path_buf()));
}
"sslpassword" => {
use_ssl = true;
ssl_opts.set_password(Some(v.to_string()));
}
"socket" => {
socket = Some(v.replace("(", "").replace(")", ""));
}
"socket_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
socket_timeout = Some(Duration::from_secs(as_int));
}
"connect_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connect_timeout = Duration::from_secs(as_int);
}
"sslaccept" => {
match v.as_ref() {
"strict" => {}
"accept_invalid_certs" => {
ssl_opts.set_danger_accept_invalid_certs(true);
}
_ => {
#[cfg(not(feature = "tracing-log"))]
debug!("Unsupported SSL accept mode {}, defaulting to `strict`", v);
#[cfg(feature = "tracing-log")]
tracing::debug!(
message = "Unsupported SSL accept mode, defaulting to `strict`",
mode = &*v
);
}
};
}
_ => {
#[cfg(not(feature = "tracing-log"))]
trace!("Discarding connection string param: {}", k);
#[cfg(feature = "tracing-log")]
tracing::trace!(message = "Discarding connection string param", param = &*k);
}
};
}
Ok(MysqlUrlQueryParams {
ssl_opts,
connection_limit,
use_ssl,
socket,
connect_timeout,
socket_timeout,
})
}
#[cfg(feature = "pooled")]
pub(crate) fn connection_limit(&self) -> usize {
self.query_params.connection_limit
}
pub(crate) fn to_opts_builder(&self) -> my::OptsBuilder {
let mut config = my::OptsBuilder::new();
config.user(Some(self.username()));
config.pass(self.password());
config.db_name(Some(self.dbname()));
match self.socket() {
Some(ref socket) => {
config.socket(Some(socket));
}
None => {
config.ip_or_hostname(self.host());
config.tcp_port(self.port());
}
}
config.stmt_cache_size(Some(1000));
config.conn_ttl(Some(Duration::from_secs(5)));
if self.query_params.use_ssl {
config.ssl_opts(Some(self.query_params.ssl_opts.clone()));
}
config
}
}
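// Hedged usage sketch (added for illustration; the connection string is made
// up): `MysqlUrl::new` accepts a parsed `Url` and falls back to the documented
// defaults for any missing parts.
#[cfg(test)]
mod mysql_url_examples {
    use super::MysqlUrl;
    use url::Url;

    #[test]
    fn applies_defaults() {
        let url = Url::parse("mysql://root@example.com/shop?connection_limit=7").unwrap();
        let parsed = MysqlUrl::new(url).unwrap();
        assert_eq!(parsed.host(), "example.com");
        assert_eq!(parsed.port(), 3306);
        assert_eq!(parsed.dbname(), "shop");
    }
}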
#[derive(Debug, Clone)]
pub(crate) struct MysqlUrlQueryParams {
ssl_opts: my::SslOpts,
connection_limit: usize,
use_ssl: bool,
socket: Option<String>,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
impl Mysql {
/// Create a new MySQL connection using `OptsBuilder` from the `mysql` crate.
pub fn new(url: MysqlUrl) -> crate::Result<Self> {
let mut opts = url.to_opts_builder();
let pool_opts = my::PoolOptions::with_constraints(my::PoolConstraints::new(1, 1).unwrap());
opts.pool_options(pool_opts);
Ok(Self {
socket_timeout: url.query_params.socket_timeout,
connect_timeout: url.query_params.connect_timeout,
pool: my::Pool::new(opts),
url,
})
}
async fn timeout<T, F, E>(&self, f: F) -> crate::Result<T>
where
F: Future<Output = std::result::Result<T, E>>,
E: Into<Error>,
|
}
impl TransactionCapable for Mysql {}
impl Queryable for Mysql {
fn query<'a>(&'a self, q: Query<'a>) -> DBIO<'a, ResultSet> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.query_raw(&sql, ¶ms).await })
}
fn execute<'a>(&'a self, q: Query<'a>) -> DBIO<'a, u64> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.execute_raw(&sql, ¶ms).await })
}
fn query_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue]) -> DBIO<'a, ResultSet> {
metrics::query("mysql.query_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
let columns = results
.columns_ref()
.iter()
.map(|s| s.name_str().into_owned())
.collect();
let last_id = results.last_insert_id();
let mut result_set = ResultSet::new(columns, Vec::new());
let (_, rows) = self
.timeout(results.map_and_drop(|mut row| row.take_result_row()))
.await?;
for row in rows.into_iter() {
result_set.rows.push(row?);
}
if let Some(id) = last_id {
result_set.set_last_insert_id(id);
};
Ok(result_set)
})
}
fn execute_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue<'a>]) -> DBIO<'a, u64> {
metrics::query("mysql.execute_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
Ok(results.affected_rows())
})
}
fn raw_cmd<'a>(&'a self, cmd: &'a str) -> DBIO<'a, ()> {
metrics::query("mysql.raw_cmd", cmd, &[], move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
self.timeout(conn.query(cmd)).await?;
Ok(())
})
}
}
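// Hedged usage sketch (added for illustration; the URL is made up): through
// the `single::Quaint` wrapper exercised in the tests below, `query_raw` takes
// a SQL string plus a parameter slice and resolves to a `ResultSet`:
//
//     let conn = Quaint::new("mysql://root@localhost/mysql").await?;
//     let rows = conn.query_raw("SELECT 1 + 1", &[]).await?;
//
// Shown as a comment because it needs an async runtime and a live server.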
#[cfg(test)]
mod tests {
use super::MysqlUrl;
use crate::{
ast::{Insert, ParameterizedValue, Select},
connector::Queryable,
error::*,
single::Quaint,
};
use once_cell::sync::Lazy;
use std::env;
use url::Url;
static CONN_STR: Lazy<String> = Lazy::new(|| env::var("TEST_MYSQL").expect("TEST_MYSQL env var"));
#[test]
fn should_parse_socket_url() {
let url = MysqlUrl::new(Url::parse("mysql://root@localhost/dbname?socket=(/tmp/mysql.sock)").unwrap()).unwrap();
assert_eq!("dbname", url.dbname());
assert_eq!(&Some(String::from("/tmp/mysql.sock")), url.socket());
}
#[tokio::test]
async fn should_provide_a_database_connection() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let res = connection
.query_raw(
"select * from information_schema.`COLUMNS` where COLUMN_NAME = 'unknown_123'",
&[],
)
.await
.unwrap();
assert!(res.is_empty());
}
const TABLE_DEF: &str = r#"
CREATE TABLE `user`(
id int4 PRIMARY KEY NOT NULL,
name text NOT NULL,
age int4 NOT NULL,
salary float4
);
"#;
const CREATE_USER: &str = r#"
INSERT INTO `user` (id, name, age, salary)
VALUES (1, 'Joe', 27, 20000.00 );
"#;
const DROP_TABLE: &str = "DROP TABLE IF EXISTS `user`;";
#[tokio::test]
async fn should_map_columns_correctly() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
connection.query_raw(DROP_TABLE, &[]).await.unwrap();
connection.query_raw(TABLE_DEF, &[]).await.unwrap();
let ch_ch_ch_ch_changees = connection.execute_raw(CREATE_USER, &[]).await.unwrap();
assert_eq!(1, ch_ch_ch_ch_changees);
let rows = connection.query_raw("SELECT * FROM `user`", &[]).await.unwrap();
assert_eq!(rows.len(), 1);
let row = rows.get(0).unwrap();
assert_eq!(row["id"].as_i64(), Some(1));
assert_eq!(row["name"].as_str(), Some("Joe"));
assert!(row["name"].is_text());
assert_eq!(row["age"].as_i64(), Some(27));
assert_eq!(row["salary"].as_f64(), Some(20000.0));
}
#[tokio::test]
async fn blobs_roundtrip() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let blob: Vec<u8> = vec![4, 2, 0];
connection
.query_raw("DROP TABLE IF EXISTS mysql_blobs_roundtrip_test", &[])
.await
.unwrap();
connection
.query_raw(
"CREATE TABLE mysql_blobs_roundtrip_test (id int AUTO_INCREMENT PRIMARY KEY, bytes MEDIUMBLOB)",
&[],
)
.await
.unwrap();
let insert = Insert::single_into("mysql_blobs_roundtrip_test").value("bytes", blob.as_slice());
connection.query(insert.into()).await.unwrap();
let roundtripped = Select::from_table("mysql_blobs_roundtrip_test").column("bytes");
let roundtripped = connection.query(roundtripped.into()).await.unwrap();
assert_eq!(
roundtripped.into_single().unwrap().at(0).unwrap(),
&ParameterizedValue::Bytes(blob.as_slice().into())
);
}
#[tokio::test]
async fn should_map_nonexisting_database_error() {
let mut url = Url::parse(&CONN_STR).unwrap();
url.set_username("root").unwrap();
url.set_path("/this_does_not_exist");
let url = url.as_str().to_string();
let conn = Quaint::new(&url).await.unwrap();
let res = conn.query_raw("SELECT 1 + 1", &[]).await;
assert!(&res.is_err());
let err = res.unwrap_err();
match err.kind() {
ErrorKind::DatabaseDoesNotExist { db_name } => {
assert_eq!(Some("1049"), err.original_code());
assert_eq!(Some("Unknown database \'this_does_not_exist\'"), err.original_message());
assert_eq!("this_does_not_exist", db_name.as_str())
}
e => panic!("Expected `DatabaseDoesNotExist`, got {:?}", e),
}
}
#[tokio::test]
async fn test_uniq_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_uniq_constraint_violation").await;
let _ = conn.raw_cmd("DROP INDEX idx_uniq_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_uniq_constraint_violation (id1 int, id2 int)")
.await
.unwrap();
conn.raw_cmd("CREATE UNIQUE INDEX idx_uniq_constraint_violation ON test_uniq_constraint_violation (id1, id2) USING btree").await.unwrap();
conn.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await
.unwrap();
let res = conn
.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::UniqueConstraintViolation { constraint } => {
assert_eq!(Some("1062"), err.original_code());
assert_eq!(
&DatabaseConstraint::Index(String::from("idx_uniq_constraint_violation")),
constraint,
)
}
_ => panic!("{:?}", err),
}
}
#[tokio::test]
async fn test_null_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_null_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_null_constraint_violation (id1 int not null, id2 int not null)")
.await
.unwrap();
// Error code 1364
{
let res = conn
.query_raw("INSERT INTO test_null_constraint_violation () VALUES ()", &[])
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::NullConstraintViolation { constraint } => {
assert_eq!(Some("1364"), err.original_code());
assert_eq!(
Some("Field \'id1\' doesn\'t have a default value"),
err.original_message()
);
assert_eq!(&DatabaseConstraint::Fields(vec![String::from("id1")]), constraint) | {
match self.socket_timeout {
Some(duration) => match timeout(duration, f).await {
Ok(Ok(result)) => Ok(result),
Ok(Err(err)) => Err(err.into()),
Err(to) => Err(to.into()),
},
None => match f.await {
Ok(result) => Ok(result),
Err(err) => Err(err.into()),
},
}
} | identifier_body |
mysql.rs | logic used by quaint, including default values.
#[derive(Debug, Clone)]
pub struct MysqlUrl {
url: Url,
query_params: MysqlUrlQueryParams,
}
impl MysqlUrl {
/// Parse `Url` to `MysqlUrl`. Returns error for mistyped connection
/// parameters.
pub fn new(url: Url) -> Result<Self, Error> {
let query_params = Self::parse_query_params(&url)?;
Ok(Self { url, query_params })
}
/// The bare `Url` to the database.
pub fn url(&self) -> &Url {
&self.url
}
/// The percent-decoded database username.
pub fn username(&self) -> Cow<str> {
match percent_decode(self.url.username().as_bytes()).decode_utf8() {
Ok(username) => username,
Err(_) => {
#[cfg(not(feature = "tracing-log"))]
warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
#[cfg(feature = "tracing-log")]
tracing::warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
self.url.username().into()
}
}
}
/// The percent-decoded database password.
pub fn password(&self) -> Option<Cow<str>> {
match self
.url
.password()
.and_then(|pw| percent_decode(pw.as_bytes()).decode_utf8().ok())
{
Some(password) => Some(password),
None => self.url.password().map(|s| s.into()),
}
}
/// Name of the connected database. Defaults to `mysql`.
pub fn dbname(&self) -> &str {
match self.url.path_segments() {
Some(mut segments) => segments.next().unwrap_or("mysql"),
None => "mysql",
}
}
/// The database host. If `socket` and `host` are not set, defaults to `localhost`.
pub fn host(&self) -> &str {
self.url.host_str().unwrap_or("localhost")
}
/// If set, the connection is made to the database through a Unix socket.
pub fn socket(&self) -> &Option<String> {
&self.query_params.socket
}
/// The database port, defaults to `3306`.
pub fn port(&self) -> u16 {
self.url.port().unwrap_or(3306)
}
fn default_connection_limit() -> usize {
num_cpus::get_physical() * 2 + 1
}
fn parse_query_params(url: &Url) -> Result<MysqlUrlQueryParams, Error> {
let mut connection_limit = Self::default_connection_limit();
let mut ssl_opts = my::SslOpts::default();
let mut use_ssl = false;
let mut socket = None;
let mut socket_timeout = None;
let mut connect_timeout = Duration::from_secs(5);
for (k, v) in url.query_pairs() {
match k.as_ref() {
"connection_limit" => {
let as_int: usize = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connection_limit = as_int;
}
"sslcert" => {
use_ssl = true;
ssl_opts.set_root_cert_path(Some(Path::new(&*v).to_path_buf()));
}
"sslidentity" => {
use_ssl = true;
ssl_opts.set_pkcs12_path(Some(Path::new(&*v).to_path_buf()));
}
"sslpassword" => {
use_ssl = true;
ssl_opts.set_password(Some(v.to_string()));
}
"socket" => {
socket = Some(v.replace("(", "").replace(")", ""));
}
"socket_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
socket_timeout = Some(Duration::from_secs(as_int));
}
"connect_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connect_timeout = Duration::from_secs(as_int);
}
"sslaccept" => {
match v.as_ref() {
"strict" => {}
"accept_invalid_certs" => {
ssl_opts.set_danger_accept_invalid_certs(true);
}
_ => {
#[cfg(not(feature = "tracing-log"))]
debug!("Unsupported SSL accept mode {}, defaulting to `strict`", v);
#[cfg(feature = "tracing-log")]
tracing::debug!(
message = "Unsupported SSL accept mode, defaulting to `strict`",
mode = &*v
);
}
};
}
_ => {
#[cfg(not(feature = "tracing-log"))]
trace!("Discarding connection string param: {}", k);
#[cfg(feature = "tracing-log")]
tracing::trace!(message = "Discarding connection string param", param = &*k);
}
};
}
Ok(MysqlUrlQueryParams {
ssl_opts,
connection_limit,
use_ssl,
socket,
connect_timeout,
socket_timeout,
})
}
#[cfg(feature = "pooled")]
pub(crate) fn connection_limit(&self) -> usize {
self.query_params.connection_limit
}
pub(crate) fn to_opts_builder(&self) -> my::OptsBuilder {
let mut config = my::OptsBuilder::new();
config.user(Some(self.username()));
config.pass(self.password());
config.db_name(Some(self.dbname()));
match self.socket() {
Some(ref socket) => {
config.socket(Some(socket));
}
None => {
config.ip_or_hostname(self.host());
config.tcp_port(self.port());
}
}
config.stmt_cache_size(Some(1000));
config.conn_ttl(Some(Duration::from_secs(5)));
if self.query_params.use_ssl {
config.ssl_opts(Some(self.query_params.ssl_opts.clone()));
}
config
}
}
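// Hedged sketch (added for illustration; the module name and values are made
// up): `parse_query_params` above recognizes `socket`, `socket_timeout`,
// `connect_timeout`, `connection_limit`, and the SSL parameters; parentheses
// around the socket path are stripped.
#[cfg(test)]
mod query_param_examples {
    use super::MysqlUrl;
    use url::Url;

    #[test]
    fn parses_socket_and_timeouts() {
        let url = Url::parse(
            "mysql://root@localhost/db?socket=(/var/run/mysqld/mysqld.sock)&connect_timeout=10",
        )
        .unwrap();
        let parsed = MysqlUrl::new(url).unwrap();
        assert_eq!(parsed.socket(), &Some("/var/run/mysqld/mysqld.sock".to_string()));
    }
}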
#[derive(Debug, Clone)]
pub(crate) struct MysqlUrlQueryParams {
ssl_opts: my::SslOpts,
connection_limit: usize,
use_ssl: bool,
socket: Option<String>,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
impl Mysql {
/// Create a new MySQL connection using `OptsBuilder` from the `mysql` crate.
pub fn new(url: MysqlUrl) -> crate::Result<Self> {
let mut opts = url.to_opts_builder();
let pool_opts = my::PoolOptions::with_constraints(my::PoolConstraints::new(1, 1).unwrap());
opts.pool_options(pool_opts);
Ok(Self {
socket_timeout: url.query_params.socket_timeout,
connect_timeout: url.query_params.connect_timeout,
pool: my::Pool::new(opts),
url,
})
}
async fn timeout<T, F, E>(&self, f: F) -> crate::Result<T>
where
F: Future<Output = std::result::Result<T, E>>,
E: Into<Error>,
{
match self.socket_timeout {
Some(duration) => match timeout(duration, f).await {
Ok(Ok(result)) => Ok(result),
Ok(Err(err)) => Err(err.into()),
Err(to) => Err(to.into()),
},
None => match f.await {
Ok(result) => Ok(result),
Err(err) => Err(err.into()),
},
}
}
}
impl TransactionCapable for Mysql {}
impl Queryable for Mysql {
fn query<'a>(&'a self, q: Query<'a>) -> DBIO<'a, ResultSet> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.query_raw(&sql, ¶ms).await })
}
fn execute<'a>(&'a self, q: Query<'a>) -> DBIO<'a, u64> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.execute_raw(&sql, ¶ms).await })
}
fn query_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue]) -> DBIO<'a, ResultSet> {
metrics::query("mysql.query_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
let columns = results
.columns_ref()
.iter()
.map(|s| s.name_str().into_owned())
.collect();
let last_id = results.last_insert_id();
let mut result_set = ResultSet::new(columns, Vec::new());
let (_, rows) = self
.timeout(results.map_and_drop(|mut row| row.take_result_row()))
.await?;
for row in rows.into_iter() {
result_set.rows.push(row?);
}
if let Some(id) = last_id {
result_set.set_last_insert_id(id);
};
Ok(result_set)
})
}
fn execute_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue<'a>]) -> DBIO<'a, u64> {
metrics::query("mysql.execute_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
Ok(results.affected_rows())
})
}
fn raw_cmd<'a>(&'a self, cmd: &'a str) -> DBIO<'a, ()> {
metrics::query("mysql.raw_cmd", cmd, &[], move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
self.timeout(conn.query(cmd)).await?;
Ok(())
})
}
}
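// Hedged usage sketch (added for illustration; the statement is made up):
// `execute_raw` resolves to the number of affected rows, mirroring the
// `CREATE_USER` assertion in the tests below:
//
//     let changed = conn.execute_raw("DELETE FROM `user` WHERE id = 1", &[]).await?;
//     assert_eq!(changed, 1);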
#[cfg(test)]
mod tests {
use super::MysqlUrl;
use crate::{
ast::{Insert, ParameterizedValue, Select},
connector::Queryable,
error::*,
single::Quaint,
};
use once_cell::sync::Lazy;
use std::env;
use url::Url;
static CONN_STR: Lazy<String> = Lazy::new(|| env::var("TEST_MYSQL").expect("TEST_MYSQL env var"));
#[test]
fn should_parse_socket_url() {
let url = MysqlUrl::new(Url::parse("mysql://root@localhost/dbname?socket=(/tmp/mysql.sock)").unwrap()).unwrap();
assert_eq!("dbname", url.dbname());
assert_eq!(&Some(String::from("/tmp/mysql.sock")), url.socket());
}
#[tokio::test]
async fn should_provide_a_database_connection() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let res = connection
.query_raw(
"select * from information_schema.`COLUMNS` where COLUMN_NAME = 'unknown_123'",
&[],
)
.await
.unwrap();
assert!(res.is_empty());
}
const TABLE_DEF: &str = r#"
CREATE TABLE `user`(
id int4 PRIMARY KEY NOT NULL,
name text NOT NULL,
age int4 NOT NULL,
salary float4
);
"#;
const CREATE_USER: &str = r#"
INSERT INTO `user` (id, name, age, salary)
VALUES (1, 'Joe', 27, 20000.00 );
"#;
const DROP_TABLE: &str = "DROP TABLE IF EXISTS `user`;";
#[tokio::test]
async fn should_map_columns_correctly() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
connection.query_raw(DROP_TABLE, &[]).await.unwrap();
connection.query_raw(TABLE_DEF, &[]).await.unwrap();
let ch_ch_ch_ch_changees = connection.execute_raw(CREATE_USER, &[]).await.unwrap();
assert_eq!(1, ch_ch_ch_ch_changees);
let rows = connection.query_raw("SELECT * FROM `user`", &[]).await.unwrap();
assert_eq!(rows.len(), 1);
let row = rows.get(0).unwrap();
assert_eq!(row["id"].as_i64(), Some(1));
assert_eq!(row["name"].as_str(), Some("Joe"));
assert!(row["name"].is_text());
assert_eq!(row["age"].as_i64(), Some(27));
assert_eq!(row["salary"].as_f64(), Some(20000.0));
}
#[tokio::test]
async fn blobs_roundtrip() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let blob: Vec<u8> = vec![4, 2, 0];
connection
.query_raw("DROP TABLE IF EXISTS mysql_blobs_roundtrip_test", &[])
.await
.unwrap();
connection
.query_raw(
"CREATE TABLE mysql_blobs_roundtrip_test (id int AUTO_INCREMENT PRIMARY KEY, bytes MEDIUMBLOB)",
&[],
)
.await
.unwrap();
let insert = Insert::single_into("mysql_blobs_roundtrip_test").value("bytes", blob.as_slice());
connection.query(insert.into()).await.unwrap();
let roundtripped = Select::from_table("mysql_blobs_roundtrip_test").column("bytes");
let roundtripped = connection.query(roundtripped.into()).await.unwrap();
assert_eq!(
roundtripped.into_single().unwrap().at(0).unwrap(),
&ParameterizedValue::Bytes(blob.as_slice().into())
);
}
#[tokio::test]
async fn should_map_nonexisting_database_error() {
let mut url = Url::parse(&CONN_STR).unwrap();
url.set_username("root").unwrap();
url.set_path("/this_does_not_exist");
let url = url.as_str().to_string();
let conn = Quaint::new(&url).await.unwrap();
let res = conn.query_raw("SELECT 1 + 1", &[]).await;
assert!(&res.is_err());
let err = res.unwrap_err();
match err.kind() {
ErrorKind::DatabaseDoesNotExist { db_name } => {
assert_eq!(Some("1049"), err.original_code());
assert_eq!(Some("Unknown database \'this_does_not_exist\'"), err.original_message());
assert_eq!("this_does_not_exist", db_name.as_str())
}
e => panic!("Expected `DatabaseDoesNotExist`, got {:?}", e),
}
}
#[tokio::test]
async fn test_uniq_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_uniq_constraint_violation").await;
let _ = conn.raw_cmd("DROP INDEX idx_uniq_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_uniq_constraint_violation (id1 int, id2 int)")
.await
.unwrap();
conn.raw_cmd("CREATE UNIQUE INDEX idx_uniq_constraint_violation ON test_uniq_constraint_violation (id1, id2) USING btree").await.unwrap();
conn.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await
.unwrap();
let res = conn
.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::UniqueConstraintViolation { constraint } => {
assert_eq!(Some("1062"), err.original_code());
assert_eq!(
&DatabaseConstraint::Index(String::from("idx_uniq_constraint_violation")),
constraint,
)
}
e => panic!("Expected `UniqueConstraintViolation`, got {:?}", e),
}
}
#[tokio::test]
async fn test_null_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_null_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_null_constraint_violation (id1 int not null, id2 int not null)")
.await
.unwrap();
// Error code 1364
{
let res = conn
.query_raw("INSERT INTO test_null_constraint_violation () VALUES ()", &[])
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::NullConstraintViolation { constraint } => {
assert_eq!(Some("1364"), err.original_code());
assert_eq!(
Some("Field \'id1\' doesn\'t have a default value"),
err.original_message()
);
                    assert_eq!(&DatabaseConstraint::Fields(vec![String::from("id1")]), constraint)
                }
                e => panic!("Expected `NullConstraintViolation`, got {:?}", e),
            }
        }
    }
}
// mysql.rs
use mysql_async as my;
use percent_encoding::percent_decode;
use std::{borrow::Cow, future::Future, path::Path, time::Duration};
use tokio::time::timeout;
use url::Url;
use crate::{
ast::{ParameterizedValue, Query},
connector::{metrics, queryable::*, ResultSet, DBIO},
error::{Error, ErrorKind},
visitor::{self, Visitor},
};
/// A connector interface for the MySQL database.
#[derive(Debug)]
pub struct Mysql {
pub(crate) pool: my::Pool,
pub(crate) url: MysqlUrl,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
/// Wraps a connection url and exposes the parsing logic used by quaint, including default values.
#[derive(Debug, Clone)]
pub struct MysqlUrl {
url: Url,
query_params: MysqlUrlQueryParams,
}
impl MysqlUrl {
/// Parse `Url` to `MysqlUrl`. Returns error for mistyped connection
/// parameters.
pub fn new(url: Url) -> Result<Self, Error> {
let query_params = Self::parse_query_params(&url)?;
Ok(Self { url, query_params })
}
/// The bare `Url` to the database.
pub fn url(&self) -> &Url {
&self.url
}
/// The percent-decoded database username.
pub fn username(&self) -> Cow<str> {
match percent_decode(self.url.username().as_bytes()).decode_utf8() {
Ok(username) => username,
Err(_) => {
#[cfg(not(feature = "tracing-log"))]
warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
#[cfg(feature = "tracing-log")]
tracing::warn!("Couldn't decode username to UTF-8, using the non-decoded version.");
self.url.username().into()
}
}
}
/// The percent-decoded database password.
pub fn password(&self) -> Option<Cow<str>> {
match self
.url
.password()
.and_then(|pw| percent_decode(pw.as_bytes()).decode_utf8().ok())
{
Some(password) => Some(password),
None => self.url.password().map(|s| s.into()),
}
}
/// Name of the database connected. Defaults to `mysql`.
pub fn dbname(&self) -> &str {
match self.url.path_segments() {
Some(mut segments) => segments.next().unwrap_or("mysql"),
None => "mysql",
}
}
/// The database host. If `socket` and `host` are not set, defaults to `localhost`.
pub fn host(&self) -> &str {
self.url.host_str().unwrap_or("localhost")
}
/// If set, the connection to the database is made through a Unix socket.
pub fn socket(&self) -> &Option<String> {
&self.query_params.socket
}
/// The database port, defaults to `3306`.
pub fn port(&self) -> u16 {
self.url.port().unwrap_or(3306)
}
fn default_connection_limit() -> usize {
num_cpus::get_physical() * 2 + 1
}
fn parse_query_params(url: &Url) -> Result<MysqlUrlQueryParams, Error> {
let mut connection_limit = Self::default_connection_limit();
let mut ssl_opts = my::SslOpts::default();
let mut use_ssl = false;
let mut socket = None;
let mut socket_timeout = None;
let mut connect_timeout = Duration::from_secs(5);
for (k, v) in url.query_pairs() {
match k.as_ref() {
"connection_limit" => {
let as_int: usize = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connection_limit = as_int;
}
"sslcert" => {
use_ssl = true;
ssl_opts.set_root_cert_path(Some(Path::new(&*v).to_path_buf()));
}
"sslidentity" => {
use_ssl = true;
ssl_opts.set_pkcs12_path(Some(Path::new(&*v).to_path_buf()));
}
"sslpassword" => {
use_ssl = true;
ssl_opts.set_password(Some(v.to_string()));
}
"socket" => {
socket = Some(v.replace("(", "").replace(")", ""));
}
"socket_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
socket_timeout = Some(Duration::from_secs(as_int));
}
"connect_timeout" => {
let as_int = v
.parse()
.map_err(|_| Error::builder(ErrorKind::InvalidConnectionArguments).build())?;
connect_timeout = Duration::from_secs(as_int);
}
"sslaccept" => {
match v.as_ref() {
"strict" => {}
"accept_invalid_certs" => {
ssl_opts.set_danger_accept_invalid_certs(true);
}
_ => {
#[cfg(not(feature = "tracing-log"))]
debug!("Unsupported SSL accept mode {}, defaulting to `strict`", v);
#[cfg(feature = "tracing-log")]
tracing::debug!(
message = "Unsupported SSL accept mode, defaulting to `strict`",
mode = &*v
);
}
};
}
_ => {
#[cfg(not(feature = "tracing-log"))]
trace!("Discarding connection string param: {}", k);
#[cfg(feature = "tracing-log")]
tracing::trace!(message = "Discarding connection string param", param = &*k);
}
};
}
Ok(MysqlUrlQueryParams {
ssl_opts,
connection_limit,
use_ssl,
socket,
connect_timeout,
socket_timeout,
})
}
#[cfg(feature = "pooled")]
pub(crate) fn connection_limit(&self) -> usize {
self.query_params.connection_limit
}
pub(crate) fn to_opts_builder(&self) -> my::OptsBuilder {
let mut config = my::OptsBuilder::new();
config.user(Some(self.username()));
config.pass(self.password());
config.db_name(Some(self.dbname()));
match self.socket() {
Some(ref socket) => {
config.socket(Some(socket));
}
None => {
config.ip_or_hostname(self.host());
config.tcp_port(self.port());
}
}
config.stmt_cache_size(Some(1000));
config.conn_ttl(Some(Duration::from_secs(5)));
if self.query_params.use_ssl {
config.ssl_opts(Some(self.query_params.ssl_opts.clone()));
}
config
}
}
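// Sketch (illustrative, with a made-up URL): parsing connection parameters
// and turning them into `mysql_async` options via `to_opts_builder` above.
#[allow(dead_code)]
fn opts_sketch() -> Result<my::OptsBuilder, Error> {
    let raw = "mysql://app:secret@db.internal:3307/app_db?connection_limit=10&connect_timeout=2";
    let url = MysqlUrl::new(Url::parse(raw).expect("static URL is well-formed"))?;
    assert_eq!(3307, url.port());
    assert_eq!("app_db", url.dbname());
    // user/password/db plus socket-vs-TCP selection are applied here.
    Ok(url.to_opts_builder())
}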
#[derive(Debug, Clone)]
pub(crate) struct MysqlUrlQueryParams {
ssl_opts: my::SslOpts,
connection_limit: usize,
use_ssl: bool,
socket: Option<String>,
socket_timeout: Option<Duration>,
connect_timeout: Duration,
}
impl Mysql {
/// Create a new MySQL connection using `OptsBuilder` from the `mysql` crate.
pub fn new(url: MysqlUrl) -> crate::Result<Self> {
let mut opts = url.to_opts_builder();
let pool_opts = my::PoolOptions::with_constraints(my::PoolConstraints::new(1, 1).unwrap());
opts.pool_options(pool_opts);
Ok(Self {
socket_timeout: url.query_params.socket_timeout,
connect_timeout: url.query_params.connect_timeout,
pool: my::Pool::new(opts),
url,
})
}
async fn timeout<T, F, E>(&self, f: F) -> crate::Result<T>
where
F: Future<Output = std::result::Result<T, E>>,
E: Into<Error>,
{
match self.socket_timeout {
Some(duration) => match timeout(duration, f).await {
Ok(Ok(result)) => Ok(result),
Ok(Err(err)) => Err(err.into()),
Err(to) => Err(to.into()),
},
None => match f.await {
Ok(result) => Ok(result),
Err(err) => Err(err.into()),
},
}
}
}
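// Note on the `.await??` pattern used with `timeout(...)` below: the outer
// `?` propagates the elapsed-timeout error, and the inner `?` propagates the
// pool/connection error carried inside the `Ok` value. Both convert into the
// crate's `Error` type.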
impl TransactionCapable for Mysql {}
impl Queryable for Mysql {
fn query<'a>(&'a self, q: Query<'a>) -> DBIO<'a, ResultSet> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.query_raw(&sql, ¶ms).await })
}
fn execute<'a>(&'a self, q: Query<'a>) -> DBIO<'a, u64> {
let (sql, params) = visitor::Mysql::build(q);
DBIO::new(async move { self.execute_raw(&sql, ¶ms).await })
}
fn query_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue]) -> DBIO<'a, ResultSet> {
metrics::query("mysql.query_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
let columns = results
.columns_ref()
.iter()
.map(|s| s.name_str().into_owned())
.collect();
let last_id = results.last_insert_id();
let mut result_set = ResultSet::new(columns, Vec::new());
let (_, rows) = self
.timeout(results.map_and_drop(|mut row| row.take_result_row()))
.await?;
for row in rows.into_iter() {
result_set.rows.push(row?);
}
if let Some(id) = last_id {
result_set.set_last_insert_id(id);
};
Ok(result_set)
})
}
fn execute_raw<'a>(&'a self, sql: &'a str, params: &'a [ParameterizedValue<'a>]) -> DBIO<'a, u64> {
metrics::query("mysql.execute_raw", sql, params, move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
let results = self
.timeout(conn.prep_exec(sql, conversion::conv_params(params)))
.await?;
Ok(results.affected_rows())
})
}
fn raw_cmd<'a>(&'a self, cmd: &'a str) -> DBIO<'a, ()> {
metrics::query("mysql.raw_cmd", cmd, &[], move || async move {
let conn = timeout(self.connect_timeout, self.pool.get_conn()).await??;
self.timeout(conn.query(cmd)).await?;
Ok(())
})
}
}
#[cfg(test)]
mod tests {
use super::MysqlUrl;
use crate::{
ast::{Insert, ParameterizedValue, Select},
connector::Queryable,
error::*,
single::Quaint,
};
use once_cell::sync::Lazy;
use std::env;
use url::Url;
static CONN_STR: Lazy<String> = Lazy::new(|| env::var("TEST_MYSQL").expect("TEST_MYSQL env var"));
#[test]
fn should_parse_socket_url() {
let url = MysqlUrl::new(Url::parse("mysql://root@localhost/dbname?socket=(/tmp/mysql.sock)").unwrap()).unwrap();
assert_eq!("dbname", url.dbname());
assert_eq!(&Some(String::from("/tmp/mysql.sock")), url.socket());
}
#[tokio::test]
async fn should_provide_a_database_connection() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let res = connection
.query_raw(
"select * from information_schema.`COLUMNS` where COLUMN_NAME = 'unknown_123'",
&[],
)
.await
.unwrap();
assert!(res.is_empty());
}
const TABLE_DEF: &str = r#"
CREATE TABLE `user`(
id int4 PRIMARY KEY NOT NULL,
name text NOT NULL,
age int4 NOT NULL,
salary float4
);
"#;
const CREATE_USER: &str = r#"
INSERT INTO `user` (id, name, age, salary)
VALUES (1, 'Joe', 27, 20000.00 );
"#;
const DROP_TABLE: &str = "DROP TABLE IF EXISTS `user`;";
#[tokio::test]
async fn should_map_columns_correctly() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
connection.query_raw(DROP_TABLE, &[]).await.unwrap();
connection.query_raw(TABLE_DEF, &[]).await.unwrap();
let ch_ch_ch_ch_changees = connection.execute_raw(CREATE_USER, &[]).await.unwrap();
assert_eq!(1, ch_ch_ch_ch_changees);
let rows = connection.query_raw("SELECT * FROM `user`", &[]).await.unwrap();
assert_eq!(rows.len(), 1);
let row = rows.get(0).unwrap();
assert_eq!(row["id"].as_i64(), Some(1));
assert_eq!(row["name"].as_str(), Some("Joe"));
assert!(row["name"].is_text());
assert_eq!(row["age"].as_i64(), Some(27));
assert_eq!(row["salary"].as_f64(), Some(20000.0));
}
#[tokio::test]
async fn blobs_roundtrip() {
let connection = Quaint::new(&CONN_STR).await.unwrap();
let blob: Vec<u8> = vec![4, 2, 0];
connection
.query_raw("DROP TABLE IF EXISTS mysql_blobs_roundtrip_test", &[])
.await
.unwrap();
connection
.query_raw(
"CREATE TABLE mysql_blobs_roundtrip_test (id int AUTO_INCREMENT PRIMARY KEY, bytes MEDIUMBLOB)",
&[],
)
.await
.unwrap();
let insert = Insert::single_into("mysql_blobs_roundtrip_test").value("bytes", blob.as_slice());
connection.query(insert.into()).await.unwrap();
let roundtripped = Select::from_table("mysql_blobs_roundtrip_test").column("bytes");
let roundtripped = connection.query(roundtripped.into()).await.unwrap();
assert_eq!(
roundtripped.into_single().unwrap().at(0).unwrap(),
&ParameterizedValue::Bytes(blob.as_slice().into())
);
}
#[tokio::test]
async fn should_map_nonexisting_database_error() {
let mut url = Url::parse(&CONN_STR).unwrap();
url.set_username("root").unwrap();
url.set_path("/this_does_not_exist");
let url = url.as_str().to_string();
let conn = Quaint::new(&url).await.unwrap();
let res = conn.query_raw("SELECT 1 + 1", &[]).await;
assert!(&res.is_err());
let err = res.unwrap_err();
match err.kind() {
ErrorKind::DatabaseDoesNotExist { db_name } => {
assert_eq!(Some("1049"), err.original_code());
assert_eq!(Some("Unknown database \'this_does_not_exist\'"), err.original_message());
assert_eq!("this_does_not_exist", db_name.as_str())
}
e => panic!("Expected `DatabaseDoesNotExist`, got {:?}", e),
}
}
#[tokio::test]
async fn test_uniq_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_uniq_constraint_violation").await;
let _ = conn.raw_cmd("DROP INDEX idx_uniq_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_uniq_constraint_violation (id1 int, id2 int)")
.await
.unwrap();
conn.raw_cmd("CREATE UNIQUE INDEX idx_uniq_constraint_violation ON test_uniq_constraint_violation (id1, id2) USING btree").await.unwrap();
conn.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await
.unwrap();
let res = conn
.query_raw(
"INSERT INTO test_uniq_constraint_violation (id1, id2) VALUES (1, 2)",
&[],
)
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::UniqueConstraintViolation { constraint } => {
assert_eq!(Some("1062"), err.original_code());
assert_eq!(
&DatabaseConstraint::Index(String::from("idx_uniq_constraint_violation")),
constraint,
)
}
e => panic!("Expected `UniqueConstraintViolation`, got {:?}", e),
}
}
#[tokio::test]
async fn test_null_constraint_violation() {
let conn = Quaint::new(&CONN_STR).await.unwrap();
let _ = conn.raw_cmd("DROP TABLE test_null_constraint_violation").await;
conn.raw_cmd("CREATE TABLE test_null_constraint_violation (id1 int not null, id2 int not null)")
.await
.unwrap();
// Error code 1364
{
let res = conn
.query_raw("INSERT INTO test_null_constraint_violation () VALUES ()", &[])
.await;
let err = res.unwrap_err();
match err.kind() {
ErrorKind::NullConstraintViolation { constraint } => {
                    assert_eq!(Some("1364"), err.original_code());
                    assert_eq!(
                        Some("Field \'id1\' doesn\'t have a default value"),
                        err.original_message()
                    );
                    assert_eq!(&DatabaseConstraint::Fields(vec![String::from("id1")]), constraint)
                }
                e => panic!("Expected `NullConstraintViolation`, got {:?}", e),
            }
        }
    }
}
// lib.rs
//! Salak is a multi layered configuration loader and zero-boilerplate configuration parser, with many predefined sources.
//!
//! 1. [About](#about)
//! 2. [Quick Start](#quick-start)
//! 3. [Features](#features)
//! * [Predefined Sources](#predefined-sources)
//! * [Key Convention](#key-convention)
//! * [Value Placeholder Parsing](#value-placeholder-parsing)
//! * [Attributes For Derive](#attributes-for-derive)
//! * [Reload Configuration](#reload-configuration)
//! * [Resource Factory](#resource-factory)
//!
//! ## About
//! `salak` is a multi layered configuration loader with many predefined sources. Also it
//! is a zero-boilerplate configuration parser which provides an auto-derive procedural macro
//! to derive [`FromEnvironment`] so that we can parse configuration structs without any additional code.
//!
//! ## Quick Start
//! A simple example of `salak`:
//!
//! ```
//! use salak::*;
//!
//! #[derive(Debug, FromEnvironment)]
//! #[salak(prefix = "config")]
//! struct Config {
//! #[salak(default = false)]
//! verbose: bool,
//! optional: Option<String>,
//! #[salak(name = "val")]
//! value: i64,
//! }
//! let env = Salak::builder()
//! .set("config.val", "2021")
//! .build()
//! .unwrap();
//! let config = env.get::<Config>().unwrap();
//! assert_eq!(2021, config.value);
//! assert_eq!(None, config.optional);
//! assert_eq!(false, config.verbose);
//! ```
//!
//! ## Features
//!
//! #### Predefined Sources
//! Predefined sources are searched in the following order. [`Salak`] checks them in sequence:
//! if the property with the specified key is found in the current source, it is returned immediately.
//! Otherwise, the search moves on to the next source.
//!
//! 1. Random source provides a group of keys that return random values.
//! * `random.u8`
//! * `random.u16`
//! * `random.u32`
//! * `random.u64`
//! * `random.u128`
//! * `random.usize`
//! * `random.i8`
//! * `random.i16`
//! * `random.i32`
//! * `random.i64`
//! * `random.i128`
//! * `random.isize`
//! 2. Custom arguments source. [`SalakBuilder::set()`] can set a single kv,
//! and [`SalakBuilder::set_args()`] can set a group of kvs.
//! 3. System environment source. Implemented by [`source::system_environment`].
//! 4. Profile specified file source, eg. `app-dev.toml`, supports reloading.
//! 5. No profile file source, eg. `app.toml`, supports reloading.
//! 6. Custom sources, which can be registered by [`Salak::register()`].
//!
//! #### Key Convention
//! Keys are used for searching configuration in an [`Environment`]; normally a key is represented by a string.
//! A key is a group of SubKeys separated by dots(`.`), and a SubKey is a name or a name followed by an index.
//! 1. SubKey Format (`[a-z][_a-z0-9]+(\[[0-9]+\])*`)
//! * `a`
//! * `a0`
//! * `a_b`
//! * `a[0]`
//! * `a[0][0]`
//! 2. Key Format (`SubKey(\.SubKey)*`)
//! * `a`
//! * `a.b`
//! * `a.val[0]`
//! * `a_b[0]`
//!
//! #### Value Placeholder Parsing
//! 1. Placeholder Format
//! * `${key}` => Get value of `key`.
//! * `${key:default}` => Get value of `key`, if not exists return `default`.
//! 2. Escape Format
//! * `\$\{key\}` => Return `${key}`.
//! * `$`, `\`, `{`, `}` must use escape format.
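//!
//! A short sketch of how placeholder resolution composes (illustrative):
//!
//! ```
//! use salak::*;
//! let env = Salak::builder()
//!     .set("host", "localhost")
//!     .set("url", "http://${host}:8080")
//!     .build()
//!     .unwrap();
//! let url: String = env.require("url").unwrap();
//! assert_eq!("http://localhost:8080", url);
//! ```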
//!
//! #### Attributes For Derive
//! `salak` supports some attributes for automatically derive [`FromEnvironment`].
//! All attributes have format `#[salak(..)]`, eg. `#[salak(default = "default value")]`.
//! 1. Struct Header Attribute.
//! * `#[salak(prefix = "salak.application")]`, structs with this attr automatically implement [`PrefixedFromEnvironment`].
//! 2. Struct Field Attribute.
//! * `#[salak(default = "value")]`, this attr can specify default value.
//! * `#[salak(name = "key")]`, this attr can specify the property key; the default convention is to use the field name.
//! * `#[salak(desc = "Field Description")]`, this attr describes the property.
//!
//! #### Reload Configuration
//! `salak` supports reloading configurations. Since in Rust mutable
//! access and aliasing can't be used together, here we introduce a wrapper
//! [`wrapper::IORef`] for updating values when reloading.
//!
//! #### Resource Factory
//! [`Resource`] defines a standard way to create instance. [`Factory`] provides functions to initialize resource
//! and cache resource. Please refer to [salak_factory](https://docs.rs/salak_factory) for resource usage.
//! Feature 'app' should be open for this feature.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
use parking_lot::Mutex;
#[cfg(feature = "derive")]
use crate::derive::KeyDesc;
#[cfg(feature = "derive")]
mod derive;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use crate::derive::{
AutoDeriveFromEnvironment, DescFromEnvironment, PrefixedFromEnvironment, SalakDescContext,
};
use raw_ioref::IORefT;
/// Auto derive [`FromEnvironment`] for struct.
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use salak_derive::FromEnvironment;
/// Auto derive [`Service`] for struct.
#[cfg(all(feature = "derive", feature = "app"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "derive", feature = "app"))))]
pub use salak_derive::Service;
use source_raw::PropertyRegistryInternal;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
mod args;
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub use crate::args::AppInfo;
mod err;
mod raw;
use crate::raw::SubKey;
pub use crate::raw::{IsProperty, Property};
mod raw_ioref;
mod raw_vec;
use crate::env::PREFIX;
pub use crate::env::{Salak, SalakBuilder};
mod env;
mod raw_enum;
pub use crate::err::PropertyError;
pub use crate::raw_enum::EnumProperty;
mod source_map;
#[cfg(feature = "rand")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand")))]
mod source_rand;
mod source_raw;
#[cfg(feature = "toml")]
#[cfg_attr(docsrs, doc(cfg(feature = "toml")))]
mod source_toml;
#[cfg(feature = "yaml")]
#[cfg_attr(docsrs, doc(cfg(feature = "yaml")))]
mod source_yaml;
use crate::source::Key;
use crate::source::SubKeys;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
mod app;
#[cfg(feature = "app")]
#[cfg_attr(docsrs, doc(cfg(feature = "app")))]
pub use crate::app::*;
#[cfg(test)]
#[macro_use(quickcheck)]
extern crate quickcheck_macros;
/// Salak wrapper for configuration parsing.
///
/// Wrappers determine extra behavior for parsing,
/// such as checking that a vec is non-empty or updating values when reloading.
pub mod wrapper {
pub use crate::raw_ioref::IORef;
pub use crate::raw_vec::NonEmptyVec;
}
/// Salak sources.
///
/// This mod exports all pub sources.
pub mod source {
#[cfg(feature = "args")]
#[cfg_attr(docsrs, doc(cfg(feature = "args")))]
pub(crate) use crate::args::from_args;
pub use crate::raw::Key;
pub use crate::raw::SubKeys;
pub use crate::source_map::system_environment;
pub use crate::source_map::HashMapSource;
}
pub(crate) type Res<T> = Result<T, PropertyError>;
pub(crate) type Void = Res<()>;
/// A property source defines how to load properties.
/// `salak` has some predefined sources, and users can
/// provide custom sources by implementing this trait.
///
/// Sources provided by `salak`.
///
/// * hashmap source
/// * std::env source
/// * toml source
/// * yaml source
pub trait PropertySource: Send + Sync {
/// [`PropertySource`] name.
fn name(&self) -> &str;
/// Get property by key.
fn get_property(&self, key: &Key<'_>) -> Option<Property<'_>>;
/// Get all subkeys with given key.
///
/// Subkeys are keys without dot('.').
/// This method is unstable, and will be simplified by hiding
/// Key and SubKeys.
fn get_sub_keys<'a>(&'a self, key: &Key<'_>, sub_keys: &mut SubKeys<'a>);
/// Check whether the [`PropertySource`] is empty.
/// Empty sources are ignored when registered to `salak`.
fn is_empty(&self) -> bool;
/// Reload source, if nothing changes, then return none.
#[inline]
fn reload_source(&self) -> Res<Option<Box<dyn PropertySource>>> {
Ok(None)
}
}
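// Illustrative sketch (not part of the crate): the minimum a custom source
// must provide, using only the signatures declared in the trait above. A real
// source would construct `Property` values instead of always missing.
#[doc(hidden)]
#[derive(Debug)]
pub(crate) struct NullSource;

impl PropertySource for NullSource {
    fn name(&self) -> &str {
        "null"
    }
    // A real source would look the key up in its backing store.
    fn get_property(&self, _key: &Key<'_>) -> Option<Property<'_>> {
        None
    }
    fn get_sub_keys<'a>(&'a self, _key: &Key<'_>, _sub_keys: &mut SubKeys<'a>) {}
    // Empty sources are skipped at registration, so this stub would be ignored.
    fn is_empty(&self) -> bool {
        true
    }
}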
/// Environment defines interface for getting values, and reloading
/// configurations.
///
/// The implementor of this trait is [`Salak`].
pub trait Environment {
/// Get value by key.
/// * `key` - Configuration key.
///
/// Require means that if the value `T` is not found,
/// an error is returned. But if you try to get
/// `Option<T>`, then not found will return `None`.
fn require<T: FromEnvironment>(&self, key: &str) -> Res<T>;
/// Reload configuration. If reloading is completed,
/// all values wrapped by [`wrapper::IORef`] will be updated.
///
/// Currently, this feature is unstable; the returned bool
/// indicates that reloading completed without error.
fn reload(&self) -> Res<bool>;
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
#[inline]
/// Get value with predefined key.
///
/// [`PrefixedFromEnvironment`] can be auto derives by
/// [`salak_derive::FromEnvironment`] macro. It provides
/// a standard key for getting value `T`.
fn get<T: PrefixedFromEnvironment>(&self) -> Res<T> {
self.require::<T>(T::prefix())
}
}
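// Usage sketch of the contract above (illustrative): a missing key is an
// error for a bare `T`, but simply `None` for `Option<T>`.
//
// let env = Salak::builder().set("app.port", "8080").build().unwrap();
// let port: u16 = env.require("app.port").unwrap();
// let host: Option<String> = env.require("app.host").unwrap();
// assert_eq!(8080, port);
// assert_eq!(None, host);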
/// Context for implementing [`FromEnvironment`].
#[allow(missing_debug_implementations)]
pub struct SalakContext<'a> {
registry: &'a PropertyRegistryInternal<'a>,
iorefs: &'a Mutex<Vec<Box<dyn IORefT + Send>>>,
key: &'a mut Key<'a>,
}
/// Parsing value from environment by [`SalakContext`].
pub trait FromEnvironment: Sized {
/// Generate object from [`SalakContext`].
/// * `val` - Property value can be parsed from.
/// * `env` - Context.
///
/// ```no_run
/// use salak::*;
/// pub struct Config {
/// key: String
/// }
/// impl FromEnvironment for Config {
/// fn from_env(
/// val: Option<Property<'_>>,
/// env: &mut SalakContext<'_>,
/// ) -> Result<Self, PropertyError> {
/// Ok(Self{
/// key: env.require_def("key", None)?,
/// })
/// }
/// }
///
/// ```
fn from_env(val: Option<Property<'_>>, env: &mut SalakContext<'_>) -> Res<Self>;
}
// mod.rs
//! TensorFlow Ops
use std::collections::HashMap;
use std::collections::VecDeque;
use std::fmt::Debug;
use std::mem;
use std::ops::{Index, IndexMut};
#[cfg(feature = "serialize")]
use std::result::Result as StdResult;
use std::sync::Arc;
use analyser::interface::{Solver, TensorsProxy};
use analyser::prelude::*;
use ops::nn::local_patch::{DataFormat, Padding};
use {DataType, Result, Tensor};
use downcast_rs::Downcast;
use objekt;
#[cfg(feature = "serialize")]
use serde::ser::{Serialize, Serializer};
#[macro_use]
mod macros;
mod array;
mod cast;
#[cfg(feature = "image_ops")]
pub mod image;
pub mod konst;
mod math;
pub mod nn;
pub mod prelude {
pub use super::{Attr, InferenceRulesOp, Op, OpRegister};
pub use super::{OpBuffer, QueuesBuffer, TensorView};
pub use std::collections::HashMap;
pub use std::marker::PhantomData;
pub use tensor::{DataType, Datum, Tensor};
pub use Result;
}
#[derive(Debug, Clone)]
pub enum TensorView {
Owned(Tensor),
Shared(Arc<Tensor>),
}
impl TensorView {
/// Creates a shared TensorView from any TensorView.
pub fn into_shared(self) -> TensorView {
match self {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
TensorView::Shared(_) => self,
}
}
/// Creates a Tensor from a TensorView.
pub fn into_tensor(self) -> Tensor {
match self {
TensorView::Owned(m) => m,
TensorView::Shared(m) => m.as_ref().clone(),
}
}
/// Returns a reference to the Tensor wrapped inside a TensorView.
pub fn as_tensor(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
/// Returns a shared copy of the TensorView, turning the one passed
/// as argument into a TensorView::Shared if necessary.
pub fn share(&mut self) -> TensorView {
// This is somewhat ugly, but sadly we couldn't find any other
// way to implement it. If we try to write something like:
// *self = TensorView::Shared(Arc::new(*m))
// the borrow checker will complain about *m being moved out of
// borrowed content, which makes sense but doesn't apply in our
// case because we will "give m back" to the TensorView, except
// wrapped around an Arc. The only way to get ownership of m is
// to use mem::replace, which means we have to create a "dummy"
// value to replace self first.
if let TensorView::Owned(_) = self {
let dummy = TensorView::Owned(Tensor::i32s(&[], &[0]).unwrap());
let shared = match mem::replace(self, dummy) {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
_ => panic!(),
};
*self = shared;
}
self.clone()
}
}
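// Sketch of the ownership dance above (illustrative): `share` converts an
// `Owned` view into a `Shared` one in place, so the returned clone and the
// original handle point at the same reference-counted tensor.
#[cfg(test)]
mod tensor_view_share_sketch {
    use super::*;
    #[test]
    fn share_leaves_both_handles_equal() {
        let mut view = TensorView::Owned(Tensor::i32s(&[1], &[42]).unwrap());
        let shared = view.share();
        assert_eq!(shared.as_tensor(), view.as_tensor());
    }
}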
impl<M> From<M> for TensorView
where
Tensor: From<M>,
{
fn from(m: M) -> TensorView {
TensorView::Owned(m.into())
}
}
impl From<Arc<Tensor>> for TensorView {
fn from(m: Arc<Tensor>) -> TensorView {
TensorView::Shared(m)
}
}
impl ::std::ops::Deref for TensorView {
type Target = Tensor;
fn deref(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
}
impl PartialEq for TensorView {
fn eq(&self, other: &TensorView) -> bool {
self.as_tensor() == other.as_tensor()
}
}
// TODO(liautaud): Find a more generic way to do this.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
I64(i64),
Usize(usize),
DataType(DataType),
DataFormat(DataFormat),
Padding(Padding),
Tensor(Tensor),
UsizeVec(Vec<usize>),
IsizeVec(Vec<isize>),
}
/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr>;
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>>;
/// Returns a new streaming buffer for the operation.
fn new_buffer(&self) -> Box<OpBuffer> {
Box::new(EmptyBuffer {})
}
/// Evaluates one step of the operation on the given input tensors.
/// This is only implemented for operators which support streaming.
///
/// The input tensors are annotated with an Option<usize>:
/// - None if the tensor doesn't have a streaming dimension.
/// - Some(d) if the tensor is being streamed on dimension d.
///
/// If an input tensor has a streaming dimension, the corresponding
/// TensorView will only contain a _chunk_ of input of size 1 along
/// that dimension. Note that each chunk will only be passed once
/// to the step function, so it should use the provided buffer to
/// store whichever chunks it needs for future computations.
///
/// The function should return Some(chunks) when it has computed
/// new chunks, and None if it has computed an intermediary result
/// successfully but doesn't have new output chunks ready yet.
///
/// For operators like Concat, multiple input tensors might have a
/// streaming dimension. In that case, at each call to step, only
/// one of the streaming inputs will receive new chunk while the
/// others will receive None.
fn step(
&self,
_inputs: Vec<(Option<usize>, Option<TensorView>)>,
_buffer: &mut Box<OpBuffer>,
) -> Result<Option<Vec<TensorView>>> {
bail!("Streaming is not available for operator {:?}", self)
}
/// Infers properties about the input and output tensors.
///
/// The `inputs` and `outputs` arguments correspond to properties about
/// the input and output tensors that are already known.
///
/// Returns Err in case of an unrecoverable error during the inference,
/// and the refined properties about the inputs and outputs otherwise.
fn infer_and_propagate(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;
if infered_inputs.iter().all(|i| i.value.is_concrete()) {
let input_values = infered_inputs
.iter()
.map(|i| i.value.concretize().unwrap().clone().into())
.collect(); // checked
let output_value = self.eval(input_values)?.pop().unwrap();
Ok((
infered_inputs,
vec![::analyser::helpers::tensor_to_fact(
output_value.into_tensor(),
)],
))
} else {
Ok((infered_inputs, infered_outputs))
}
}
fn const_value(&self) -> Option<Tensor> {
None
}
}
pub trait InferenceOp {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)>;
}
pub trait InferenceRulesOp {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p TensorsProxy,
outputs: &'p TensorsProxy,
);
}
impl<O: InferenceRulesOp> InferenceOp for O {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let inputs_proxy = TensorsProxy::new(vec![0].into());
let outputs_proxy = TensorsProxy::new(vec![1].into());
let mut solver = Solver::default();
self.rules(&mut solver, &inputs_proxy, &outputs_proxy);
solver.infer((inputs, outputs))
}
}
clone_trait_object!(Op);
#[cfg(feature = "serialize")]
impl Serialize for Op {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
self.get_attributes().serialize(serializer)
}
}
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;
pub struct OpBuilder(OpRegister);
impl OpBuilder {
pub fn new() -> OpBuilder {
let mut reg = OpRegister::new();
array::register_all_ops(&mut reg);
cast::register_all_ops(&mut reg);
konst::register_all_ops(&mut reg);
math::register_all_ops(&mut reg);
nn::register_all_ops(&mut reg);
OpBuilder(reg)
}
pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
match self.0.get(pb.get_op()) {
Some(builder) => builder(pb),
None => Ok(Box::new(UnimplementedOp(
pb.get_op().to_string(),
pb.to_owned(),
))),
}
}
}
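// Sketch (illustrative): resolving a graph node through the registry above.
// Unknown op names fall back to `UnimplementedOp` instead of failing at
// build time; they only error if the node is actually evaluated.
//
// let builder = OpBuilder::new();
// let op = builder.build(&node_def)?; // node_def: ::tfpb::node_def::NodeDef
// let outputs = op.eval(vec![input.into()])?;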
#[derive(Debug, Clone)]
pub struct UnimplementedOp(String, ::tfpb::node_def::NodeDef);
impl Op for UnimplementedOp {
/// Evaluates the operation given the input tensors.
fn eval(&self, _inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
Err(format!("unimplemented operation: {}", self.0))?
}
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr> {
hashmap!{} // FIXME
}
}
impl InferenceRulesOp for UnimplementedOp {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
_: &mut Solver<'r>,
_: &'p TensorsProxy,
_: &'p TensorsProxy,
) {
}
}
/// A streaming buffer for a Tensorflow operation.
///
/// This is used during streaming evaluation of models. Each node is given
/// a mutable reference to a buffer which it can use to store intermediary
/// results between evaluation steps. Every operation must provide its own
/// buffer type (or use one of the general ones defined below), which must
/// implement the OpBuffer trait. It should return a new instance of it in
/// the `Op::new_buffer` method, and downcast it from OpBuffer in `step`.
pub trait OpBuffer: Downcast + Debug + objekt::Clone + Send + 'static {}
clone_trait_object!(OpBuffer);
impl_downcast!(OpBuffer);
/// An empty buffer for operations which don't need one.
#[derive(Debug, Clone)]
pub struct EmptyBuffer {}
impl OpBuffer for EmptyBuffer {}
/// A buffer with a variable number of TensorView queues.
#[derive(Debug, Clone)]
pub struct QueuesBuffer(Vec<VecDeque<TensorView>>);
impl OpBuffer for QueuesBuffer {}
impl QueuesBuffer {
/// Creates a new buffer with a given number of queues.
pub fn new(size: usize) -> QueuesBuffer {
QueuesBuffer(vec![VecDeque::new(); size])
}
/// Appends a new TensorView to each queue in the buffer.
pub fn append(&mut self, views: &mut [(Option<usize>, Option<TensorView>)]) -> Result<()> {
if views.len() > self.0.len() {
bail!("There are more input TensorViews than queues in the buffer.");
}
for (i, view) in views.iter_mut().enumerate() {
if view.1.is_some() {
self.0[i].push_back(view.1.take().unwrap())
}
}
Ok(())
}
/// Returns an iterator over all the queues in the buffer.
pub fn iter<'a>(&'a mut self) -> impl Iterator<Item = &'a VecDeque<TensorView>> {
self.0.iter()
}
/// Returns a mutable iterator over all the queues in the buffer.
pub fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = &'a mut VecDeque<TensorView>> {
self.0.iter_mut()
}
}
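// Sketch (illustrative): how a streaming `step` implementation might use the
// buffer — push whatever chunks arrived this step, then inspect the queues.
#[cfg(test)]
mod queues_buffer_sketch {
    use super::*;
    #[test]
    fn append_fills_only_the_queues_that_received_chunks() {
        let mut buffer = QueuesBuffer::new(2);
        let chunk: TensorView = Tensor::i32s(&[1], &[7]).unwrap().into();
        let mut views = vec![(Some(0), Some(chunk)), (Some(0), None)];
        buffer.append(&mut views).unwrap();
        assert_eq!(1, buffer[0].len());
        assert_eq!(0, buffer[1].len());
    }
}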
impl Index<usize> for QueuesBuffer {
type Output = VecDeque<TensorView>;
fn index(&self, index: usize) -> &VecDeque<TensorView> {
&self.0[index]
}
}
impl IndexMut<usize> for QueuesBuffer {
fn index_mut(&mut self, index: usize) -> &mut VecDeque<TensorView> {
&mut self.0[index]
}
}
// mod.rs
//! TensorFlow Ops
use std::collections::HashMap;
use std::collections::VecDeque;
use std::fmt::Debug;
use std::mem;
use std::ops::{Index, IndexMut};
#[cfg(feature = "serialize")]
use std::result::Result as StdResult;
use std::sync::Arc;
use analyser::interface::{Solver, TensorsProxy};
use analyser::prelude::*;
use ops::nn::local_patch::{DataFormat, Padding};
use {DataType, Result, Tensor};
use downcast_rs::Downcast;
use objekt;
#[cfg(feature = "serialize")]
use serde::ser::{Serialize, Serializer};
#[macro_use]
mod macros;
mod array;
mod cast;
#[cfg(feature = "image_ops")]
pub mod image;
pub mod konst;
mod math;
pub mod nn;
pub mod prelude {
pub use super::{Attr, InferenceRulesOp, Op, OpRegister};
pub use super::{OpBuffer, QueuesBuffer, TensorView};
pub use std::collections::HashMap;
pub use std::marker::PhantomData;
pub use tensor::{DataType, Datum, Tensor};
pub use Result;
}
#[derive(Debug, Clone)]
pub enum TensorView {
Owned(Tensor),
Shared(Arc<Tensor>),
}
impl TensorView {
/// Creates a shared TensorView from any TensorView.
pub fn into_shared(self) -> TensorView {
match self {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
TensorView::Shared(_) => self,
}
}
/// Creates a Tensor from a TensorView.
pub fn into_tensor(self) -> Tensor {
match self {
TensorView::Owned(m) => m,
TensorView::Shared(m) => m.as_ref().clone(),
}
}
/// Returns a reference to the Tensor wrapped inside a TensorView.
pub fn as_tensor(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
/// Returns a shared copy of the TensorView, turning the one passed
/// as argument into a TensorView::Shared if necessary.
pub fn share(&mut self) -> TensorView {
// This is somewhat ugly, but sadly we couldn't find any other
// way to implement it. If we try to write something like:
// *self = TensorView::Shared(Arc::new(*m))
// the borrow checker will complain about *m being moved out of
// borrowed content, which makes sense but doesn't apply in our
// case because we will "give m back" to the TensorView, except
// wrapped around an Arc. The only way to get ownership of m is
// to use mem::replace, which means we have to create a "dummy"
// value to replace self first.
if let TensorView::Owned(_) = self {
let dummy = TensorView::Owned(Tensor::i32s(&[], &[0]).unwrap());
let shared = match mem::replace(self, dummy) {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
_ => panic!(),
};
*self = shared;
}
self.clone()
}
}
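// Illustrative round-trip for `share` (hypothetical helper): the first call
// converts the view to `Shared` in place, so the returned clone and the
// original alias the same `Arc<Tensor>` instead of copying tensor data.
#[allow(dead_code)]
fn demo_share() {
    let mut view: TensorView = Tensor::i32s(&[3], &[1, 2, 3]).unwrap().into();
    let alias = view.share();
    assert_eq!(view, alias); // `PartialEq` compares the underlying tensors
}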
impl<M> From<M> for TensorView
where
Tensor: From<M>,
{
fn from(m: M) -> TensorView {
TensorView::Owned(m.into())
}
}
impl From<Arc<Tensor>> for TensorView {
fn from(m: Arc<Tensor>) -> TensorView {
TensorView::Shared(m)
}
}
impl ::std::ops::Deref for TensorView {
type Target = Tensor;
fn deref(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
}
impl PartialEq for TensorView {
fn eq(&self, other: &TensorView) -> bool {
self.as_tensor() == other.as_tensor()
}
}
// TODO(liautaud): Find a more generic way to do this.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
I64(i64),
Usize(usize),
DataType(DataType),
DataFormat(DataFormat),
Padding(Padding),
Tensor(Tensor),
UsizeVec(Vec<usize>),
IsizeVec(Vec<isize>),
}
/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr>;
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>>;
/// Returns a new streaming buffer for the operation.
fn new_buffer(&self) -> Box<OpBuffer> {
Box::new(EmptyBuffer {})
}
/// Evaluates one step of the operation on the given input tensors.
/// This is only implemented for operators which support streaming.
///
/// The input tensors are annotated with an Option<usize>:
/// - None if the tensor doesn't have a streaming dimension.
/// - Option(d) if the tensor is being streamed on dimension d.
///
/// If an input tensor has a streaming dimension, the corresponding
/// TensorView will only contain a _chunk_ of input of size 1 along
/// that dimension. Note that each chunk will only be passed once
/// to the step function, so it should use the provided buffer to
/// store whichever chunks it needs for future computations.
///
/// The function should return Some(chunks) when it has computed
/// new chunks, and None if it has computed an intermediary result
/// successfully but doesn't have new output chunks ready yet.
///
/// For operators like Concat, multiple input tensors might have a
/// streaming dimension. In that case, at each call to step, only
/// one of the streaming inputs will receive a new chunk while the
/// others will receive None. | bail!("Streaming is not available for operator {:?}", self)
}
/// Infers properties about the input and output tensors.
///
/// The `inputs` and `outputs` arguments correspond to properties about
/// the input and output tensors that are already known.
///
/// Returns Err in case of an unrecoverable error during the inference,
/// and the refined properties about the inputs and outputs otherwise.
fn infer_and_propagate(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;
if infered_inputs.iter().all(|i| i.value.is_concrete()) {
let input_values = infered_inputs
.iter()
.map(|i| i.value.concretize().unwrap().clone().into())
.collect(); // checked
let output_value = self.eval(input_values)?.pop().unwrap();
Ok((
infered_inputs,
vec![::analyser::helpers::tensor_to_fact(
output_value.into_tensor(),
)],
))
} else {
Ok((infered_inputs, infered_outputs))
}
}
fn const_value(&self) -> Option<Tensor> {
None
}
}
pub trait InferenceOp {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)>;
}
pub trait InferenceRulesOp {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p TensorsProxy,
outputs: &'p TensorsProxy,
);
}
impl<O: InferenceRulesOp> InferenceOp for O {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let inputs_proxy = TensorsProxy::new(vec![0].into());
let outputs_proxy = TensorsProxy::new(vec![1].into());
let mut solver = Solver::default();
self.rules(&mut solver, &inputs_proxy, &outputs_proxy);
solver.infer((inputs, outputs))
}
}
clone_trait_object!(Op);
#[cfg(feature = "serialize")]
impl Serialize for Op {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
self.get_attributes().serialize(serializer)
}
}
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;
pub struct OpBuilder(OpRegister);
impl OpBuilder {
pub fn new() -> OpBuilder {
let mut reg = OpRegister::new();
array::register_all_ops(&mut reg);
cast::register_all_ops(&mut reg);
konst::register_all_ops(&mut reg);
math::register_all_ops(&mut reg);
nn::register_all_ops(&mut reg);
OpBuilder(reg)
}
pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
match self.0.get(pb.get_op()) {
Some(builder) => builder(pb),
None => Ok(Box::new(UnimplementedOp(
pb.get_op().to_string(),
pb.to_owned(),
))),
}
}
}
#[derive(Debug, Clone)]
pub struct UnimplementedOp(String, ::tfpb::node_def::NodeDef);
impl Op for UnimplementedOp {
/// Evaluates the operation given the input tensors.
fn eval(&self, _inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
Err(format!("unimplemented operation: {}", self.0))?
}
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr> {
hashmap!{} // FIXME
}
}
impl InferenceRulesOp for UnimplementedOp {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
_: &mut Solver<'r>,
_: &'p TensorsProxy,
_: &'p TensorsProxy,
) {
}
}
/// A streaming buffer for a Tensorflow operation.
///
/// This is used during streaming evaluation of models. Each node is given
/// a mutable reference to a buffer which it can use to store intermediary
/// results between evaluation steps. Every operation must provide its own
/// buffer type (or use one of the general ones defined below), which must
/// implement the OpBuffer trait. It should return a new instance of it in
/// the `Op::new_buffer` method, and downcast it from OpBuffer in `step`.
pub trait OpBuffer: Downcast + Debug + objekt::Clone + Send + 'static {}
clone_trait_object!(OpBuffer);
impl_downcast!(OpBuffer);
/// An empty buffer for operations which don't need one.
#[derive(Debug, Clone)]
pub struct EmptyBuffer {}
impl OpBuffer for EmptyBuffer {}
/// A buffer with a variable number of TensorView queues.
#[derive(Debug, Clone)]
pub struct QueuesBuffer(Vec<VecDeque<TensorView>>);
impl OpBuffer for QueuesBuffer {}
impl QueuesBuffer {
/// Creates a new buffer with a given number of queues.
pub fn new(size: usize) -> QueuesBuffer {
QueuesBuffer(vec![VecDeque::new(); size])
}
/// Appends a new TensorView to each queue in the buffer.
pub fn append(&mut self, views: &mut [(Option<usize>, Option<TensorView>)]) -> Result<()> {
if views.len() > self.0.len() {
bail!("There are more input TensorViews than queues in the buffer.");
}
for (i, view) in views.iter_mut().enumerate() {
if view.1.is_some() {
self.0[i].push_back(view.1.take().unwrap())
}
}
Ok(())
}
/// Returns an iterator over all the queues in the buffer.
pub fn iter<'a>(&'a mut self) -> impl Iterator<Item = &'a VecDeque<TensorView>> {
self.0.iter()
}
/// Returns a mutable iterator over all the queues in the buffer.
pub fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = &'a mut VecDeque<TensorView>> {
self.0.iter_mut()
}
}
impl Index<usize> for QueuesBuffer {
type Output = VecDeque<TensorView>;
fn index(&self, index: usize) -> &VecDeque<TensorView> {
&self.0[index]
}
}
impl IndexMut<usize> for QueuesBuffer {
fn index_mut(&mut self, index: usize) -> &mut VecDeque<TensorView> {
&mut self.0[index]
}
} | fn step(
&self,
_inputs: Vec<(Option<usize>, Option<TensorView>)>,
_buffer: &mut Box<OpBuffer>,
) -> Result<Option<Vec<TensorView>>> { | random_line_split |
mod.rs | //! TensorFlow Ops
use std::collections::HashMap;
use std::collections::VecDeque;
use std::fmt::Debug;
use std::mem;
use std::ops::{Index, IndexMut};
#[cfg(feature = "serialize")]
use std::result::Result as StdResult;
use std::sync::Arc;
use analyser::interface::{Solver, TensorsProxy};
use analyser::prelude::*;
use ops::nn::local_patch::{DataFormat, Padding};
use {DataType, Result, Tensor};
use downcast_rs::Downcast;
use objekt;
#[cfg(feature = "serialize")]
use serde::ser::{Serialize, Serializer};
#[macro_use]
mod macros;
mod array;
mod cast;
#[cfg(feature = "image_ops")]
pub mod image;
pub mod konst;
mod math;
pub mod nn;
pub mod prelude {
pub use super::{Attr, InferenceRulesOp, Op, OpRegister};
pub use super::{OpBuffer, QueuesBuffer, TensorView};
pub use std::collections::HashMap;
pub use std::marker::PhantomData;
pub use tensor::{DataType, Datum, Tensor};
pub use Result;
}
#[derive(Debug, Clone)]
pub enum TensorView {
Owned(Tensor),
Shared(Arc<Tensor>),
}
impl TensorView {
/// Creates a shared TensorView from any TensorView.
pub fn into_shared(self) -> TensorView |
/// Creates a Tensor from a TensorView.
pub fn into_tensor(self) -> Tensor {
match self {
TensorView::Owned(m) => m,
TensorView::Shared(m) => m.as_ref().clone(),
}
}
/// Returns a reference to the Tensor wrapped inside a TensorView.
pub fn as_tensor(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
/// Returns a shared copy of the TensorView, turning the one passed
/// as argument into a TensorView::Shared if necessary.
pub fn share(&mut self) -> TensorView {
// This is somewhat ugly, but sadly we couldn't find any other
// way to implement it. If we try to write something like:
// *self = TensorView::Shared(Arc::new(*m))
// the borrow checker will complain about *m being moved out of
// borrowed content, which makes sense but doesn't apply in our
// case because we will "give m back" to the TensorView, except
// wrapped around an Arc. The only way to get ownership of m is
// to use mem::replace, which means we have to create a "dummy"
// value to replace self first.
if let TensorView::Owned(_) = self {
let dummy = TensorView::Owned(Tensor::i32s(&[], &[0]).unwrap());
let shared = match mem::replace(self, dummy) {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
_ => panic!(),
};
*self = shared;
}
self.clone()
}
}
impl<M> From<M> for TensorView
where
Tensor: From<M>,
{
fn from(m: M) -> TensorView {
TensorView::Owned(m.into())
}
}
impl From<Arc<Tensor>> for TensorView {
fn from(m: Arc<Tensor>) -> TensorView {
TensorView::Shared(m)
}
}
impl ::std::ops::Deref for TensorView {
type Target = Tensor;
fn deref(&self) -> &Tensor {
match self {
&TensorView::Owned(ref m) => &m,
&TensorView::Shared(ref m) => m.as_ref(),
}
}
}
impl PartialEq for TensorView {
fn eq(&self, other: &TensorView) -> bool {
self.as_tensor() == other.as_tensor()
}
}
// TODO(liautaud): Find a more generic way to do this.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
I64(i64),
Usize(usize),
DataType(DataType),
DataFormat(DataFormat),
Padding(Padding),
Tensor(Tensor),
UsizeVec(Vec<usize>),
IsizeVec(Vec<isize>),
}
/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr>;
/// Evaluates the operation given the input tensors.
fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>>;
/// Returns a new streaming buffer for the operation.
fn new_buffer(&self) -> Box<OpBuffer> {
Box::new(EmptyBuffer {})
}
/// Evaluates one step of the operation on the given input tensors.
/// This is only implemented for operators which support streaming.
///
/// The input tensors are annotated with an Option<usize>:
/// - None if the tensor doesn't have a streaming dimension.
/// - Option(d) if the tensor is being streamed on dimension d.
///
/// If an input tensor has a streaming dimension, the corresponding
/// TensorView will only contain a _chunk_ of input of size 1 along
/// that dimension. Note that each chunk will only be passed once
/// to the step function, so it should use the provided buffer to
/// store whichever chunks it needs for future computations.
///
/// The function should return Some(chunks) when it has computed
/// new chunks, and None if it has computed an intermediary result
/// successfully but doesn't have new output chunks ready yet.
///
/// For operators like Concat, multiple input tensors might have a
/// streaming dimension. In that case, at each call to step, only
/// one of the streaming inputs will receive a new chunk while the
/// others will receive None.
fn step(
&self,
_inputs: Vec<(Option<usize>, Option<TensorView>)>,
_buffer: &mut Box<OpBuffer>,
) -> Result<Option<Vec<TensorView>>> {
bail!("Streaming is not available for operator {:?}", self)
}
/// Infers properties about the input and output tensors.
///
/// The `inputs` and `outputs` arguments correspond to properties about
/// the input and output tensors that are already known.
///
/// Returns Err in case of an unrecoverable error during the inference,
/// and the refined properties about the inputs and outputs otherwise.
fn infer_and_propagate(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;
if infered_inputs.iter().all(|i| i.value.is_concrete()) {
let input_values = infered_inputs
.iter()
.map(|i| i.value.concretize().unwrap().clone().into())
.collect(); // checked
let output_value = self.eval(input_values)?.pop().unwrap();
Ok((
infered_inputs,
vec![::analyser::helpers::tensor_to_fact(
output_value.into_tensor(),
)],
))
} else {
Ok((infered_inputs, infered_outputs))
}
}
fn const_value(&self) -> Option<Tensor> {
None
}
}
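// A hedged sketch of the streaming contract described above: a hypothetical
// pass-through operator (not one of the registered ops) that buffers chunks
// in a `QueuesBuffer` and re-emits the oldest one per evaluation step. Its
// inference rules are left empty, mirroring `UnimplementedOp` below.
#[allow(dead_code)]
#[derive(Debug, Clone)]
struct DemoPassthrough;

impl Op for DemoPassthrough {
    fn get_attributes(&self) -> HashMap<&'static str, Attr> {
        hashmap!{}
    }
    fn eval(&self, inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
        Ok(inputs)
    }
    fn new_buffer(&self) -> Box<OpBuffer> {
        Box::new(QueuesBuffer::new(1))
    }
    fn step(
        &self,
        mut inputs: Vec<(Option<usize>, Option<TensorView>)>,
        buffer: &mut Box<OpBuffer>,
    ) -> Result<Option<Vec<TensorView>>> {
        let queues = buffer
            .downcast_mut::<QueuesBuffer>()
            .ok_or("expected the QueuesBuffer created by new_buffer")?;
        queues.append(&mut inputs)?;
        // Either emit the oldest pending chunk or report "nothing ready yet".
        Ok(queues[0].pop_front().map(|chunk| vec![chunk]))
    }
}

impl InferenceRulesOp for DemoPassthrough {
    fn rules<'r, 'p: 'r, 's: 'r>(
        &'s self,
        _: &mut Solver<'r>,
        _: &'p TensorsProxy,
        _: &'p TensorsProxy,
    ) {
    }
}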
pub trait InferenceOp {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)>;
}
pub trait InferenceRulesOp {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p TensorsProxy,
outputs: &'p TensorsProxy,
);
}
impl<O: InferenceRulesOp> InferenceOp for O {
fn infer(
&self,
inputs: Vec<TensorFact>,
outputs: Vec<TensorFact>,
) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
let inputs_proxy = TensorsProxy::new(vec![0].into());
let outputs_proxy = TensorsProxy::new(vec![1].into());
let mut solver = Solver::default();
self.rules(&mut solver, &inputs_proxy, &outputs_proxy);
solver.infer((inputs, outputs))
}
}
clone_trait_object!(Op);
#[cfg(feature = "serialize")]
impl Serialize for Op {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
self.get_attributes().serialize(serializer)
}
}
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;
pub struct OpBuilder(OpRegister);
impl OpBuilder {
pub fn new() -> OpBuilder {
let mut reg = OpRegister::new();
array::register_all_ops(&mut reg);
cast::register_all_ops(&mut reg);
konst::register_all_ops(&mut reg);
math::register_all_ops(&mut reg);
nn::register_all_ops(&mut reg);
OpBuilder(reg)
}
pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
match self.0.get(pb.get_op()) {
Some(builder) => builder(pb),
None => Ok(Box::new(UnimplementedOp(
pb.get_op().to_string(),
pb.to_owned(),
))),
}
}
}
#[derive(Debug, Clone)]
pub struct UnimplementedOp(String, ::tfpb::node_def::NodeDef);
impl Op for UnimplementedOp {
/// Evaluates the operation given the input tensors.
fn eval(&self, _inputs: Vec<TensorView>) -> Result<Vec<TensorView>> {
Err(format!("unimplemented operation: {}", self.0))?
}
/// Returns the attributes of the operation and their values.
fn get_attributes(&self) -> HashMap<&'static str, Attr> {
hashmap!{} // FIXME
}
}
impl InferenceRulesOp for UnimplementedOp {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
_: &mut Solver<'r>,
_: &'p TensorsProxy,
_: &'p TensorsProxy,
) {
}
}
/// A streaming buffer for a Tensorflow operation.
///
/// This is used during streaming evaluation of models. Each node is given
/// a mutable reference to a buffer which it can use to store intermediary
/// results between evaluation steps. Every operation must provide its own
/// buffer type (or use one of the general ones defined below), which must
/// implement the OpBuffer trait. It should return a new instance of it in
/// the `Op::new_buffer` method, and downcast it from OpBuffer in `step`.
pub trait OpBuffer: Downcast + Debug + objekt::Clone + Send + 'static {}
clone_trait_object!(OpBuffer);
impl_downcast!(OpBuffer);
/// An empty buffer for operations which don't need one.
#[derive(Debug, Clone)]
pub struct EmptyBuffer {}
impl OpBuffer for EmptyBuffer {}
/// A buffer with a variable number of TensorView queues.
#[derive(Debug, Clone)]
pub struct QueuesBuffer(Vec<VecDeque<TensorView>>);
impl OpBuffer for QueuesBuffer {}
impl QueuesBuffer {
/// Creates a new buffer with a given number of queues.
pub fn new(size: usize) -> QueuesBuffer {
QueuesBuffer(vec![VecDeque::new(); size])
}
/// Appends a new TensorView to each queue in the buffer.
pub fn append(&mut self, views: &mut [(Option<usize>, Option<TensorView>)]) -> Result<()> {
if views.len() > self.0.len() {
bail!("There are more input TensorViews than queues in the buffer.");
}
for (i, view) in views.iter_mut().enumerate() {
if view.1.is_some() {
self.0[i].push_back(view.1.take().unwrap())
}
}
Ok(())
}
/// Returns an iterator over all the queues in the buffer.
pub fn iter<'a>(&'a mut self) -> impl Iterator<Item = &'a VecDeque<TensorView>> {
self.0.iter()
}
/// Returns a mutable iterator over all the queues in the buffer.
pub fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = &'a mut VecDeque<TensorView>> {
self.0.iter_mut()
}
}
impl Index<usize> for QueuesBuffer {
type Output = VecDeque<TensorView>;
fn index(&self, index: usize) -> &VecDeque<TensorView> {
&self.0[index]
}
}
impl IndexMut<usize> for QueuesBuffer {
fn index_mut(&mut self, index: usize) -> &mut VecDeque<TensorView> {
&mut self.0[index]
}
}
| {
match self {
TensorView::Owned(m) => TensorView::Shared(Arc::new(m)),
TensorView::Shared(_) => self,
}
} | identifier_body |
lib.rs | mod file_log;
mod formatter;
pub mod log_macro;
use std::env;
use std::fmt;
use std::io::{self, BufWriter};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::thread;
use log::{self, SetLoggerError};
use slog::{self, slog_o, Drain, FnValue, Key, OwnedKVList, PushFnValue, Record, KV};
use slog_async::{Async, OverflowStrategy};
use slog_term::{Decorator, PlainDecorator, RecordDecorator};
use self::file_log::{RotateBySize, RotateByTime, RotatingFileLogger, RotatingFileLoggerBuilder};
pub use slog::{FilterFn, Level};
use std::fmt::Arguments;
use std::time::Duration;
// The suffix appended to the end of rotated log files by datetime log rotator
// Warning: Diagnostics service parses log files by file name format.
// Remember to update the corresponding code when suffix layout is changed.
pub const DATETIME_ROTATE_SUFFIX: &str = "%Y-%m-%d-%H:%M:%S%.f";
// Default is 128.
// Extended since blocking is set, and we don't want to block very often.
const SLOG_CHANNEL_SIZE: usize = 10240;
// Default is DropAndReport.
// It is not desirable to have dropped logs in our use case.
const SLOG_CHANNEL_OVERFLOW_STRATEGY: OverflowStrategy = OverflowStrategy::Block;
const TIMESTAMP_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f %:z";
static LOG_LEVEL: AtomicUsize = AtomicUsize::new(usize::max_value());
#[derive(Clone, Debug)]
pub enum LogFormat {
Text,
Json,
}
/// Makes a thread name with an additional tag inherited from the current thread.
#[macro_export]
macro_rules! thd_name {
($name:expr) => {{
$crate::get_tag_from_thread_name()
.map(|tag| format!("{}::{}", $name, tag))
.unwrap_or_else(|| $name.to_owned())
}};
}
pub fn get_tag_from_thread_name() -> Option<String> {
thread::current()
.name()
.and_then(|name| name.split("::").skip(1).last())
.map(From::from)
}
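// Example (names are illustrative): inside a thread already named
// "stats::reader", `thd_name!("snapshot")` yields "snapshot::reader";
// in an unnamed or untagged thread it is simply "snapshot".
#[allow(dead_code)]
fn demo_thd_name() -> String {
    thd_name!("snapshot")
}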
pub fn init_log<D>(
drain: D,
level: Level,
use_async: bool,
init_stdlog: bool,
mut disabled_targets: Vec<String>,
slow_threshold: u64,
) -> Result<(), SetLoggerError>
where
D: Drain + Send + 'static,
<D as Drain>::Err: std::fmt::Display,
{
// Set the initial log level used by the Drains
LOG_LEVEL.store(level.as_usize(), Ordering::Relaxed);
// Only for debug purpose, so use environment instead of configuration file.
if let Ok(extra_modules) = env::var("FASTJOB_DISABLE_LOG_TARGETS") {
disabled_targets.extend(extra_modules.split(',').map(ToOwned::to_owned));
}
let filter = move |record: &Record| {
if !disabled_targets.is_empty() {
// Here get the highest level module name to check.
let module = record.module().splitn(2, "::").next().unwrap();
disabled_targets.iter().all(|target| target != module)
} else {
true
}
};
let logger = if use_async {
let drain = Async::new(LogAndFuse(drain))
.chan_size(SLOG_CHANNEL_SIZE)
.overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY)
.thread_name(thd_name!("slogger"))
.build()
.filter_level(level)
.fuse();
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
} else {
let drain = LogAndFuse(Mutex::new(drain).filter_level(level));
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
};
set_global_logger(level, init_stdlog, logger)
}
pub fn set_global_logger(
level: Level,
init_stdlog: bool,
logger: slog::Logger,
) -> Result<(), SetLoggerError> {
slog_global::set_global(logger);
if init_stdlog {
slog_global::redirect_std_log(Some(level))?;
grpcio::redirect_log();
}
Ok(())
}
/// Constructs a new file writer which outputs logs to a file at the specified path.
/// The file is rotated when the given timespan elapses or its size exceeds the given limit.
pub fn file_writer<N>(
path: impl AsRef<Path>,
rotation_timespan: Duration,
rotation_size: u64,
rename: N,
) -> io::Result<BufWriter<RotatingFileLogger>>
where
N: 'static + Send + Fn(&Path) -> io::Result<PathBuf>,
{
let logger = BufWriter::new(
RotatingFileLoggerBuilder::builder(rename)
.add_path(path)
.add_rotator(RotateByTime::new(rotation_timespan))
.add_rotator(RotateBySize::new(rotation_size))
.build()?,
);
Ok(logger)
}
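// End-to-end wiring sketch (path, rotation policy and threshold are
// placeholders; the `rename` closure here simply keeps the rotated file's
// name unchanged, where a real deployment would timestamp it).
#[allow(dead_code)]
fn demo_init() -> Result<(), SetLoggerError> {
    let writer = file_writer(
        "fastjob.log",
        Duration::from_secs(24 * 60 * 60), // rotate daily...
        128 << 20,                         // ...or once the file hits 128 MiB
        |path: &Path| -> io::Result<PathBuf> { Ok(path.to_path_buf()) },
    )
    .expect("failed to open log file");
    // Text format, async channel, redirect `log` crate macros, no disabled
    // targets, 1000 ms slow-log threshold.
    init_log(text_format(writer), Level::Info, true, true, vec![], 1000)
}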
/// Constructs a new terminal writer which outputs logs to stderr.
pub fn term_writer() -> io::Stderr {
io::stderr()
}
/// Formats output logs to "FastJob Log Format".
pub fn text_format<W>(io: W) -> FastJobFormat<PlainDecorator<W>>
where
W: io::Write,
{
let decorator = PlainDecorator::new(io);
FastJobFormat::new(decorator)
}
/// Formats output logs to JSON format.
pub fn json_format<W>(io: W) -> slog_json::Json<W>
where
W: io::Write,
{
slog_json::Json::new(io)
.set_newlines(true)
.set_flush(true)
.add_key_value(slog_o!(
"message" => PushFnValue(|record, ser| ser.emit(record.msg())),
"caller" => PushFnValue(|record, ser| ser.emit(format_args!(
"{}:{}",
Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
.unwrap_or("<unknown>"),
record.line(),
))),
"level" => FnValue(|record| get_unified_log_level(record.level())),
"time" => FnValue(|_| chrono::Local::now().format(TIMESTAMP_FORMAT).to_string()),
))
.build()
}
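// For orientation, the same record rendered by each drain looks roughly
// like this (values and JSON field order are invented for illustration):
//
//   [2021/07/01 12:00:00.000 +08:00] [INFO] [server.rs:42] ["started"] [port=8080]
//
//   {"message":"started","caller":"server.rs:42","level":"INFO","time":"2021/07/01 12:00:00.000 +08:00","port":8080}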
pub fn get_level_by_string(lv: &str) -> Option<Level> {
match &*lv.to_owned().to_lowercase() {
"critical" => Some(Level::Critical),
"error" => Some(Level::Error),
// We support `warn` due to legacy.
"warning" | "warn" => Some(Level::Warning),
"debug" => Some(Level::Debug),
"trace" => Some(Level::Trace),
"info" => Some(Level::Info),
_ => None,
}
}
// The `to_string()` function of `slog::Level` produces values like `erro` and `trce` instead of
// the full words. This produces the full word.
pub fn get_string_by_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "critical",
Level::Error => "error",
Level::Warning => "warning",
Level::Debug => "debug",
Level::Trace => "trace",
Level::Info => "info",
}
}
// Converts `slog::Level` to unified log level format.
fn get_unified_log_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "FATAL",
Level::Error => "ERROR",
Level::Warning => "WARN",
Level::Info => "INFO",
Level::Debug => "DEBUG",
Level::Trace => "TRACE",
}
}
pub fn convert_slog_level_to_log_level(lv: Level) -> log::Level {
match lv {
Level::Critical | Level::Error => log::Level::Error,
Level::Warning => log::Level::Warn,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
Level::Info => log::Level::Info,
}
}
pub fn convert_log_level_to_slog_level(lv: log::Level) -> Level {
match lv {
log::Level::Error => Level::Error,
log::Level::Warn => Level::Warning,
log::Level::Debug => Level::Debug,
log::Level::Trace => Level::Trace,
log::Level::Info => Level::Info,
}
}
pub fn get_log_level() -> Option<Level> {
Level::from_usize(LOG_LEVEL.load(Ordering::Relaxed))
}
pub fn set_log_level(new_level: Level) {
LOG_LEVEL.store(new_level.as_usize(), Ordering::SeqCst)
}
pub struct FastJobFormat<D>
where
D: Decorator,
{
decorator: D,
}
impl<D> FastJobFormat<D>
where
D: Decorator,
{
pub fn new(decorator: D) -> Self {
Self { decorator }
}
}
impl<D> Drain for FastJobFormat<D>
where
D: Decorator,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
self.decorator.with_record(record, values, |decorator| {
write_log_header(decorator, record)?;
write_log_msg(decorator, record)?;
write_log_fields(decorator, record, values)?;
decorator.start_whitespace()?;
writeln!(decorator)?;
decorator.flush()?;
Ok(())
})?;
}
Ok(())
}
}
struct LogAndFuse<D>(D);
impl<D> Drain for LogAndFuse<D>
where
D: Drain,
<D as Drain>::Err: std::fmt::Display,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
if let Err(e) = self.0.log(record, values) {
let fatal_drainer = Mutex::new(text_format(term_writer())).ignore_res();
fatal_drainer.log(record, values).unwrap();
let fatal_logger = slog::Logger::root(fatal_drainer, slog_o!());
slog::slog_crit!(
fatal_logger,
"logger encountered error";
"err" => %e,
)
}
}
Ok(())
}
}
/// Filters out logs whose operation cost is below the threshold; all other logs are passed to the inner drain.
struct SlowLogFilter<D> {
threshold: u64,
inner: D,
}
impl<D> Drain for SlowLogFilter<D>
where
D: Drain<Ok = (), Err = slog::Never>,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.tag() == "slow_log" {
let mut s = SlowCostSerializer { cost: None };
let kv = record.kv();
let _ = kv.serialize(record, &mut s);
if let Some(cost) = s.cost {
if cost <= self.threshold {
return Ok(());
}
}
}
self.inner.log(record, values)
}
}
struct SlowCostSerializer {
// None means the input record carries no `takes` key
cost: Option<u64>,
}
impl slog::ser::Serializer for SlowCostSerializer {
fn emit_arguments(&mut self, _key: Key, _val: &fmt::Arguments<'_>) -> slog::Result {
Ok(())
}
fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
if key == "takes" {
self.cost = Some(val);
}
Ok(())
}
}
/// Special struct for slow log cost serializing
pub struct LogCost(pub u64);
impl slog::Value for LogCost {
fn serialize(
&self,
_record: &Record,
key: Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
serializer.emit_u64(key, self.0)
}
}
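// Sketch of emitting a record that `SlowLogFilter` understands (assumes the
// standard slog `#"tag"` macro syntax): the tag routes it into the filter,
// and the `takes` value, serialized through `LogCost`, is what gets compared
// against the threshold; records at or below it are dropped.
#[allow(dead_code)]
fn demo_slow_log(logger: &slog::Logger, elapsed_ms: u64) {
    slog::slog_info!(
        logger,
        #"slow_log",
        "request handled";
        "takes" => LogCost(elapsed_ms)
    );
}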
/// Dispatches logs to a normal `Drain` or a slow-log specialized `Drain` by tag
pub struct LogDispatcher<N: Drain, S: Drain> {
normal: N,
slow: Option<S>,
}
impl<N: Drain, S: Drain> LogDispatcher<N, S> {
pub fn new(normal: N, slow: Option<S>) -> Self {
Self { normal, slow }
}
}
impl<N, S> Drain for LogDispatcher<N, S>
where
N: Drain<Ok = (), Err = io::Error>,
S: Drain<Ok = (), Err = io::Error>,
{
type Ok = (); | type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
let tag = record.tag();
if self.slow.is_some() && tag.starts_with("slow_log") {
self.slow.as_ref().unwrap().log(record, values)
} else {
self.normal.log(record, values)
}
}
}
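// Composition sketch: ordinary records go to the main file while records
// tagged "slow_log*" get their own (paths and sizes are placeholders, and
// `keep` reuses the rotated file's old name).
#[allow(dead_code)]
fn demo_dispatcher() -> io::Result<impl Drain<Ok = (), Err = io::Error>> {
    let keep = |p: &Path| -> io::Result<PathBuf> { Ok(p.to_path_buf()) };
    let day = Duration::from_secs(24 * 60 * 60);
    let normal = text_format(file_writer("fastjob.log", day, 128 << 20, keep)?);
    let slow = text_format(file_writer("fastjob-slow.log", day, 128 << 20, keep)?);
    Ok(LogDispatcher::new(normal, Some(slow)))
}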
/// Writes log header to decorator.
fn write_log_header(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_timestamp()?;
write!(
decorator,
"[{}]",
chrono::Local::now().format(TIMESTAMP_FORMAT)
)?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_level()?;
write!(decorator, "[{}]", get_unified_log_level(record.level()))?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
// Write source file info.
decorator.start_msg()?;
if let Some(path) = Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
{
write!(decorator, "[")?;
formatter::write_file_name(decorator, path)?;
write!(decorator, ":{}]", record.line())?;
} else {
write!(decorator, "[<unknown>]")?;
}
Ok(())
}
/// Writes log message to decorator.
fn write_log_msg(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_msg()?;
write!(decorator, "[")?;
let msg = format!("{}", record.msg());
formatter::write_escaped_str(decorator, &msg)?;
write!(decorator, "]")?;
Ok(())
}
/// Writes log fields to decorator.
fn write_log_fields(
decorator: &mut dyn RecordDecorator,
record: &Record<'_>,
values: &OwnedKVList,
) -> io::Result<()> {
let mut serializer = Serializer::new(decorator);
record.kv().serialize(record, &mut serializer)?;
values.serialize(record, &mut serializer)?;
serializer.finish();
Ok(())
}
struct Serializer<'a> {
decorator: &'a mut dyn RecordDecorator,
}
impl<'a> Serializer<'a> {
fn new(decorator: &'a mut dyn RecordDecorator) -> Self {
Self { decorator }
}
fn write_whitespace(&mut self) -> io::Result<()> {
self.decorator.start_whitespace()?;
write!(self.decorator, " ")?;
Ok(())
}
fn finish(self) {}
}
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {}
}
impl<'a> slog::Serializer for Serializer<'a> {
fn emit_none(&mut self, key: Key) -> slog::Result {
self.emit_arguments(key, &format_args!("None"))
}
fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments<'_>) -> slog::Result {
self.write_whitespace()?;
// Write key
write!(self.decorator, "[")?;
self.decorator.start_key()?;
formatter::write_escaped_str(&mut self.decorator, key as &str)?;
// Write separator
self.decorator.start_separator()?;
write!(self.decorator, "=")?;
// Write value
let value = format!("{}", val);
self.decorator.start_value()?;
formatter::write_escaped_str(self.decorator, &value)?;
self.decorator.reset()?;
write!(self.decorator, "]")?;
Ok(())
}
} | random_line_split |
|
lib.rs | mod file_log;
mod formatter;
pub mod log_macro;
use std::env;
use std::fmt;
use std::io::{self, BufWriter};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::thread;
use log::{self, SetLoggerError};
use slog::{self, slog_o, Drain, FnValue, Key, OwnedKVList, PushFnValue, Record, KV};
use slog_async::{Async, OverflowStrategy};
use slog_term::{Decorator, PlainDecorator, RecordDecorator};
use self::file_log::{RotateBySize, RotateByTime, RotatingFileLogger, RotatingFileLoggerBuilder};
pub use slog::{FilterFn, Level};
use std::fmt::Arguments;
use std::time::Duration;
// The suffix appended to the end of rotated log files by datetime log rotator
// Warning: Diagnostics service parses log files by file name format.
// Remember to update the corresponding code when suffix layout is changed.
pub const DATETIME_ROTATE_SUFFIX: &str = "%Y-%m-%d-%H:%M:%S%.f";
// Default is 128.
// Extended since blocking is set, and we don't want to block very often.
const SLOG_CHANNEL_SIZE: usize = 10240;
// Default is DropAndReport.
// It is not desirable to have dropped logs in our use case.
const SLOG_CHANNEL_OVERFLOW_STRATEGY: OverflowStrategy = OverflowStrategy::Block;
const TIMESTAMP_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f %:z";
static LOG_LEVEL: AtomicUsize = AtomicUsize::new(usize::max_value());
#[derive(Clone, Debug)]
pub enum LogFormat {
Text,
Json,
}
/// Makes a thread name with an additional tag inherited from the current thread.
#[macro_export]
macro_rules! thd_name {
($name:expr) => {{
$crate::get_tag_from_thread_name()
.map(|tag| format!("{}::{}", $name, tag))
.unwrap_or_else(|| $name.to_owned())
}};
}
pub fn get_tag_from_thread_name() -> Option<String> {
thread::current()
.name()
.and_then(|name| name.split("::").skip(1).last())
.map(From::from)
}
pub fn init_log<D>(
drain: D,
level: Level,
use_async: bool,
init_stdlog: bool,
mut disabled_targets: Vec<String>,
slow_threshold: u64,
) -> Result<(), SetLoggerError>
where
D: Drain + Send + 'static,
<D as Drain>::Err: std::fmt::Display,
{
// Set the initial log level used by the Drains
LOG_LEVEL.store(level.as_usize(), Ordering::Relaxed);
// Only for debug purpose, so use environment instead of configuration file.
if let Ok(extra_modules) = env::var("FASTJOB_DISABLE_LOG_TARGETS") {
disabled_targets.extend(extra_modules.split(',').map(ToOwned::to_owned));
}
let filter = move |record: &Record| {
if !disabled_targets.is_empty() {
// Here get the highest level module name to check.
let module = record.module().splitn(2, "::").next().unwrap();
disabled_targets.iter().all(|target| target != module)
} else {
true
}
};
let logger = if use_async {
let drain = Async::new(LogAndFuse(drain))
.chan_size(SLOG_CHANNEL_SIZE)
.overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY)
.thread_name(thd_name!("slogger"))
.build()
.filter_level(level)
.fuse();
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
} else {
let drain = LogAndFuse(Mutex::new(drain).filter_level(level));
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
};
set_global_logger(level, init_stdlog, logger)
}
pub fn set_global_logger(
level: Level,
init_stdlog: bool,
logger: slog::Logger,
) -> Result<(), SetLoggerError> {
slog_global::set_global(logger);
if init_stdlog {
slog_global::redirect_std_log(Some(level))?;
grpcio::redirect_log();
}
Ok(())
}
/// Constructs a new file writer which outputs logs to a file at the specified path.
/// The file is rotated when the given timespan elapses or its size exceeds the given limit.
pub fn file_writer<N>(
path: impl AsRef<Path>,
rotation_timespan: Duration,
rotation_size: u64,
rename: N,
) -> io::Result<BufWriter<RotatingFileLogger>>
where
N: 'static + Send + Fn(&Path) -> io::Result<PathBuf>,
{
let logger = BufWriter::new(
RotatingFileLoggerBuilder::builder(rename)
.add_path(path)
.add_rotator(RotateByTime::new(rotation_timespan))
.add_rotator(RotateBySize::new(rotation_size))
.build()?,
);
Ok(logger)
}
/// Constructs a new terminal writer which outputs logs to stderr.
pub fn term_writer() -> io::Stderr {
io::stderr()
}
/// Formats output logs to "FastJob Log Format".
pub fn text_format<W>(io: W) -> FastJobFormat<PlainDecorator<W>>
where
W: io::Write,
{
let decorator = PlainDecorator::new(io);
FastJobFormat::new(decorator)
}
/// Formats output logs to JSON format.
pub fn json_format<W>(io: W) -> slog_json::Json<W>
where
W: io::Write,
{
slog_json::Json::new(io)
.set_newlines(true)
.set_flush(true)
.add_key_value(slog_o!(
"message" => PushFnValue(|record, ser| ser.emit(record.msg())),
"caller" => PushFnValue(|record, ser| ser.emit(format_args!(
"{}:{}",
Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
.unwrap_or("<unknown>"),
record.line(),
))),
"level" => FnValue(|record| get_unified_log_level(record.level())),
"time" => FnValue(|_| chrono::Local::now().format(TIMESTAMP_FORMAT).to_string()),
))
.build()
}
pub fn get_level_by_string(lv: &str) -> Option<Level> {
match &*lv.to_owned().to_lowercase() {
"critical" => Some(Level::Critical),
"error" => Some(Level::Error),
// We support `warn` due to legacy.
"warning" | "warn" => Some(Level::Warning),
"debug" => Some(Level::Debug),
"trace" => Some(Level::Trace),
"info" => Some(Level::Info),
_ => None,
}
}
// The `to_string()` function of `slog::Level` produces values like `erro` and `trce` instead of
// the full words. This produces the full word.
pub fn get_string_by_level(lv: Level) -> &'static str |
// Converts `slog::Level` to unified log level format.
fn get_unified_log_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "FATAL",
Level::Error => "ERROR",
Level::Warning => "WARN",
Level::Info => "INFO",
Level::Debug => "DEBUG",
Level::Trace => "TRACE",
}
}
pub fn convert_slog_level_to_log_level(lv: Level) -> log::Level {
match lv {
Level::Critical | Level::Error => log::Level::Error,
Level::Warning => log::Level::Warn,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
Level::Info => log::Level::Info,
}
}
pub fn convert_log_level_to_slog_level(lv: log::Level) -> Level {
match lv {
log::Level::Error => Level::Error,
log::Level::Warn => Level::Warning,
log::Level::Debug => Level::Debug,
log::Level::Trace => Level::Trace,
log::Level::Info => Level::Info,
}
}
pub fn get_log_level() -> Option<Level> {
Level::from_usize(LOG_LEVEL.load(Ordering::Relaxed))
}
pub fn set_log_level(new_level: Level) {
LOG_LEVEL.store(new_level.as_usize(), Ordering::SeqCst)
}
pub struct FastJobFormat<D>
where
D: Decorator,
{
decorator: D,
}
impl<D> FastJobFormat<D>
where
D: Decorator,
{
pub fn new(decorator: D) -> Self {
Self { decorator }
}
}
impl<D> Drain for FastJobFormat<D>
where
D: Decorator,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
self.decorator.with_record(record, values, |decorator| {
write_log_header(decorator, record)?;
write_log_msg(decorator, record)?;
write_log_fields(decorator, record, values)?;
decorator.start_whitespace()?;
writeln!(decorator)?;
decorator.flush()?;
Ok(())
})?;
}
Ok(())
}
}
struct LogAndFuse<D>(D);
impl<D> Drain for LogAndFuse<D>
where
D: Drain,
<D as Drain>::Err: std::fmt::Display,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
if let Err(e) = self.0.log(record, values) {
let fatal_drainer = Mutex::new(text_format(term_writer())).ignore_res();
fatal_drainer.log(record, values).unwrap();
let fatal_logger = slog::Logger::root(fatal_drainer, slog_o!());
slog::slog_crit!(
fatal_logger,
"logger encountered error";
"err" => %e,
)
}
}
Ok(())
}
}
/// Filters out logs whose operation cost is below the threshold; all other logs are passed to the inner drain.
struct SlowLogFilter<D> {
threshold: u64,
inner: D,
}
impl<D> Drain for SlowLogFilter<D>
where
D: Drain<Ok = (), Err = slog::Never>,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.tag() == "slow_log" {
let mut s = SlowCostSerializer { cost: None };
let kv = record.kv();
let _ = kv.serialize(record, &mut s);
if let Some(cost) = s.cost {
if cost <= self.threshold {
return Ok(());
}
}
}
self.inner.log(record, values)
}
}
struct SlowCostSerializer {
// None means the input record carries no `takes` key
cost: Option<u64>,
}
impl slog::ser::Serializer for SlowCostSerializer {
fn emit_arguments(&mut self, _key: Key, _val: &fmt::Arguments<'_>) -> slog::Result {
Ok(())
}
fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
if key == "takes" {
self.cost = Some(val);
}
Ok(())
}
}
/// Special struct for slow log cost serializing
pub struct LogCost(pub u64);
impl slog::Value for LogCost {
fn serialize(
&self,
_record: &Record,
key: Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
serializer.emit_u64(key, self.0)
}
}
/// Dispatches logs to a normal `Drain` or a slow-log specialized `Drain` by tag
pub struct LogDispatcher<N: Drain, S: Drain> {
normal: N,
slow: Option<S>,
}
impl<N: Drain, S: Drain> LogDispatcher<N, S> {
pub fn new(normal: N, slow: Option<S>) -> Self {
Self { normal, slow }
}
}
impl<N, S> Drain for LogDispatcher<N, S>
where
N: Drain<Ok = (), Err = io::Error>,
S: Drain<Ok = (), Err = io::Error>,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
let tag = record.tag();
if self.slow.is_some() && tag.starts_with("slow_log") {
self.slow.as_ref().unwrap().log(record, values)
} else {
self.normal.log(record, values)
}
}
}
/// Writes log header to decorator.
fn write_log_header(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_timestamp()?;
write!(
decorator,
"[{}]",
chrono::Local::now().format(TIMESTAMP_FORMAT)
)?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_level()?;
write!(decorator, "[{}]", get_unified_log_level(record.level()))?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
// Write source file info.
decorator.start_msg()?;
if let Some(path) = Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
{
write!(decorator, "[")?;
formatter::write_file_name(decorator, path)?;
write!(decorator, ":{}]", record.line())?;
} else {
write!(decorator, "[<unknown>]")?;
}
Ok(())
}
/// Writes log message to decorator.
fn write_log_msg(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_msg()?;
write!(decorator, "[")?;
let msg = format!("{}", record.msg());
formatter::write_escaped_str(decorator, &msg)?;
write!(decorator, "]")?;
Ok(())
}
/// Writes log fields to decorator.
fn write_log_fields(
decorator: &mut dyn RecordDecorator,
record: &Record<'_>,
values: &OwnedKVList,
) -> io::Result<()> {
let mut serializer = Serializer::new(decorator);
record.kv().serialize(record, &mut serializer)?;
values.serialize(record, &mut serializer)?;
serializer.finish();
Ok(())
}
struct Serializer<'a> {
decorator: &'a mut dyn RecordDecorator,
}
impl<'a> Serializer<'a> {
fn new(decorator: &'a mut dyn RecordDecorator) -> Self {
Self { decorator }
}
fn write_whitespace(&mut self) -> io::Result<()> {
self.decorator.start_whitespace()?;
write!(self.decorator, " ")?;
Ok(())
}
fn finish(self) {}
}
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {}
}
impl<'a> slog::Serializer for Serializer<'a> {
fn emit_none(&mut self, key: Key) -> slog::Result {
self.emit_arguments(key, &format_args!("None"))
}
fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments<'_>) -> slog::Result {
self.write_whitespace()?;
// Write key
write!(self.decorator, "[")?;
self.decorator.start_key()?;
formatter::write_escaped_str(&mut self.decorator, key as &str)?;
// Write separator
self.decorator.start_separator()?;
write!(self.decorator, "=")?;
// Write value
let value = format!("{}", val);
self.decorator.start_value()?;
formatter::write_escaped_str(self.decorator, &value)?;
self.decorator.reset()?;
write!(self.decorator, "]")?;
Ok(())
}
}
| {
match lv {
Level::Critical => "critical",
Level::Error => "error",
Level::Warning => "warning",
Level::Debug => "debug",
Level::Trace => "trace",
Level::Info => "info",
}
} | identifier_body |
lib.rs | mod file_log;
mod formatter;
pub mod log_macro;
use std::env;
use std::fmt;
use std::io::{self, BufWriter};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::thread;
use log::{self, SetLoggerError};
use slog::{self, slog_o, Drain, FnValue, Key, OwnedKVList, PushFnValue, Record, KV};
use slog_async::{Async, OverflowStrategy};
use slog_term::{Decorator, PlainDecorator, RecordDecorator};
use self::file_log::{RotateBySize, RotateByTime, RotatingFileLogger, RotatingFileLoggerBuilder};
pub use slog::{FilterFn, Level};
use std::fmt::Arguments;
use std::time::Duration;
// The suffix appended to the end of rotated log files by datetime log rotator
// Warning: Diagnostics service parses log files by file name format.
// Remember to update the corresponding code when suffix layout is changed.
pub const DATETIME_ROTATE_SUFFIX: &str = "%Y-%m-%d-%H:%M:%S%.f";
// Default is 128.
// Extended since blocking is set, and we don't want to block very often.
const SLOG_CHANNEL_SIZE: usize = 10240;
// Default is DropAndReport.
// It is not desirable to have dropped logs in our use case.
const SLOG_CHANNEL_OVERFLOW_STRATEGY: OverflowStrategy = OverflowStrategy::Block;
const TIMESTAMP_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f %:z";
static LOG_LEVEL: AtomicUsize = AtomicUsize::new(usize::max_value());
#[derive(Clone, Debug)]
pub enum LogFormat {
Text,
Json,
}
/// Makes a thread name with an additional tag inherited from the current thread.
#[macro_export]
macro_rules! thd_name {
($name:expr) => {{
$crate::get_tag_from_thread_name()
.map(|tag| format!("{}::{}", $name, tag))
.unwrap_or_else(|| $name.to_owned())
}};
}
pub fn get_tag_from_thread_name() -> Option<String> {
thread::current()
.name()
.and_then(|name| name.split("::").skip(1).last())
.map(From::from)
}
pub fn init_log<D>(
drain: D,
level: Level,
use_async: bool,
init_stdlog: bool,
mut disabled_targets: Vec<String>,
slow_threshold: u64,
) -> Result<(), SetLoggerError>
where
D: Drain + Send + 'static,
<D as Drain>::Err: std::fmt::Display,
{
// Set the initial log level used by the Drains
LOG_LEVEL.store(level.as_usize(), Ordering::Relaxed);
// Only for debug purpose, so use environment instead of configuration file.
if let Ok(extra_modules) = env::var("FASTJOB_DISABLE_LOG_TARGETS") {
disabled_targets.extend(extra_modules.split(',').map(ToOwned::to_owned));
}
let filter = move |record: &Record| {
if !disabled_targets.is_empty() {
// Here get the highest level module name to check.
let module = record.module().splitn(2, "::").next().unwrap();
disabled_targets.iter().all(|target| target != module)
} else {
true
}
};
let logger = if use_async {
let drain = Async::new(LogAndFuse(drain))
.chan_size(SLOG_CHANNEL_SIZE)
.overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY)
.thread_name(thd_name!("slogger"))
.build()
.filter_level(level)
.fuse();
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
} else {
let drain = LogAndFuse(Mutex::new(drain).filter_level(level));
let drain = SlowLogFilter {
threshold: slow_threshold,
inner: drain,
};
let filtered = drain.filter(filter).fuse();
slog::Logger::root(filtered, slog_o!())
};
set_global_logger(level, init_stdlog, logger)
}
pub fn set_global_logger(
level: Level,
init_stdlog: bool,
logger: slog::Logger,
) -> Result<(), SetLoggerError> {
slog_global::set_global(logger);
if init_stdlog {
slog_global::redirect_std_log(Some(level))?;
grpcio::redirect_log();
}
Ok(())
}
/// Constructs a new file writer which outputs logs to a file at the specified path.
/// The file is rotated when the given timespan elapses or its size exceeds the given limit.
pub fn file_writer<N>(
path: impl AsRef<Path>,
rotation_timespan: Duration,
rotation_size: u64,
rename: N,
) -> io::Result<BufWriter<RotatingFileLogger>>
where
N: 'static + Send + Fn(&Path) -> io::Result<PathBuf>,
{
let logger = BufWriter::new(
RotatingFileLoggerBuilder::builder(rename)
.add_path(path)
.add_rotator(RotateByTime::new(rotation_timespan))
.add_rotator(RotateBySize::new(rotation_size))
.build()?,
);
Ok(logger)
}
/// Constructs a new terminal writer which outputs logs to stderr.
pub fn term_writer() -> io::Stderr {
io::stderr()
}
/// Formats output logs to "FastJob Log Format".
pub fn text_format<W>(io: W) -> FastJobFormat<PlainDecorator<W>>
where
W: io::Write,
{
let decorator = PlainDecorator::new(io);
FastJobFormat::new(decorator)
}
/// Formats output logs to JSON format.
pub fn json_format<W>(io: W) -> slog_json::Json<W>
where
W: io::Write,
{
slog_json::Json::new(io)
.set_newlines(true)
.set_flush(true)
.add_key_value(slog_o!(
"message" => PushFnValue(|record, ser| ser.emit(record.msg())),
"caller" => PushFnValue(|record, ser| ser.emit(format_args!(
"{}:{}",
Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
.unwrap_or("<unknown>"),
record.line(),
))),
"level" => FnValue(|record| get_unified_log_level(record.level())),
"time" => FnValue(|_| chrono::Local::now().format(TIMESTAMP_FORMAT).to_string()),
))
.build()
}
pub fn get_level_by_string(lv: &str) -> Option<Level> {
match &*lv.to_owned().to_lowercase() {
"critical" => Some(Level::Critical),
"error" => Some(Level::Error),
// We support `warn` due to legacy.
"warning" | "warn" => Some(Level::Warning),
"debug" => Some(Level::Debug),
"trace" => Some(Level::Trace),
"info" => Some(Level::Info),
_ => None,
}
}
// The `to_string()` function of `slog::Level` produces values like `erro` and `trce` instead of
// the full words. This produces the full word.
pub fn get_string_by_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "critical",
Level::Error => "error",
Level::Warning => "warning",
Level::Debug => "debug",
Level::Trace => "trace",
Level::Info => "info",
}
}
// Converts `slog::Level` to unified log level format.
fn get_unified_log_level(lv: Level) -> &'static str {
match lv {
Level::Critical => "FATAL",
Level::Error => "ERROR",
Level::Warning => "WARN",
Level::Info => "INFO",
Level::Debug => "DEBUG",
Level::Trace => "TRACE",
}
}
pub fn convert_slog_level_to_log_level(lv: Level) -> log::Level {
match lv {
Level::Critical | Level::Error => log::Level::Error,
Level::Warning => log::Level::Warn,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
Level::Info => log::Level::Info,
}
}
pub fn convert_log_level_to_slog_level(lv: log::Level) -> Level {
match lv {
log::Level::Error => Level::Error,
log::Level::Warn => Level::Warning,
log::Level::Debug => Level::Debug,
log::Level::Trace => Level::Trace,
log::Level::Info => Level::Info,
}
}
pub fn get_log_level() -> Option<Level> {
Level::from_usize(LOG_LEVEL.load(Ordering::Relaxed))
}
pub fn set_log_level(new_level: Level) {
LOG_LEVEL.store(new_level.as_usize(), Ordering::SeqCst)
}
pub struct FastJobFormat<D>
where
D: Decorator,
{
decorator: D,
}
impl<D> FastJobFormat<D>
where
D: Decorator,
{
pub fn new(decorator: D) -> Self {
Self { decorator }
}
}
impl<D> Drain for FastJobFormat<D>
where
D: Decorator,
{
type Ok = ();
type Err = io::Error;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
self.decorator.with_record(record, values, |decorator| {
write_log_header(decorator, record)?;
write_log_msg(decorator, record)?;
write_log_fields(decorator, record, values)?;
decorator.start_whitespace()?;
writeln!(decorator)?;
decorator.flush()?;
Ok(())
})?;
}
Ok(())
}
}
struct LogAndFuse<D>(D);
impl<D> Drain for LogAndFuse<D>
where
D: Drain,
<D as Drain>::Err: std::fmt::Display,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.level().as_usize() <= LOG_LEVEL.load(Ordering::Relaxed) {
if let Err(e) = self.0.log(record, values) {
let fatal_drainer = Mutex::new(text_format(term_writer())).ignore_res();
fatal_drainer.log(record, values).unwrap();
let fatal_logger = slog::Logger::root(fatal_drainer, slog_o!());
slog::slog_crit!(
fatal_logger,
"logger encountered error";
"err" => %e,
)
}
}
Ok(())
}
}
/// Filters out logs whose operation cost is below the threshold; all other logs are passed to the inner drain.
struct SlowLogFilter<D> {
threshold: u64,
inner: D,
}
impl<D> Drain for SlowLogFilter<D>
where
D: Drain<Ok = (), Err = slog::Never>,
{
type Ok = ();
type Err = slog::Never;
fn log(&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
if record.tag() == "slow_log" {
let mut s = SlowCostSerializer { cost: None };
let kv = record.kv();
let _ = kv.serialize(record, &mut s);
if let Some(cost) = s.cost {
if cost <= self.threshold {
return Ok(());
}
}
}
self.inner.log(record, values)
}
}
struct SlowCostSerializer {
// None means the input record carries no `takes` key
cost: Option<u64>,
}
impl slog::ser::Serializer for SlowCostSerializer {
fn emit_arguments(&mut self, _key: Key, _val: &fmt::Arguments<'_>) -> slog::Result {
Ok(())
}
fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result {
if key == "takes" {
self.cost = Some(val);
}
Ok(())
}
}
/// Special struct for slow log cost serializing
pub struct LogCost(pub u64);
impl slog::Value for LogCost {
fn serialize(
&self,
_record: &Record,
key: Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
serializer.emit_u64(key, self.0)
}
}
/// Dispatches logs to a normal `Drain` or a slow-log specialized `Drain` by tag
pub struct LogDispatcher<N: Drain, S: Drain> {
normal: N,
slow: Option<S>,
}
impl<N: Drain, S: Drain> LogDispatcher<N, S> {
pub fn new(normal: N, slow: Option<S>) -> Self {
Self { normal, slow }
}
}
impl<N, S> Drain for LogDispatcher<N, S>
where
N: Drain<Ok = (), Err = io::Error>,
S: Drain<Ok = (), Err = io::Error>,
{
type Ok = ();
type Err = io::Error;
fn | (&self, record: &Record<'_>, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
let tag = record.tag();
if self.slow.is_some() && tag.starts_with("slow_log") {
self.slow.as_ref().unwrap().log(record, values)
} else {
self.normal.log(record, values)
}
}
}
/// Writes log header to decorator.
fn write_log_header(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_timestamp()?;
write!(
decorator,
"[{}]",
chrono::Local::now().format(TIMESTAMP_FORMAT)
)?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_level()?;
write!(decorator, "[{}]", get_unified_log_level(record.level()))?;
decorator.start_whitespace()?;
write!(decorator, " ")?;
// Write source file info.
decorator.start_msg()?;
if let Some(path) = Path::new(record.file())
.file_name()
.and_then(|path| path.to_str())
{
write!(decorator, "[")?;
formatter::write_file_name(decorator, path)?;
write!(decorator, ":{}]", record.line())?;
} else {
write!(decorator, "[<unknown>]")?;
}
Ok(())
}
/// Writes log message to decorator.
fn write_log_msg(decorator: &mut dyn RecordDecorator, record: &Record<'_>) -> io::Result<()> {
decorator.start_whitespace()?;
write!(decorator, " ")?;
decorator.start_msg()?;
write!(decorator, "[")?;
let msg = format!("{}", record.msg());
formatter::write_escaped_str(decorator, &msg)?;
write!(decorator, "]")?;
Ok(())
}
/// Writes log fields to decorator.
fn write_log_fields(
decorator: &mut dyn RecordDecorator,
record: &Record<'_>,
values: &OwnedKVList,
) -> io::Result<()> {
let mut serializer = Serializer::new(decorator);
record.kv().serialize(record, &mut serializer)?;
values.serialize(record, &mut serializer)?;
serializer.finish();
Ok(())
}
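// For reference, a record written through the three helpers above renders as
// a single line of the following shape (timestamp, level, source location,
// message, then one [key=value] pair per field); the values are illustrative:
//
//     [2024/01/01 12:00:00.000 +00:00] [INFO] [server.rs:42] [request finished] [takes=1500]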
struct Serializer<'a> {
decorator: &'a mut dyn RecordDecorator,
}
impl<'a> Serializer<'a> {
fn new(decorator: &'a mut dyn RecordDecorator) -> Self {
Self { decorator }
}
fn write_whitespace(&mut self) -> io::Result<()> {
self.decorator.start_whitespace()?;
write!(self.decorator, " ")?;
Ok(())
}
fn finish(self) {}
}
impl<'a> Drop for Serializer<'a> {
fn drop(&mut self) {}
}
impl<'a> slog::Serializer for Serializer<'a> {
fn emit_none(&mut self, key: Key) -> slog::Result {
self.emit_arguments(key, &format_args!("None"))
}
fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments<'_>) -> slog::Result {
self.write_whitespace()?;
// Write key
write!(self.decorator, "[")?;
self.decorator.start_key()?;
formatter::write_escaped_str(&mut self.decorator, key as &str)?;
// Write separator
self.decorator.start_separator()?;
write!(self.decorator, "=")?;
// Write value
let value = format!("{}", val);
self.decorator.start_value()?;
formatter::write_escaped_str(self.decorator, &value)?;
self.decorator.reset()?;
write!(self.decorator, "]")?;
Ok(())
}
}
| log | identifier_name |
section_0771_to_0788.rs | //! @ The |align_state| and |preamble| variables are initialized elsewhere.
//!
//! @<Set init...@>=
//! align_ptr:=null; cur_align:=null; cur_span:=null; cur_loop:=null;
//! cur_head:=null; cur_tail:=null;
//!
//! @ Alignment stack maintenance is handled by a pair of trivial routines
//! called |push_alignment| and |pop_alignment|.
//!
//! @p procedure push_alignment;
//! var p:pointer; {the new alignment stack node}
//! begin p:=get_node(align_stack_node_size);
//! link(p):=align_ptr; info(p):=cur_align;
//! llink(p):=preamble; rlink(p):=cur_span;
//! mem[p+2].int:=cur_loop; mem[p+3].int:=align_state;
//! info(p+4):=cur_head; link(p+4):=cur_tail;
//! align_ptr:=p;
//! cur_head:=get_avail;
//! end;
//! @#
//! procedure pop_alignment;
//! var p:pointer; {the top alignment stack node}
//! begin free_avail(cur_head);
//! p:=align_ptr;
//! cur_tail:=link(p+4); cur_head:=info(p+4);
//! align_state:=mem[p+3].int; cur_loop:=mem[p+2].int;
//! cur_span:=rlink(p); preamble:=llink(p);
//! cur_align:=info(p); align_ptr:=link(p);
//! free_node(p,align_stack_node_size);
//! end;
//!
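//! A typed sketch of the node that |push_alignment| saves and |pop_alignment|
//! restores (not in the original WEB source; field names follow the variables
//! above, with `Pointer` standing for a |mem| index):
//! ```text
//! struct AlignStackNode {
//!     link: Pointer,        // link(p): previous stack node
//!     cur_align: Pointer,   // info(p)
//!     preamble: Pointer,    // llink(p)
//!     cur_span: Pointer,    // rlink(p)
//!     cur_loop: Pointer,    // mem[p+2].int
//!     align_state: Integer, // mem[p+3].int
//!     cur_head: Pointer,    // info(p+4)
//!     cur_tail: Pointer,    // link(p+4)
//! }
//! ```
//!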
//! @ \TeX\ has eight procedures that govern alignments: |init_align| and
//! |fin_align| are used at the very beginning and the very end; |init_row| and
//! |fin_row| are used at the beginning and end of individual rows; |init_span|
//! is used at the beginning of a sequence of spanned columns (possibly involving
//! only one column); |init_col| and |fin_col| are used at the beginning and
//! end of individual columns; and |align_peek| is used after \.{\\cr} to see
//! whether the next item is \.{\\noalign}.
//!
//! We shall consider these routines in the order they are first used during
//! the course of a complete \.{\\halign}, namely |init_align|, |align_peek|,
//! |init_row|, |init_span|, |init_col|, |fin_col|, |fin_row|, |fin_align|.
//!
//! @ When \.{\\halign} or \.{\\valign} has been scanned in an appropriate
//! mode, \TeX\ calls |init_align|, whose task is to get everything off to a
//! good start. This mostly involves scanning the preamble and putting its
//! information into the preamble list.
//! @^preamble@>
//! | //! procedure@?align_peek; forward;@t\2@>@/
//! procedure@?normal_paragraph; forward;@t\2@>@/
//! procedure init_align;
//! label done, done1, done2, continue;
//! var save_cs_ptr:pointer; {|warning_index| value for error messages}
//! @!p:pointer; {for short-term temporary use}
//! begin save_cs_ptr:=cur_cs; {\.{\\halign} or \.{\\valign}, usually}
//! push_alignment; align_state:=-1000000; {enter a new alignment level}
//! @<Check for improper alignment in displayed math@>;
//! push_nest; {enter a new semantic level}
//! @<Change current mode to |-vmode| for \.{\\halign}, |-hmode| for \.{\\valign}@>;
//! scan_spec(align_group,false);@/
//! @<Scan the preamble and record it in the |preamble| list@>;
//! new_save_level(align_group);
//! if every_cr<>null then begin_token_list(every_cr,every_cr_text);
//! align_peek; {look for \.{\\noalign} or \.{\\omit}}
//! end;
//!
//! @ In vertical modes, |prev_depth| already has the correct value. But
//! if we are in |mmode| (displayed formula mode), we reach out to the
//! enclosing vertical mode for the |prev_depth| value that produces the
//! correct baseline calculations.
//!
//! @<Change current mode...@>=
//! if mode=mmode then
//! begin mode:=-vmode; prev_depth:=nest[nest_ptr-2].aux_field.sc;
//! end
//! else if mode>0 then negate(mode)
//!
//! @ When \.{\\halign} is used as a displayed formula, there should be
//! no other pieces of mlists present.
//!
//! @<Check for improper alignment in displayed math@>=
//! if (mode=mmode)and((tail<>head)or(incompleat_noad<>null)) then
//! begin print_err("Improper "); print_esc("halign"); print(" inside $$'s");
//! @.Improper \\halign...@>
//! help3("Displays can use special alignments (like \eqalignno)")@/
//! ("only if nothing but the alignment itself is between $$'s.")@/
//! ("So I've deleted the formulas that preceded this alignment.");
//! error; flush_math;
//! end
//!
//! @ @<Scan the preamble and record it in the |preamble| list@>=
//! preamble:=null; cur_align:=align_head; cur_loop:=null; scanner_status:=aligning;
//! warning_index:=save_cs_ptr; align_state:=-1000000;
//! {at this point, |cur_cmd=left_brace|}
//! loop@+ begin @<Append the current tabskip glue to the preamble list@>;
//! if cur_cmd=car_ret then goto done; {\.{\\cr} ends the preamble}
//! @<Scan preamble text until |cur_cmd| is |tab_mark| or |car_ret|,
//! looking for changes in the tabskip glue; append an
//! alignrecord to the preamble list@>;
//! end;
//! done: scanner_status:=normal
//!
//! @ @<Append the current tabskip glue to the preamble list@>=
//! link(cur_align):=new_param_glue(tab_skip_code);
//! cur_align:=link(cur_align)
//!
//! @ @<Scan preamble text until |cur_cmd| is |tab_mark| or |car_ret|...@>=
//! @<Scan the template \<u_j>, putting the resulting token list in |hold_head|@>;
//! link(cur_align):=new_null_box; cur_align:=link(cur_align); {a new alignrecord}
//! info(cur_align):=end_span; width(cur_align):=null_flag;
//! u_part(cur_align):=link(hold_head);
//! @<Scan the template \<v_j>, putting the resulting token list in |hold_head|@>;
//! v_part(cur_align):=link(hold_head)
//!
//! @ We enter `\.{\\span}' into |eqtb| with |tab_mark| as its command code,
//! and with |span_code| as the command modifier. This makes \TeX\ interpret it
//! essentially the same as an alignment delimiter like `\.\&', yet it is
//! recognizably different when we need to distinguish it from a normal delimiter.
//! It also turns out to be useful to give a special |cr_code| to `\.{\\cr}',
//! and an even larger |cr_cr_code| to `\.{\\crcr}'.
//!
//! The end of a template is represented by two ``frozen'' control sequences
//! called \.{\\endtemplate}. The first has the command code |end_template|, which
//! is |>outer_call|, so it will not easily disappear in the presence of errors.
//! The |get_x_token| routine converts the first into the second, which has |endv|
//! as its command code.
//!
//! @d span_code=256 {distinct from any character}
//! @d cr_code=257 {distinct from |span_code| and from any character}
//! @d cr_cr_code=cr_code+1 {this distinguishes \.{\\crcr} from \.{\\cr}}
//! @d end_template_token==cs_token_flag+frozen_end_template
//!
//! @<Put each of \TeX's primitives into the hash table@>=
//! primitive("span",tab_mark,span_code);@/
//! @!@:span_}{\.{\\span} primitive@>
//! primitive("cr",car_ret,cr_code);
//! @!@:cr_}{\.{\\cr} primitive@>
//! text(frozen_cr):="cr"; eqtb[frozen_cr]:=eqtb[cur_val];@/
//! primitive("crcr",car_ret,cr_cr_code);
//! @!@:cr_cr_}{\.{\\crcr} primitive@>
//! text(frozen_end_template):="endtemplate"; text(frozen_endv):="endtemplate";
//! eq_type(frozen_endv):=endv; equiv(frozen_endv):=null_list;
//! eq_level(frozen_endv):=level_one;@/
//! eqtb[frozen_end_template]:=eqtb[frozen_endv];
//! eq_type(frozen_end_template):=end_template;
//!
//! @ @<Cases of |print_cmd_chr|...@>=
//! tab_mark: if chr_code=span_code then print_esc("span")
//! else chr_cmd("alignment tab character ");
//! car_ret: if chr_code=cr_code then print_esc("cr")
//! else print_esc("crcr");
//!
//! @ The preamble is copied directly, except that \.{\\tabskip} causes a change
//! to the tabskip glue, thereby possibly expanding macros that immediately
//! follow it. An appearance of \.{\\span} also causes such an expansion.
//!
//! Note that if the preamble contains `\.{\\global\\tabskip}', the `\.{\\global}'
//! token survives in the preamble and the `\.{\\tabskip}' defines new
//! tabskip glue (locally).
//!
//! @<Declare the procedure called |get_preamble_token|@>=
//! procedure get_preamble_token;
//! label restart;
//! begin restart: get_token;
//! while (cur_chr=span_code)and(cur_cmd=tab_mark) do
//! begin get_token; {this token will be expanded once}
//! if cur_cmd>max_command then
//! begin expand; get_token;
//! end;
//! end;
//! if cur_cmd=endv then
//! fatal_error("(interwoven alignment preambles are not allowed)");
//! @.interwoven alignment preambles...@>
//! if (cur_cmd=assign_glue)and(cur_chr=glue_base+tab_skip_code) then
//! begin scan_optional_equals; scan_glue(glue_val);
//! if global_defs>0 then geq_define(glue_base+tab_skip_code,glue_ref,cur_val)
//! else eq_define(glue_base+tab_skip_code,glue_ref,cur_val);
//! goto restart;
//! end;
//! end;
//!
//! @ Spaces are eliminated from the beginning of a template.
//!
//! @<Scan the template \<u_j>...@>=
//! p:=hold_head; link(p):=null;
//! loop@+ begin get_preamble_token;
//! if cur_cmd=mac_param then goto done1;
//! if (cur_cmd<=car_ret)and(cur_cmd>=tab_mark)and(align_state=-1000000) then
//! if (p=hold_head)and(cur_loop=null)and(cur_cmd=tab_mark)
//! then cur_loop:=cur_align
//! else begin print_err("Missing # inserted in alignment preamble");
//! @.Missing \# inserted...@>
//! help3("There should be exactly one # between &'s, when an")@/
//! ("\halign or \valign is being set up. In this case you had")@/
//! ("none, so I've put one in; maybe that will work.");
//! back_error; goto done1;
//! end
//! else if (cur_cmd<>spacer)or(p<>hold_head) then
//! begin link(p):=get_avail; p:=link(p); info(p):=cur_tok;
//! end;
//! end;
//! done1:
//!
//! @ @<Scan the template \<v_j>...@>=
//! p:=hold_head; link(p):=null;
//! loop@+ begin continue: get_preamble_token;
//! if (cur_cmd<=car_ret)and(cur_cmd>=tab_mark)and(align_state=-1000000) then
//! goto done2;
//! if cur_cmd=mac_param then
//! begin print_err("Only one # is allowed per tab");
//! @.Only one \# is allowed...@>
//! help3("There should be exactly one # between &'s, when an")@/
//! ("\halign or \valign is being set up. In this case you had")@/
//! ("more than one, so I'm ignoring all but the first.");
//! error; goto continue;
//! end;
//! link(p):=get_avail; p:=link(p); info(p):=cur_tok;
//! end;
//! done2: link(p):=get_avail; p:=link(p);
//! info(p):=end_template_token {put \.{\\endtemplate} at the end}
//!
//! @ The tricky part about alignments is getting the templates into the
//! scanner at the right time, and recovering control when a row or column
//! is finished.
//!
//! We usually begin a row after each \.{\\cr} has been sensed, unless that
//! \.{\\cr} is followed by \.{\\noalign} or by the right brace that terminates
//! the alignment. The |align_peek| routine is used to look ahead and do
//! the right thing; it either gets a new row started, or gets a \.{\\noalign}
//! started, or finishes off the alignment.
//!
//! @<Declare the procedure called |align_peek|@>=
//! procedure align_peek;
//! label restart;
//! begin restart: align_state:=1000000; @<Get the next non-blank non-call token@>;
//! if cur_cmd=no_align then
//! begin scan_left_brace; new_save_level(no_align_group);
//! if mode=-vmode then normal_paragraph;
//! end
//! else if cur_cmd=right_brace then fin_align
//! else if (cur_cmd=car_ret)and(cur_chr=cr_cr_code) then
//! goto restart {ignore \.{\\crcr}}
//! else begin init_row; {start a new row}
//! init_col; {start a new column and replace what we peeked at}
//! end;
//! end;
//!
//! @ To start a row (i.e., a `row' that rhymes with `dough' but not with `bough'),
//! we enter a new semantic level, copy the first tabskip glue, and change
//! from internal vertical mode to restricted horizontal mode or vice versa.
//! The |space_factor| and |prev_depth| are not used on this semantic level,
//! but we clear them to zero just to be tidy.
//!
//! @p @t\4@>@<Declare the procedure called |init_span|@>@t@>@/
//! procedure init_row;
//! begin push_nest; mode:=(-hmode-vmode)-mode;
//! if mode=-hmode then space_factor:=0 @+else prev_depth:=0;
//! tail_append(new_glue(glue_ptr(preamble)));
//! subtype(tail):=tab_skip_code+1;@/
//! cur_align:=link(preamble); cur_tail:=cur_head; init_span(cur_align);
//! end;
//!
//! @ The parameter to |init_span| is a pointer to the alignrecord where the
//! next column or group of columns will begin. A new semantic level is
//! entered, so that the columns will generate a list for subsequent packaging.
//!
//! @<Declare the procedure called |init_span|@>=
//! procedure init_span(@!p:pointer);
//! begin push_nest;
//! if mode=-hmode then space_factor:=1000
//! else begin prev_depth:=ignore_depth; normal_paragraph;
//! end;
//! cur_span:=p;
//! end;
//!
//! @ When a column begins, we assume that |cur_cmd| is either |omit| or else
//! the current token should be put back into the input until the \<u_j>
//! template has been scanned. (Note that |cur_cmd| might be |tab_mark| or
//! |car_ret|.) We also assume that |align_state| is approximately 1000000 at
//! this time. We remain in the same mode, and start the template if it is
//! called for.
//!
//! @p procedure init_col;
//! begin extra_info(cur_align):=cur_cmd;
//! if cur_cmd=omit then align_state:=0
//! else begin back_input; begin_token_list(u_part(cur_align),u_template);
//! end; {now |align_state=1000000|}
//! end;
//! | //! @p @t\4@>@<Declare the procedure called |get_preamble_token|@>@t@>@/ | random_line_split |
http.rs | time out, so the maximum time we allow Bitcoin Core to block for is twice this
/// value.
const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(300);
/// Maximum HTTP message header size in bytes.
const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192;
/// Maximum HTTP message body size in bytes. Enough for a hex-encoded block in JSON format and any
/// overhead for HTTP chunked transfer encoding.
const MAX_HTTP_MESSAGE_BODY_SIZE: usize = 2 * 4_000_000 + 32_000;
/// Endpoint for interacting with an HTTP-based API.
#[derive(Debug)]
pub struct HttpEndpoint {
host: String,
port: Option<u16>,
path: String,
}
impl HttpEndpoint {
/// Creates an endpoint for the given host and default HTTP port.
pub fn for_host(host: String) -> Self {
Self {
host,
port: None,
path: String::from("/"),
}
}
/// Specifies a port to use with the endpoint.
pub fn with_port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
/// Specifies a path to use with the endpoint.
pub fn with_path(mut self, path: String) -> Self {
self.path = path;
self
}
/// Returns the endpoint host.
pub fn host(&self) -> &str {
&self.host
}
/// Returns the endpoint port.
pub fn port(&self) -> u16 {
match self.port {
None => 80,
Some(port) => port,
}
}
/// Returns the endpoint path.
pub fn path(&self) -> &str {
&self.path
}
}
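// --- Illustrative sketch (not part of the original file) ---------------------
// Building an endpoint with the fluent API above; host, port and path are
// placeholder values for a local Bitcoin Core REST interface.
#[allow(dead_code)]
fn example_endpoint() -> HttpEndpoint {
    HttpEndpoint::for_host("127.0.0.1".to_string())
        .with_port(8332)
        .with_path("/rest".to_string())
}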
impl<'a> std::net::ToSocketAddrs for &'a HttpEndpoint {
type Iter = <(&'a str, u16) as std::net::ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
(self.host(), self.port()).to_socket_addrs()
}
}
/// Client for making HTTP requests.
pub(crate) struct HttpClient {
address: SocketAddr,
stream: TcpStream,
}
impl HttpClient {
/// Opens a connection to an HTTP endpoint.
pub fn connect<E: ToSocketAddrs>(endpoint: E) -> std::io::Result<Self> {
let address = match endpoint.to_socket_addrs()?.next() {
None => {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "could not resolve to any addresses"));
},
Some(address) => address,
};
let stream = std::net::TcpStream::connect_timeout(&address, TCP_STREAM_TIMEOUT)?;
stream.set_read_timeout(Some(TCP_STREAM_TIMEOUT))?;
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT))?;
#[cfg(feature = "tokio")]
let stream = {
stream.set_nonblocking(true)?;
TcpStream::from_std(stream)?
};
Ok(Self { address, stream })
}
/// Sends a `GET` request for a resource identified by `uri` at the `host`.
///
/// Returns the response body in `F` format.
#[allow(dead_code)]
pub async fn get<F>(&mut self, uri: &str, host: &str) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let request = format!(
"GET {} HTTP/1.1\r\n\
Host: {}\r\n\
Connection: keep-alive\r\n\
\r\n", uri, host);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends a `POST` request for a resource identified by `uri` at the `host` using the given HTTP
/// authentication credentials.
///
/// The request body consists of the provided JSON `content`. Returns the response body in `F`
/// format.
#[allow(dead_code)]
pub async fn post<F>(&mut self, uri: &str, host: &str, auth: &str, content: serde_json::Value) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let content = content.to_string();
let request = format!(
"POST {} HTTP/1.1\r\n\
Host: {}\r\n\
Authorization: {}\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}", uri, host, auth, content.len(), content);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends an HTTP request message and reads the response, returning its body. Attempts to
/// reconnect and retry if the connection has been closed.
async fn send_request_with_retry(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
match self.send_request(request).await {
Ok(bytes) => Ok(bytes),
Err(_) => {
// Reconnect and retry on fail. This can happen if the connection was closed after
// the keep-alive limits are reached, or generally if the request timed out due to
// Bitcoin Core being stuck on a long-running operation or its RPC queue being
// full.
// Block 100ms before retrying the request as in many cases the source of the error
// may be persistent for some time.
#[cfg(feature = "tokio")]
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(not(feature = "tokio"))]
std::thread::sleep(Duration::from_millis(100));
*self = Self::connect(self.address)?;
self.send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?;
if chunk_header == "0\r\n" |
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
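// --- Illustrative sketch (not part of the original file) ---------------------
// Typical calls against `HttpClient`. The URI, host, credentials and JSON
// payload are placeholders; `auth` would normally be a pre-computed
// "Basic <base64>" header value.
#[allow(dead_code)]
async fn example_requests(client: &mut HttpClient) -> std::io::Result<()> {
    // Fetch a resource as raw bytes.
    let _raw: BinaryResponse = client.get("/rest/chaininfo.bin", "127.0.0.1").await?;
    // Issue a JSON-RPC style POST and parse the body as JSON.
    let auth = "Basic dXNlcjpwYXNz"; // hypothetical credentials
    let body = serde_json::json!({ "method": "getblockcount", "params": [] });
    let _json: JsonResponse = client.post("/", "127.0.0.1", auth, body).await?;
    Ok(())
}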
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf8_lossy(&self.contents);
write!(f, "status_code: {}, contents: {}", self.status_code, contents)
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether the status is successful (i.e., 2xx status class).
fn is_ok(&self) -> bool {
self.code.starts_with('2')
}
}
/// HTTP response header as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-7
struct HttpHeader<'a> {
name: &'a str,
value: &'a str,
}
impl<'a> HttpHeader<'a> {
/// Parses an HTTP header field as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.2
fn parse(line: &'a String) -> std::io::Result<HttpHeader<'a>> {
let mut tokens = line.splitn(2, ':');
let name = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header name"))?;
let value = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header value"))?
.trim_start();
Ok(Self { name, value })
}
/// Returns whether the header field has the given name.
fn has_name(&self, name: &str) -> bool {
self.name.eq_ignore_ascii_case(name)
}
}
/// HTTP message body length as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.3.3
enum HttpMessageLength {
Empty,
ContentLength(usize),
TransferEncoding(String),
}
/// An HTTP response body in binary format.
pub struct BinaryResponse(pub Vec<u8>);
/// An HTTP response body in JSON format.
pub struct JsonResponse(pub serde_json::Value);
/// Interprets bytes from an HTTP response body as binary data.
impl TryFrom<Vec<u8>> for BinaryResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(BinaryResponse(bytes))
}
}
/// Interprets bytes from an HTTP response body as a JSON value.
impl TryFrom<Vec<u8>> for JsonResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(JsonResponse(serde_json::from_slice(&bytes)?))
}
}
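// --- Illustrative sketch (not part of the original file) ---------------------
// The conversions above are what `get`/`post` apply to the response body; they
// can also be exercised directly:
#[allow(dead_code)]
fn example_conversions() -> std::io::Result<()> {
    let json = JsonResponse::try_from(b"{\"chain\":\"main\"}".to_vec())?;
    assert_eq!(json.0["chain"], "main");
    let binary = BinaryResponse::try_from(vec![0u8, 1, 2])?;
    assert_eq!(binary.0.len(), 3);
    Ok(())
}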
#[cfg(test)]
mod endpoint_tests {
use super::HttpEndpoint;
#[test]
fn with_default_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 80);
}
#[test]
fn with_custom_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_port(8080);
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 8080);
}
#[test]
fn with_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_path("/path".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/path");
}
#[test]
fn without_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/");
}
#[test]
fn convert_to_socket_addrs() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
let host = endpoint.host();
let port = endpoint.port();
use std::net::ToSocketAddrs;
match (&endpoint).to_socket_addrs() {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(mut socket_addrs) => {
match socket_addrs.next() {
None => panic!("Expected socket address"),
Some(addr) => {
assert_eq!(addr, (host, port).to_socket_addrs().unwrap().next().unwrap());
assert!(socket_addrs.next().is_none());
}
}
}
}
}
}
#[cfg(test)]
pub(crate) mod client_tests {
use super::*;
use std::io::BufRead;
use std::io::Write;
/// Server for handling HTTP client requests with a stock response.
pub struct HttpServer {
address: std::net::SocketAddr,
handler: std::thread::JoinHandle<()>,
shutdown: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
/// Body of HTTP response messages.
pub enum MessageBody<T: ToString> {
Empty,
Content(T),
ChunkedContent(T),
}
impl HttpServer {
fn responding_with_body<T: ToString>(status: &str, body: MessageBody<T>) -> Self {
let response = match body {
MessageBody::Empty => format!("{}\r\n\r\n", status),
MessageBody::Content(body) => {
let body = body.to_string();
format!(
"{}\r\n\
Content-Length: {}\r\n\
\r\n\
{}", status, body.len(), body)
},
MessageBody::ChunkedContent(body) => {
let mut chuncked_body = Vec::new();
{
use chunked_transfer::Encoder;
let mut encoder = Encoder::with_chunks_size(&mut chuncked_body, 8);
encoder.write_all(body.to_string().as_bytes()).unwrap();
}
format!(
"{}\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
{}", status, String::from_utf8(chuncked_body).unwrap())
},
};
HttpServer::responding_with(response)
}
pub fn responding_with_ok<T: ToString>(body: MessageBody<T>) -> Self {
HttpServer::responding_with_body("HTTP/1.1 200 OK", body)
}
pub fn responding_with_not_found() -> Self {
HttpServer::responding_with_body::<String>("HTTP/1.1 404 Not Found", MessageBody::Empty)
}
pub fn responding_with_server_error<T: ToString>(content: T) -> Self {
let body = MessageBody::Content(content);
HttpServer::responding_with_body("HTTP/1.1 500 Internal Server Error", body)
}
fn responding_with(response: String) -> Self {
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
let address = listener.local_addr().unwrap();
let shutdown = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let shutdown_signaled = std::sync::Arc::clone(&shutdown);
let handler = std::thread::spawn(move || {
for stream in listener.incoming() {
let mut stream = stream.unwrap();
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT)).unwrap();
let lines_read = std::io::BufReader::new(&stream)
.lines()
.take_while(|line| !line.as_ref().unwrap().is_empty())
.count();
if lines_read == 0 { continue; }
for chunk in response.as_bytes().chunks(16) {
if shutdown_signaled.load(std::sync::atomic::Ordering::SeqCst) {
return;
} else {
if let Err(_) = stream.write(chunk) { break; }
if let Err(_) = stream.flush() { break; }
}
}
}
});
Self { address, handler, shutdown }
}
fn shutdown(self) {
self.shutdown.store(true, std::sync::atomic::Ordering::SeqCst);
self.handler.join().unwrap();
}
pub fn endpoint(&self) -> HttpEndpoint {
HttpEndpoint::for_host(self.address.ip().to_string()).with_port(self.address.port())
}
}
#[test]
fn connect_to_unresolvable_host() {
match HttpClient::connect(("example.invalid", 80)) {
Err(e) => {
assert!(e.to_string().contains("failed to lookup address information") ||
e.to_string().contains("No such host"), "{:?}", e);
},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_no_socket_address() {
match HttpClient::connect(&vec![][..]) {
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput),
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_unknown_server() {
match HttpClient::connect(("::", 80)) {
#[cfg(target_os = "windows")]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::AddrNotAvailable),
#[cfg(not(target_os = "windows"))]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::ConnectionRefused),
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn connect_with_valid_endpoint() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
match HttpClient::connect(&server.endpoint()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => {},
}
}
#[tokio::test]
async fn read_empty_message() {
let server = HttpServer::responding_with("".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no status line");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_incomplete_message() {
let server = HttpServer::responding_with("HTTP/1.1 200 OK".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_headers() {
let response = format!(
"HTTP/1.1 302 Found\r\n\
Location: {}\r\n\
\r\n", "Z".repeat(MAX_HTTP_MESSAGE_HEADER_SIZE));
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_body() {
let body = "Z".repeat(MAX_HTTP_MESSAGE_BODY_SIZE + 1);
let server = HttpServer::responding_with_ok::<String>(MessageBody::Content(body));
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
assert_eq!(e.get_ref().unwrap().to_string(), "out of range");
},
Ok(_) => panic!("Expected error"),
}
server.shutdown();
}
#[tokio::test]
async fn read_message_with_unsupported_transfer_coding() {
let response = String::from(
"HTTP/1.1 200 OK\r\n\
Transfer-Encoding: gzip\r\n\
\r\n\
foobar");
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput);
assert_eq!(e.get_ref().unwrap().to_string(), "unsupported transfer coding");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_error() {
let server = HttpServer::responding_with_server_error("foo");
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<JsonResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::Other);
let http_error = e.into_inner().unwrap().downcast::<HttpError>().unwrap();
assert_eq!(http_error.status_code, "500");
assert_eq!(http_error.contents, "foo".as_bytes());
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_empty_message_body() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[tokio::test]
async fn read_message_body_with_length() {
let body = "foo bar baz qux".repeat(32);
let content = MessageBody::Content(body.clone());
let server = HttpServer::responding_with_ok::<String>(content);
let mut client = HttpClient::connect(&server.endpoint()). | {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
} | conditional_block |
http.rs | we time out, so the maximum time we allow Bitcoin Core to block for is twice this
/// value.
const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(300);
/// Maximum HTTP message header size in bytes.
const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192;
/// Maximum HTTP message body size in bytes. Enough for a hex-encoded block in JSON format and any
/// overhead for HTTP chunked transfer encoding.
const MAX_HTTP_MESSAGE_BODY_SIZE: usize = 2 * 4_000_000 + 32_000;
/// Endpoint for interacting with an HTTP-based API.
#[derive(Debug)]
pub struct HttpEndpoint {
host: String,
port: Option<u16>,
path: String,
}
impl HttpEndpoint {
/// Creates an endpoint for the given host and default HTTP port.
pub fn for_host(host: String) -> Self {
Self {
host,
port: None,
path: String::from("/"),
}
}
/// Specifies a port to use with the endpoint.
pub fn with_port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
/// Specifies a path to use with the endpoint.
pub fn with_path(mut self, path: String) -> Self {
self.path = path;
self
}
/// Returns the endpoint host.
pub fn host(&self) -> &str {
&self.host
}
/// Returns the endpoint port.
pub fn port(&self) -> u16 {
match self.port {
None => 80,
Some(port) => port,
}
}
/// Returns the endpoint path.
pub fn path(&self) -> &str {
&self.path
}
}
impl<'a> std::net::ToSocketAddrs for &'a HttpEndpoint {
type Iter = <(&'a str, u16) as std::net::ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
(self.host(), self.port()).to_socket_addrs()
}
}
/// Client for making HTTP requests.
pub(crate) struct HttpClient {
address: SocketAddr,
stream: TcpStream,
}
impl HttpClient {
/// Opens a connection to an HTTP endpoint.
pub fn connect<E: ToSocketAddrs>(endpoint: E) -> std::io::Result<Self> {
let address = match endpoint.to_socket_addrs()?.next() {
None => {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "could not resolve to any addresses"));
},
Some(address) => address,
};
let stream = std::net::TcpStream::connect_timeout(&address, TCP_STREAM_TIMEOUT)?;
stream.set_read_timeout(Some(TCP_STREAM_TIMEOUT))?;
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT))?;
#[cfg(feature = "tokio")]
let stream = {
stream.set_nonblocking(true)?;
TcpStream::from_std(stream)?
};
Ok(Self { address, stream })
}
/// Sends a `GET` request for a resource identified by `uri` at the `host`.
///
/// Returns the response body in `F` format.
#[allow(dead_code)]
pub async fn get<F>(&mut self, uri: &str, host: &str) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let request = format!(
"GET {} HTTP/1.1\r\n\
Host: {}\r\n\
Connection: keep-alive\r\n\
\r\n", uri, host);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends a `POST` request for a resource identified by `uri` at the `host` using the given HTTP
/// authentication credentials.
///
/// The request body consists of the provided JSON `content`. Returns the response body in `F`
/// format.
#[allow(dead_code)]
pub async fn post<F>(&mut self, uri: &str, host: &str, auth: &str, content: serde_json::Value) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let content = content.to_string();
let request = format!(
"POST {} HTTP/1.1\r\n\
Host: {}\r\n\
Authorization: {}\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}", uri, host, auth, content.len(), content);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends an HTTP request message and reads the response, returning its body. Attempts to
/// reconnect and retry if the connection has been closed.
async fn send_request_with_retry(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
match self.send_request(request).await {
Ok(bytes) => Ok(bytes),
Err(_) => {
// Reconnect and retry on fail. This can happen if the connection was closed after
// the keep-alive limits are reached, or generally if the request timed out due to
// Bitcoin Core being stuck on a long-running operation or its RPC queue being
// full.
// Block 100ms before retrying the request as in many cases the source of the error
// may be persistent for some time.
#[cfg(feature = "tokio")]
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(not(feature = "tokio"))]
std::thread::sleep(Duration::from_millis(100));
*self = Self::connect(self.address)?;
self.send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?; | if chunk_header == "0\r\n" {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
}
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf8_lossy(&self.contents);
write!(f, "status_code: {}, contents: {}", self.status_code, contents)
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether the status is successful (i.e., 2xx status class).
fn is_ok(&self) -> bool {
self.code.starts_with('2')
}
}
/// HTTP response header as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-7
struct HttpHeader<'a> {
name: &'a str,
value: &'a str,
}
impl<'a> HttpHeader<'a> {
/// Parses an HTTP header field as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.2
fn parse(line: &'a String) -> std::io::Result<HttpHeader<'a>> {
let mut tokens = line.splitn(2, ':');
let name = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header name"))?;
let value = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header value"))?
.trim_start();
Ok(Self { name, value })
}
/// Returns whether the header field has the given name.
fn has_name(&self, name: &str) -> bool {
self.name.eq_ignore_ascii_case(name)
}
}
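// --- Illustrative sketch (not part of the original file) ---------------------
// How the two parsers above behave on well-formed input; the lines mimic what
// `read_response` feeds them:
#[allow(dead_code)]
fn example_parsing() -> std::io::Result<()> {
    let status_line = "HTTP/1.1 200 OK".to_string();
    let status = HttpStatus::parse(&status_line)?;
    assert!(status.is_ok());
    let header_line = "Content-Length: 42".to_string();
    let header = HttpHeader::parse(&header_line)?;
    assert!(header.has_name("content-length")); // name matching is case-insensitive
    assert_eq!(header.value, "42");
    Ok(())
}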
/// HTTP message body length as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.3.3
enum HttpMessageLength {
Empty,
ContentLength(usize),
TransferEncoding(String),
}
/// An HTTP response body in binary format.
pub struct BinaryResponse(pub Vec<u8>);
/// An HTTP response body in JSON format.
pub struct JsonResponse(pub serde_json::Value);
/// Interprets bytes from an HTTP response body as binary data.
impl TryFrom<Vec<u8>> for BinaryResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(BinaryResponse(bytes))
}
}
/// Interprets bytes from an HTTP response body as a JSON value.
impl TryFrom<Vec<u8>> for JsonResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(JsonResponse(serde_json::from_slice(&bytes)?))
}
}
#[cfg(test)]
mod endpoint_tests {
use super::HttpEndpoint;
#[test]
fn with_default_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 80);
}
#[test]
fn with_custom_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_port(8080);
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 8080);
}
#[test]
fn with_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_path("/path".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/path");
}
#[test]
fn without_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/");
}
#[test]
fn convert_to_socket_addrs() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
let host = endpoint.host();
let port = endpoint.port();
use std::net::ToSocketAddrs;
match (&endpoint).to_socket_addrs() {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(mut socket_addrs) => {
match socket_addrs.next() {
None => panic!("Expected socket address"),
Some(addr) => {
assert_eq!(addr, (host, port).to_socket_addrs().unwrap().next().unwrap());
assert!(socket_addrs.next().is_none());
}
}
}
}
}
}
#[cfg(test)]
pub(crate) mod client_tests {
use super::*;
use std::io::BufRead;
use std::io::Write;
/// Server for handling HTTP client requests with a stock response.
pub struct HttpServer {
address: std::net::SocketAddr,
handler: std::thread::JoinHandle<()>,
shutdown: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
/// Body of HTTP response messages.
pub enum MessageBody<T: ToString> {
Empty,
Content(T),
ChunkedContent(T),
}
impl HttpServer {
fn responding_with_body<T: ToString>(status: &str, body: MessageBody<T>) -> Self {
let response = match body {
MessageBody::Empty => format!("{}\r\n\r\n", status),
MessageBody::Content(body) => {
let body = body.to_string();
format!(
"{}\r\n\
Content-Length: {}\r\n\
\r\n\
{}", status, body.len(), body)
},
MessageBody::ChunkedContent(body) => {
let mut chuncked_body = Vec::new();
{
use chunked_transfer::Encoder;
let mut encoder = Encoder::with_chunks_size(&mut chuncked_body, 8);
encoder.write_all(body.to_string().as_bytes()).unwrap();
}
format!(
"{}\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
{}", status, String::from_utf8(chuncked_body).unwrap())
},
};
HttpServer::responding_with(response)
}
pub fn responding_with_ok<T: ToString>(body: MessageBody<T>) -> Self {
HttpServer::responding_with_body("HTTP/1.1 200 OK", body)
}
pub fn responding_with_not_found() -> Self {
HttpServer::responding_with_body::<String>("HTTP/1.1 404 Not Found", MessageBody::Empty)
}
pub fn responding_with_server_error<T: ToString>(content: T) -> Self {
let body = MessageBody::Content(content);
HttpServer::responding_with_body("HTTP/1.1 500 Internal Server Error", body)
}
fn responding_with(response: String) -> Self {
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
let address = listener.local_addr().unwrap();
let shutdown = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let shutdown_signaled = std::sync::Arc::clone(&shutdown);
let handler = std::thread::spawn(move || {
for stream in listener.incoming() {
let mut stream = stream.unwrap();
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT)).unwrap();
let lines_read = std::io::BufReader::new(&stream)
.lines()
.take_while(|line| !line.as_ref().unwrap().is_empty())
.count();
if lines_read == 0 { continue; }
for chunk in response.as_bytes().chunks(16) {
if shutdown_signaled.load(std::sync::atomic::Ordering::SeqCst) {
return;
} else {
if let Err(_) = stream.write(chunk) { break; }
if let Err(_) = stream.flush() { break; }
}
}
}
});
Self { address, handler, shutdown }
}
fn shutdown(self) {
self.shutdown.store(true, std::sync::atomic::Ordering::SeqCst);
self.handler.join().unwrap();
}
pub fn endpoint(&self) -> HttpEndpoint {
HttpEndpoint::for_host(self.address.ip().to_string()).with_port(self.address.port())
}
}
#[test]
fn connect_to_unresolvable_host() {
match HttpClient::connect(("example.invalid", 80)) {
Err(e) => {
assert!(e.to_string().contains("failed to lookup address information") ||
e.to_string().contains("No such host"), "{:?}", e);
},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_no_socket_address() {
match HttpClient::connect(&vec![][..]) {
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput),
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_unknown_server() {
match HttpClient::connect(("::", 80)) {
#[cfg(target_os = "windows")]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::AddrNotAvailable),
#[cfg(not(target_os = "windows"))]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::ConnectionRefused),
Ok(_) => panic!("Expected error"),
}
}
}
http.rs

// NOTE: the opening of this file was truncated in this dump; the imports and the
// `TCP_STREAM_TIMEOUT` constant below are a best-guess reconstruction (an assumption, not
// verbatim source) that follows the `tokio` feature gating used in the rest of the file.
use std::convert::TryFrom;
use std::fmt;
use std::io::Read;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
#[cfg(feature = "tokio")]
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt};
#[cfg(feature = "tokio")]
use tokio::net::TcpStream;
#[cfg(not(feature = "tokio"))]
use std::io::{BufRead, Write};
#[cfg(not(feature = "tokio"))]
use std::net::TcpStream;
/// Timeout for operations on TcpStream.
const TCP_STREAM_TIMEOUT: Duration = Duration::from_secs(5);
/// Timeout for reading the first byte of a response. This is separate from the general read
/// timeout as it is not uncommon for Bitcoin Core to be blocked waiting on UTXO cache flushes
/// for upwards of a minute or more. Note that we always retry once when we time out, so the
/// maximum time we allow Bitcoin Core to block for is twice this
/// value.
const TCP_STREAM_RESPONSE_TIMEOUT: Duration = Duration::from_secs(300);
/// Maximum HTTP message header size in bytes.
const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192;
/// Maximum HTTP message body size in bytes. Enough for a hex-encoded block in JSON format and any
/// overhead for HTTP chunked transfer encoding.
const MAX_HTTP_MESSAGE_BODY_SIZE: usize = 2 * 4_000_000 + 32_000;
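// A hedged sketch (not in the original file) of the arithmetic behind
// MAX_HTTP_MESSAGE_BODY_SIZE: hex encoding doubles a byte count, so an assumed worst-case
// ~4 MB block becomes ~8 MB of payload, plus a fixed 32 KB allowance for framing overhead.
#[cfg(test)]
#[allow(dead_code)]
fn _body_size_budget_example() {
    let raw_block_bytes = 4_000_000usize; // assumed worst-case block size
    let hex_encoded = 2 * raw_block_bytes; // two hex characters per byte
    assert_eq!(MAX_HTTP_MESSAGE_BODY_SIZE, hex_encoded + 32_000);
}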
/// Endpoint for interacting with an HTTP-based API.
#[derive(Debug)]
pub struct HttpEndpoint {
host: String,
port: Option<u16>,
path: String,
}
impl HttpEndpoint {
/// Creates an endpoint for the given host and default HTTP port.
pub fn for_host(host: String) -> Self {
Self {
host,
port: None,
path: String::from("/"),
}
}
/// Specifies a port to use with the endpoint.
pub fn with_port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
/// Specifies a path to use with the endpoint.
pub fn with_path(mut self, path: String) -> Self {
self.path = path;
self
}
/// Returns the endpoint host.
pub fn host(&self) -> &str {
&self.host
}
/// Returns the endpoint port.
pub fn port(&self) -> u16 {
match self.port {
None => 80,
Some(port) => port,
}
}
/// Returns the endpoint path.
pub fn path(&self) -> &str {
&self.path
}
}
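// Illustrative usage sketch (not part of the original file); host, port, and path are
// hypothetical values for a local bitcoind REST endpoint. The builder calls simply chain.
#[cfg(test)]
#[allow(dead_code)]
fn _endpoint_builder_example() -> HttpEndpoint {
    HttpEndpoint::for_host("127.0.0.1".to_string())
        .with_port(8332)
        .with_path("/rest".to_string())
}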
impl<'a> std::net::ToSocketAddrs for &'a HttpEndpoint {
type Iter = <(&'a str, u16) as std::net::ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
(self.host(), self.port()).to_socket_addrs()
}
}
/// Client for making HTTP requests.
pub(crate) struct HttpClient {
address: SocketAddr,
stream: TcpStream,
}
impl HttpClient {
/// Opens a connection to an HTTP endpoint.
pub fn connect<E: ToSocketAddrs>(endpoint: E) -> std::io::Result<Self> {
let address = match endpoint.to_socket_addrs()?.next() {
None => {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "could not resolve to any addresses"));
},
Some(address) => address,
};
let stream = std::net::TcpStream::connect_timeout(&address, TCP_STREAM_TIMEOUT)?;
stream.set_read_timeout(Some(TCP_STREAM_TIMEOUT))?;
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT))?;
#[cfg(feature = "tokio")]
let stream = {
stream.set_nonblocking(true)?;
TcpStream::from_std(stream)?
};
Ok(Self { address, stream })
}
/// Sends a `GET` request for a resource identified by `uri` at the `host`.
///
/// Returns the response body in `F` format.
#[allow(dead_code)]
pub async fn get<F>(&mut self, uri: &str, host: &str) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let request = format!(
"GET {} HTTP/1.1\r\n\
Host: {}\r\n\
Connection: keep-alive\r\n\
\r\n", uri, host);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends a `POST` request for a resource identified by `uri` at the `host` using the given HTTP
/// authentication credentials.
///
/// The request body consists of the provided JSON `content`. Returns the response body in `F`
/// format.
#[allow(dead_code)]
pub async fn post<F>(&mut self, uri: &str, host: &str, auth: &str, content: serde_json::Value) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let content = content.to_string();
let request = format!(
"POST {} HTTP/1.1\r\n\
Host: {}\r\n\
Authorization: {}\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}", uri, host, auth, content.len(), content);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends an HTTP request message and reads the response, returning its body. Attempts to
/// reconnect and retry if the connection has been closed.
async fn send_request_with_retry(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
match self.send_request(request).await {
Ok(bytes) => Ok(bytes),
Err(_) => {
// Reconnect and retry on fail. This can happen if the connection was closed after
// the keep-alive limits are reached, or generally if the request timed out due to
// Bitcoin Core being stuck on a long-running operation or its RPC queue being
// full.
// Block 100ms before retrying the request as in many cases the source of the error
// may be persistent for some time.
#[cfg(feature = "tokio")]
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(not(feature = "tokio"))]
std::thread::sleep(Duration::from_millis(100));
*self = Self::connect(self.address)?;
self.send_request(request).await
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line {
() => { read_line!(0) };
($retry_count: expr) => { {
let mut line = String::new();
let mut timeout_count: u64 = 0;
let bytes_read = loop {
#[cfg(feature = "tokio")]
let read_res = reader.read_line(&mut line).await;
#[cfg(not(feature = "tokio"))]
let read_res = reader.read_line(&mut line);
match read_res {
Ok(bytes_read) => break bytes_read,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
timeout_count += 1;
if timeout_count > $retry_count {
return Err(e);
} else {
continue;
}
}
Err(e) => return Err(e),
}
};
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} }
}
// Read and parse status line
// Note that we allow retrying a few times to reach TCP_STREAM_RESPONSE_TIMEOUT.
let status_line = read_line!(TCP_STREAM_RESPONSE_TIMEOUT.as_secs() / TCP_STREAM_TIMEOUT.as_secs())
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
let contents = match message_length {
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
content
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?;
if chunk_header == "0\r\n" {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
}
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
content
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
content
}
}
},
};
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
let error = HttpError {
status_code: status.code.to_string(),
contents,
};
return Err(std::io::Error::new(std::io::ErrorKind::Other, error));
}
Ok(contents)
}
}
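// Illustrative usage sketch (not from the original file): connect to a hypothetical local
// node, issue a GET, and decode the body as JSON. The URI and host below are assumptions.
#[cfg(all(test, feature = "tokio"))]
#[allow(dead_code)]
async fn _client_get_example() -> std::io::Result<serde_json::Value> {
    let endpoint = HttpEndpoint::for_host("127.0.0.1".to_string()).with_port(8332);
    let mut client = HttpClient::connect(&endpoint)?;
    let response = client.get::<JsonResponse>("/rest/chaininfo.json", "127.0.0.1").await?;
    Ok(response.0)
}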
/// HTTP error consisting of a status code and body contents.
#[derive(Debug)]
pub(crate) struct HttpError {
pub(crate) status_code: String,
pub(crate) contents: Vec<u8>,
}
impl std::error::Error for HttpError {}
impl fmt::Display for HttpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let contents = String::from_utf8_lossy(&self.contents);
write!(f, "status_code: {}, contents: {}", self.status_code, contents)
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether the status is successful (i.e., 2xx status class).
fn is_ok(&self) -> bool {
self.code.starts_with('2')
}
}
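// Sketch (not in the original file): how a typical status line flows through the parser.
#[cfg(test)]
#[allow(dead_code)]
fn _status_parse_example() {
    let line = "HTTP/1.1 200 OK".to_string();
    let status = HttpStatus::parse(&line).unwrap();
    assert!(status.is_ok());
}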
/// HTTP response header as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-7
struct HttpHeader<'a> {
name: &'a str,
value: &'a str,
}
impl<'a> HttpHeader<'a> {
/// Parses an HTTP header field as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.2
fn parse(line: &'a String) -> std::io::Result<HttpHeader<'a>> {
let mut tokens = line.splitn(2, ':');
let name = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header name"))?;
let value = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header value"))?
.trim_start();
Ok(Self { name, value })
}
/// Returns whether the header field has the given name.
fn has_name(&self, name: &str) -> bool {
self.name.eq_ignore_ascii_case(name)
}
}
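// Sketch (not in the original file): header names compare case-insensitively and values are
// trimmed of leading whitespace.
#[cfg(test)]
#[allow(dead_code)]
fn _header_parse_example() {
    let line = "Content-Length: 42".to_string();
    let header = HttpHeader::parse(&line).unwrap();
    assert!(header.has_name("content-length"));
    assert_eq!(header.value, "42");
}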
/// HTTP message body length as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.3.3
enum HttpMessageLength {
Empty,
ContentLength(usize),
TransferEncoding(String),
}
/// An HTTP response body in binary format.
pub struct BinaryResponse(pub Vec<u8>);
/// An HTTP response body in JSON format.
pub struct JsonResponse(pub serde_json::Value);
/// Interprets bytes from an HTTP response body as binary data.
impl TryFrom<Vec<u8>> for BinaryResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(BinaryResponse(bytes))
}
}
/// Interprets bytes from an HTTP response body as a JSON value.
impl TryFrom<Vec<u8>> for JsonResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(JsonResponse(serde_json::from_slice(&bytes)?))
}
}
#[cfg(test)]
mod endpoint_tests {
use super::HttpEndpoint;
#[test]
fn with_default_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 80);
}
#[test]
fn with_custom_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_port(8080);
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 8080);
}
#[test]
fn with_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_path("/path".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/path");
}
#[test]
fn without_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/");
}
#[test]
fn convert_to_socket_addrs() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
let host = endpoint.host();
let port = endpoint.port();
use std::net::ToSocketAddrs;
match (&endpoint).to_socket_addrs() {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(mut socket_addrs) => {
match socket_addrs.next() {
None => panic!("Expected socket address"),
Some(addr) => {
assert_eq!(addr, (host, port).to_socket_addrs().unwrap().next().unwrap());
assert!(socket_addrs.next().is_none());
}
}
}
}
}
}
#[cfg(test)]
pub(crate) mod client_tests {
use super::*;
use std::io::BufRead;
use std::io::Write;
/// Server for handling HTTP client requests with a stock response.
pub struct HttpServer {
address: std::net::SocketAddr,
handler: std::thread::JoinHandle<()>,
shutdown: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
/// Body of HTTP response messages.
pub enum MessageBody<T: ToString> {
Empty,
Content(T),
ChunkedContent(T),
}
impl HttpServer {
fn responding_with_body<T: ToString>(status: &str, body: MessageBody<T>) -> Self {
let response = match body {
MessageBody::Empty => format!("{}\r\n\r\n", status),
MessageBody::Content(body) => {
let body = body.to_string();
format!(
"{}\r\n\
Content-Length: {}\r\n\
\r\n\
{}", status, body.len(), body)
},
MessageBody::ChunkedContent(body) => {
let mut chunked_body = Vec::new();
{
use chunked_transfer::Encoder;
let mut encoder = Encoder::with_chunks_size(&mut chunked_body, 8);
encoder.write_all(body.to_string().as_bytes()).unwrap();
}
format!(
"{}\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
{}", status, String::from_utf8(chuncked_body).unwrap())
},
};
HttpServer::responding_with(response)
}
pub fn responding_with_ok<T: ToString>(body: MessageBody<T>) -> Self {
HttpServer::responding_with_body("HTTP/1.1 200 OK", body)
}
pub fn responding_with_not_found() -> Self {
HttpServer::responding_with_body::<String>("HTTP/1.1 404 Not Found", MessageBody::Empty)
}
pub fn responding_with_server_error<T: ToString>(content: T) -> Self {
let body = MessageBody::Content(content);
HttpServer::responding_with_body("HTTP/1.1 500 Internal Server Error", body)
}
fn responding_with(response: String) -> Self {
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
let address = listener.local_addr().unwrap();
let shutdown = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let shutdown_signaled = std::sync::Arc::clone(&shutdown);
let handler = std::thread::spawn(move || {
for stream in listener.incoming() {
let mut stream = stream.unwrap();
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT)).unwrap();
let lines_read = std::io::BufReader::new(&stream)
.lines()
.take_while(|line| !line.as_ref().unwrap().is_empty())
.count();
if lines_read == 0 { continue; }
for chunk in response.as_bytes().chunks(16) {
if shutdown_signaled.load(std::sync::atomic::Ordering::SeqCst) {
return;
} else {
if let Err(_) = stream.write(chunk) { break; }
if let Err(_) = stream.flush() { break; }
}
}
}
});
Self { address, handler, shutdown }
}
fn shutdown(self) {
self.shutdown.store(true, std::sync::atomic::Ordering::SeqCst);
self.handler.join().unwrap();
}
pub fn endpoint(&self) -> HttpEndpoint {
HttpEndpoint::for_host(self.address.ip().to_string()).with_port(self.address.port())
}
}
#[test]
fn connect_to_unresolvable_host() {
match HttpClient::connect(("example.invalid", 80)) {
Err(e) => {
assert!(e.to_string().contains("failed to lookup address information") ||
e.to_string().contains("No such host"), "{:?}", e);
},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_no_socket_address() {
match HttpClient::connect(&vec![][..]) {
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput),
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_unknown_server() {
match HttpClient::connect(("::", 80)) {
#[cfg(target_os = "windows")]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::AddrNotAvailable),
#[cfg(not(target_os = "windows"))]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::ConnectionRefused),
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn connect_with_valid_endpoint() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
match HttpClient::connect(&server.endpoint()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => {},
}
}
#[tokio::test]
async fn read_empty_message() {
let server = HttpServer::responding_with("".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no status line");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_incomplete_message() {
let server = HttpServer::responding_with("HTTP/1.1 200 OK".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_headers() {
let response = format!(
"HTTP/1.1 302 Found\r\n\
Location: {}\r\n\
\r\n", "Z".repeat(MAX_HTTP_MESSAGE_HEADER_SIZE));
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_body() {
let body = "Z".repeat(MAX_HTTP_MESSAGE_BODY_SIZE + 1);
let server = HttpServer::responding_with_ok::<String>(MessageBody::Content(body));
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
assert_eq!(e.get_ref().unwrap().to_string(), "out of range");
},
Ok(_) => panic!("Expected error"),
}
server.shutdown();
}
#[tokio::test]
async fn read_message_with_unsupported_transfer_coding() {
let response = String::from(
"HTTP/1.1 200 OK\r\n\
Transfer-Encoding: gzip\r\n\
\r\n\
foobar");
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput);
assert_eq!(e.get_ref().unwrap().to_string(), "unsupported transfer coding");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_error() {
let server = HttpServer::responding_with_server_error("foo");
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<JsonResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::Other);
let http_error = e.into_inner().unwrap().downcast::<HttpError>().unwrap();
assert_eq!(http_error.status_code, "500");
assert_eq!(http_error.contents, "foo".as_bytes());
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_empty_message_body() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[tokio::test]
async fn read_message_body_with_length() {
let body = "foo bar baz qux".repeat(32);
let content = MessageBody::Content(body.clone());
let server = HttpServer::responding_with_ok::<String>(content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
}
mod.rs

// Buttplug Rust Source Code File - See https://buttplug.io for more info.
//
// Copyright 2016-2019 Nonpolynomial Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//! Handling of communication with Buttplug Server.
pub mod messagesorter;
#[cfg(any(feature = "client-ws", feature = "client-ws-ssl"))]
pub mod websocket;
#[cfg(feature = "server")]
use crate::server::ButtplugServer;
use crate::{
client::internal::{
ButtplugClientFuture, ButtplugClientFutureState, ButtplugClientFutureStateShared,
ButtplugClientMessageStateShared,
},
core::messages::ButtplugMessageUnion,
};
use async_std::sync::{channel, Receiver};
#[cfg(feature = "serialize_json")]
use async_std::{
prelude::{FutureExt, StreamExt},
sync::Sender,
};
use async_trait::async_trait;
#[cfg(feature = "serialize_json")]
use futures::future::Future;
#[cfg(feature = "serialize_json")]
use messagesorter::ClientConnectorMessageSorter;
use std::{error::Error, fmt};
pub type ButtplugClientConnectionState =
ButtplugClientFutureState<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionStateShared =
ButtplugClientFutureStateShared<Result<(), ButtplugClientConnectorError>>;
pub type ButtplugClientConnectionFuture =
ButtplugClientFuture<Result<(), ButtplugClientConnectorError>>;
#[derive(Debug, Clone)]
pub struct ButtplugClientConnectorError {
pub message: String,
}
impl ButtplugClientConnectorError {
pub fn new(msg: &str) -> Self {
Self {
message: msg.to_owned(),
}
}
}
impl fmt::Display for ButtplugClientConnectorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Init Error: {}", self.message)
}
}
impl Error for ButtplugClientConnectorError {
fn description(&self) -> &str {
self.message.as_str()
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
None
}
}
// Not really sure if this is Sync, since there may be state that could get weird
// in connectors implementing this trait, but Send should be OK.
#[async_trait]
pub trait ButtplugClientConnector: Send {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError>;
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared);
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion>;
}
#[cfg(feature = "server")]
pub struct ButtplugEmbeddedClientConnector {
server: ButtplugServer,
recv: Option<Receiver<ButtplugMessageUnion>>,
}
#[cfg(feature = "server")]
impl ButtplugEmbeddedClientConnector {
pub fn new(name: &str, max_ping_time: u32) -> Self {
let (send, recv) = channel(256);
Self {
recv: Some(recv),
server: ButtplugServer::new(&name, max_ping_time, send),
}
}
}
#[cfg(feature = "server")]
#[async_trait]
impl ButtplugClientConnector for ButtplugEmbeddedClientConnector {
async fn connect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn disconnect(&mut self) -> Result<(), ButtplugClientConnectorError> {
Ok(())
}
async fn send(&mut self, msg: &ButtplugMessageUnion, state: &ButtplugClientMessageStateShared) {
let ret_msg = self.server.send_message(msg).await;
let mut waker_state = state.lock().unwrap();
waker_state.set_reply(ret_msg.unwrap());
}
fn get_event_receiver(&mut self) -> Receiver<ButtplugMessageUnion> {
// This will panic if we've already taken the receiver.
self.recv.take().unwrap()
}
}
// The embedded connector is used heavily in the client unit tests, so we can
// assume code coverage there and omit specific tests here.
pub trait ButtplugRemoteClientConnectorSender: Sync + Send {
fn send(&self, msg: ButtplugMessageUnion);
fn close(&self);
}
pub enum ButtplugRemoteClientConnectorMessage {
Sender(Box<dyn ButtplugRemoteClientConnectorSender>),
Connected(),
Text(String),
Error(String),
ClientClose(String),
Close(String),
}
#[cfg(feature = "serialize_json")]
pub struct ButtplugRemoteClientConnectorHelper {
// Channel send/recv pair for applications wanting to send out through the
// remote connection. Receiver will be sent to task on creation.
internal_send: Sender<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>,
internal_recv: Option<Receiver<(ButtplugMessageUnion, ButtplugClientMessageStateShared)>>,
// Channel send/recv pair for remote connection sending information to the
// application. Receiver will be sent to task on creation.
remote_send: Sender<ButtplugRemoteClientConnectorMessage>,
remote_recv: Option<Receiver<ButtplugRemoteClientConnectorMessage>>,
event_send: Option<Sender<ButtplugMessageUnion>>,
}
#[cfg(feature = "serialize_json")]
unsafe impl Send for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
unsafe impl Sync for ButtplugRemoteClientConnectorHelper {}
#[cfg(feature = "serialize_json")]
impl ButtplugRemoteClientConnectorHelper {
pub fn new(event_sender: Sender<ButtplugMessageUnion>) -> Self {
let (internal_send, internal_recv) = channel(256);
let (remote_send, remote_recv) = channel(256);
Self {
event_send: Some(event_sender),
remote_send,
remote_recv: Some(remote_recv),
internal_send,
internal_recv: Some(internal_recv),
}
}
pub fn get_remote_send(&self) -> Sender<ButtplugRemoteClientConnectorMessage> {
self.remote_send.clone()
}
pub async fn send(
&mut self,
msg: &ButtplugMessageUnion,
state: &ButtplugClientMessageStateShared,
) {
self.internal_send.send((msg.clone(), state.clone())).await;
}
pub async fn close(&self) {
// Emulate a close from the connector side, which will cause us to
// close.
self.remote_send
.send(ButtplugRemoteClientConnectorMessage::Close(
"Client requested close.".to_owned(),
))
.await;
}
pub fn get_recv_future(&mut self) -> impl Future {
// Set up a way to get futures in and out of the sorter, which will live
// in our connector task.
let event_send = self.event_send.take().unwrap();
// Remove the receivers we need to move into the task.
let mut remote_recv = self.remote_recv.take().unwrap();
let mut internal_recv = self.internal_recv.take().unwrap();
async move {
let mut sorter = ClientConnectorMessageSorter::default();
// Our in-task remote sender, which is a wrapped version of whatever
// bus specific sender (websocket, tcp, etc) we'll be using.
let mut remote_send: Option<Box<dyn ButtplugRemoteClientConnectorSender>> = None;
enum StreamValue {
NoValue,
Incoming(ButtplugRemoteClientConnectorMessage),
Outgoing((ButtplugMessageUnion, ButtplugClientMessageStateShared)),
}
loop {
// We use two Options instead of an enum because we may never
// get anything.
let mut stream_return: StreamValue = async {
match remote_recv.next().await {
Some(msg) => StreamValue::Incoming(msg),
None => StreamValue::NoValue,
}
}
.race(async {
match internal_recv.next().await {
Some(msg) => StreamValue::Outgoing(msg),
None => StreamValue::NoValue,
}
})
.await;
match stream_return {
StreamValue::NoValue => break,
StreamValue::Incoming(remote_msg) => {
match remote_msg {
ButtplugRemoteClientConnectorMessage::Sender(s) => {
remote_send = Some(s);
}
ButtplugRemoteClientConnectorMessage::Text(t) => {
let array: Vec<ButtplugMessageUnion> =
serde_json::from_str(&t.clone()).unwrap();
for smsg in array {
if !sorter.maybe_resolve_message(&smsg) {
info!("Sending event!");
// Send notification through event channel
event_send.send(smsg).await;
}
}
}
ButtplugRemoteClientConnectorMessage::ClientClose(s) => {
info!("Client closing connection {}", s);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.close();
} else {
panic!("Can't send message yet!");
}
}
ButtplugRemoteClientConnectorMessage::Close(s) => {
info!("Connector closing connection {}", s);
break;
}
_ => {
panic!("UNHANDLED BRANCH");
}
}
}
StreamValue::Outgoing(ref mut buttplug_fut_msg) => {
// Create future sets our message ID, so make sure this
// happens before we send out the message.
sorter.register_future(&mut buttplug_fut_msg.0, &buttplug_fut_msg.1);
if let Some(ref mut remote_sender) = remote_send {
remote_sender.send(buttplug_fut_msg.0.clone());
} else {
panic!("Can't send message yet!");
}
}
}
}
}
}
}
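// Illustrative sketch (not part of the original crate): the minimal shape of a bus-specific
// sender driven by the helper task above. A real connector (e.g. the websocket one) would
// serialize each message and write it to its transport; this hypothetical one only logs.
#[allow(dead_code)]
struct LoggingConnectorSender;
impl ButtplugRemoteClientConnectorSender for LoggingConnectorSender {
    fn send(&self, msg: ButtplugMessageUnion) {
        let _ = msg; // a real sender would serialize this and transmit it
        info!("LoggingConnectorSender: dropping message (sketch only)");
    }
    fn close(&self) {
        info!("LoggingConnectorSender: close requested");
    }
}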
main.rs

// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn start(ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name` fuzzy form of a package
// identifier to ensure that update strategies will work if desired.
PackageArchive::new(pkg_ident_str).ident()?
} else | ;
// To allow multiple instances of Habitat application in Kubernetes,
// random suffix in metadata_name is needed.
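// Illustration (hypothetical output, since the suffix is random): for a
// package named "redis" this might produce "redis-x3k9a", i.e. four
// lowercase alphanumerics plus one trailing lowercase letter.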
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
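// For illustration, a spec like "db:postgresql:prod" (hypothetical values)
// splits into name="db", service="postgresql", group="prod"; anything with
// fewer than three fields is rejected below.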
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
.long("bind")
.short("b")
.multiple(true)
.number_of_values(1)
.help(
"Bind to another service to form a producer/consumer relationship, \
specified as name:service:group",
),
)
.arg(
Arg::with_name("NO_DOCKER_IMAGE")
.long("no-docker-image")
.short("d")
.help(
"Disable creation of the Docker image and only create a Kubernetes manifest",
),
)
.arg(
Arg::with_name("PKG_IDENT_OR_ARTIFACT")
.value_name("PKG_IDENT_OR_ARTIFACT")
.required(true)
.help("Habitat package identifier (ex: acme/redis)"),
)
}
fn valid_natural_number(val: String) -> result::Result<(), String> {
match val.parse::<u32>() {
Ok(_) => Ok(()),
Err(_) => Err(format!("{} is not a natural number", val)),
}
}
| {
PackageIdent::from_str(pkg_ident_str)?
} | conditional_block |
main.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn | (ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name` fuzzy form of a package
// identifier to ensure that update strategies will work if desired.
PackageArchive::new(pkg_ident_str).ident()?
} else {
PackageIdent::from_str(pkg_ident_str)?
};
// To allow multiple instances of Habitat application in Kubernetes,
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
.long("bind")
.short("b")
.multiple(true)
.number_of_values(1)
.help(
"Bind to another service to form a producer/consumer relationship, \
specified as name:service:group",
),
)
.arg(
Arg::with_name("NO_DOCKER_IMAGE")
.long("no-docker-image")
.short("d")
.help(
"Disable creation of the Docker image and only create a Kubernetes manifest",
),
)
.arg(
Arg::with_name("PKG_IDENT_OR_ARTIFACT")
.value_name("PKG_IDENT_OR_ARTIFACT")
.required(true)
.help("Habitat package identifier (ex: acme/redis)"),
)
}
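// Worked examples for the validator below (illustrative): "3" and "0"
// parse as u32 and pass, while "-1" and "abc" fail the parse and are
// rejected with the "not a natural number" message.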
fn valid_natural_number(val: String) -> result::Result<(), String> {
match val.parse::<u32>() {
Ok(_) => Ok(()),
Err(_) => Err(format!("{} is not a natural number", val)),
}
}
| start | identifier_name |
main.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
fn start(ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name` fuzzy form of a package
// identifier to ensure that update strategies will work if desired.
PackageArchive::new(pkg_ident_str).ident()?
} else {
PackageIdent::from_str(pkg_ident_str)?
};
// To allow multiple instances of Habitat application in Kubernetes,
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
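// Illustration (hypothetical render): when optional values such as
// `service_group` are unset, Handlebars can emit blank lines; the filter
// above strips them so the generated manifest stays compact.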
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
.long("bind")
.short("b") | .multiple(true)
.number_of_values(1)
.help(
"Bind to another service to form a producer/consumer relationship, \
specified as name:service:group",
),
)
.arg(
Arg::with_name("NO_DOCKER_IMAGE")
.long("no-docker-image")
.short("d")
.help(
"Disable creation of the Docker image and only create a Kubernetes manifest",
),
)
.arg(
Arg::with_name("PKG_IDENT_OR_ARTIFACT")
.value_name("PKG_IDENT_OR_ARTIFACT")
.required(true)
.help("Habitat package identifier (ex: acme/redis)"),
)
}
fn valid_natural_number(val: String) -> result::Result<(), String> {
match val.parse::<u32>() {
Ok(_) => Ok(()),
Err(_) => Err(format!("{} is not a natural number", val)),
}
} | random_line_split |
|
main.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate clap;
extern crate env_logger;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate habitat_pkg_export_docker as export_docker;
extern crate handlebars;
extern crate rand;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use handlebars::Handlebars;
use std::env;
use std::result;
use std::str::FromStr;
use std::io::prelude::*;
use std::io;
use std::fs::File;
use std::path::Path;
use hcore::channel;
use hcore::PROGRAM_NAME;
use hcore::url as hurl;
use hcore::env as henv;
use hcore::package::{PackageArchive, PackageIdent};
use common::ui::{Coloring, UI, NOCOLORING_ENVVAR, NONINTERACTIVE_ENVVAR};
use rand::Rng;
use export_docker::{Cli, Credentials, BuildSpec, Naming, Result};
// Synced with the version of the Habitat operator.
pub const VERSION: &'static str = "0.1.0";
// Kubernetes manifest template
const MANIFESTFILE: &'static str = include_str!("../defaults/KubernetesManifest.hbs");
const BINDFILE: &'static str = include_str!("../defaults/KubernetesBind.hbs");
#[derive(Debug, Fail)]
enum Error {
#[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
}
fn main() {
env_logger::init().unwrap();
let mut ui = get_ui();
if let Err(e) = start(&mut ui) {
let _ = ui.fatal(e);
std::process::exit(1)
}
}
fn get_ui() -> UI {
let isatty = if henv::var(NONINTERACTIVE_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Some(false)
} else {
None
};
let coloring = if henv::var(NOCOLORING_ENVVAR)
.map(|val| val == "true")
.unwrap_or(false)
{
Coloring::Never
} else {
Coloring::Auto
};
UI::default_with(coloring, isatty)
}
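// Behavior sketch, read off the branches above (not upstream docs): setting
// NONINTERACTIVE_ENVVAR to "true" forces isatty to Some(false), otherwise
// the UI probes the terminal itself; setting NOCOLORING_ENVVAR to "true"
// pins coloring to Never instead of Auto.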
fn start(ui: &mut UI) -> Result<()> {
let m = cli().get_matches();
debug!("clap cli args: {:?}", m);
if !m.is_present("NO_DOCKER_IMAGE") {
gen_docker_img(ui, &m)?;
}
gen_k8s_manifest(ui, &m)
}
fn gen_docker_img(ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let default_channel = channel::default();
let default_url = hurl::default_bldr_url();
let spec = BuildSpec::new_from_cli_matches(&matches, &default_channel, &default_url);
let naming = Naming::new_from_cli_matches(&matches);
let docker_image = export_docker::export(ui, spec, &naming)?;
docker_image.create_report(
ui,
env::current_dir()?.join("results"),
)?;
if matches.is_present("PUSH_IMAGE") {
let credentials = Credentials::new(
naming.registry_type,
matches.value_of("REGISTRY_USERNAME").unwrap(),
matches.value_of("REGISTRY_PASSWORD").unwrap(),
)?;
docker_image.push(ui, &credentials, naming.registry_url)?;
}
if matches.is_present("RM_IMAGE") {
docker_image.rm(ui)?;
}
Ok(())
}
fn gen_k8s_manifest(_ui: &mut UI, matches: &clap::ArgMatches) -> Result<()> {
let count = matches.value_of("COUNT").unwrap_or("1");
let topology = matches.value_of("TOPOLOGY").unwrap_or("standalone");
let group = matches.value_of("GROUP");
let config_secret_name = matches.value_of("CONFIG_SECRET_NAME");
let ring_secret_name = matches.value_of("RING_SECRET_NAME");
// clap ensures that we do have the mandatory args so unwrap() is fine here
let pkg_ident_str = matches.value_of("PKG_IDENT_OR_ARTIFACT").unwrap();
let pkg_ident = if Path::new(pkg_ident_str).is_file() {
// We're going to use the `$pkg_origin/$pkg_name` fuzzy form of a package
// identifier to ensure that update strategies will work if desired.
PackageArchive::new(pkg_ident_str).ident()?
} else {
PackageIdent::from_str(pkg_ident_str)?
};
// To allow multiple instances of Habitat application in Kubernetes,
// random suffix in metadata_name is needed.
let metadata_name = format!(
"{}-{}{}",
pkg_ident.name,
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() || c.is_numeric())
.take(4)
.collect::<String>(),
rand::thread_rng()
.gen_ascii_chars()
.filter(|c| c.is_lowercase() && !c.is_numeric())
.take(1)
.collect::<String>()
);
let image = match matches.value_of("IMAGE_NAME") {
Some(i) => i.to_string(),
None => pkg_ident.origin + "/" + &pkg_ident.name,
};
let bind = matches.value_of("BIND");
let json = json!({
"metadata_name": metadata_name,
"habitat_name": pkg_ident.name,
"image": image,
"count": count,
"service_topology": topology,
"service_group": group,
"config_secret_name": config_secret_name,
"ring_secret_name": ring_secret_name,
"bind": bind,
});
let mut write: Box<Write> = match matches.value_of("OUTPUT") {
Some(o) if o != "-" => Box::new(File::create(o)?),
_ => Box::new(io::stdout()),
};
let r = Handlebars::new().template_render(MANIFESTFILE, &json)?;
let mut out = r.lines().filter(|l| *l != "").collect::<Vec<_>>().join(
"\n",
) + "\n";
if let Some(binds) = matches.values_of("BIND") {
for bind in binds {
let split: Vec<&str> = bind.split(":").collect();
if split.len() < 3 {
return Err(Error::InvalidBindSpec(bind.to_string()).into());
}
let json = json!({
"name": split[0],
"service": split[1],
"group": split[2],
});
out += &Handlebars::new().template_render(BINDFILE, &json)?;
}
}
write.write(out.as_bytes())?;
Ok(())
}
fn cli<'a, 'b>() -> App<'a, 'b> {
let name: &str = &*PROGRAM_NAME;
let about = "Creates a Docker image and Kubernetes manifest for a Habitat package. Habitat \
operator must be deployed within the Kubernetes cluster before the generated \
manifest can be applied to this cluster.";
let app = Cli::new(name, about)
.add_base_packages_args()
.add_builder_args()
.add_tagging_args()
.add_publishing_args()
.app;
app.arg(
Arg::with_name("OUTPUT")
.value_name("OUTPUT")
.long("output")
.short("o")
.help(
"Name of manifest file to create. Pass '-' for stdout (default: -)",
),
).arg(
Arg::with_name("COUNT")
.value_name("COUNT")
.long("count")
.validator(valid_natural_number)
.help("Count is the number of desired instances"),
)
.arg(
Arg::with_name("TOPOLOGY")
.value_name("TOPOLOGY")
.long("topology")
.short("t")
.possible_values(&["standalone", "leader"])
.help(
"A topology describes the intended relationship between peers \
within a Habitat service group. Specify either standalone or leader \
topology (default: standalone)",
),
)
.arg(
Arg::with_name("GROUP")
.value_name("GROUP")
.long("service-group")
.short("g")
.help(
"group is a logical grouping of services with the same package and \
topology type connected together in a ring (default: default)",
),
)
.arg(
Arg::with_name("CONFIG_SECRET_NAME")
.value_name("CONFIG_SECRET_NAME")
.long("config-secret-name")
.short("n")
.help(
"name of the Kubernetes Secret containing the config file - \
user.toml - that the user has previously created. Habitat will \
use it for initial configuration of the service",
),
)
.arg(
Arg::with_name("RING_SECRET_NAME")
.value_name("RING_SECRET_NAME")
.long("ring-secret-name")
.short("r")
.help(
"name of the Kubernetes Secret that contains the ring key, which \
encrypts the communication between Habitat supervisors",
),
)
.arg(
Arg::with_name("BIND")
.value_name("BIND")
.long("bind")
.short("b")
.multiple(true)
.number_of_values(1)
.help(
"Bind to another service to form a producer/consumer relationship, \
specified as name:service:group",
),
)
.arg(
Arg::with_name("NO_DOCKER_IMAGE")
.long("no-docker-image")
.short("d")
.help(
"Disable creation of the Docker image and only create a Kubernetes manifest",
),
)
.arg(
Arg::with_name("PKG_IDENT_OR_ARTIFACT")
.value_name("PKG_IDENT_OR_ARTIFACT")
.required(true)
.help("Habitat package identifier (ex: acme/redis)"),
)
}
fn valid_natural_number(val: String) -> result::Result<(), String> | {
match val.parse::<u32>() {
Ok(_) => Ok(()),
Err(_) => Err(format!("{} is not a natural number", val)),
}
} | identifier_body |
|
aes.rs | //! Interface to the AES peripheral.
//!
//! Note that the AES peripheral is only available on some MCUs in the L0/L1/L2
//! families. Check the datasheet for more information.
//!
//! See STM32L0x2 reference manual, chapter 18.
use core::{
convert::TryInto,
ops::{Deref, DerefMut},
pin::Pin,
};
use as_slice::{AsMutSlice, AsSlice};
use nb::block;
use void::Void;
use crate::{
dma,
pac::{
self,
aes::{self, cr},
},
rcc::{Enable, Rcc, Reset},
};
/// Entry point to the AES API
pub struct AES {
aes: pac::AES,
}
impl AES {
/// Initialize the AES peripheral
pub fn new(aes: pac::AES, rcc: &mut Rcc) -> Self {
// Enable peripheral clock
pac::AES::enable(rcc);
// Reset peripheral
pac::AES::reset(rcc);
// Configure peripheral
aes.cr.write(|w| {
// Enable DMA
w.dmaouten().set_bit();
w.dmainen().set_bit();
// Disable interrupts
w.errie().clear_bit();
w.ccfie().clear_bit()
});
Self { aes }
}
/// Enable the AES peripheral
///
/// Returns a [`Stream`] instance which can be used to encrypt or decrypt
/// data using the mode selected with the `mode` argument.
///
/// Consumes the `AES` instance. You can get it back later once you're done
/// with the `Stream`, using [`Stream::disable`].
pub fn enable<M>(self, mode: M, key: [u32; 4]) -> Stream
where
M: Mode,
{
// Write key. This is safe, as the register accepts the full range of
// `u32`.
self.aes.keyr0.write(|w| w.bits(key[0]));
self.aes.keyr1.write(|w| w.bits(key[1]));
self.aes.keyr2.write(|w| w.bits(key[2]));
self.aes.keyr3.write(|w| w.bits(key[3]));
mode.prepare(&self.aes);
self.aes.cr.modify(|_, w| {
// Select mode
mode.select(w);
// Configure for stream of bytes
// Safe, as we write a valid byte pattern.
w.datatype().bits(0b10);
// Enable peripheral
w.en().set_bit()
});
Stream {
aes: self,
rx: Rx(()),
tx: Tx(()),
}
}
}
/// An active encryption/decryption stream
///
/// You can get an instance of this struct by calling [`AES::enable`].
pub struct Stream {
aes: AES,
/// Can be used to write data to the AES peripheral
pub tx: Tx,
/// Can be used to read data from the AES peripheral
pub rx: Rx,
}
impl Stream {
/// Processes one block of data
///
/// Writes one block of data to the AES peripheral, waits until it has been
/// processed, then reads the processed block and returns it.
///
/// Whether this is encryption or decryption depends on the mode that was
/// selected when this `Stream` was created.
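///
/// # Example
///
/// A minimal sketch, assuming an `aes` handle and an all-zero key (both
/// placeholders, not values from this crate's docs):
///
/// ```ignore
/// let mut stream = aes.enable(Mode::ecb_encrypt(), [0u32; 4]);
/// let plaintext: Block = [0u8; 16];
/// let ciphertext = stream.process(&plaintext)?;
/// let aes = stream.disable();
/// ```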
pub fn process(&mut self, input: &Block) -> Result<Block, Error> {
self.tx.write(input)?;
// Can't panic. Error value of `Rx::read` is `Void`.
let output = block!(self.rx.read()).unwrap();
Ok(output)
}
/// Disable the AES peripheral
///
/// Consumes the stream and returns the disabled [`AES`] instance. Call this
/// method when you're done encrypting/decrypting data. You can then create
/// another `Stream` using [`AES::enable`].
pub fn disable(self) -> AES {
// Disable AES
self.aes.aes.cr.modify(|_, w| w.en().clear_bit());
self.aes
}
}
/// Can be used to write data to the AES peripheral
///
/// You can access this struct via [`Stream`].
pub struct Tx(());
impl Tx {
/// Write a block to the AES peripheral
///
/// Please note that only one block can be written before you need to read
/// the processed block back using [`Rx::read`]. Calling this method
/// multiple times without calling [`Rx::read`] in between will result in
/// an error being returned.
pub fn write(&mut self, block: &Block) -> Result<(), Error> {
// Get access to the registers. This is safe, because:
// - `Tx` has exclusive access to DINR.
// - We only use SR for an atomic read.
let (dinr, sr) = unsafe {
let aes = &*pac::AES::ptr();
(&aes.dinr, &aes.sr)
};
// Write input data to DINR
//
// See STM32L0x2 reference manual, section 18.4.10.
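// Editor's note on the loop below (derived from the code, not the manual):
// the block is fed as four 32-bit words, starting with bytes 12..16 and
// ending with bytes 0..4, each word assembled little-endian to match the
// byte-oriented DATATYPE (0b10) configured in `AES::enable`.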
for i in (0..4).rev() {
dinr.write(|w| {
let i = i * 4;
let word = &block[i..i + 4];
// Can't panic, because `word` is 4 bytes long.
let word = word.try_into().unwrap();
let word = u32::from_le_bytes(word);
w.bits(word)
});
}
// Was there an unexpected write? If so, a computation is already
// ongoing and the user needs to call `Rx::read` next. If I understand
// the documentation correctly, our writes to the register above
// shouldn't have affected the ongoing computation.
if sr.read().wrerr().bit_is_set() {
return Err(Error::Busy);
}
Ok(())
}
/// Writes the provided buffer to the AES peripheral using DMA
///
/// Returns a DMA transfer that is ready to be started. It needs to be
/// started for anything to happen.
///
/// # Panics
///
/// Panics, if the buffer length is larger than `u16::max_value()`.
///
/// The AES peripheral works with 128-bit blocks, which means the buffer
/// length must be a multiple of 16. Panics, if this is not the case.
///
/// Panics, if the buffer is not aligned to a word boundary.
pub fn | <Buffer, Channel>(
self,
dma: &mut dma::Handle,
buffer: Pin<Buffer>,
channel: Channel,
) -> Transfer<Self, Channel, Buffer, dma::Ready>
where
Self: dma::Target<Channel>,
Buffer: Deref + 'static,
Buffer::Target: AsSlice<Element = u8>,
Channel: dma::Channel,
{
assert!(buffer.as_slice().len() % 16 == 0);
// Safe, because we're only taking the address of a register.
let address = &unsafe { &*pac::AES::ptr() }.dinr as *const _ as u32;
// Safe, because the traits bounds of this method guarantee that
// `buffer` can be read from.
unsafe {
Transfer::new(
dma,
self,
channel,
buffer,
address,
// This priority should be lower than the priority of the
// transfer created in `read_all`. I'm not sure how relevant
// that is in practice, but it makes sense, and as I've seen a
// comment to that effect in ST's HAL code, I'd rather be
// careful than risk weird bugs.
dma::Priority::high(),
dma::Direction::memory_to_peripheral(),
)
}
}
}
/// Can be used to read data from the AES peripheral
///
/// You can access this struct via [`Stream`].
pub struct Rx(());
impl Rx {
pub fn read(&mut self) -> nb::Result<Block, Void> {
// Get access to the registers. This is safe, because:
// - We only use SR for an atomic read.
// - `Rx` has exclusive access to DOUTR.
// - While it exists, `Rx` has exclusive access to CR.
let (sr, doutr, cr) = unsafe {
let aes = &*pac::AES::ptr();
(&aes.sr, &aes.doutr, &aes.cr)
};
// Is a computation complete?
if sr.read().ccf().bit_is_clear() {
return Err(nb::Error::WouldBlock);
}
// Read output data from DOUTR
//
// See STM32L0x2 reference manual, section 18.4.10.
let mut block = [0; 16];
for i in (0..4).rev() {
let i = i * 4;
let word = doutr.read().bits();
let word = word.to_le_bytes();
(block[i..i + 4]).copy_from_slice(&word);
}
// Clear CCF flag
cr.modify(|_, w| w.ccfc().set_bit());
Ok(block)
}
/// Reads data from the AES peripheral into the provided buffer using DMA
///
/// Returns a DMA transfer that is ready to be started. It needs to be
/// started for anything to happen.
///
/// # Panics
///
/// Panics, if the buffer length is larger than `u16::max_value()`.
///
/// The AES peripheral works with 128-bit blocks, which means the buffer
/// length must be a multiple of 16. Panics, if this is not the case.
///
/// Panics, if the buffer is not aligned to a word boundary.
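///
/// A round-trip sketch (the `dma`, `channel`, and pinned buffer handles are
/// assumptions from this crate's DMA setup, not a tested example):
///
/// ```ignore
/// let buffer = Pin::new(Box::new([0u8; 32])); // two 128-bit blocks
/// let transfer = rx.read_all(&mut dma, buffer, channel).start();
/// let resources = transfer.wait().ok().unwrap();
/// ```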
pub fn read_all<Buffer, Channel>(
self,
dma: &mut dma::Handle,
buffer: Pin<Buffer>,
channel: Channel,
) -> Transfer<Self, Channel, Buffer, dma::Ready>
where
Self: dma::Target<Channel>,
Buffer: DerefMut + 'static,
Buffer::Target: AsMutSlice<Element = u8>,
Channel: dma::Channel,
{
assert!(buffer.as_slice().len() % 16 == 0);
// Safe, because we're only taking the address of a register.
let address = &unsafe { &*pac::AES::ptr() }.doutr as *const _ as u32;
// Safe, because the traits bounds of this method guarantee that
// `buffer` can be written to.
unsafe {
Transfer::new(
dma,
self,
channel,
buffer,
address,
// This priority should be higher than the priority of the
// transfer created in `write_all`. I'm not sure how relevant
// that is in practice, but it makes sense, and as I've seen a
// comment to that effect in ST's HAL code, I'd rather be
// careful than risk weird bugs.
dma::Priority::very_high(),
dma::Direction::peripheral_to_memory(),
)
}
}
}
/// Implemented for all chaining modes
///
/// This is mostly an internal trait. The user won't typically need to use or
/// implement this, except to call the various static methods that create a
/// mode.
pub trait Mode {
fn prepare(&self, _: &aes::RegisterBlock);
fn select(&self, _: &mut cr::W);
}
impl dyn Mode {
/// Use this with [`AES::enable`] to encrypt using ECB
pub fn ecb_encrypt() -> ECB<Encrypt> {
ECB(Encrypt)
}
/// Use this with [`AES::enable`] to decrypt using ECB
pub fn ecb_decrypt() -> ECB<Decrypt> {
ECB(Decrypt)
}
/// Use this with [`AES::enable`] to encrypt using CBC
pub fn cbc_encrypt(init_vector: [u32; 4]) -> CBC<Encrypt> {
CBC {
_mode: Encrypt,
init_vector,
}
}
/// Use this with [`AES::enable`] to decrypt using CBC
pub fn cbc_decrypt(init_vector: [u32; 4]) -> CBC<Decrypt> {
CBC {
_mode: Decrypt,
init_vector,
}
}
/// Use this with [`AES::enable`] to encrypt or decrypt using CTR
pub fn ctr(init_vector: [u32; 3]) -> CTR {
CTR { init_vector }
}
}
/// The ECB (electronic code book) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using ECB
/// mode. `Mode` must be either [`Encrypt`] or [`Decrypt`].
///
/// You can get an instance of this struct via [`Mode::ecb_encrypt`] or
/// [`Mode::ecb_decrypt`].
pub struct ECB<Mode>(Mode);
impl Mode for ECB<Encrypt> {
fn prepare(&self, _: &aes::RegisterBlock) {
// Nothing to do.
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select ECB chaining mode
w.chmod().bits(0b00);
// Select encryption mode
w.mode().bits(0b00);
}
}
}
impl Mode for ECB<Decrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
derive_key(aes)
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select ECB chaining mode
w.chmod().bits(0b00);
// Select decryption mode
w.mode().bits(0b10);
}
}
}
/// The CBC (cipher block chaining) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using CBC
/// mode. `Mode` must be either [`Encrypt`] or [`Decrypt`].
///
/// You can get an instance of this struct via [`Mode::cbc_encrypt`] or
/// [`Mode::cbc_decrypt`].
pub struct CBC<Mode> {
_mode: Mode,
init_vector: [u32; 4],
}
impl Mode for CBC<Encrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
// Safe, as the registers accept the full range of `u32`.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(self.init_vector[3]));
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select CBC chaining mode
w.chmod().bits(0b01);
// Select encryption mode
w.mode().bits(0b00);
}
}
}
impl Mode for CBC<Decrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
derive_key(aes);
// Safe, as the registers accept the full range of `u32`.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(self.init_vector[3]));
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select CBC chaining mode
w.chmod().bits(0b01);
// Select decryption mode
w.mode().bits(0b10);
}
}
}
/// The CTR (counter) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using CTR
/// mode. In CTR mode, encryption and decryption are technically identical, so
/// further qualification is not required.
///
/// You can get an instance of this struct via [`Mode::ctr`].
pub struct CTR {
init_vector: [u32; 3],
}
impl Mode for CTR {
fn prepare(&self, aes: &aes::RegisterBlock) {
// Initialize initialization vector
//
// See STM32L0x2 reference manual, table 78 on page 408.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(0x0001)); // counter
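// Layout recap (as written above): IVR3..IVR1 carry the 96-bit nonce from
// `init_vector`, and IVR0 seeds the 32-bit block counter with 1.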
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select Counter Mode (CTR) mode
w.chmod().bits(0b10);
// These bits mean encryption mode, but in CTR mode,
// encryption and decryption are technically identical, so this
// is fine for either mode.
w.mode().bits(0b00);
}
}
}
fn derive_key(aes: &aes::RegisterBlock) {
// Select key derivation mode. This is safe, as we're writing a valid bit
// pattern.
aes.cr.modify(|_, w| w.mode().bits(0b01));
// Enable the peripheral. It will be automatically disabled again once the
// key has been derived.
aes.cr.modify(|_, w| w.en().set_bit());
// Wait for key derivation to finish
while aes.sr.read().ccf().bit_is_clear() {}
}
/// Used to identify encryption mode
pub struct Encrypt;
/// Used to identify decryption mode
pub struct Decrypt;
/// A 128-bit block
///
/// The AES peripheral processes 128 bits at a time, so this represents one unit
/// of processing.
pub type Block = [u8; 16];
#[derive(Debug)]
pub enum Error {
/// AES peripheral is busy
Busy,
}
/// Wrapper around a [`dma::Transfer`].
///
/// This struct is required, because under the hood, the AES peripheral only
/// supports 32-bit word DMA transfers, while the public API works with byte
/// slices.
pub struct Transfer<Target, Channel, Buffer, State> {
buffer: Pin<Buffer>,
inner: dma::Transfer<Target, Channel, dma::PtrBuffer<u32>, State>,
}
impl<Target, Channel, Buffer> Transfer<Target, Channel, Buffer, dma::Ready>
where
Target: dma::Target<Channel>,
Channel: dma::Channel,
Buffer: Deref + 'static,
Buffer::Target: AsSlice<Element = u8>,
{
/// Create a new instance of `Transfer`
///
/// # Safety
///
/// If this is used to prepare a memory-to-peripheral transfer, the caller
/// must make sure that the buffer can be read from safely.
///
/// If this is used to prepare a peripheral-to-memory transfer, the caller
/// must make sure that the buffer can be written to safely.
///
/// The caller must guarantee that the buffer length is a multiple of 4.
unsafe fn new(
dma: &mut dma::Handle,
target: Target,
channel: Channel,
buffer: Pin<Buffer>,
address: u32,
priority: dma::Priority,
dir: dma::Direction,
) -> Self {
let num_words = buffer.as_slice().len() / 4;
let transfer = dma::Transfer::new(
dma,
target,
channel,
// The caller must guarantee that our length is a multiple of 4, so
// this should be fine.
Pin::new(dma::PtrBuffer {
ptr: buffer.as_slice().as_ptr() as *const u32,
len: num_words,
}),
num_words,
address,
priority,
dir,
false,
);
Self {
buffer,
inner: transfer,
}
}
/// Enables the provided interrupts
///
/// This setting only affects this transfer. It doesn't affect transfer on
/// other channels, or subsequent transfers on the same channel.
pub fn enable_interrupts(&mut self, interrupts: dma::Interrupts) {
self.inner.enable_interrupts(interrupts)
}
/// Start the DMA transfer
///
/// Consumes this instance of `Transfer` and returns a new one, with its
/// state changes to indicate that the transfer has been started.
pub fn start(self) -> Transfer<Target, Channel, Buffer, dma::Started> {
Transfer {
buffer: self.buffer,
inner: self.inner.start(),
}
}
}
impl<Target, Channel, Buffer> Transfer<Target, Channel, Buffer, dma::Started>
where
Channel: dma::Channel,
{
/// Indicates whether the transfer is still ongoing
pub fn is_active(&self) -> bool {
self.inner.is_active()
}
/// Waits for the transfer to finish and returns the owned resources
///
/// This function will busily wait until the transfer is finished. If you
/// don't want this, please call this function only once you know that the
/// transfer has finished.
///
/// This function will return immediately, if [`Transfer::is_active`]
/// returns `false`.
pub fn wait(self) -> dma::TransferResourcesResult<Target, Channel, Buffer> {
let (res, err) = match self.inner.wait() {
Ok(res) => (res, None),
Err((res, err)) => (res, Some(err)),
};
let res = dma::TransferResources {
target: res.target,
channel: res.channel,
buffer: self.buffer,
};
match err {
None => Ok(res),
Some(err) => Err((res, err)),
}
}
}
| write_all | identifier_name |
aes.rs | //! Interface to the AES peripheral.
//!
//! Note that the AES peripheral is only available on some MCUs in the L0/L1/L2
//! families. Check the datasheet for more information.
//!
//! See STM32L0x2 reference manual, chapter 18.
use core::{
convert::TryInto,
ops::{Deref, DerefMut},
pin::Pin,
};
use as_slice::{AsMutSlice, AsSlice};
use nb::block;
use void::Void;
use crate::{
dma,
pac::{
self,
aes::{self, cr},
},
rcc::{Enable, Rcc, Reset},
};
/// Entry point to the AES API
pub struct AES {
aes: pac::AES,
}
impl AES {
/// Initialize the AES peripheral
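///
/// A construction sketch (device takeover follows the usual stm32l0xx-hal
/// pattern; the names here are assumptions, not taken from this file):
///
/// ```ignore
/// let dp = pac::Peripherals::take().unwrap();
/// let mut rcc = dp.RCC.freeze(rcc::Config::hsi16());
/// let aes = AES::new(dp.AES, &mut rcc);
/// ```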
pub fn new(aes: pac::AES, rcc: &mut Rcc) -> Self {
// Enable peripheral clock
pac::AES::enable(rcc);
// Reset peripheral
pac::AES::reset(rcc);
// Configure peripheral
aes.cr.write(|w| {
// Enable DMA
w.dmaouten().set_bit();
w.dmainen().set_bit();
// Disable interrupts
w.errie().clear_bit();
w.ccfie().clear_bit()
});
Self { aes }
}
/// Enable the AES peripheral
///
/// Returns a [`Stream`] instance which can be used to encrypt or decrypt
/// data using the mode selected with the `mode` argument.
///
/// Consumes the `AES` instance. You can get it back later once you're done
/// with the `Stream`, using [`Stream::disable`].
pub fn enable<M>(self, mode: M, key: [u32; 4]) -> Stream
where
M: Mode,
{
// Write key. This is safe, as the register accepts the full range of
// `u32`.
self.aes.keyr0.write(|w| w.bits(key[0]));
self.aes.keyr1.write(|w| w.bits(key[1]));
self.aes.keyr2.write(|w| w.bits(key[2]));
self.aes.keyr3.write(|w| w.bits(key[3]));
mode.prepare(&self.aes);
self.aes.cr.modify(|_, w| {
// Select mode
mode.select(w);
// Configure for stream of bytes
// Safe, as we write a valid byte pattern.
w.datatype().bits(0b10);
// Enable peripheral
w.en().set_bit()
});
Stream {
aes: self, | }
}
/// An active encryption/decryption stream
///
/// You can get an instance of this struct by calling [`AES::enable`].
pub struct Stream {
aes: AES,
/// Can be used to write data to the AES peripheral
pub tx: Tx,
/// Can be used to read data from the AES peripheral
pub rx: Rx,
}
impl Stream {
/// Processes one block of data
///
/// Writes one block of data to the AES peripheral, waits until it has been
/// processed, then reads the processed block and returns it.
///
/// Whether this is encryption or decryption depends on the mode that was
/// selected when this `Stream` was created.
pub fn process(&mut self, input: &Block) -> Result<Block, Error> {
self.tx.write(input)?;
// Can't panic. Error value of `Rx::read` is `Void`.
let output = block!(self.rx.read()).unwrap();
Ok(output)
}
/// Disable the AES peripheral
///
/// Consumes the stream and returns the disabled [`AES`] instance. Call this
/// method when you're done encrypting/decrypting data. You can then create
/// another `Stream` using [`AES::enable`].
pub fn disable(self) -> AES {
// Disable AES
self.aes.aes.cr.modify(|_, w| w.en().clear_bit());
self.aes
}
}
/// Can be used to write data to the AES peripheral
///
/// You can access this struct via [`Stream`].
pub struct Tx(());
impl Tx {
/// Write a block to the AES peripheral
///
/// Please note that only one block can be written before you need to read
/// the processed block back using [`Rx::read`]. Calling this method
/// multiple times without calling [`Rx::read`] in between will result in
/// an error being returned.
pub fn write(&mut self, block: &Block) -> Result<(), Error> {
// Get access to the registers. This is safe, because:
// - `Tx` has exclusive access to DINR.
// - We only use SR for an atomic read.
let (dinr, sr) = unsafe {
let aes = &*pac::AES::ptr();
(&aes.dinr, &aes.sr)
};
// Write input data to DINR
//
// See STM32L0x2 reference manual, section 18.4.10.
for i in (0..4).rev() {
dinr.write(|w| {
let i = i * 4;
let word = &block[i..i + 4];
// Can't panic, because `word` is 4 bytes long.
let word = word.try_into().unwrap();
let word = u32::from_le_bytes(word);
w.bits(word)
});
}
// Was there an unexpected write? If so, a computation is already
// ongoing and the user needs to call `Rx::read` next. If I understand
// the documentation correctly, our writes to the register above
// shouldn't have affected the ongoing computation.
if sr.read().wrerr().bit_is_set() {
return Err(Error::Busy);
}
Ok(())
}
/// Writes the provided buffer to the AES peripheral using DMA
///
/// Returns a DMA transfer that is ready to be started. It needs to be
/// started for anything to happen.
///
/// # Panics
///
/// Panics, if the buffer length is larger than `u16::max_value()`.
///
/// The AES peripheral works with 128-bit blocks, which means the buffer
/// length must be a multiple of 16. Panics, if this is not the case.
///
/// Panics, if the buffer is not aligned to a word boundary.
pub fn write_all<Buffer, Channel>(
self,
dma: &mut dma::Handle,
buffer: Pin<Buffer>,
channel: Channel,
) -> Transfer<Self, Channel, Buffer, dma::Ready>
where
Self: dma::Target<Channel>,
Buffer: Deref + 'static,
Buffer::Target: AsSlice<Element = u8>,
Channel: dma::Channel,
{
assert!(buffer.as_slice().len() % 16 == 0);
// Safe, because we're only taking the address of a register.
let address = &unsafe { &*pac::AES::ptr() }.dinr as *const _ as u32;
// Safe, because the traits bounds of this method guarantee that
// `buffer` can be read from.
unsafe {
Transfer::new(
dma,
self,
channel,
buffer,
address,
// This priority should be lower than the priority of the
// transfer created in `read_all`. I'm not sure how relevant
// that is in practice, but it makes sense, and as I've seen a
// comment to that effect in ST's HAL code, I'd rather be
// careful than risk weird bugs.
dma::Priority::high(),
dma::Direction::memory_to_peripheral(),
)
}
}
}
/// Can be used to read data from the AES peripheral
///
/// You can access this struct via [`Stream`].
pub struct Rx(());
impl Rx {
pub fn read(&mut self) -> nb::Result<Block, Void> {
// Get access to the registers. This is safe, because:
// - We only use SR for an atomic read.
// - `Rx` has exclusive access to DOUTR.
// - While it exists, `Rx` has exclusive access to CR.
let (sr, doutr, cr) = unsafe {
let aes = &*pac::AES::ptr();
(&aes.sr, &aes.doutr, &aes.cr)
};
// Is a computation complete?
if sr.read().ccf().bit_is_clear() {
return Err(nb::Error::WouldBlock);
}
// Read output data from DOUTR
//
// See STM32L0x2 reference manual, section 18.4.10.
let mut block = [0; 16];
for i in (0..4).rev() {
let i = i * 4;
let word = doutr.read().bits();
let word = word.to_le_bytes();
(block[i..i + 4]).copy_from_slice(&word);
}
// Clear CCF flag
cr.modify(|_, w| w.ccfc().set_bit());
Ok(block)
}
/// Reads data from the AES peripheral into the provided buffer using DMA
///
/// Returns a DMA transfer that is ready to be started. It needs to be
/// started for anything to happen.
///
/// # Panics
///
/// Panics, if the buffer length is larger than `u16::max_value()`.
///
/// The AES peripheral works with 128-bit blocks, which means the buffer
/// length must be a multiple of 16. Panics, if this is not the case.
///
/// Panics, if the buffer is not aligned to a word boundary.
pub fn read_all<Buffer, Channel>(
self,
dma: &mut dma::Handle,
buffer: Pin<Buffer>,
channel: Channel,
) -> Transfer<Self, Channel, Buffer, dma::Ready>
where
Self: dma::Target<Channel>,
Buffer: DerefMut + 'static,
Buffer::Target: AsMutSlice<Element = u8>,
Channel: dma::Channel,
{
assert!(buffer.as_slice().len() % 16 == 0);
// Safe, because we're only taking the address of a register.
let address = &unsafe { &*pac::AES::ptr() }.doutr as *const _ as u32;
// Safe, because the traits bounds of this method guarantee that
// `buffer` can be written to.
unsafe {
Transfer::new(
dma,
self,
channel,
buffer,
address,
// This priority should be higher than the priority of the
// transfer created in `write_all`. I'm not sure how relevant
// that is in practice, but it makes sense, and as I've seen a
// comment to that effect in ST's HAL code, I'd rather be
// careful than risk weird bugs.
dma::Priority::very_high(),
dma::Direction::peripheral_to_memory(),
)
}
}
}
/// Implemented for all chaining modes
///
/// This is mostly an internal trait. The user won't typically need to use or
/// implement this, except to call the various static methods that create a
/// mode.
pub trait Mode {
fn prepare(&self, _: &aes::RegisterBlock);
fn select(&self, _: &mut cr::W);
}
impl dyn Mode {
/// Use this with [`AES::enable`] to encrypt using ECB
pub fn ecb_encrypt() -> ECB<Encrypt> {
ECB(Encrypt)
}
/// Use this with [`AES::enable`] to decrypt using ECB
pub fn ecb_decrypt() -> ECB<Decrypt> {
ECB(Decrypt)
}
/// Use this with [`AES::enable`] to encrypt using CBC
pub fn cbc_encrypt(init_vector: [u32; 4]) -> CBC<Encrypt> {
CBC {
_mode: Encrypt,
init_vector,
}
}
/// Use this with [`AES::enable`] to decrypt using CBC
pub fn cbc_decrypt(init_vector: [u32; 4]) -> CBC<Decrypt> {
CBC {
_mode: Decrypt,
init_vector,
}
}
/// Use this with [`AES::enable`] to encrypt or decrypt using CTR
pub fn ctr(init_vector: [u32; 3]) -> CTR {
CTR { init_vector }
}
}
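// Quick selection guide (key and IV values are placeholders): ECB needs no
// IV; CBC takes a full 128-bit IV as [u32; 4]; CTR takes only the 96-bit
// nonce as [u32; 3] and seeds its counter word internally.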
/// The ECB (electronic code book) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using ECB
/// mode. `Mode` must be either [`Encrypt`] or [`Decrypt`].
///
/// You can get an instance of this struct via [`Mode::ecb_encrypt`] or
/// [`Mode::ecb_decrypt`].
pub struct ECB<Mode>(Mode);
impl Mode for ECB<Encrypt> {
fn prepare(&self, _: &aes::RegisterBlock) {
// Nothing to do.
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select ECB chaining mode
w.chmod().bits(0b00);
// Select encryption mode
w.mode().bits(0b00);
}
}
}
impl Mode for ECB<Decrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
derive_key(aes)
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select ECB chaining mode
w.chmod().bits(0b00);
// Select decryption mode
w.mode().bits(0b10);
}
}
}
/// The CBC (cipher block chaining) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using CBC
/// mode. `Mode` must be either [`Encrypt`] or [`Decrypt`].
///
/// You can get an instance of this struct via [`Mode::cbc_encrypt`] or
/// [`Mode::cbc_decrypt`].
pub struct CBC<Mode> {
_mode: Mode,
init_vector: [u32; 4],
}
impl Mode for CBC<Encrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
// Safe, as the registers accept the full range of `u32`.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(self.init_vector[3]));
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select CBC chaining mode
w.chmod().bits(0b01);
// Select encryption mode
w.mode().bits(0b00);
}
}
}
impl Mode for CBC<Decrypt> {
fn prepare(&self, aes: &aes::RegisterBlock) {
derive_key(aes);
// Safe, as the registers accept the full range of `u32`.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(self.init_vector[3]));
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select CBC chaining mode
w.chmod().bits(0b01);
// Select decryption mode
w.mode().bits(0b10);
}
}
}
/// The CTR (counter) chaining mode
///
/// Can be passed [`AES::enable`], to start encrypting or decrypting using CTR
/// mode. In CTR mode, encryption and decryption are technically identical, so
/// further qualification is not required.
///
/// You can get an instance of this struct via [`Mode::ctr`].
pub struct CTR {
init_vector: [u32; 3],
}
impl Mode for CTR {
fn prepare(&self, aes: &aes::RegisterBlock) {
// Initialize initialization vector
//
// See STM32L0x2 reference manual, table 78 on page 408.
aes.ivr3.write(|w| w.bits(self.init_vector[0]));
aes.ivr2.write(|w| w.bits(self.init_vector[1]));
aes.ivr1.write(|w| w.bits(self.init_vector[2]));
aes.ivr0.write(|w| w.bits(0x0001)); // counter
}
fn select(&self, w: &mut cr::W) {
// Safe, as we're only writing valid bit patterns.
unsafe {
// Select Counter Mode (CTR) mode
w.chmod().bits(0b10);
// These bits mean encryption mode, but in CTR mode,
// encryption and decryption are technically identical, so this
// is fine for either mode.
w.mode().bits(0b00);
}
}
}
fn derive_key(aes: &aes::RegisterBlock) {
// Select key derivation mode. This is safe, as we're writing a valid bit
// pattern.
aes.cr.modify(|_, w| w.mode().bits(0b01));
// Enable the peripheral. It will be automatically disabled again once the
// key has been derived.
aes.cr.modify(|_, w| w.en().set_bit());
// Wait for key derivation to finish
while aes.sr.read().ccf().bit_is_clear() {}
}
/// Used to identify encryption mode
pub struct Encrypt;
/// Used to identify decryption mode
pub struct Decrypt;
/// A 128-bit block
///
/// The AES peripheral processes 128 bits at a time, so this represents one unit
/// of processing.
pub type Block = [u8; 16];
#[derive(Debug)]
pub enum Error {
/// AES peripheral is busy
Busy,
}
/// Wrapper around a [`dma::Transfer`].
///
/// This struct is required because, under the hood, the AES peripheral only
/// supports 32-bit word DMA transfers, while the public API works with byte
/// slices.
pub struct Transfer<Target, Channel, Buffer, State> {
buffer: Pin<Buffer>,
inner: dma::Transfer<Target, Channel, dma::PtrBuffer<u32>, State>,
}
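// For example, one 16-byte `Block` moves as four 32-bit word transfers;
// `Transfer::new` below performs exactly this `len / 4` conversion.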
impl<Target, Channel, Buffer> Transfer<Target, Channel, Buffer, dma::Ready>
where
Target: dma::Target<Channel>,
Channel: dma::Channel,
Buffer: Deref + 'static,
Buffer::Target: AsSlice<Element = u8>,
{
/// Create a new instance of `Transfer`
///
/// # Safety
///
/// If this is used to prepare a memory-to-peripheral transfer, the caller
/// must make sure that the buffer can be read from safely.
///
/// If this is used to prepare a peripheral-to-memory transfer, the caller
/// must make sure that the buffer can be written to safely.
///
/// The caller must guarantee that the buffer length is a multiple of 4.
unsafe fn new(
dma: &mut dma::Handle,
target: Target,
channel: Channel,
buffer: Pin<Buffer>,
address: u32,
priority: dma::Priority,
dir: dma::Direction,
) -> Self {
let num_words = buffer.as_slice().len() / 4;
let transfer = dma::Transfer::new(
dma,
target,
channel,
// The caller must guarantee that our length is a multiple of 4, so
// this should be fine.
Pin::new(dma::PtrBuffer {
ptr: buffer.as_slice().as_ptr() as *const u32,
len: num_words,
}),
num_words,
address,
priority,
dir,
false,
);
Self {
buffer,
inner: transfer,
}
}
/// Enables the provided interrupts
///
/// This setting only affects this transfer. It doesn't affect transfers on
/// other channels, or subsequent transfers on the same channel.
pub fn enable_interrupts(&mut self, interrupts: dma::Interrupts) {
self.inner.enable_interrupts(interrupts)
}
/// Start the DMA transfer
///
/// Consumes this instance of `Transfer` and returns a new one, with its
/// state changed to indicate that the transfer has been started.
pub fn start(self) -> Transfer<Target, Channel, Buffer, dma::Started> {
Transfer {
buffer: self.buffer,
inner: self.inner.start(),
}
}
}
impl<Target, Channel, Buffer> Transfer<Target, Channel, Buffer, dma::Started>
where
Channel: dma::Channel,
{
/// Indicates whether the transfer is still ongoing
pub fn is_active(&self) -> bool {
self.inner.is_active()
}
/// Waits for the transfer to finish and returns the owned resources
///
/// This function will busily wait until the transfer is finished. If you
/// don't want this, please call this function only once you know that the
/// transfer has finished.
///
/// This function will return immediately if [`Transfer::is_active`]
/// returns `false`.
pub fn wait(self) -> dma::TransferResourcesResult<Target, Channel, Buffer> {
let (res, err) = match self.inner.wait() {
Ok(res) => (res, None),
Err((res, err)) => (res, Some(err)),
};
let res = dma::TransferResources {
target: res.target,
channel: res.channel,
buffer: self.buffer,
};
match err {
None => Ok(res),
Some(err) => Err((res, err)),
}
}
} | rx: Rx(()),
tx: Tx(()),
} | random_line_split |
peer.rs | use std::io::BufferedReader;
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::option::Option;
use std::io::timer::sleep;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::super::events::*;
use super::parsers::{read_rpc, as_network_msg, make_id_bytes};
use super::types::*;
static CONNECT_TIMEOUT: u64 = 3000;
// Each peer should have one of these, and they should be consistent across
// nodes.
pub struct NetPeer<'a> {
pub id: u64,
pub conf: NetPeerConfig<'a>,
// If we have an open connection to this peer, then this will be Some(...).
pub stream: Option<TcpStream>,
to_raft: Sender<RaftMsg>,
mgmt_port: Receiver<MgmtMsg>,
shutdown: bool,
}
impl<'a> NetPeer<'a> {
/*
* id: id of local Raft server
* conf: configuration for network peer
* to_raft: Sender for telling Raft about network messages
* mgmt_port: for peer manager
*/
pub fn spawn(id: u64, conf: &NetPeerConfig, to_raft: Sender<RaftMsg>) -> Sender<MgmtMsg> {
let (mgmt_send, mgmt_port) = channel();
let conf = conf.clone();
spawn(proc() {
let mut netpeer = NetPeer::new(id, conf, to_raft, mgmt_port);
netpeer.peer_loop();
});
mgmt_send
}
fn new(id: u64, config: NetPeerConfig, to_raft: Sender<RaftMsg>, mgmt_port: Receiver<MgmtMsg>) -> NetPeer {
NetPeer {
id: id,
conf: config,
stream: None,
to_raft: to_raft,
mgmt_port: mgmt_port,
shutdown: false,
}
}
fn try_connect(&mut self) -> bool {
self.check_mgmt_msg();
if self.shutdown {
return false;
}
match TcpStream::connect_timeout(self.conf.address, CONNECT_TIMEOUT) {
Ok(mut stream) => {
if stream.write(make_id_bytes(self.id).as_slice()).is_err() {
drop(stream);
return false;
}
debug!("[{}] Sent handshake req to {}", self.id, self.conf.id);
let success = self.attach_stream(stream.clone());
if !success {
drop(stream);
}
success
}
Err(e) => {
debug!("[{}] Err connecting to {}: {}@{}", self.id, self.conf.id, self.conf.address, e);
false
}
}
}
fn peer_loop(&mut self) {
while self.stream.is_none() {
debug!("[{}] No stream, trying to attach one.", self.id);
self.check_mgmt_msg();
if self.stream.is_none() { self.try_connect(); }
if self.shutdown { return; }
sleep(CONNECT_TIMEOUT);
}
let mut stream = self.stream.clone().unwrap();
let sender = self.to_raft.clone();
self.check_mgmt_msg();
debug!("[{}] Attached stream from {}.", self.id, stream.peer_name());
loop {
sleep(CONNECT_TIMEOUT);
self.check_mgmt_msg();
let either_rpc = read_rpc(stream.clone());
match either_rpc {
Ok(rpc) => {
self.check_mgmt_msg();
self.send_rpc(rpc, stream.clone());
}
Err(e) => {
self.check_mgmt_msg();
if self.stream.is_some() {
let mut stream = self.stream.take_unwrap();
drop(stream);
}
self.stream = None;
debug!("[{}] Dropped peer: {}", self.id, e);
break;
}
}
}
self.stream = None;
self.check_mgmt_msg();
if !self.shutdown {
debug!("[{}] No shutdown msg: spinning back up...", self.id);
self.peer_loop();
}
else {
debug!("[{}] shutting down.", self.id);
}
}
fn | (&mut self) {
match self.mgmt_port.try_recv() {
Ok(msg) => {
match msg {
AttachStreamMsg(id, mut stream) => {
if id == self.conf.id {
self.attach_stream(stream);
}
}
SendMsg(rpc) => {
if self.stream.is_some() {
self.send_rpc(rpc, self.stream.clone().unwrap());
}
}
StopMsg => {
self.shutdown = true;
self.stream = None;
}
}
}
_ => {
}
}
}
/*
* Send an RPC up to Raft, waiting for a reply if we need to.
*/
fn send_rpc(&self, rpc: RaftRpc, mut stream: TcpStream) -> bool {
match rpc {
RpcARQ(aereq) => {
debug!("[{}] Received ARQ: {}", self.id, aereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(ARQ(aereq, resp_send));
let aeres = resp_recv.recv();
let msg = as_network_msg(RpcARS(aeres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcARS(aeres) => {
debug!("[{}] Received ARS: {}", self.id, aeres);
self.to_raft.send(ARS(aeres));
true
}
RpcVRQ(votereq) => {
debug!("[{}] Received VRQ: {}", self.id, votereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(VRQ(votereq, resp_send));
let voteres = resp_recv.recv();
let msg = as_network_msg(RpcVRS(voteres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcVRS(voteres) => {
debug!("[{}] Received VRS: {}", self.id, voteres);
self.to_raft.send(VRS(voteres));
true
}
RpcStopReq => {
debug!("[{}] Received RpcStop", self.id);
self.to_raft.send(StopReq);
false
}
}
}
/*
* If the peer chose to connect to us, then we received a connection on our
* listening address and need to hand the stream to this `NetPeer` here.
*
* Returns: True if we successfully connected, false if we thought we already had
* an open connection to this peer (so this connection gets dropped).
*/
pub fn attach_stream(&mut self, stream: TcpStream) -> bool {
self.check_mgmt_msg();
if self.stream.is_some() || self.shutdown {
drop(stream);
return false;
}
self.stream = Some(stream);
true
}
}
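// Illustrative sketch, not part of the original file: driving a spawned
// peer through its management channel. `my_id`, `peer_conf`, `to_raft`,
// and `accepted_stream` are assumed bindings; the message variants come
// from `super::types`.
//
//     let mgmt = NetPeer::spawn(my_id, &peer_conf, to_raft.clone());
//     mgmt.send(AttachStreamMsg(peer_conf.id, accepted_stream));
//     mgmt.send(StopMsg); // ask the peer task to shut down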
// TODO: Get the old parsing code out of the Git history and work it into
// this configuration.
#[cfg(test)]
mod test {
use std::io::{TcpStream, BufferedReader, IoResult, IoError, InvalidInput};
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::{Acceptor, Listener, TcpListener, TcpStream};
use std::io::net::tcp::TcpAcceptor;
use super::super::super::events::*;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::NetPeer;
use super::super::types::*;
use super::super::parsers::*;
#[test]
fn test_spawn() {
let pc = NetPeerConfig {
id: 1,
address: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8844,
},
client_addr: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8840,
},
};
let (send1, recv1) = channel();
let (send2, recv2) = channel();
let mut peer1_sd = NetPeer::spawn(2, &pc.clone(), send1);
let mut peer2_sd = NetPeer::spawn(3, &pc, send2);
let listener: TcpListener = TcpListener::bind("127.0.0.1", 8844).unwrap();
let mut acceptor: TcpAcceptor = listener.listen().unwrap();
// Spawn two peers
let mut count = 0;
// Send each peer the vote
let vote = VoteReq {
term: 0,
candidate_id: 0,
last_log_index: 0,
last_log_term: 0,
uuid: Uuid::new(Version4Random).unwrap(),
};
let from_raft_voteres = VoteRes {
term: 0,
vote_granted: true,
uuid: Uuid::new(Version4Random).unwrap(),
};
for mut stream in acceptor.incoming() {
let vote_bytes = as_network_msg(RpcVRQ(vote.clone()));
debug!("[test_spawn()] {}", read_helo(stream.clone()));
stream.write(vote_bytes);
count += 1;
debug!("[test_spawn()] Sent {} vote requests.", count);
if count > 1 {
break;
}
}
let mut replies = 0;
// We should get the votes back out on the port that we were waiting on
debug!("test_spawn(): waiting for replies");
spawn(proc() {
match recv1.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
});
match recv2.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
peer1_sd.send(StopMsg);
peer2_sd.send(StopMsg);
drop(acceptor);
}
}
| check_mgmt_msg | identifier_name |
peer.rs | use std::io::BufferedReader;
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::option::Option;
use std::io::timer::sleep;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::super::events::*;
use super::parsers::{read_rpc, as_network_msg, make_id_bytes};
use super::types::*;
static CONNECT_TIMEOUT: u64 = 3000;
// Each peer should have one of these, and they should be consistent across
// nodes.
pub struct NetPeer<'a> {
pub id: u64,
pub conf: NetPeerConfig<'a>,
// If we have an open connection to this peer, then this will be Some(...).
pub stream: Option<TcpStream>,
to_raft: Sender<RaftMsg>,
mgmt_port: Receiver<MgmtMsg>,
shutdown: bool,
}
impl<'a> NetPeer<'a> {
/*
* id: id of local Raft server
* conf: configuration for network peer
* to_raft: Sender for telling Raft about network messages
* mgmt_port: for peer manager
*/
pub fn spawn(id: u64, conf: &NetPeerConfig, to_raft: Sender<RaftMsg>) -> Sender<MgmtMsg> |
fn new(id: u64, config: NetPeerConfig, to_raft: Sender<RaftMsg>, mgmt_port: Receiver<MgmtMsg>) -> NetPeer {
NetPeer {
id: id,
conf: config,
stream: None,
to_raft: to_raft,
mgmt_port: mgmt_port,
shutdown: false,
}
}
fn try_connect(&mut self) -> bool {
self.check_mgmt_msg();
if self.shutdown {
return false;
}
match TcpStream::connect_timeout(self.conf.address, CONNECT_TIMEOUT) {
Ok(mut stream) => {
if stream.write(make_id_bytes(self.id).as_slice()).is_err() {
drop(stream);
return false;
}
debug!("[{}] Sent handshake req to {}", self.id, self.conf.id);
let success = self.attach_stream(stream.clone());
if !success {
drop(stream);
}
success
}
Err(e) => {
debug!("[{}] Err connecting to {}: {}@{}", self.id, self.conf.id, self.conf.address, e);
false
}
}
}
fn peer_loop(&mut self) {
while self.stream.is_none() {
debug!("[{}] No stream, trying to attach one.", self.id);
self.check_mgmt_msg();
if self.stream.is_none() { self.try_connect(); }
if self.shutdown { return; }
sleep(CONNECT_TIMEOUT);
}
let mut stream = self.stream.clone().unwrap();
let sender = self.to_raft.clone();
self.check_mgmt_msg();
debug!("[{}] Attached stream from {}.", self.id, stream.peer_name());
loop {
sleep(CONNECT_TIMEOUT);
self.check_mgmt_msg();
let either_rpc = read_rpc(stream.clone());
match either_rpc {
Ok(rpc) => {
self.check_mgmt_msg();
self.send_rpc(rpc, stream.clone());
}
Err(e) => {
self.check_mgmt_msg();
if self.stream.is_some() {
let mut stream = self.stream.take_unwrap();
drop(stream);
}
self.stream = None;
debug!("[{}] Dropped peer: {}", self.id, e);
break;
}
}
}
self.stream = None;
self.check_mgmt_msg();
if !self.shutdown {
debug!("[{}] No shutdown msg: spinning back up...", self.id);
self.peer_loop();
}
else {
debug!("[{}] shutting down.", self.id);
}
}
fn check_mgmt_msg(&mut self) {
match self.mgmt_port.try_recv() {
Ok(msg) => {
match msg {
AttachStreamMsg(id, mut stream) => {
if id == self.conf.id {
self.attach_stream(stream);
}
}
SendMsg(rpc) => {
if self.stream.is_some() {
self.send_rpc(rpc, self.stream.clone().unwrap());
}
}
StopMsg => {
self.shutdown = true;
self.stream = None;
}
}
}
_ => {
}
}
}
/*
* Send an RPC up to Raft, waiting for a reply if we need to.
*/
fn send_rpc(&self, rpc: RaftRpc, mut stream: TcpStream) -> bool {
match rpc {
RpcARQ(aereq) => {
debug!("[{}] Received ARQ: {}", self.id, aereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(ARQ(aereq, resp_send));
let aeres = resp_recv.recv();
let msg = as_network_msg(RpcARS(aeres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcARS(aeres) => {
debug!("[{}] Received ARS: {}", self.id, aeres);
self.to_raft.send(ARS(aeres));
true
}
RpcVRQ(votereq) => {
debug!("[{}] Received VRQ: {}", self.id, votereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(VRQ(votereq, resp_send));
let voteres = resp_recv.recv();
let msg = as_network_msg(RpcVRS(voteres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcVRS(voteres) => {
debug!("[{}] Received VRS: {}", self.id, voteres);
self.to_raft.send(VRS(voteres));
true
}
RpcStopReq => {
debug!("[{}] Received RpcStop", self.id);
self.to_raft.send(StopReq);
false
}
}
}
/*
* If the peer chose to connect to us, then we received a connection on our
* listening address and need to hand the stream to this `NetPeer` here.
*
* Returns: True if we successfully connected, false if we thought we already had
* an open connection to this peer (so this connection gets dropped).
*/
pub fn attach_stream(&mut self, stream: TcpStream) -> bool {
self.check_mgmt_msg();
if self.stream.is_some() || self.shutdown {
drop(stream);
return false;
}
self.stream = Some(stream);
true
}
}
// TODO: Get the old parsing code out of the Git history and work it into
// this configuration.
#[cfg(test)]
mod test {
use std::io::{TcpStream, BufferedReader, IoResult, IoError, InvalidInput};
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::{Acceptor, Listener, TcpListener, TcpStream};
use std::io::net::tcp::TcpAcceptor;
use super::super::super::events::*;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::NetPeer;
use super::super::types::*;
use super::super::parsers::*;
#[test]
fn test_spawn() {
let pc = NetPeerConfig {
id: 1,
address: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8844,
},
client_addr: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8840,
},
};
let (send1, recv1) = channel();
let (send2, recv2) = channel();
let mut peer1_sd = NetPeer::spawn(2, &pc.clone(), send1);
let mut peer2_sd = NetPeer::spawn(3, &pc, send2);
let listener: TcpListener = TcpListener::bind("127.0.0.1", 8844).unwrap();
let mut acceptor: TcpAcceptor = listener.listen().unwrap();
// Spawn two peers
let mut count = 0;
// Send each peer the vote
let vote = VoteReq {
term: 0,
candidate_id: 0,
last_log_index: 0,
last_log_term: 0,
uuid: Uuid::new(Version4Random).unwrap(),
};
let from_raft_voteres = VoteRes {
term: 0,
vote_granted: true,
uuid: Uuid::new(Version4Random).unwrap(),
};
for mut stream in acceptor.incoming() {
let vote_bytes = as_network_msg(RpcVRQ(vote.clone()));
debug!("[test_spawn()] {}", read_helo(stream.clone()));
stream.write(vote_bytes);
count += 1;
debug!("[test_spawn()] Sent {} vote requests.", count);
if count > 1 {
break;
}
}
let mut replies = 0;
// We should get the votes back out on the port that we were waiting on
debug!("test_spawn(): waiting for replies");
spawn(proc() {
match recv1.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
});
match recv2.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
peer1_sd.send(StopMsg);
peer2_sd.send(StopMsg);
drop(acceptor);
}
}
| {
let (mgmt_send, mgmt_port) = channel();
let conf = conf.clone();
spawn(proc() {
let mut netpeer = NetPeer::new(id, conf, to_raft, mgmt_port);
netpeer.peer_loop();
});
mgmt_send
} | identifier_body |
peer.rs | use std::io::BufferedReader;
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::option::Option;
use std::io::timer::sleep;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::super::events::*;
use super::parsers::{read_rpc, as_network_msg, make_id_bytes};
use super::types::*;
static CONNECT_TIMEOUT: u64 = 3000;
// Each peer should have one of these, and they should be consistent across
// nodes.
pub struct NetPeer<'a> {
pub id: u64,
pub conf: NetPeerConfig<'a>,
// If we have an open connection to this peer, then this will be Some(...).
pub stream: Option<TcpStream>,
to_raft: Sender<RaftMsg>,
mgmt_port: Receiver<MgmtMsg>,
shutdown: bool,
}
impl<'a> NetPeer<'a> {
/*
* id: id of local Raft server
* conf: configuration for network peer
* to_raft: Sender for telling Raft about network messages
* mgmt_port: for peer manager
*/
pub fn spawn(id: u64, conf: &NetPeerConfig, to_raft: Sender<RaftMsg>) -> Sender<MgmtMsg> {
let (mgmt_send, mgmt_port) = channel();
let conf = conf.clone();
spawn(proc() {
let mut netpeer = NetPeer::new(id, conf, to_raft, mgmt_port);
netpeer.peer_loop();
});
mgmt_send
}
fn new(id: u64, config: NetPeerConfig, to_raft: Sender<RaftMsg>, mgmt_port: Receiver<MgmtMsg>) -> NetPeer {
NetPeer {
id: id,
conf: config,
stream: None,
to_raft: to_raft,
mgmt_port: mgmt_port,
shutdown: false,
}
}
fn try_connect(&mut self) -> bool {
self.check_mgmt_msg();
if self.shutdown {
return false;
}
match TcpStream::connect_timeout(self.conf.address, CONNECT_TIMEOUT) {
Ok(mut stream) => {
if stream.write(make_id_bytes(self.id).as_slice()).is_err() {
drop(stream);
return false;
}
debug!("[{}] Sent handshake req to {}", self.id, self.conf.id);
let success = self.attach_stream(stream.clone());
if !success {
drop(stream);
}
success
}
Err(e) => {
debug!("[{}] Err connecting to {}: {}@{}", self.id, self.conf.id, self.conf.address, e);
false
}
}
}
fn peer_loop(&mut self) {
while self.stream.is_none() {
debug!("[{}] No stream, trying to attach one.", self.id);
self.check_mgmt_msg();
if self.stream.is_none() { self.try_connect(); }
if self.shutdown { return; }
sleep(CONNECT_TIMEOUT);
}
let mut stream = self.stream.clone().unwrap();
let sender = self.to_raft.clone();
self.check_mgmt_msg();
debug!("[{}] Attached stream from {}.", self.id, stream.peer_name());
loop {
sleep(CONNECT_TIMEOUT);
self.check_mgmt_msg();
let either_rpc = read_rpc(stream.clone());
match either_rpc {
Ok(rpc) => {
self.check_mgmt_msg();
self.send_rpc(rpc, stream.clone());
}
Err(e) => {
self.check_mgmt_msg();
if self.stream.is_some() {
let mut stream = self.stream.take_unwrap();
drop(stream);
}
self.stream = None;
debug!("[{}] Dropped peer: {}", self.id, e);
break;
}
}
}
self.stream = None;
self.check_mgmt_msg();
if !self.shutdown {
debug!("[{}] No shutdown msg: spinning back up...", self.id);
self.peer_loop();
}
else {
debug!("[{}] shutting down.", self.id);
}
}
fn check_mgmt_msg(&mut self) {
match self.mgmt_port.try_recv() {
Ok(msg) => {
match msg {
AttachStreamMsg(id, mut stream) => {
if id == self.conf.id {
self.attach_stream(stream);
}
}
SendMsg(rpc) => {
if self.stream.is_some() {
self.send_rpc(rpc, self.stream.clone().unwrap());
}
}
StopMsg => {
self.shutdown = true;
self.stream = None;
}
}
}
_ => {
}
}
}
/*
* Send an RPC up to Raft, waiting for a reply if we need to.
*/
fn send_rpc(&self, rpc: RaftRpc, mut stream: TcpStream) -> bool {
match rpc {
RpcARQ(aereq) => {
debug!("[{}] Received ARQ: {}", self.id, aereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(ARQ(aereq, resp_send));
let aeres = resp_recv.recv();
let msg = as_network_msg(RpcARS(aeres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcARS(aeres) => {
debug!("[{}] Received ARS: {}", self.id, aeres);
self.to_raft.send(ARS(aeres));
true
}
RpcVRQ(votereq) => {
debug!("[{}] Received VRQ: {}", self.id, votereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(VRQ(votereq, resp_send));
let voteres = resp_recv.recv();
let msg = as_network_msg(RpcVRS(voteres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcVRS(voteres) => {
debug!("[{}] Received VRS: {}", self.id, voteres);
self.to_raft.send(VRS(voteres));
true
}
RpcStopReq => {
debug!("[{}] Received RpcStop", self.id);
self.to_raft.send(StopReq);
false
}
}
}
/*
* If the peer chose to connect to us, then we received a connection on our
* listening address and need to hand the stream to this `NetPeer` here.
*
* Returns: True if we successfully connected, false if we thought we already had
* an open connection to this peer (so this connection gets dropped).
*/
pub fn attach_stream(&mut self, stream: TcpStream) -> bool {
self.check_mgmt_msg();
if self.stream.is_some() || self.shutdown {
drop(stream);
return false;
}
self.stream = Some(stream);
true
}
}
// TODO: Get the old parsing code out of the Git history and work it into
// this configuration.
#[cfg(test)]
mod test {
use std::io::{TcpStream, BufferedReader, IoResult, IoError, InvalidInput};
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::{Acceptor, Listener, TcpListener, TcpStream};
use std::io::net::tcp::TcpAcceptor;
use super::super::super::events::*;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::NetPeer;
use super::super::types::*;
use super::super::parsers::*;
#[test]
fn test_spawn() {
let pc = NetPeerConfig {
id: 1,
address: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8844,
},
client_addr: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8840,
},
};
let (send1, recv1) = channel();
let (send2, recv2) = channel();
let mut peer1_sd = NetPeer::spawn(2, &pc.clone(), send1);
let mut peer2_sd = NetPeer::spawn(3, &pc, send2);
let listener: TcpListener = TcpListener::bind("127.0.0.1", 8844).unwrap();
let mut acceptor: TcpAcceptor = listener.listen().unwrap();
// Spawn two peers
let mut count = 0;
// Send each peer the vote
let vote = VoteReq {
term: 0,
candidate_id: 0,
last_log_index: 0,
last_log_term: 0,
uuid: Uuid::new(Version4Random).unwrap(),
};
let from_raft_voteres = VoteRes {
term: 0,
vote_granted: true,
uuid: Uuid::new(Version4Random).unwrap(),
};
for mut stream in acceptor.incoming() {
let vote_bytes = as_network_msg(RpcVRQ(vote.clone()));
debug!("[test_spawn()] {}", read_helo(stream.clone())); | break;
}
}
let mut replies = 0;
// We should get the votes back out on the port that we were waiting on
debug!("test_spawn(): waiting for replies");
spawn(proc() {
match recv1.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
});
match recv2.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
peer1_sd.send(StopMsg);
peer2_sd.send(StopMsg);
drop(acceptor);
}
} | stream.write(vote_bytes);
count += 1;
debug!("[test_spawn()] Sent {} vote requests.", count);
if count > 1 { | random_line_split |
peer.rs | use std::io::BufferedReader;
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::option::Option;
use std::io::timer::sleep;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::super::events::*;
use super::parsers::{read_rpc, as_network_msg, make_id_bytes};
use super::types::*;
static CONNECT_TIMEOUT: u64 = 3000;
// Each peer should have one of these, and they should be consistent across
// nodes.
pub struct NetPeer<'a> {
pub id: u64,
pub conf: NetPeerConfig<'a>,
// If we have an open connection to this peer, then this will be Some(...).
pub stream: Option<TcpStream>,
to_raft: Sender<RaftMsg>,
mgmt_port: Receiver<MgmtMsg>,
shutdown: bool,
}
impl<'a> NetPeer<'a> {
/*
* id: id of local Raft server
* conf: configuration for network peer
* to_raft: Sender for telling Raft about network messages
* mgmt_port: for peer manager
*/
pub fn spawn(id: u64, conf: &NetPeerConfig, to_raft: Sender<RaftMsg>) -> Sender<MgmtMsg> {
let (mgmt_send, mgmt_port) = channel();
let conf = conf.clone();
spawn(proc() {
let mut netpeer = NetPeer::new(id, conf, to_raft, mgmt_port);
netpeer.peer_loop();
});
mgmt_send
}
fn new(id: u64, config: NetPeerConfig, to_raft: Sender<RaftMsg>, mgmt_port: Receiver<MgmtMsg>) -> NetPeer {
NetPeer {
id: id,
conf: config,
stream: None,
to_raft: to_raft,
mgmt_port: mgmt_port,
shutdown: false,
}
}
fn try_connect(&mut self) -> bool {
self.check_mgmt_msg();
if self.shutdown {
return false;
}
match TcpStream::connect_timeout(self.conf.address, CONNECT_TIMEOUT) {
Ok(mut stream) => {
if stream.write(make_id_bytes(self.id).as_slice()).is_err() {
drop(stream);
return false;
}
debug!("[{}] Sent handshake req to {}", self.id, self.conf.id);
let success = self.attach_stream(stream.clone());
if !success {
drop(stream);
}
success
}
Err(e) => {
debug!("[{}] Err connecting to {}: {}@{}", self.id, self.conf.id, self.conf.address, e);
false
}
}
}
fn peer_loop(&mut self) {
while self.stream.is_none() {
debug!("[{}] No stream, trying to attach one.", self.id);
self.check_mgmt_msg();
if self.stream.is_none() { self.try_connect(); }
if self.shutdown { return; }
sleep(CONNECT_TIMEOUT);
}
let mut stream = self.stream.clone().unwrap();
let sender = self.to_raft.clone();
self.check_mgmt_msg();
debug!("[{}] Attached stream from {}.", self.id, stream.peer_name());
loop {
sleep(CONNECT_TIMEOUT);
self.check_mgmt_msg();
let either_rpc = read_rpc(stream.clone());
match either_rpc {
Ok(rpc) => {
self.check_mgmt_msg();
self.send_rpc(rpc, stream.clone());
}
Err(e) => {
self.check_mgmt_msg();
if self.stream.is_some() {
let mut stream = self.stream.take_unwrap();
drop(stream);
}
self.stream = None;
debug!("[{}] Dropped peer: {}", self.id, e);
break;
}
}
}
self.stream = None;
self.check_mgmt_msg();
if !self.shutdown {
debug!("[{}] No shutdown msg: spinning back up...", self.id);
self.peer_loop();
}
else {
debug!("[{}] shutting down.", self.id);
}
}
fn check_mgmt_msg(&mut self) {
match self.mgmt_port.try_recv() {
Ok(msg) => {
match msg {
AttachStreamMsg(id, mut stream) => {
if id == self.conf.id {
self.attach_stream(stream);
}
}
SendMsg(rpc) => |
StopMsg => {
self.shutdown = true;
self.stream = None;
}
}
}
_ => {
}
}
}
/*
* Send an RPC up to Raft, waiting for a reply if we need to.
*/
fn send_rpc(&self, rpc: RaftRpc, mut stream: TcpStream) -> bool {
match rpc {
RpcARQ(aereq) => {
debug!("[{}] Received ARQ: {}", self.id, aereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(ARQ(aereq, resp_send));
let aeres = resp_recv.recv();
let msg = as_network_msg(RpcARS(aeres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcARS(aeres) => {
debug!("[{}] Received ARS: {}", self.id, aeres);
self.to_raft.send(ARS(aeres));
true
}
RpcVRQ(votereq) => {
debug!("[{}] Received VRQ: {}", self.id, votereq);
let (resp_send, resp_recv) = channel();
self.to_raft.send(VRQ(votereq, resp_send));
let voteres = resp_recv.recv();
let msg = as_network_msg(RpcVRS(voteres));
match stream.write(msg.as_slice()) {
Ok(_) => true,
Err(_) => {
drop(stream);
false
}
}
}
RpcVRS(voteres) => {
debug!("[{}] Received VRS: {}", self.id, voteres);
self.to_raft.send(VRS(voteres));
true
}
RpcStopReq => {
debug!("[{}] Received RpcStop", self.id);
self.to_raft.send(StopReq);
false
}
}
}
/*
* If the peer chose to connect to us, then we received a connection on our
* listening address and need to hand the stream to this `NetPeer` here.
*
* Returns: True if we successfully connected, false if we thought we already had
* an open connection to this peer (so this connection gets dropped).
*/
pub fn attach_stream(&mut self, stream: TcpStream) -> bool {
self.check_mgmt_msg();
if self.stream.is_some() || self.shutdown {
drop(stream);
return false;
}
self.stream = Some(stream);
true
}
}
// TODO: Get the old parsing code out of the Git history and work it into
// this configuration.
#[cfg(test)]
mod test {
use std::io::{TcpStream, BufferedReader, IoResult, IoError, InvalidInput};
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use std::io::{Acceptor, Listener, TcpListener, TcpStream};
use std::io::net::tcp::TcpAcceptor;
use super::super::super::events::*;
use uuid::{Uuid, UuidVersion, Version4Random};
use super::NetPeer;
use super::super::types::*;
use super::super::parsers::*;
#[test]
fn test_spawn() {
let pc = NetPeerConfig {
id: 1,
address: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8844,
},
client_addr: SocketAddr {
ip: Ipv4Addr(127, 0, 0, 1),
port: 8840,
},
};
let (send1, recv1) = channel();
let (send2, recv2) = channel();
let mut peer1_sd = NetPeer::spawn(2, &pc.clone(), send1);
let mut peer2_sd = NetPeer::spawn(3, &pc, send2);
let listener: TcpListener = TcpListener::bind("127.0.0.1", 8844).unwrap();
let mut acceptor: TcpAcceptor = listener.listen().unwrap();
// Spawn two peers
let mut count = 0;
// Send each peer the vote
let vote = VoteReq {
term: 0,
candidate_id: 0,
last_log_index: 0,
last_log_term: 0,
uuid: Uuid::new(Version4Random).unwrap(),
};
let from_raft_voteres = VoteRes {
term: 0,
vote_granted: true,
uuid: Uuid::new(Version4Random).unwrap(),
};
for mut stream in acceptor.incoming() {
let vote_bytes = as_network_msg(RpcVRQ(vote.clone()));
debug!("[test_spawn()] {}", read_helo(stream.clone()));
stream.write(vote_bytes);
count += 1;
debug!("[test_spawn()] Sent {} vote requests.", count);
if count > 1 {
break;
}
}
let mut replies = 0;
// We should get the votes back out on the port that we were waiting on
debug!("test_spawn(): waiting for replies");
spawn(proc() {
match recv1.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
});
match recv2.recv() {
VRQ(recvote, chan) => {
assert!(recvote.uuid == vote.uuid);
debug!("[test_spawn()] Sending reply from raft: {}", from_raft_voteres);
chan.send(from_raft_voteres);
}
_ => { fail!(); }
}
peer1_sd.send(StopMsg);
peer2_sd.send(StopMsg);
drop(acceptor);
}
}
| {
if self.stream.is_some() {
self.send_rpc(rpc, self.stream.clone().unwrap());
}
} | conditional_block |
huffman.rs | //! Length-limited Huffman Codes.
use crate::bit;
use alloc::{vec, vec::Vec};
use core::cmp;
use core2::io;
const MAX_BITWIDTH: u8 = 15;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Code {
pub width: u8,
pub bits: u16,
}
impl Code {
pub fn new(width: u8, bits: u16) -> Self {
debug_assert!(width <= MAX_BITWIDTH);
Code { width, bits }
}
fn inverse_endian(&self) -> Self {
let mut f = self.bits;
let mut t = 0;
for _ in 0..self.width {
t <<= 1;
t |= f & 1;
f >>= 1;
}
Code::new(self.width, t)
}
}
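// Worked example, added for clarity: `inverse_endian` mirrors the bit
// pattern within the code's width, so `Code::new(3, 0b110)` becomes
// `Code::new(3, 0b011)`. DEFLATE packs Huffman codes MSB-first while the
// bit reader here consumes LSB-first, hence the flip.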
pub trait Builder: Sized {
type Instance;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()>;
fn finish(self) -> Self::Instance;
fn restore_canonical_huffman_codes(mut self, bitwidthes: &[u8]) -> io::Result<Self::Instance> {
debug_assert!(!bitwidthes.is_empty());
let mut symbols = bitwidthes
.iter()
.enumerate()
.filter(|&(_, &code_bitwidth)| code_bitwidth > 0)
.map(|(symbol, &code_bitwidth)| (symbol as u16, code_bitwidth))
.collect::<Vec<_>>();
symbols.sort_by_key(|x| x.1);
let mut code = 0;
let mut prev_width = 0;
for (symbol, bitwidth) in symbols {
code <<= bitwidth - prev_width;
self.set_mapping(symbol, Code::new(bitwidth, code))?;
code += 1;
prev_width = bitwidth;
}
Ok(self.finish())
}
}
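// Worked example, added for clarity: restoring canonical codes from the
// bitwidths [2, 1, 3, 3]. Symbols are visited in ascending order of
// width; each code is the previous one plus one, shifted left whenever
// the width grows:
//
//     symbol 1 (width 1): 0b0
//     symbol 0 (width 2): (0b0 + 1) << 1  = 0b10
//     symbol 2 (width 3): (0b10 + 1) << 1 = 0b110
//     symbol 3 (width 3): 0b110 + 1       = 0b111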
pub struct DecoderBuilder {
table: Vec<u16>,
eob_symbol: Option<u16>,
safely_peek_bitwidth: Option<u8>,
max_bitwidth: u8,
}
impl DecoderBuilder {
pub fn new(
max_bitwidth: u8,
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> Self {
debug_assert!(max_bitwidth <= MAX_BITWIDTH);
DecoderBuilder {
table: vec![u16::from(MAX_BITWIDTH) + 1; 1 << max_bitwidth],
eob_symbol,
safely_peek_bitwidth,
max_bitwidth,
}
}
pub fn from_bitwidthes(
bitwidthes: &[u8],
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> io::Result<Decoder> {
let builder = Self::new(
bitwidthes.iter().cloned().max().unwrap_or(0),
safely_peek_bitwidth,
eob_symbol,
);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn safely_peek_bitwidth(&self) -> Option<u8> {
self.safely_peek_bitwidth
}
}
impl Builder for DecoderBuilder {
type Instance = Decoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert!(code.width <= self.max_bitwidth);
if Some(symbol) == self.eob_symbol {
self.safely_peek_bitwidth = Some(code.width);
}
// Pack the symbol and its code bitwidth into one table entry.
let value = (symbol << 5) | u16::from(code.width);
// Sets the mapping to all possible indices
let code_be = code.inverse_endian();
for padding in 0..(1 << (self.max_bitwidth - code.width)) {
let i = ((padding << code.width) | code_be.bits) as usize;
if self.table[i] != u16::from(MAX_BITWIDTH) + 1 {
#[cfg(feature = "std")]
let message = format!(
"Bit region conflict: i={}, old_value={}, new_value={}, symbol={}, code={:?}",
i, self.table[i], value, symbol, code
);
#[cfg(not(feature = "std"))]
let message = "Bit region conflict";
return Err(io::Error::new(io::ErrorKind::InvalidData, message));
}
self.table[i] = value;
}
Ok(())
}
fn finish(self) -> Self::Instance {
Decoder {
table: self.table,
safely_peek_bitwidth: cmp::min(
self.max_bitwidth,
self.safely_peek_bitwidth.unwrap_or(1),
),
max_bitwidth: self.max_bitwidth,
}
}
}
#[derive(Debug)]
pub struct Decoder {
table: Vec<u16>,
safely_peek_bitwidth: u8,
max_bitwidth: u8,
}
impl Decoder {
pub fn safely_peek_bitwidth(&self) -> u8 {
self.safely_peek_bitwidth
}
#[inline(always)]
pub fn decode<R>(&self, reader: &mut bit::BitReader<R>) -> io::Result<u16>
where
R: io::Read,
{
let v = self.decode_unchecked(reader);
reader.check_last_error()?;
Ok(v)
}
#[inline(always)]
pub fn decode_unchecked<R>(&self, reader: &mut bit::BitReader<R>) -> u16
where
R: io::Read,
{
let mut value;
let mut bitwidth;
let mut peek_bitwidth = self.safely_peek_bitwidth;
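// Peek with the short "safe" width first; if the table entry we land on
// claims a longer code, widen the peek and retry. Each entry packs
// `(symbol << 5) | bitwidth`, so `& 0b1_1111` recovers the width and
// `>> 5` the symbol.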
loop {
let code = reader.peek_bits_unchecked(peek_bitwidth);
value = self.table[code as usize];
bitwidth = (value & 0b1_1111) as u8;
if bitwidth <= peek_bitwidth {
break;
}
if bitwidth > self.max_bitwidth {
reader.set_last_error(invalid_data_error!("Invalid huffman coded stream"));
break;
}
peek_bitwidth = bitwidth;
}
reader.skip_bits(bitwidth);
value >> 5
}
}
#[derive(Debug)]
pub struct EncoderBuilder {
table: Vec<Code>,
}
impl EncoderBuilder {
pub fn new(symbol_count: usize) -> Self {
EncoderBuilder {
table: vec![Code::new(0, 0); symbol_count],
}
}
pub fn from_bitwidthes(bitwidthes: &[u8]) -> io::Result<Encoder> {
let symbol_count = bitwidthes
.iter()
.enumerate()
.filter(|e| *e.1 > 0)
.last()
.map_or(0, |e| e.0)
+ 1;
let builder = Self::new(symbol_count);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn from_frequencies(symbol_frequencies: &[usize], max_bitwidth: u8) -> io::Result<Encoder> {
let max_bitwidth = cmp::min(
max_bitwidth,
ordinary_huffman_codes::calc_optimal_max_bitwidth(symbol_frequencies),
);
let code_bitwidthes = length_limited_huffman_codes::calc(max_bitwidth, symbol_frequencies);
Self::from_bitwidthes(&code_bitwidthes)
}
}
impl Builder for EncoderBuilder {
type Instance = Encoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert_eq!(self.table[symbol as usize], Code::new(0, 0));
self.table[symbol as usize] = code.inverse_endian();
Ok(())
}
fn finish(self) -> Self::Instance {
Encoder { table: self.table }
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
table: Vec<Code>,
}
impl Encoder {
#[inline(always)]
pub fn encode<W>(&self, writer: &mut bit::BitWriter<W>, symbol: u16) -> io::Result<()>
where
W: io::Write,
{
let code = self.lookup(symbol);
debug_assert_ne!(code, Code::new(0, 0));
writer.write_bits(code.width, code.bits)
}
#[inline(always)]
pub fn lookup(&self, symbol: u16) -> Code {
debug_assert!(
symbol < self.table.len() as u16,
"symbol:{}, table:{}",
symbol,
self.table.len()
);
self.table[symbol as usize].clone()
}
pub fn used_max_symbol(&self) -> Option<u16> {
self.table
.iter()
.rev()
.position(|x| x.width > 0)
.map(|trailing_zeros| (self.table.len() - 1 - trailing_zeros) as u16)
}
}
#[allow(dead_code)]
mod ordinary_huffman_codes {
use core::cmp;
use dary_heap::BinaryHeap;
pub fn calc_optimal_max_bitwidth(frequencies: &[usize]) -> u8 {
let mut heap = BinaryHeap::new();
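// `BinaryHeap` is a max-heap, so weights are stored negated below to make
// pops yield the lightest nodes first, as Huffman construction requires.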
for &freq in frequencies.iter().filter(|&&f| f > 0) {
let weight = -(freq as isize);
heap.push((weight, 0_u8));
}
while heap.len() > 1 {
let (weight1, width1) = heap.pop().unwrap();
let (weight2, width2) = heap.pop().unwrap();
heap.push((weight1 + weight2, 1 + cmp::max(width1, width2)));
}
let max_bitwidth = heap.pop().map_or(0, |x| x.1);
cmp::max(1, max_bitwidth)
}
}
mod length_limited_huffman_codes {
use alloc::{vec, vec::Vec};
use core::mem;
#[derive(Debug, Clone)]
struct Node {
symbols: Vec<u16>,
weight: usize,
}
impl Node {
pub fn empty() -> Self {
Node {
symbols: vec![],
weight: 0,
}
}
pub fn single(symbol: u16, weight: usize) -> Self {
Node {
symbols: vec![symbol],
weight,
}
}
pub fn merge(&mut self, other: Self) {
self.weight += other.weight;
self.symbols.extend(other.symbols);
}
}
/// Reference: [A Fast Algorithm for Optimal Length-Limited Huffman Codes][LenLimHuff.pdf]
///
/// [LenLimHuff.pdf]: https://www.ics.uci.edu/~dan/pubs/LenLimHuff.pdf
pub fn calc(max_bitwidth: u8, frequencies: &[usize]) -> Vec<u8> {
// NOTE: unoptimized implementation
let mut source = frequencies
.iter()
.enumerate()
.filter(|&(_, &f)| f > 0)
.map(|(symbol, &weight)| Node::single(symbol as u16, weight))
.collect::<Vec<_>>();
source.sort_by_key(|o| o.weight);
let weighted =
(0..max_bitwidth - 1).fold(source.clone(), |w, _| merge(package(w), source.clone()));
let mut code_bitwidthes = vec![0; frequencies.len()];
for symbol in package(weighted)
.into_iter()
.flat_map(|n| n.symbols.into_iter())
{
code_bitwidthes[symbol as usize] += 1;
}
code_bitwidthes
}
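// Worked example, added for clarity: calc(2, &[10, 1, 1]). One
// package/merge round yields the nodes {1}, {2}, {1,2}, {0} with weights
// 1, 1, 2, 10; the final package(...) emits symbols 1, 2, 1, 2, 0, and
// each occurrence bumps that symbol's width, giving [1, 2, 2]: the heavy
// symbol 0 gets a 1-bit code, symbols 1 and 2 get 2-bit codes.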
fn merge(x: Vec<Node>, y: Vec<Node>) -> Vec<Node> {
let mut z = Vec::with_capacity(x.len() + y.len());
let mut x = x.into_iter().peekable();
let mut y = y.into_iter().peekable();
loop {
let x_weight = x.peek().map(|s| s.weight);
let y_weight = y.peek().map(|s| s.weight);
if x_weight.is_none() {
z.extend(y);
break;
} else if y_weight.is_none() | else if x_weight < y_weight {
z.push(x.next().unwrap());
} else {
z.push(y.next().unwrap());
}
}
z
}
fn package(mut nodes: Vec<Node>) -> Vec<Node> {
if nodes.len() >= 2 {
let new_len = nodes.len() / 2;
for i in 0..new_len {
nodes[i] = mem::replace(&mut nodes[i * 2], Node::empty());
let other = mem::replace(&mut nodes[i * 2 + 1], Node::empty());
nodes[i].merge(other);
}
nodes.truncate(new_len);
}
nodes
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
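// Added sketch: a small sanity check of the encoder construction path.
// A more frequent symbol should never get a longer code than a rarer
// one, and every symbol with nonzero frequency should receive a code.
#[test]
fn frequent_symbols_get_short_codes() {
let encoder = super::EncoderBuilder::from_frequencies(&[10, 1, 1], 15).unwrap();
let frequent = encoder.lookup(0);
let rare = encoder.lookup(1);
assert!(frequent.width >= 1);
assert!(rare.width >= frequent.width);
assert_eq!(encoder.used_max_symbol(), Some(2));
}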
}
| {
z.extend(x);
break;
} | conditional_block |
huffman.rs | //! Length-limited Huffman Codes.
use crate::bit;
use alloc::{vec, vec::Vec};
use core::cmp;
use core2::io;
const MAX_BITWIDTH: u8 = 15;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Code {
pub width: u8,
pub bits: u16,
}
impl Code {
pub fn new(width: u8, bits: u16) -> Self {
debug_assert!(width <= MAX_BITWIDTH);
Code { width, bits }
}
fn inverse_endian(&self) -> Self {
let mut f = self.bits;
let mut t = 0;
for _ in 0..self.width {
t <<= 1;
t |= f & 1;
f >>= 1;
}
Code::new(self.width, t)
}
}
pub trait Builder: Sized {
type Instance;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()>;
fn finish(self) -> Self::Instance;
fn restore_canonical_huffman_codes(mut self, bitwidthes: &[u8]) -> io::Result<Self::Instance> {
debug_assert!(!bitwidthes.is_empty());
let mut symbols = bitwidthes
.iter()
.enumerate()
.filter(|&(_, &code_bitwidth)| code_bitwidth > 0)
.map(|(symbol, &code_bitwidth)| (symbol as u16, code_bitwidth))
.collect::<Vec<_>>();
symbols.sort_by_key(|x| x.1);
let mut code = 0;
let mut prev_width = 0;
for (symbol, bitwidth) in symbols {
code <<= bitwidth - prev_width;
self.set_mapping(symbol, Code::new(bitwidth, code))?;
code += 1;
prev_width = bitwidth;
}
Ok(self.finish())
}
}
pub struct | {
table: Vec<u16>,
eob_symbol: Option<u16>,
safely_peek_bitwidth: Option<u8>,
max_bitwidth: u8,
}
impl DecoderBuilder {
pub fn new(
max_bitwidth: u8,
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> Self {
debug_assert!(max_bitwidth <= MAX_BITWIDTH);
DecoderBuilder {
table: vec![u16::from(MAX_BITWIDTH) + 1; 1 << max_bitwidth],
eob_symbol,
safely_peek_bitwidth,
max_bitwidth,
}
}
pub fn from_bitwidthes(
bitwidthes: &[u8],
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> io::Result<Decoder> {
let builder = Self::new(
bitwidthes.iter().cloned().max().unwrap_or(0),
safely_peek_bitwidth,
eob_symbol,
);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn safely_peek_bitwidth(&self) -> Option<u8> {
self.safely_peek_bitwidth
}
}
impl Builder for DecoderBuilder {
type Instance = Decoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert!(code.width <= self.max_bitwidth);
if Some(symbol) == self.eob_symbol {
self.safely_peek_bitwidth = Some(code.width);
}
// Pack the symbol and its code bitwidth into one table entry.
let value = (symbol << 5) | u16::from(code.width);
// Sets the mapping to all possible indices
let code_be = code.inverse_endian();
for padding in 0..(1 << (self.max_bitwidth - code.width)) {
let i = ((padding << code.width) | code_be.bits) as usize;
if self.table[i] != u16::from(MAX_BITWIDTH) + 1 {
#[cfg(feature = "std")]
let message = format!(
"Bit region conflict: i={}, old_value={}, new_value={}, symbol={}, code={:?}",
i, self.table[i], value, symbol, code
);
#[cfg(not(feature = "std"))]
let message = "Bit region conflict";
return Err(io::Error::new(io::ErrorKind::InvalidData, message));
}
self.table[i] = value;
}
Ok(())
}
fn finish(self) -> Self::Instance {
Decoder {
table: self.table,
safely_peek_bitwidth: cmp::min(
self.max_bitwidth,
self.safely_peek_bitwidth.unwrap_or(1),
),
max_bitwidth: self.max_bitwidth,
}
}
}
#[derive(Debug)]
pub struct Decoder {
table: Vec<u16>,
safely_peek_bitwidth: u8,
max_bitwidth: u8,
}
impl Decoder {
pub fn safely_peek_bitwidth(&self) -> u8 {
self.safely_peek_bitwidth
}
#[inline(always)]
pub fn decode<R>(&self, reader: &mut bit::BitReader<R>) -> io::Result<u16>
where
R: io::Read,
{
let v = self.decode_unchecked(reader);
reader.check_last_error()?;
Ok(v)
}
#[inline(always)]
pub fn decode_unchecked<R>(&self, reader: &mut bit::BitReader<R>) -> u16
where
R: io::Read,
{
let mut value;
let mut bitwidth;
let mut peek_bitwidth = self.safely_peek_bitwidth;
loop {
let code = reader.peek_bits_unchecked(peek_bitwidth);
value = self.table[code as usize];
bitwidth = (value & 0b1_1111) as u8;
if bitwidth <= peek_bitwidth {
break;
}
if bitwidth > self.max_bitwidth {
reader.set_last_error(invalid_data_error!("Invalid huffman coded stream"));
break;
}
peek_bitwidth = bitwidth;
}
reader.skip_bits(bitwidth);
value >> 5
}
}
#[derive(Debug)]
pub struct EncoderBuilder {
table: Vec<Code>,
}
impl EncoderBuilder {
pub fn new(symbol_count: usize) -> Self {
EncoderBuilder {
table: vec![Code::new(0, 0); symbol_count],
}
}
pub fn from_bitwidthes(bitwidthes: &[u8]) -> io::Result<Encoder> {
let symbol_count = bitwidthes
.iter()
.enumerate()
.filter(|e| *e.1 > 0)
.last()
.map_or(0, |e| e.0)
+ 1;
let builder = Self::new(symbol_count);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn from_frequencies(symbol_frequencies: &[usize], max_bitwidth: u8) -> io::Result<Encoder> {
let max_bitwidth = cmp::min(
max_bitwidth,
ordinary_huffman_codes::calc_optimal_max_bitwidth(symbol_frequencies),
);
let code_bitwidthes = length_limited_huffman_codes::calc(max_bitwidth, symbol_frequencies);
Self::from_bitwidthes(&code_bitwidthes)
}
}
impl Builder for EncoderBuilder {
type Instance = Encoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert_eq!(self.table[symbol as usize], Code::new(0, 0));
self.table[symbol as usize] = code.inverse_endian();
Ok(())
}
fn finish(self) -> Self::Instance {
Encoder { table: self.table }
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
table: Vec<Code>,
}
impl Encoder {
#[inline(always)]
pub fn encode<W>(&self, writer: &mut bit::BitWriter<W>, symbol: u16) -> io::Result<()>
where
W: io::Write,
{
let code = self.lookup(symbol);
debug_assert_ne!(code, Code::new(0, 0));
writer.write_bits(code.width, code.bits)
}
#[inline(always)]
pub fn lookup(&self, symbol: u16) -> Code {
debug_assert!(
symbol < self.table.len() as u16,
"symbol:{}, table:{}",
symbol,
self.table.len()
);
self.table[symbol as usize].clone()
}
pub fn used_max_symbol(&self) -> Option<u16> {
self.table
.iter()
.rev()
.position(|x| x.width > 0)
.map(|trailing_zeros| (self.table.len() - 1 - trailing_zeros) as u16)
}
}
#[allow(dead_code)]
mod ordinary_huffman_codes {
use core::cmp;
use dary_heap::BinaryHeap;
pub fn calc_optimal_max_bitwidth(frequencies: &[usize]) -> u8 {
let mut heap = BinaryHeap::new();
for &freq in frequencies.iter().filter(|&&f| f > 0) {
let weight = -(freq as isize);
heap.push((weight, 0_u8));
}
while heap.len() > 1 {
let (weight1, width1) = heap.pop().unwrap();
let (weight2, width2) = heap.pop().unwrap();
heap.push((weight1 + weight2, 1 + cmp::max(width1, width2)));
}
let max_bitwidth = heap.pop().map_or(0, |x| x.1);
cmp::max(1, max_bitwidth)
}
}
mod length_limited_huffman_codes {
use alloc::{vec, vec::Vec};
use core::mem;
#[derive(Debug, Clone)]
struct Node {
symbols: Vec<u16>,
weight: usize,
}
impl Node {
pub fn empty() -> Self {
Node {
symbols: vec![],
weight: 0,
}
}
pub fn single(symbol: u16, weight: usize) -> Self {
Node {
symbols: vec![symbol],
weight,
}
}
pub fn merge(&mut self, other: Self) {
self.weight += other.weight;
self.symbols.extend(other.symbols);
}
}
/// Reference: [A Fast Algorithm for Optimal Length-Limited Huffman Codes][LenLimHuff.pdf]
///
/// [LenLimHuff.pdf]: https://www.ics.uci.edu/~dan/pubs/LenLimHuff.pdf
pub fn calc(max_bitwidth: u8, frequencies: &[usize]) -> Vec<u8> {
// NOTE: unoptimized implementation
let mut source = frequencies
.iter()
.enumerate()
.filter(|&(_, &f)| f > 0)
.map(|(symbol, &weight)| Node::single(symbol as u16, weight))
.collect::<Vec<_>>();
source.sort_by_key(|o| o.weight);
let weighted =
(0..max_bitwidth - 1).fold(source.clone(), |w, _| merge(package(w), source.clone()));
let mut code_bitwidthes = vec![0; frequencies.len()];
for symbol in package(weighted)
.into_iter()
.flat_map(|n| n.symbols.into_iter())
{
code_bitwidthes[symbol as usize] += 1;
}
code_bitwidthes
}
fn merge(x: Vec<Node>, y: Vec<Node>) -> Vec<Node> {
let mut z = Vec::with_capacity(x.len() + y.len());
let mut x = x.into_iter().peekable();
let mut y = y.into_iter().peekable();
loop {
let x_weight = x.peek().map(|s| s.weight);
let y_weight = y.peek().map(|s| s.weight);
if x_weight.is_none() {
z.extend(y);
break;
} else if y_weight.is_none() {
z.extend(x);
break;
} else if x_weight < y_weight {
z.push(x.next().unwrap());
} else {
z.push(y.next().unwrap());
}
}
z
}
fn package(mut nodes: Vec<Node>) -> Vec<Node> {
if nodes.len() >= 2 {
let new_len = nodes.len() / 2;
for i in 0..new_len {
nodes[i] = mem::replace(&mut nodes[i * 2], Node::empty());
let other = mem::replace(&mut nodes[i * 2 + 1], Node::empty());
nodes[i].merge(other);
}
nodes.truncate(new_len);
}
nodes
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
}
| DecoderBuilder | identifier_name |
huffman.rs | //! Length-limited Huffman Codes.
use crate::bit;
use alloc::{vec, vec::Vec};
use core::cmp;
use core2::io;
const MAX_BITWIDTH: u8 = 15;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Code {
pub width: u8,
pub bits: u16,
}
impl Code {
pub fn new(width: u8, bits: u16) -> Self {
debug_assert!(width <= MAX_BITWIDTH);
Code { width, bits }
}
fn inverse_endian(&self) -> Self {
let mut f = self.bits;
let mut t = 0;
for _ in 0..self.width {
t <<= 1;
t |= f & 1;
f >>= 1;
}
Code::new(self.width, t)
}
}
pub trait Builder: Sized {
type Instance;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()>;
fn finish(self) -> Self::Instance;
fn restore_canonical_huffman_codes(mut self, bitwidthes: &[u8]) -> io::Result<Self::Instance> {
debug_assert!(!bitwidthes.is_empty());
let mut symbols = bitwidthes
.iter()
.enumerate()
.filter(|&(_, &code_bitwidth)| code_bitwidth > 0)
.map(|(symbol, &code_bitwidth)| (symbol as u16, code_bitwidth))
.collect::<Vec<_>>();
symbols.sort_by_key(|x| x.1);
let mut code = 0;
let mut prev_width = 0;
for (symbol, bitwidth) in symbols {
code <<= bitwidth - prev_width;
self.set_mapping(symbol, Code::new(bitwidth, code))?;
code += 1;
prev_width = bitwidth;
}
Ok(self.finish())
}
}
pub struct DecoderBuilder {
table: Vec<u16>,
eob_symbol: Option<u16>,
safely_peek_bitwidth: Option<u8>,
max_bitwidth: u8,
}
impl DecoderBuilder {
pub fn new(
max_bitwidth: u8,
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> Self {
debug_assert!(max_bitwidth <= MAX_BITWIDTH);
DecoderBuilder {
table: vec![u16::from(MAX_BITWIDTH) + 1; 1 << max_bitwidth],
eob_symbol,
safely_peek_bitwidth,
max_bitwidth,
}
}
pub fn from_bitwidthes(
bitwidthes: &[u8],
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> io::Result<Decoder> {
let builder = Self::new(
bitwidthes.iter().cloned().max().unwrap_or(0),
safely_peek_bitwidth,
eob_symbol,
);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn safely_peek_bitwidth(&self) -> Option<u8> {
self.safely_peek_bitwidth
}
}
impl Builder for DecoderBuilder {
type Instance = Decoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert!(code.width <= self.max_bitwidth);
if Some(symbol) == self.eob_symbol {
self.safely_peek_bitwidth = Some(code.width);
}
// Pack the symbol and its code bitwidth into one table entry.
let value = (symbol << 5) | u16::from(code.width);
// Sets the mapping to all possible indices
let code_be = code.inverse_endian();
for padding in 0..(1 << (self.max_bitwidth - code.width)) {
let i = ((padding << code.width) | code_be.bits) as usize;
if self.table[i] != u16::from(MAX_BITWIDTH) + 1 {
#[cfg(feature = "std")]
let message = format!(
"Bit region conflict: i={}, old_value={}, new_value={}, symbol={}, code={:?}",
i, self.table[i], value, symbol, code
);
#[cfg(not(feature = "std"))]
let message = "Bit region conflict";
return Err(io::Error::new(io::ErrorKind::InvalidData, message));
}
self.table[i] = value;
}
Ok(())
}
fn finish(self) -> Self::Instance {
Decoder {
table: self.table,
safely_peek_bitwidth: cmp::min(
self.max_bitwidth,
self.safely_peek_bitwidth.unwrap_or(1),
),
max_bitwidth: self.max_bitwidth,
}
}
}
#[derive(Debug)]
pub struct Decoder {
table: Vec<u16>,
safely_peek_bitwidth: u8,
max_bitwidth: u8,
}
impl Decoder {
pub fn safely_peek_bitwidth(&self) -> u8 {
self.safely_peek_bitwidth
}
#[inline(always)]
pub fn decode<R>(&self, reader: &mut bit::BitReader<R>) -> io::Result<u16>
where
R: io::Read,
{
let v = self.decode_unchecked(reader);
reader.check_last_error()?;
Ok(v)
}
#[inline(always)]
pub fn decode_unchecked<R>(&self, reader: &mut bit::BitReader<R>) -> u16
where
R: io::Read,
{
let mut value;
let mut bitwidth;
let mut peek_bitwidth = self.safely_peek_bitwidth;
loop {
let code = reader.peek_bits_unchecked(peek_bitwidth);
value = self.table[code as usize];
bitwidth = (value & 0b1_1111) as u8;
if bitwidth <= peek_bitwidth {
break;
}
if bitwidth > self.max_bitwidth {
reader.set_last_error(invalid_data_error!("Invalid huffman coded stream"));
break;
}
peek_bitwidth = bitwidth;
}
reader.skip_bits(bitwidth);
value >> 5
}
}
#[derive(Debug)] | EncoderBuilder {
table: vec![Code::new(0, 0); symbol_count],
}
}
pub fn from_bitwidthes(bitwidthes: &[u8]) -> io::Result<Encoder> {
let symbol_count = bitwidthes
.iter()
.enumerate()
.filter(|e| *e.1 > 0)
.last()
.map_or(0, |e| e.0)
+ 1;
let builder = Self::new(symbol_count);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn from_frequencies(symbol_frequencies: &[usize], max_bitwidth: u8) -> io::Result<Encoder> {
let max_bitwidth = cmp::min(
max_bitwidth,
ordinary_huffman_codes::calc_optimal_max_bitwidth(symbol_frequencies),
);
let code_bitwidthes = length_limited_huffman_codes::calc(max_bitwidth, symbol_frequencies);
Self::from_bitwidthes(&code_bitwidthes)
}
}
impl Builder for EncoderBuilder {
type Instance = Encoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert_eq!(self.table[symbol as usize], Code::new(0, 0));
self.table[symbol as usize] = code.inverse_endian();
Ok(())
}
fn finish(self) -> Self::Instance {
Encoder { table: self.table }
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
table: Vec<Code>,
}
impl Encoder {
#[inline(always)]
pub fn encode<W>(&self, writer: &mut bit::BitWriter<W>, symbol: u16) -> io::Result<()>
where
W: io::Write,
{
let code = self.lookup(symbol);
debug_assert_ne!(code, Code::new(0, 0));
writer.write_bits(code.width, code.bits)
}
#[inline(always)]
pub fn lookup(&self, symbol: u16) -> Code {
debug_assert!(
symbol < self.table.len() as u16,
"symbol:{}, table:{}",
symbol,
self.table.len()
);
self.table[symbol as usize].clone()
}
pub fn used_max_symbol(&self) -> Option<u16> {
self.table
.iter()
.rev()
.position(|x| x.width > 0)
.map(|trailing_zeros| (self.table.len() - 1 - trailing_zeros) as u16)
}
}
#[allow(dead_code)]
mod ordinary_huffman_codes {
use core::cmp;
use dary_heap::BinaryHeap;
pub fn calc_optimal_max_bitwidth(frequencies: &[usize]) -> u8 {
let mut heap = BinaryHeap::new();
for &freq in frequencies.iter().filter(|&&f| f > 0) {
let weight = -(freq as isize);
heap.push((weight, 0_u8));
}
while heap.len() > 1 {
let (weight1, width1) = heap.pop().unwrap();
let (weight2, width2) = heap.pop().unwrap();
heap.push((weight1 + weight2, 1 + cmp::max(width1, width2)));
}
let max_bitwidth = heap.pop().map_or(0, |x| x.1);
cmp::max(1, max_bitwidth)
}
}
mod length_limited_huffman_codes {
use alloc::{vec, vec::Vec};
use core::mem;
#[derive(Debug, Clone)]
struct Node {
symbols: Vec<u16>,
weight: usize,
}
impl Node {
pub fn empty() -> Self {
Node {
symbols: vec![],
weight: 0,
}
}
pub fn single(symbol: u16, weight: usize) -> Self {
Node {
symbols: vec![symbol],
weight,
}
}
pub fn merge(&mut self, other: Self) {
self.weight += other.weight;
self.symbols.extend(other.symbols);
}
}
/// Reference: [A Fast Algorithm for Optimal Length-Limited Huffman Codes][LenLimHuff.pdf]
///
/// [LenLimHuff.pdf]: https://www.ics.uci.edu/~dan/pubs/LenLimHuff.pdf
pub fn calc(max_bitwidth: u8, frequencies: &[usize]) -> Vec<u8> {
// NOTE: unoptimized implementation
let mut source = frequencies
.iter()
.enumerate()
.filter(|&(_, &f)| f > 0)
.map(|(symbol, &weight)| Node::single(symbol as u16, weight))
.collect::<Vec<_>>();
source.sort_by_key(|o| o.weight);
let weighted =
(0..max_bitwidth - 1).fold(source.clone(), |w, _| merge(package(w), source.clone()));
let mut code_bitwidthes = vec![0; frequencies.len()];
for symbol in package(weighted)
.into_iter()
.flat_map(|n| n.symbols.into_iter())
{
code_bitwidthes[symbol as usize] += 1;
}
code_bitwidthes
}
fn merge(x: Vec<Node>, y: Vec<Node>) -> Vec<Node> {
let mut z = Vec::with_capacity(x.len() + y.len());
let mut x = x.into_iter().peekable();
let mut y = y.into_iter().peekable();
loop {
let x_weight = x.peek().map(|s| s.weight);
let y_weight = y.peek().map(|s| s.weight);
if x_weight.is_none() {
z.extend(y);
break;
} else if y_weight.is_none() {
z.extend(x);
break;
} else if x_weight < y_weight {
z.push(x.next().unwrap());
} else {
z.push(y.next().unwrap());
}
}
z
}
fn package(mut nodes: Vec<Node>) -> Vec<Node> {
if nodes.len() >= 2 {
let new_len = nodes.len() / 2;
for i in 0..new_len {
nodes[i] = mem::replace(&mut nodes[i * 2], Node::empty());
let other = mem::replace(&mut nodes[i * 2 + 1], Node::empty());
nodes[i].merge(other);
}
nodes.truncate(new_len);
}
nodes
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
} | pub struct EncoderBuilder {
table: Vec<Code>,
}
impl EncoderBuilder {
pub fn new(symbol_count: usize) -> Self { | random_line_split |
huffman.rs | //! Length-limited Huffman Codes.
use crate::bit;
use alloc::{vec, vec::Vec};
use core::cmp;
use core2::io;
const MAX_BITWIDTH: u8 = 15;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Code {
pub width: u8,
pub bits: u16,
}
impl Code {
pub fn new(width: u8, bits: u16) -> Self {
debug_assert!(width <= MAX_BITWIDTH);
Code { width, bits }
}
fn inverse_endian(&self) -> Self {
let mut f = self.bits;
let mut t = 0;
for _ in 0..self.width {
t <<= 1;
t |= f & 1;
f >>= 1;
}
Code::new(self.width, t)
}
}
pub trait Builder: Sized {
type Instance;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()>;
fn finish(self) -> Self::Instance;
fn restore_canonical_huffman_codes(mut self, bitwidthes: &[u8]) -> io::Result<Self::Instance> {
debug_assert!(!bitwidthes.is_empty());
let mut symbols = bitwidthes
.iter()
.enumerate()
.filter(|&(_, &code_bitwidth)| code_bitwidth > 0)
.map(|(symbol, &code_bitwidth)| (symbol as u16, code_bitwidth))
.collect::<Vec<_>>();
symbols.sort_by_key(|x| x.1);
let mut code = 0;
let mut prev_width = 0;
for (symbol, bitwidth) in symbols {
code <<= bitwidth - prev_width;
self.set_mapping(symbol, Code::new(bitwidth, code))?;
code += 1;
prev_width = bitwidth;
}
Ok(self.finish())
}
}
pub struct DecoderBuilder {
table: Vec<u16>,
eob_symbol: Option<u16>,
safely_peek_bitwidth: Option<u8>,
max_bitwidth: u8,
}
impl DecoderBuilder {
pub fn new(
max_bitwidth: u8,
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> Self {
debug_assert!(max_bitwidth <= MAX_BITWIDTH);
DecoderBuilder {
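            // Fill the table with a sentinel (one past the largest legal
            // bitwidth) so that set_mapping can detect bit-region conflicts.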
table: vec![u16::from(MAX_BITWIDTH) + 1; 1 << max_bitwidth],
eob_symbol,
safely_peek_bitwidth,
max_bitwidth,
}
}
pub fn from_bitwidthes(
bitwidthes: &[u8],
safely_peek_bitwidth: Option<u8>,
eob_symbol: Option<u16>,
) -> io::Result<Decoder> {
let builder = Self::new(
bitwidthes.iter().cloned().max().unwrap_or(0),
safely_peek_bitwidth,
eob_symbol,
);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn safely_peek_bitwidth(&self) -> Option<u8> {
self.safely_peek_bitwidth
}
}
impl Builder for DecoderBuilder {
type Instance = Decoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> {
debug_assert!(code.width <= self.max_bitwidth);
if Some(symbol) == self.eob_symbol {
self.safely_peek_bitwidth = Some(code.width);
}
        // Pack the symbol into the high bits and its code bitwidth into the low 5 bits
let value = (symbol << 5) | u16::from(code.width);
        // Replicate the mapping across every table index whose low bits match this code
let code_be = code.inverse_endian();
for padding in 0..(1 << (self.max_bitwidth - code.width)) {
let i = ((padding << code.width) | code_be.bits) as usize;
            if self.table[i] != u16::from(MAX_BITWIDTH) + 1 {
#[cfg(feature = "std")]
let message = format!(
"Bit region conflict: i={}, old_value={}, new_value={}, symbol={}, code={:?}",
i, self.table[i], value, symbol, code
);
#[cfg(not(feature = "std"))]
let message = "Bit region conflict";
return Err(io::Error::new(io::ErrorKind::InvalidData, message));
}
self.table[i] = value;
}
Ok(())
}
fn finish(self) -> Self::Instance {
Decoder {
table: self.table,
safely_peek_bitwidth: cmp::min(
self.max_bitwidth,
self.safely_peek_bitwidth.unwrap_or(1),
),
max_bitwidth: self.max_bitwidth,
}
}
}
#[derive(Debug)]
pub struct Decoder {
table: Vec<u16>,
safely_peek_bitwidth: u8,
max_bitwidth: u8,
}
impl Decoder {
pub fn safely_peek_bitwidth(&self) -> u8 {
self.safely_peek_bitwidth
}
#[inline(always)]
pub fn decode<R>(&self, reader: &mut bit::BitReader<R>) -> io::Result<u16>
where
R: io::Read,
{
let v = self.decode_unchecked(reader);
reader.check_last_error()?;
Ok(v)
}
#[inline(always)]
pub fn decode_unchecked<R>(&self, reader: &mut bit::BitReader<R>) -> u16
where
R: io::Read,
{
let mut value;
let mut bitwidth;
let mut peek_bitwidth = self.safely_peek_bitwidth;
loop {
let code = reader.peek_bits_unchecked(peek_bitwidth);
value = self.table[code as usize];
bitwidth = (value & 0b1_1111) as u8;
if bitwidth <= peek_bitwidth {
break;
}
if bitwidth > self.max_bitwidth {
reader.set_last_error(invalid_data_error!("Invalid huffman coded stream"));
break;
}
peek_bitwidth = bitwidth;
}
reader.skip_bits(bitwidth);
value >> 5
}
}
#[derive(Debug)]
pub struct EncoderBuilder {
table: Vec<Code>,
}
impl EncoderBuilder {
pub fn new(symbol_count: usize) -> Self {
EncoderBuilder {
table: vec![Code::new(0, 0); symbol_count],
}
}
pub fn from_bitwidthes(bitwidthes: &[u8]) -> io::Result<Encoder> {
let symbol_count = bitwidthes
.iter()
.enumerate()
.filter(|e| *e.1 > 0)
.last()
.map_or(0, |e| e.0)
+ 1;
let builder = Self::new(symbol_count);
builder.restore_canonical_huffman_codes(bitwidthes)
}
pub fn from_frequencies(symbol_frequencies: &[usize], max_bitwidth: u8) -> io::Result<Encoder> {
let max_bitwidth = cmp::min(
max_bitwidth,
ordinary_huffman_codes::calc_optimal_max_bitwidth(symbol_frequencies),
);
let code_bitwidthes = length_limited_huffman_codes::calc(max_bitwidth, symbol_frequencies);
Self::from_bitwidthes(&code_bitwidthes)
}
}
impl Builder for EncoderBuilder {
type Instance = Encoder;
fn set_mapping(&mut self, symbol: u16, code: Code) -> io::Result<()> |
fn finish(self) -> Self::Instance {
Encoder { table: self.table }
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
table: Vec<Code>,
}
impl Encoder {
#[inline(always)]
pub fn encode<W>(&self, writer: &mut bit::BitWriter<W>, symbol: u16) -> io::Result<()>
where
W: io::Write,
{
let code = self.lookup(symbol);
debug_assert_ne!(code, Code::new(0, 0));
writer.write_bits(code.width, code.bits)
}
#[inline(always)]
pub fn lookup(&self, symbol: u16) -> Code {
debug_assert!(
symbol < self.table.len() as u16,
"symbol:{}, table:{}",
symbol,
self.table.len()
);
self.table[symbol as usize].clone()
}
pub fn used_max_symbol(&self) -> Option<u16> {
self.table
.iter()
.rev()
.position(|x| x.width > 0)
.map(|trailing_zeros| (self.table.len() - 1 - trailing_zeros) as u16)
}
}
#[allow(dead_code)]
mod ordinary_huffman_codes {
use core::cmp;
use dary_heap::BinaryHeap;
pub fn calc_optimal_max_bitwidth(frequencies: &[usize]) -> u8 {
let mut heap = BinaryHeap::new();
for &freq in frequencies.iter().filter(|&&f| f > 0) {
let weight = -(freq as isize);
heap.push((weight, 0_u8));
}
while heap.len() > 1 {
let (weight1, width1) = heap.pop().unwrap();
let (weight2, width2) = heap.pop().unwrap();
heap.push((weight1 + weight2, 1 + cmp::max(width1, width2)));
}
let max_bitwidth = heap.pop().map_or(0, |x| x.1);
cmp::max(1, max_bitwidth)
}
}
mod length_limited_huffman_codes {
use alloc::{vec, vec::Vec};
use core::mem;
#[derive(Debug, Clone)]
struct Node {
symbols: Vec<u16>,
weight: usize,
}
impl Node {
pub fn empty() -> Self {
Node {
symbols: vec![],
weight: 0,
}
}
pub fn single(symbol: u16, weight: usize) -> Self {
Node {
symbols: vec![symbol],
weight,
}
}
pub fn merge(&mut self, other: Self) {
self.weight += other.weight;
self.symbols.extend(other.symbols);
}
}
/// Reference: [A Fast Algorithm for Optimal Length-Limited Huffman Codes][LenLimHuff.pdf]
///
/// [LenLimHuff.pdf]: https://www.ics.uci.edu/~dan/pubs/LenLimHuff.pdf
pub fn calc(max_bitwidth: u8, frequencies: &[usize]) -> Vec<u8> {
// NOTE: unoptimized implementation
let mut source = frequencies
.iter()
.enumerate()
.filter(|&(_, &f)| f > 0)
.map(|(symbol, &weight)| Node::single(symbol as u16, weight))
.collect::<Vec<_>>();
source.sort_by_key(|o| o.weight);
let weighted =
(0..max_bitwidth - 1).fold(source.clone(), |w, _| merge(package(w), source.clone()));
let mut code_bitwidthes = vec![0; frequencies.len()];
for symbol in package(weighted)
.into_iter()
.flat_map(|n| n.symbols.into_iter())
{
code_bitwidthes[symbol as usize] += 1;
}
code_bitwidthes
}
fn merge(x: Vec<Node>, y: Vec<Node>) -> Vec<Node> {
let mut z = Vec::with_capacity(x.len() + y.len());
let mut x = x.into_iter().peekable();
let mut y = y.into_iter().peekable();
loop {
let x_weight = x.peek().map(|s| s.weight);
let y_weight = y.peek().map(|s| s.weight);
if x_weight.is_none() {
z.extend(y);
break;
} else if y_weight.is_none() {
z.extend(x);
break;
} else if x_weight < y_weight {
z.push(x.next().unwrap());
} else {
z.push(y.next().unwrap());
}
}
z
}
fn package(mut nodes: Vec<Node>) -> Vec<Node> {
if nodes.len() >= 2 {
let new_len = nodes.len() / 2;
for i in 0..new_len {
nodes[i] = mem::replace(&mut nodes[i * 2], Node::empty());
let other = mem::replace(&mut nodes[i * 2 + 1], Node::empty());
nodes[i].merge(other);
}
nodes.truncate(new_len);
}
nodes
}
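    // A minimal sketch of the package-merge behavior using only this module:
    // with a 2-bit limit and weights [1, 1, 2], the two light symbols get
    // 2-bit codes and the heavy symbol gets the single 1-bit code.
    #[cfg(test)]
    mod tests {
        #[test]
        fn respects_length_limit() {
            let widths = super::calc(2, &[1, 1, 2]);
            assert_eq!(widths, [2, 2, 1]);
            assert!(widths.iter().all(|&w| w <= 2));
        }
    }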
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {}
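    // A small, self-contained sketch exercising the canonical code
    // restoration and the bit-reversal helper defined in this module.
    #[test]
    fn canonical_codes_and_bit_reversal() {
        use super::{Code, EncoderBuilder};
        // `inverse_endian` reverses the low `width` bits: 0b011 -> 0b110.
        assert_eq!(Code::new(3, 0b011).inverse_endian(), Code::new(3, 0b110));
        // Canonical codes for bitwidths [2, 1, 3, 3]: symbol 1 gets code `0`
        // and symbol 0 gets code `10`, stored bit-reversed as `01`.
        let encoder = EncoderBuilder::from_bitwidthes(&[2, 1, 3, 3]).unwrap();
        assert_eq!(encoder.lookup(1), Code::new(1, 0));
        assert_eq!(encoder.lookup(0), Code::new(2, 0b01));
    }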
}
| {
debug_assert_eq!(self.table[symbol as usize], Code::new(0, 0));
self.table[symbol as usize] = code.inverse_endian();
Ok(())
} | identifier_body |
post.rs | //! `post` table parsing and writing.
use std::str;
use crate::binary::read::{ReadArray, ReadBinary, ReadCtxt};
use crate::binary::write::{WriteBinary, WriteContext};
use crate::binary::{I16Be, I32Be, U16Be, U32Be, U8};
use crate::error::{ParseError, WriteError};
pub struct PostTable<'a> {
pub header: Header,
pub opt_sub_table: Option<SubTable<'a>>,
}
pub struct Header {
pub version: i32,
pub italic_angle: i32,
pub underline_position: i16,
pub underline_thickness: i16,
pub is_fixed_pitch: u32,
pub min_mem_type_42: u32,
pub max_mem_type_42: u32,
pub min_mem_type_1: u32,
pub max_mem_type_1: u32,
}
pub struct SubTable<'a> {
pub num_glyphs: u16,
pub glyph_name_index: ReadArray<'a, U16Be>,
pub names: Vec<PascalString<'a>>,
}
#[derive(Clone)]
pub struct PascalString<'a> {
pub bytes: &'a [u8],
}
impl ReadBinary for Header {
type HostType<'b> = Self;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self, ParseError> {
let version = ctxt.read_i32be()?;
let italic_angle = ctxt.read_i32be()?;
let underline_position = ctxt.read_i16be()?;
let underline_thickness = ctxt.read_i16be()?;
let is_fixed_pitch = ctxt.read_u32be()?;
let min_mem_type_42 = ctxt.read_u32be()?;
let max_mem_type_42 = ctxt.read_u32be()?;
let min_mem_type_1 = ctxt.read_u32be()?;
let max_mem_type_1 = ctxt.read_u32be()?;
Ok(Header {
version,
italic_angle,
underline_position,
underline_thickness,
is_fixed_pitch,
min_mem_type_42,
max_mem_type_42,
min_mem_type_1,
max_mem_type_1,
})
}
}
impl WriteBinary<&Self> for Header {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &Header) -> Result<(), WriteError> {
I32Be::write(ctxt, table.version)?;
I32Be::write(ctxt, table.italic_angle)?;
I16Be::write(ctxt, table.underline_position)?;
I16Be::write(ctxt, table.underline_thickness)?;
U32Be::write(ctxt, table.is_fixed_pitch)?;
U32Be::write(ctxt, table.min_mem_type_42)?;
U32Be::write(ctxt, table.max_mem_type_42)?;
U32Be::write(ctxt, table.min_mem_type_1)?;
U32Be::write(ctxt, table.max_mem_type_1)?;
Ok(())
}
}
impl<'b> ReadBinary for PostTable<'b> {
type HostType<'a> = PostTable<'a>;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self::HostType<'a>, ParseError> {
let header = ctxt.read::<Header>()?;
let opt_sub_table = match header.version {
0x00020000 => {
// May include some Format 1 glyphs
let num_glyphs = ctxt.read_u16be()?;
let glyph_name_index = ctxt.read_array(usize::from(num_glyphs))?;
// Find the largest index used and use that to determine how many names to read
let names_to_read = glyph_name_index.iter().max().map_or(0, |max| {
(usize::from(max) + 1).saturating_sub(FORMAT_1_NAMES.len())
});
// Read the names
let mut names = Vec::with_capacity(names_to_read);
for _ in 0..names_to_read {
let length = ctxt.read_u8()?;
let bytes = ctxt.read_slice(usize::from(length))?;
names.push(PascalString { bytes });
}
Some(SubTable {
num_glyphs,
glyph_name_index,
names,
})
}
// TODO Handle post version 1.0, 2.5, 3.0
0x00010000 | 0x00025000 | 0x00030000 => None,
_ => return Err(ParseError::BadVersion),
};
Ok(PostTable {
header,
opt_sub_table,
})
}
}
impl<'a> WriteBinary<&Self> for PostTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &PostTable<'a>) -> Result<(), WriteError> {
Header::write(ctxt, &table.header)?;
if let Some(sub_table) = &table.opt_sub_table {
SubTable::write(ctxt, sub_table)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for SubTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &SubTable<'a>) -> Result<(), WriteError> {
U16Be::write(ctxt, table.num_glyphs)?;
<&ReadArray<'_, _>>::write(ctxt, &table.glyph_name_index)?;
for name in &table.names {
PascalString::write(ctxt, name)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for PascalString<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, string: &PascalString<'a>) -> Result<(), WriteError> {
if string.bytes.len() <= usize::from(std::u8::MAX) {
// cast is safe due to check above
U8::write(ctxt, string.bytes.len() as u8)?;
ctxt.write_bytes(string.bytes)?;
Ok(())
} else {
Err(WriteError::BadValue)
}
}
}
impl<'a> PostTable<'a> {
/// Retrieve the glyph name for the supplied `glyph_index`.
///
/// **Note:** Some fonts map more than one glyph to the same name so don't assume names are
/// unique.
pub fn glyph_name(&self, glyph_index: u16) -> Result<Option<&'a str>, ParseError> {
if let Some(sub_table) = &self.opt_sub_table {
if glyph_index >= sub_table.num_glyphs {
return Ok(None);
}
}
match &self.header.version {
0x00010000 if usize::from(glyph_index) < FORMAT_1_NAMES.len() => {
let name = FORMAT_1_NAMES[usize::from(glyph_index)];
Ok(Some(name))
}
0x00020000 => match &self.opt_sub_table {
Some(sub_table) => {
let name_index = sub_table
.glyph_name_index
.get_item(usize::from(glyph_index));
if usize::from(name_index) < FORMAT_1_NAMES.len() {
Ok(Some(FORMAT_1_NAMES[usize::from(name_index)]))
} else {
let index = usize::from(name_index) - FORMAT_1_NAMES.len();
let pascal_string = &sub_table.names[index];
match str::from_utf8(pascal_string.bytes) {
Ok(name) => Ok(Some(name)),
Err(_) => Err(ParseError::BadValue),
}
}
}
// If the table is version 2, the sub-table should exist
None => Err(ParseError::BadValue),
},
_ => Ok(None),
}
}
}
static FORMAT_1_NAMES: &[&str; 258] = &[
".notdef",
".null",
"nonmarkingreturn",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot", | "florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nonbreakingspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
"Lslash",
"lslash",
"Scaron",
"scaron",
"Zcaron",
"zcaron",
"brokenbar",
"Eth",
"eth",
"Yacute",
"yacute",
"Thorn",
"thorn",
"minus",
"multiply",
"onesuperior",
"twosuperior",
"threesuperior",
"onehalf",
"onequarter",
"threequarters",
"franc",
"Gbreve",
"gbreve",
"Idotaccent",
"Scedilla",
"scedilla",
"Cacute",
"cacute",
"Ccaron",
"ccaron",
"dcroat",
];
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::read::ReadScope;
use crate::binary::write::WriteBuffer;
#[test]
fn duplicate_glyph_names() {
// Test for post table that maps multiple glyphs to the same name index. Before a fix was
// implemented this table failed to parse.
let post_data = include_bytes!("../tests/fonts/opentype/post.bin");
let post = ReadScope::new(post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
match post.opt_sub_table {
Some(ref sub_table) => assert_eq!(sub_table.names.len(), 1872),
None => panic!("expected post table to have a sub-table"),
}
// These map to the same index (397)
assert_eq!(post.glyph_name(257).unwrap().unwrap(), "Ldot");
assert_eq!(post.glyph_name(1442).unwrap().unwrap(), "Ldot");
}
fn build_post_with_unused_names() -> Result<Vec<u8>, WriteError> {
// Build a post table with unused name entries
let mut w = WriteBuffer::new();
let header = Header {
version: 0x00020000,
italic_angle: 0,
underline_position: 0,
underline_thickness: 0,
is_fixed_pitch: 0,
min_mem_type_42: 0,
max_mem_type_42: 0,
min_mem_type_1: 0,
max_mem_type_1: 0,
};
Header::write(&mut w, &header)?;
let num_glyphs = 10u16;
U16Be::write(&mut w, num_glyphs)?;
        // Write name indexes that leave an unused name between each used entry
        U16Be::write(&mut w, 0u16)?; // .notdef
for i in 0..(num_glyphs - 1) {
U16Be::write(&mut w, i * 2 + 258)?;
}
// Write the names
for i in 1..num_glyphs {
// Write a real entry
let name = format!("gid{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
// Then the unused one in between
let name = format!("unused{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
}
Ok(w.into_inner())
}
#[test]
fn unused_glyph_names() {
let post_data = build_post_with_unused_names().expect("unable to build post table");
let post = ReadScope::new(&post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
let num_glyphs = post.opt_sub_table.as_ref().unwrap().num_glyphs;
for i in 0..num_glyphs {
let expected = if i == 0 {
String::from(".notdef")
} else {
format!("gid{}", i)
};
assert_eq!(post.glyph_name(i).unwrap().unwrap(), &expected);
}
}
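    #[test]
    fn out_of_range_glyph_index() {
        // A hedged sketch: indices at or beyond `num_glyphs` resolve to
        // `None` rather than an error, matching the bounds check in
        // `glyph_name`.
        let post_data = build_post_with_unused_names().expect("unable to build post table");
        let post = ReadScope::new(&post_data)
            .read::<PostTable<'_>>()
            .expect("unable to parse post table");
        assert_eq!(post.glyph_name(10).unwrap(), None);
    }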
} | "radical", | random_line_split |
post.rs | //! `post` table parsing and writing.
use std::str;
use crate::binary::read::{ReadArray, ReadBinary, ReadCtxt};
use crate::binary::write::{WriteBinary, WriteContext};
use crate::binary::{I16Be, I32Be, U16Be, U32Be, U8};
use crate::error::{ParseError, WriteError};
pub struct PostTable<'a> {
pub header: Header,
pub opt_sub_table: Option<SubTable<'a>>,
}
pub struct Header {
pub version: i32,
pub italic_angle: i32,
pub underline_position: i16,
pub underline_thickness: i16,
pub is_fixed_pitch: u32,
pub min_mem_type_42: u32,
pub max_mem_type_42: u32,
pub min_mem_type_1: u32,
pub max_mem_type_1: u32,
}
pub struct SubTable<'a> {
pub num_glyphs: u16,
pub glyph_name_index: ReadArray<'a, U16Be>,
pub names: Vec<PascalString<'a>>,
}
#[derive(Clone)]
pub struct PascalString<'a> {
pub bytes: &'a [u8],
}
impl ReadBinary for Header {
type HostType<'b> = Self;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self, ParseError> {
let version = ctxt.read_i32be()?;
let italic_angle = ctxt.read_i32be()?;
let underline_position = ctxt.read_i16be()?;
let underline_thickness = ctxt.read_i16be()?;
let is_fixed_pitch = ctxt.read_u32be()?;
let min_mem_type_42 = ctxt.read_u32be()?;
let max_mem_type_42 = ctxt.read_u32be()?;
let min_mem_type_1 = ctxt.read_u32be()?;
let max_mem_type_1 = ctxt.read_u32be()?;
Ok(Header {
version,
italic_angle,
underline_position,
underline_thickness,
is_fixed_pitch,
min_mem_type_42,
max_mem_type_42,
min_mem_type_1,
max_mem_type_1,
})
}
}
impl WriteBinary<&Self> for Header {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &Header) -> Result<(), WriteError> {
I32Be::write(ctxt, table.version)?;
I32Be::write(ctxt, table.italic_angle)?;
I16Be::write(ctxt, table.underline_position)?;
I16Be::write(ctxt, table.underline_thickness)?;
U32Be::write(ctxt, table.is_fixed_pitch)?;
U32Be::write(ctxt, table.min_mem_type_42)?;
U32Be::write(ctxt, table.max_mem_type_42)?;
U32Be::write(ctxt, table.min_mem_type_1)?;
U32Be::write(ctxt, table.max_mem_type_1)?;
Ok(())
}
}
impl<'b> ReadBinary for PostTable<'b> {
type HostType<'a> = PostTable<'a>;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self::HostType<'a>, ParseError> {
let header = ctxt.read::<Header>()?;
let opt_sub_table = match header.version {
0x00020000 => {
// May include some Format 1 glyphs
let num_glyphs = ctxt.read_u16be()?;
let glyph_name_index = ctxt.read_array(usize::from(num_glyphs))?;
// Find the largest index used and use that to determine how many names to read
let names_to_read = glyph_name_index.iter().max().map_or(0, |max| {
(usize::from(max) + 1).saturating_sub(FORMAT_1_NAMES.len())
});
// Read the names
let mut names = Vec::with_capacity(names_to_read);
for _ in 0..names_to_read {
let length = ctxt.read_u8()?;
let bytes = ctxt.read_slice(usize::from(length))?;
names.push(PascalString { bytes });
}
Some(SubTable {
num_glyphs,
glyph_name_index,
names,
})
}
// TODO Handle post version 1.0, 2.5, 3.0
0x00010000 | 0x00025000 | 0x00030000 => None,
_ => return Err(ParseError::BadVersion),
};
Ok(PostTable {
header,
opt_sub_table,
})
}
}
impl<'a> WriteBinary<&Self> for PostTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &PostTable<'a>) -> Result<(), WriteError> {
Header::write(ctxt, &table.header)?;
if let Some(sub_table) = &table.opt_sub_table {
SubTable::write(ctxt, sub_table)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for SubTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &SubTable<'a>) -> Result<(), WriteError> {
U16Be::write(ctxt, table.num_glyphs)?;
<&ReadArray<'_, _>>::write(ctxt, &table.glyph_name_index)?;
for name in &table.names {
PascalString::write(ctxt, name)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for PascalString<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, string: &PascalString<'a>) -> Result<(), WriteError> {
if string.bytes.len() <= usize::from(std::u8::MAX) {
// cast is safe due to check above
U8::write(ctxt, string.bytes.len() as u8)?;
ctxt.write_bytes(string.bytes)?;
Ok(())
} else {
Err(WriteError::BadValue)
}
}
}
impl<'a> PostTable<'a> {
/// Retrieve the glyph name for the supplied `glyph_index`.
///
/// **Note:** Some fonts map more than one glyph to the same name so don't assume names are
/// unique.
pub fn glyph_name(&self, glyph_index: u16) -> Result<Option<&'a str>, ParseError> {
if let Some(sub_table) = &self.opt_sub_table {
if glyph_index >= sub_table.num_glyphs {
return Ok(None);
}
}
match &self.header.version {
0x00010000 if usize::from(glyph_index) < FORMAT_1_NAMES.len() => {
let name = FORMAT_1_NAMES[usize::from(glyph_index)];
Ok(Some(name))
}
0x00020000 => match &self.opt_sub_table {
Some(sub_table) => {
let name_index = sub_table
.glyph_name_index
.get_item(usize::from(glyph_index));
if usize::from(name_index) < FORMAT_1_NAMES.len() {
Ok(Some(FORMAT_1_NAMES[usize::from(name_index)]))
} else {
let index = usize::from(name_index) - FORMAT_1_NAMES.len();
let pascal_string = &sub_table.names[index];
match str::from_utf8(pascal_string.bytes) {
Ok(name) => Ok(Some(name)),
Err(_) => Err(ParseError::BadValue),
}
}
}
// If the table is version 2, the sub-table should exist
None => Err(ParseError::BadValue),
},
_ => Ok(None),
}
}
}
static FORMAT_1_NAMES: &[&str; 258] = &[
".notdef",
".null",
"nonmarkingreturn",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot",
"radical",
"florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nonbreakingspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
"Lslash",
"lslash",
"Scaron",
"scaron",
"Zcaron",
"zcaron",
"brokenbar",
"Eth",
"eth",
"Yacute",
"yacute",
"Thorn",
"thorn",
"minus",
"multiply",
"onesuperior",
"twosuperior",
"threesuperior",
"onehalf",
"onequarter",
"threequarters",
"franc",
"Gbreve",
"gbreve",
"Idotaccent",
"Scedilla",
"scedilla",
"Cacute",
"cacute",
"Ccaron",
"ccaron",
"dcroat",
];
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::read::ReadScope;
use crate::binary::write::WriteBuffer;
#[test]
fn duplicate_glyph_names() {
// Test for post table that maps multiple glyphs to the same name index. Before a fix was
// implemented this table failed to parse.
let post_data = include_bytes!("../tests/fonts/opentype/post.bin");
let post = ReadScope::new(post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
match post.opt_sub_table {
Some(ref sub_table) => assert_eq!(sub_table.names.len(), 1872),
None => panic!("expected post table to have a sub-table"),
}
// These map to the same index (397)
assert_eq!(post.glyph_name(257).unwrap().unwrap(), "Ldot");
assert_eq!(post.glyph_name(1442).unwrap().unwrap(), "Ldot");
}
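    #[test]
    fn pascal_string_length_limit() {
        // A small sketch: `PascalString` is length-prefixed with a single
        // byte, so writing anything longer than 255 bytes must fail with
        // `WriteError::BadValue` instead of silently truncating.
        let long = vec![b'a'; 256];
        let mut w = WriteBuffer::new();
        let result = PascalString::write(&mut w, &PascalString { bytes: &long });
        assert!(matches!(result, Err(WriteError::BadValue)));
    }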
    fn build_post_with_unused_names() -> Result<Vec<u8>, WriteError> | // Write name indexes that leave an unused name between each used entry
        U16Be::write(&mut w, 0u16)?; // .notdef
for i in 0..(num_glyphs - 1) {
U16Be::write(&mut w, i * 2 + 258)?;
}
// Write the names
for i in 1..num_glyphs {
// Write a real entry
let name = format!("gid{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
// Then the unused one in between
let name = format!("unused{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
}
Ok(w.into_inner())
}
#[test]
fn unused_glyph_names() {
let post_data = build_post_with_unused_names().expect("unable to build post table");
let post = ReadScope::new(&post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
let num_glyphs = post.opt_sub_table.as_ref().unwrap().num_glyphs;
for i in 0..num_glyphs {
let expected = if i == 0 {
String::from(".notdef")
} else {
format!("gid{}", i)
};
assert_eq!(post.glyph_name(i).unwrap().unwrap(), &expected);
}
}
}
| {
// Build a post table with unused name entries
let mut w = WriteBuffer::new();
let header = Header {
version: 0x00020000,
italic_angle: 0,
underline_position: 0,
underline_thickness: 0,
is_fixed_pitch: 0,
min_mem_type_42: 0,
max_mem_type_42: 0,
min_mem_type_1: 0,
max_mem_type_1: 0,
};
Header::write(&mut w, &header)?;
let num_glyphs = 10u16;
U16Be::write(&mut w, num_glyphs)?;
| identifier_body |
post.rs | //! `post` table parsing and writing.
use std::str;
use crate::binary::read::{ReadArray, ReadBinary, ReadCtxt};
use crate::binary::write::{WriteBinary, WriteContext};
use crate::binary::{I16Be, I32Be, U16Be, U32Be, U8};
use crate::error::{ParseError, WriteError};
pub struct PostTable<'a> {
pub header: Header,
pub opt_sub_table: Option<SubTable<'a>>,
}
pub struct Header {
pub version: i32,
pub italic_angle: i32,
pub underline_position: i16,
pub underline_thickness: i16,
pub is_fixed_pitch: u32,
pub min_mem_type_42: u32,
pub max_mem_type_42: u32,
pub min_mem_type_1: u32,
pub max_mem_type_1: u32,
}
pub struct SubTable<'a> {
pub num_glyphs: u16,
pub glyph_name_index: ReadArray<'a, U16Be>,
pub names: Vec<PascalString<'a>>,
}
#[derive(Clone)]
pub struct PascalString<'a> {
pub bytes: &'a [u8],
}
impl ReadBinary for Header {
type HostType<'b> = Self;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self, ParseError> {
let version = ctxt.read_i32be()?;
let italic_angle = ctxt.read_i32be()?;
let underline_position = ctxt.read_i16be()?;
let underline_thickness = ctxt.read_i16be()?;
let is_fixed_pitch = ctxt.read_u32be()?;
let min_mem_type_42 = ctxt.read_u32be()?;
let max_mem_type_42 = ctxt.read_u32be()?;
let min_mem_type_1 = ctxt.read_u32be()?;
let max_mem_type_1 = ctxt.read_u32be()?;
Ok(Header {
version,
italic_angle,
underline_position,
underline_thickness,
is_fixed_pitch,
min_mem_type_42,
max_mem_type_42,
min_mem_type_1,
max_mem_type_1,
})
}
}
impl WriteBinary<&Self> for Header {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &Header) -> Result<(), WriteError> {
I32Be::write(ctxt, table.version)?;
I32Be::write(ctxt, table.italic_angle)?;
I16Be::write(ctxt, table.underline_position)?;
I16Be::write(ctxt, table.underline_thickness)?;
U32Be::write(ctxt, table.is_fixed_pitch)?;
U32Be::write(ctxt, table.min_mem_type_42)?;
U32Be::write(ctxt, table.max_mem_type_42)?;
U32Be::write(ctxt, table.min_mem_type_1)?;
U32Be::write(ctxt, table.max_mem_type_1)?;
Ok(())
}
}
impl<'b> ReadBinary for PostTable<'b> {
type HostType<'a> = PostTable<'a>;
fn read<'a>(ctxt: &mut ReadCtxt<'a>) -> Result<Self::HostType<'a>, ParseError> {
let header = ctxt.read::<Header>()?;
let opt_sub_table = match header.version {
0x00020000 => {
// May include some Format 1 glyphs
let num_glyphs = ctxt.read_u16be()?;
let glyph_name_index = ctxt.read_array(usize::from(num_glyphs))?;
// Find the largest index used and use that to determine how many names to read
let names_to_read = glyph_name_index.iter().max().map_or(0, |max| {
(usize::from(max) + 1).saturating_sub(FORMAT_1_NAMES.len())
});
// Read the names
let mut names = Vec::with_capacity(names_to_read);
for _ in 0..names_to_read {
let length = ctxt.read_u8()?;
let bytes = ctxt.read_slice(usize::from(length))?;
names.push(PascalString { bytes });
}
Some(SubTable {
num_glyphs,
glyph_name_index,
names,
})
}
// TODO Handle post version 1.0, 2.5, 3.0
0x00010000 | 0x00025000 | 0x00030000 => None,
_ => return Err(ParseError::BadVersion),
};
Ok(PostTable {
header,
opt_sub_table,
})
}
}
impl<'a> WriteBinary<&Self> for PostTable<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, table: &PostTable<'a>) -> Result<(), WriteError> {
Header::write(ctxt, &table.header)?;
if let Some(sub_table) = &table.opt_sub_table {
SubTable::write(ctxt, sub_table)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for SubTable<'a> {
type Output = ();
fn | <C: WriteContext>(ctxt: &mut C, table: &SubTable<'a>) -> Result<(), WriteError> {
U16Be::write(ctxt, table.num_glyphs)?;
<&ReadArray<'_, _>>::write(ctxt, &table.glyph_name_index)?;
for name in &table.names {
PascalString::write(ctxt, name)?;
}
Ok(())
}
}
impl<'a> WriteBinary<&Self> for PascalString<'a> {
type Output = ();
fn write<C: WriteContext>(ctxt: &mut C, string: &PascalString<'a>) -> Result<(), WriteError> {
if string.bytes.len() <= usize::from(std::u8::MAX) {
// cast is safe due to check above
U8::write(ctxt, string.bytes.len() as u8)?;
ctxt.write_bytes(string.bytes)?;
Ok(())
} else {
Err(WriteError::BadValue)
}
}
}
impl<'a> PostTable<'a> {
/// Retrieve the glyph name for the supplied `glyph_index`.
///
/// **Note:** Some fonts map more than one glyph to the same name so don't assume names are
/// unique.
pub fn glyph_name(&self, glyph_index: u16) -> Result<Option<&'a str>, ParseError> {
if let Some(sub_table) = &self.opt_sub_table {
if glyph_index >= sub_table.num_glyphs {
return Ok(None);
}
}
match &self.header.version {
0x00010000 if usize::from(glyph_index) < FORMAT_1_NAMES.len() => {
let name = FORMAT_1_NAMES[usize::from(glyph_index)];
Ok(Some(name))
}
0x00020000 => match &self.opt_sub_table {
Some(sub_table) => {
let name_index = sub_table
.glyph_name_index
.get_item(usize::from(glyph_index));
if usize::from(name_index) < FORMAT_1_NAMES.len() {
Ok(Some(FORMAT_1_NAMES[usize::from(name_index)]))
} else {
let index = usize::from(name_index) - FORMAT_1_NAMES.len();
let pascal_string = &sub_table.names[index];
match str::from_utf8(pascal_string.bytes) {
Ok(name) => Ok(Some(name)),
Err(_) => Err(ParseError::BadValue),
}
}
}
// If the table is version 2, the sub-table should exist
None => Err(ParseError::BadValue),
},
_ => Ok(None),
}
}
}
static FORMAT_1_NAMES: &[&str; 258] = &[
".notdef",
".null",
"nonmarkingreturn",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot",
"radical",
"florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nonbreakingspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
"Lslash",
"lslash",
"Scaron",
"scaron",
"Zcaron",
"zcaron",
"brokenbar",
"Eth",
"eth",
"Yacute",
"yacute",
"Thorn",
"thorn",
"minus",
"multiply",
"onesuperior",
"twosuperior",
"threesuperior",
"onehalf",
"onequarter",
"threequarters",
"franc",
"Gbreve",
"gbreve",
"Idotaccent",
"Scedilla",
"scedilla",
"Cacute",
"cacute",
"Ccaron",
"ccaron",
"dcroat",
];
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::read::ReadScope;
use crate::binary::write::WriteBuffer;
#[test]
fn duplicate_glyph_names() {
// Test for post table that maps multiple glyphs to the same name index. Before a fix was
// implemented this table failed to parse.
let post_data = include_bytes!("../tests/fonts/opentype/post.bin");
let post = ReadScope::new(post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
match post.opt_sub_table {
Some(ref sub_table) => assert_eq!(sub_table.names.len(), 1872),
None => panic!("expected post table to have a sub-table"),
}
// These map to the same index (397)
assert_eq!(post.glyph_name(257).unwrap().unwrap(), "Ldot");
assert_eq!(post.glyph_name(1442).unwrap().unwrap(), "Ldot");
}
fn build_post_with_unused_names() -> Result<Vec<u8>, WriteError> {
// Build a post table with unused name entries
let mut w = WriteBuffer::new();
let header = Header {
version: 0x00020000,
italic_angle: 0,
underline_position: 0,
underline_thickness: 0,
is_fixed_pitch: 0,
min_mem_type_42: 0,
max_mem_type_42: 0,
min_mem_type_1: 0,
max_mem_type_1: 0,
};
Header::write(&mut w, &header)?;
let num_glyphs = 10u16;
U16Be::write(&mut w, num_glyphs)?;
        // Write name indexes that leave an unused name between each used entry
        U16Be::write(&mut w, 0u16)?; // .notdef
for i in 0..(num_glyphs - 1) {
U16Be::write(&mut w, i * 2 + 258)?;
}
// Write the names
for i in 1..num_glyphs {
// Write a real entry
let name = format!("gid{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
// Then the unused one in between
let name = format!("unused{}", i);
let string = PascalString {
bytes: name.as_bytes(),
};
PascalString::write(&mut w, &string)?;
}
Ok(w.into_inner())
}
#[test]
fn unused_glyph_names() {
let post_data = build_post_with_unused_names().expect("unable to build post table");
let post = ReadScope::new(&post_data)
.read::<PostTable<'_>>()
.expect("unable to parse post table");
let num_glyphs = post.opt_sub_table.as_ref().unwrap().num_glyphs;
for i in 0..num_glyphs {
let expected = if i == 0 {
String::from(".notdef")
} else {
format!("gid{}", i)
};
assert_eq!(post.glyph_name(i).unwrap().unwrap(), &expected);
}
}
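    #[test]
    fn version_1_builtin_names() {
        // A hedged sketch: a version 1.0 `post` table carries no sub-table,
        // so glyph names come from the built-in Format 1 list and
        // out-of-range indices yield `None`.
        let mut w = WriteBuffer::new();
        let header = Header {
            version: 0x00010000,
            italic_angle: 0,
            underline_position: 0,
            underline_thickness: 0,
            is_fixed_pitch: 0,
            min_mem_type_42: 0,
            max_mem_type_42: 0,
            min_mem_type_1: 0,
            max_mem_type_1: 0,
        };
        Header::write(&mut w, &header).expect("unable to write header");
        let data = w.into_inner();
        let post = ReadScope::new(&data)
            .read::<PostTable<'_>>()
            .expect("unable to parse post table");
        assert!(post.opt_sub_table.is_none());
        assert_eq!(post.glyph_name(0).unwrap().unwrap(), ".notdef");
        assert_eq!(post.glyph_name(3).unwrap().unwrap(), "space");
        assert_eq!(post.glyph_name(258).unwrap(), None);
    }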
}
| write | identifier_name |
main.rs | //! Default Compute@Edge template program.
use fastly::http::{header, HeaderValue, Method, StatusCode};
use fastly::{mime, Dictionary, Error, Request, Response};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// The name of a backend server associated with this service.
///
/// This should be changed to match the name of your own backend. See the `Hosts` section of
/// the Fastly WASM service UI for more information.
const FASTLY_API_BACKEND_NAME: &str = "fastly_api_backend";
const FASTLY_API_BASE: &str = "https://api.fastly.com";
const FASTLY_API_DATACENTER_ENDPOINT: &str = "https://api.fastly.com/datacenters";
/// The name of a second backend associated with this service.
const POP_STATUS_API_BACKEND_NAME: &str = "pop_status_backend";
const POP_STATUS_API_ENDPOINT: &str = "https://service-scraper.edgecompute.app/";
const APP_DATA_DICT: &str = "app_data";
const STATUS_VALUES: &[&str] = &[
"Operational",
"Degraded Performance",
"Partial Outage",
"Major Outage",
"Maintenance",
"Not Available",
];
#[derive(Serialize, Deserialize, Debug)]
struct Coordinates {
x: u32,
y: u32,
latitude: f64,
longitude: f64,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopData {
code: String,
name: String,
group: String,
coordinates: Coordinates,
shield: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
struct StatusData {
code: String,
status: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopStatusData {
code: String,
name: String,
latitude: f64,
longitude: f64,
group: String,
shield: String,
status: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopStatusResponse {
current_pop: String,
pop_status_data: Vec<PopStatusData>,
}
#[derive(Serialize, Deserialize, Debug)]
struct DictionaryInfo {
dictionary_id: String,
service_id: String,
item_key: String,
item_value: String,
}
/// The entry point for your application.
///
/// This function is triggered when your service receives a client request. It could be used to
/// route based on the request properties (such as method or path), send the request to a backend,
/// make completely new requests, and/or generate synthetic responses.
///
/// If `main` returns an error, a 500 error response will be delivered to the client.
#[fastly::main]
fn main(req: Request) -> Result<Response, Error> {
println!(
"Amy and the Geeks version:{}",
std::env::var("FASTLY_SERVICE_VERSION").unwrap_or_else(|_| String::new())
);
let current_pop = std::env::var("FASTLY_POP").unwrap_or_else(|_| String::new());
println!("Current:{}", current_pop);
// Filter request methods...
match req.get_method() {
        // Allow GET, HEAD, and PUT requests.
&Method::GET | &Method::HEAD | &Method::PUT => (),
// Accept PURGE requests; it does not matter to which backend they are sent.
m if m == "PURGE" => (),
// Deny anything else.
_ => {
return Ok(Response::from_status(StatusCode::METHOD_NOT_ALLOWED)
                .with_header(header::ALLOW, "GET, HEAD, PUT")
.with_body_text_plain("This method is not allowed\n"))
}
};
let app_data_dict = Dictionary::open(APP_DATA_DICT);
let service_id = std::env::var("FASTLY_SERVICE_ID").unwrap_or_else(|_| String::new());
// We need the dictionary id for API calls.
let dict_id = app_data_dict.get("dict_id").unwrap();
let fsly_api_token = app_data_dict.get("api_key").unwrap();
let the_path = req.get_path();
println!("Path: {}", the_path);
// Pattern match on the path.
match the_path {
// If request is to the `/` path, send a default response.
"/" | "/noscrape" => {
let pop_response = Request::new(Method::GET, FASTLY_API_DATACENTER_ENDPOINT)
.with_header("Fastly-Key", &fsly_api_token)
.with_header(header::ACCEPT, "application/json")
.send(FASTLY_API_BACKEND_NAME)?;
let body_str = pop_response.into_body_str();
let pop_vec: Vec<PopData> = serde_json::from_str(&body_str).unwrap();
let mut status_map: Option<HashMap<&str, &str>> = None;
let status_vec: Vec<StatusData>;
            if the_path != "/noscrape" {
let status_response = Request::new(Method::GET, POP_STATUS_API_ENDPOINT)
.with_header(header::ACCEPT, "application/json")
.send(POP_STATUS_API_BACKEND_NAME)?;
println!("Status response: {:?}", status_response.get_status());
let status_body_str = status_response.into_body_str();
// println!("Status body: {}", &status_body_str);
status_vec = serde_json::from_str(&status_body_str).unwrap();
status_map = Some(
status_vec
.iter()
.map(|status| (status.code.as_str(), status.status.as_str()))
.collect(),
);
}
// let modified_pop_status = app_data_dict.get("modified_pop_status").unwrap();
            let modified_pop_status_opt = get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
if modified_pop_status_opt.is_none() {
return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem accessing API\n"));
}
            let modified_pop_status = modified_pop_status_opt.unwrap();
let modified_pop_status_map: HashMap<&str, u8> =
serde_json::from_str(modified_pop_status.as_str()).unwrap();
let pop_status_vec: Vec<PopStatusData> = pop_vec
.iter()
.map(|pop| {
let pop_code = pop.code.to_string();
let status = get_pop_status(&pop_code, &status_map, &modified_pop_status_map);
let shield = match &pop.shield {
Some(s) => s,
None => "",
};
PopStatusData {
code: pop_code,
name: pop.name.to_string(),
latitude: pop.coordinates.latitude,
longitude: pop.coordinates.longitude,
group: pop.group.to_string(),
shield: shield.to_string(),
status,
}
})
.collect();
let pop_status_response: PopStatusResponse = PopStatusResponse {
current_pop,
pop_status_data: pop_status_vec,
};
let pop_status_json = serde_json::to_string(&pop_status_response)?;
Ok(Response::from_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
)
.with_body(pop_status_json))
}
"/set_pop" => {
            let modified_pop_status_opt = get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
if modified_pop_status_opt.is_none() |
            let modified_pop_status = modified_pop_status_opt.unwrap();
let mut modified_pop_status_map: HashMap<String, u8> =
serde_json::from_str(modified_pop_status.as_str()).unwrap();
let query_params: Vec<(String, String)> = req.get_query().unwrap();
println!("QP: {:?}", query_params);
if query_params.is_empty() {
let response = Response::from_body(modified_pop_status)
.with_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
);
return Ok(response);
}
for (pop, status) in query_params {
if pop == "*" {
if status == "-" {
modified_pop_status_map.clear();
} else {
modified_pop_status_map
.insert("*".to_string(), status.parse::<u8>().unwrap());
}
                } else if status == "-" {
                    modified_pop_status_map.remove(pop.as_str());
                } else {
                    modified_pop_status_map.insert(pop, status.parse::<u8>().unwrap());
                }
}
// /service/service_id/dictionary/dictionary_id/item/dictionary_item_key
let the_url = format!(
"{}/service/{}/dictionary/{}/item/modified_pop_status",
FASTLY_API_BASE, service_id, dict_id
);
let the_body = format!(
"item_value={}",
serde_json::to_string(&modified_pop_status_map)?
);
let dict_api_response = Request::new(Method::PUT, the_url)
.with_header("Fastly-Key", fsly_api_token)
.with_header(header::ACCEPT, "application/json")
.with_header(header::CONTENT_TYPE, "application/x-www-form-urlencoded")
.with_body(the_body)
.send(FASTLY_API_BACKEND_NAME)?;
if dict_api_response.get_status() == StatusCode::OK {
let body_str = dict_api_response.into_body_str();
let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
Ok(Response::from_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
)
.with_body(dict_info.item_value))
} else {
Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem mofifying dictionary\n"))
}
}
// Catch all other requests and return a 404.
_ => Ok(Response::from_status(StatusCode::NOT_FOUND)
.with_body_text_plain("The page you requested could not be found\n")),
}
}
fn get_pop_status(
pop_code: &str,
status_map: &Option<HashMap<&str, &str>>,
modified_pop_status_vec: &HashMap<&str, u8>,
) -> String {
if modified_pop_status_vec.contains_key("*") {
let pc_index = modified_pop_status_vec["*"];
if pc_index < STATUS_VALUES.len() as u8 {
STATUS_VALUES[pc_index as usize].to_string()
} else {
get_status_from_map(pop_code, status_map)
}
} else {
match modified_pop_status_vec.get(pop_code) {
Some(pc_index) => STATUS_VALUES[*pc_index as usize].to_string(),
None => get_status_from_map(pop_code, status_map),
}
}
}
fn get_status_from_map(pop_code: &str, status_map: &Option<HashMap<&str, &str>>) -> String {
match status_map {
Some(map) => match map.get(pop_code) {
Some(status) => status.parse().unwrap(),
None => "Not Available".to_string(),
},
None => "Not Available".to_string(),
}
}
// This calls the Fastly API to get the dictionary. You might ask why we're not just accessing
// it on the edge: the reason is to avoid a race where we read it on the edge and then write it
// with the API. It is still not ideal, since there could be a race with another POP, but it
// will do until we have a KV store.
fn get_modified_pop_status(service_id: &str, dict_id: &str, api_token: &str) -> Option<String> {
let dict_item_url = format!(
"{}/service/{}/dictionary/{}/item/modified_pop_status",
FASTLY_API_BASE, service_id, dict_id
);
// let modified_pop_status = app_data_dict.get("modified_pop_status").unwrap();
let modified_pop_status_resp = Request::new(Method::GET, dict_item_url)
.with_header("Fastly-Key", api_token)
.with_header(header::ACCEPT, "application/json")
.send(FASTLY_API_BACKEND_NAME).unwrap();
if modified_pop_status_resp.get_status() == StatusCode::OK {
let body_str = modified_pop_status_resp.into_body_str();
let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
let modified_pop_status = dict_info.item_value;
println!("MPS: {}", modified_pop_status);
Some(modified_pop_status)
} else {
None
}
}
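// A minimal test sketch for the pure status-resolution helpers above; the
// expected strings come straight from STATUS_VALUES in this file.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn wildcard_override_applies_to_all_pops() {
        // A "*" entry with a valid index forces that status for every POP;
        // an out-of-range index falls back to the scraped map.
        let mut overrides = HashMap::new();
        overrides.insert("*", 3u8);
        assert_eq!(get_pop_status("LHR", &None, &overrides), "Major Outage");
        overrides.insert("*", 42u8);
        assert_eq!(get_pop_status("LHR", &None, &overrides), "Not Available");
    }
    #[test]
    fn falls_back_to_scraped_status() {
        let mut scraped = HashMap::new();
        scraped.insert("LHR", "Operational");
        assert_eq!(
            get_pop_status("LHR", &Some(scraped), &HashMap::new()),
            "Operational"
        );
        assert_eq!(get_pop_status("AMS", &None, &HashMap::new()), "Not Available");
    }
}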
| {
return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem accessing API\n"));
} | conditional_block |
main.rs | //! Default Compute@Edge template program.
use fastly::http::{header, HeaderValue, Method, StatusCode};
use fastly::{mime, Dictionary, Error, Request, Response};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// The name of a backend server associated with this service.
///
/// This should be changed to match the name of your own backend. See the `Hosts` section of
/// the Fastly WASM service UI for more information.
const FASTLY_API_BACKEND_NAME: &str = "fastly_api_backend";
const FASTLY_API_BASE: &str = "https://api.fastly.com";
const FASTLY_API_DATACENTER_ENDPOINT: &str = "https://api.fastly.com/datacenters";
/// The name of a second backend associated with this service.
const POP_STATUS_API_BACKEND_NAME: &str = "pop_status_backend";
const POP_STATUS_API_ENDPOINT: &str = "https://service-scraper.edgecompute.app/";
const APP_DATA_DICT: &str = "app_data";
const STATUS_VALUES: &[&str] = &[
"Operational",
"Degraded Performance",
"Partial Outage",
"Major Outage",
"Maintenance",
"Not Available",
];
#[derive(Serialize, Deserialize, Debug)]
struct Coordinates {
x: u32,
y: u32,
latitude: f64,
longitude: f64,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopData {
code: String,
name: String,
group: String,
coordinates: Coordinates,
shield: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
struct StatusData {
code: String,
status: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct PopStatusData {
code: String,
name: String,
latitude: f64,
longitude: f64,
group: String,
shield: String,
status: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct | {
current_pop: String,
pop_status_data: Vec<PopStatusData>,
}
#[derive(Serialize, Deserialize, Debug)]
struct DictionaryInfo {
dictionary_id: String,
service_id: String,
item_key: String,
item_value: String,
}
/// The entry point for your application.
///
/// This function is triggered when your service receives a client request. It could be used to
/// route based on the request properties (such as method or path), send the request to a backend,
/// make completely new requests, and/or generate synthetic responses.
///
/// If `main` returns an error, a 500 error response will be delivered to the client.
#[fastly::main]
fn main(req: Request) -> Result<Response, Error> {
println!(
"Amy and the Geeks version:{}",
std::env::var("FASTLY_SERVICE_VERSION").unwrap_or_else(|_| String::new())
);
let current_pop = std::env::var("FASTLY_POP").unwrap_or_else(|_| String::new());
println!("Current:{}", current_pop);
// Filter request methods...
match req.get_method() {
        // Allow GET, HEAD, and PUT requests.
&Method::GET | &Method::HEAD | &Method::PUT => (),
// Accept PURGE requests; it does not matter to which backend they are sent.
m if m == "PURGE" => (),
// Deny anything else.
_ => {
return Ok(Response::from_status(StatusCode::METHOD_NOT_ALLOWED)
                .with_header(header::ALLOW, "GET, HEAD, PUT")
.with_body_text_plain("This method is not allowed\n"))
}
};
let app_data_dict = Dictionary::open(APP_DATA_DICT);
let service_id = std::env::var("FASTLY_SERVICE_ID").unwrap_or_else(|_| String::new());
// We need the dictionary id for API calls.
let dict_id = app_data_dict.get("dict_id").unwrap();
let fsly_api_token = app_data_dict.get("api_key").unwrap();
let the_path = req.get_path();
println!("Path: {}", the_path);
// Pattern match on the path.
match the_path {
// If request is to the `/` path, send a default response.
"/" | "/noscrape" => {
let pop_response = Request::new(Method::GET, FASTLY_API_DATACENTER_ENDPOINT)
.with_header("Fastly-Key", &fsly_api_token)
.with_header(header::ACCEPT, "application/json")
.send(FASTLY_API_BACKEND_NAME)?;
let body_str = pop_response.into_body_str();
let pop_vec: Vec<PopData> = serde_json::from_str(&body_str).unwrap();
let mut status_map: Option<HashMap<&str, &str>> = None;
let status_vec: Vec<StatusData>;
            if the_path != "/noscrape" {
let status_response = Request::new(Method::GET, POP_STATUS_API_ENDPOINT)
.with_header(header::ACCEPT, "application/json")
.send(POP_STATUS_API_BACKEND_NAME)?;
println!("Status response: {:?}", status_response.get_status());
let status_body_str = status_response.into_body_str();
// println!("Status body: {}", &status_body_str);
status_vec = serde_json::from_str(&status_body_str).unwrap();
status_map = Some(
status_vec
.iter()
.map(|status| (status.code.as_str(), status.status.as_str()))
.collect(),
);
}
// let modified_pop_status = app_data_dict.get("modified_pop_status").unwrap();
            let modified_pop_status_opt = get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
if modified_pop_status_opt.is_none() {
return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem accessing API\n"));
}
            let modified_pop_status = modified_pop_status_opt.unwrap();
let modified_pop_status_map: HashMap<&str, u8> =
serde_json::from_str(modified_pop_status.as_str()).unwrap();
let pop_status_vec: Vec<PopStatusData> = pop_vec
.iter()
.map(|pop| {
let pop_code = pop.code.to_string();
let status = get_pop_status(&pop_code, &status_map, &modified_pop_status_map);
let shield = match &pop.shield {
Some(s) => s,
None => "",
};
PopStatusData {
code: pop_code,
name: pop.name.to_string(),
latitude: pop.coordinates.latitude,
longitude: pop.coordinates.longitude,
group: pop.group.to_string(),
shield: shield.to_string(),
status,
}
})
.collect();
let pop_status_response: PopStatusResponse = PopStatusResponse {
current_pop,
pop_status_data: pop_status_vec,
};
let pop_status_json = serde_json::to_string(&pop_status_response)?;
Ok(Response::from_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
)
.with_body(pop_status_json))
}
"/set_pop" => {
let modified_pop_status_opt= get_modified_pop_status(&service_id, &dict_id, &fsly_api_token);
if modified_pop_status_opt.is_none() {
return Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
.with_body_text_plain("Problem accessing API\n"));
}
let modified_pop_status= modified_pop_status_opt.unwrap();
let mut modified_pop_status_map: HashMap<String, u8> =
serde_json::from_str(modified_pop_status.as_str()).unwrap();
let query_params: Vec<(String, String)> = req.get_query().unwrap();
println!("QP: {:?}", query_params);
if query_params.is_empty() {
let response = Response::from_body(modified_pop_status)
.with_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
);
return Ok(response);
}
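            // Editor's note: illustrative examples added in editing, not from the original
            // source; the numeric values index into STATUS_VALUES.
            //   /set_pop?LHR=3   force LHR to "Major Outage"
            //   /set_pop?LHR=-   drop the override for LHR
            //   /set_pop?*=4     force every POP to "Maintenance"
            //   /set_pop?*=-     clear all overrides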
for (pop, status) in query_params {
if pop == "*" {
if status == "-" {
modified_pop_status_map.clear();
} else {
modified_pop_status_map
.insert("*".to_string(), status.parse::<u8>().unwrap());
}
} else {
if status == "-" {
modified_pop_status_map.remove(pop.as_str());
} else {
modified_pop_status_map.insert(pop, status.parse::<u8>().unwrap());
}
}
}
// /service/service_id/dictionary/dictionary_id/item/dictionary_item_key
let the_url = format!(
"{}/service/{}/dictionary/{}/item/modified_pop_status",
FASTLY_API_BASE, service_id, dict_id
);
let the_body = format!(
"item_value={}",
serde_json::to_string(&modified_pop_status_map)?
);
let dict_api_response = Request::new(Method::PUT, the_url)
.with_header("Fastly-Key", fsly_api_token)
.with_header(header::ACCEPT, "application/json")
.with_header(header::CONTENT_TYPE, "application/x-www-form-urlencoded")
.with_body(the_body)
.send(FASTLY_API_BACKEND_NAME)?;
if dict_api_response.get_status() == StatusCode::OK {
let body_str = dict_api_response.into_body_str();
let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
Ok(Response::from_status(StatusCode::OK)
.with_content_type(mime::APPLICATION_JSON)
.with_header(
&header::ACCESS_CONTROL_ALLOW_ORIGIN,
&HeaderValue::from_static("*"),
)
.with_body(dict_info.item_value))
} else {
Ok(Response::from_status(StatusCode::IM_A_TEAPOT)
                    .with_body_text_plain("Problem modifying dictionary\n"))
}
}
// Catch all other requests and return a 404.
_ => Ok(Response::from_status(StatusCode::NOT_FOUND)
.with_body_text_plain("The page you requested could not be found\n")),
}
}
fn get_pop_status(
pop_code: &str,
status_map: &Option<HashMap<&str, &str>>,
    modified_pop_status_map: &HashMap<&str, u8>,
) -> String {
    if modified_pop_status_map.contains_key("*") {
        let pc_index = modified_pop_status_map["*"];
if pc_index < STATUS_VALUES.len() as u8 {
STATUS_VALUES[pc_index as usize].to_string()
} else {
get_status_from_map(pop_code, status_map)
}
} else {
        match modified_pop_status_map.get(pop_code) {
Some(pc_index) => STATUS_VALUES[*pc_index as usize].to_string(),
None => get_status_from_map(pop_code, status_map),
}
}
}
fn get_status_from_map(pop_code: &str, status_map: &Option<HashMap<&str, &str>>) -> String {
match status_map {
Some(map) => match map.get(pop_code) {
            Some(status) => status.to_string(),
None => "Not Available".to_string(),
},
None => "Not Available".to_string(),
}
}
// This calls the Fastly API to fetch the dictionary item. You might ask why we don't just read
// it at the edge: reading the edge copy and then writing through the API would race against our
// own write. It is still not ideal, since another POP could interleave with us, but it will do
// until we have a KV store.
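// Editor's note: an illustrative sketch of the lost-update race, added in editing. With two
// concurrent requests A and B reading an edge-cached copy:
//   A: reads edge copy            {"LHR": 4}
//   B: reads edge copy            {"LHR": 4}
//   A: PUTs {"LHR": 4, "JFK": 1}  through the API
//   B: PUTs {"LHR": 4, "FRA": 2}  through the API   <- A's "JFK" entry is silently lost
// Reading through the API narrows this window; as noted above, it cannot close it.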
fn get_modified_pop_status(service_id: &str, dict_id: &str, api_token: &str) -> Option<String> {
let dict_item_url = format!(
"{}/service/{}/dictionary/{}/item/modified_pop_status",
FASTLY_API_BASE, service_id, dict_id
);
// let modified_pop_status = app_data_dict.get("modified_pop_status").unwrap();
let modified_pop_status_resp = Request::new(Method::GET, dict_item_url)
.with_header("Fastly-Key", api_token)
.with_header(header::ACCEPT, "application/json")
.send(FASTLY_API_BACKEND_NAME).unwrap();
if modified_pop_status_resp.get_status() == StatusCode::OK {
let body_str = modified_pop_status_resp.into_body_str();
let dict_info: DictionaryInfo = serde_json::from_str(&body_str).unwrap();
let modified_pop_status = dict_info.item_value;
println!("MPS: {}", modified_pop_status);
Some(modified_pop_status)
} else {
None
}
}
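// --- Editor's sketch: the tests below were added in editing and are not part of the original
// source. They pin down the precedence implemented by get_pop_status: a "*" override wins when
// its index is in range, otherwise the scraped status map (if any) is consulted.
#[cfg(test)]
mod status_resolution_tests {
    use super::*;

    #[test]
    fn wildcard_override_applies_to_every_pop() {
        let mut overrides: HashMap<&str, u8> = HashMap::new();
        overrides.insert("*", 4); // STATUS_VALUES[4] == "Maintenance"
        assert_eq!(get_pop_status("LHR", &None, &overrides), "Maintenance");
    }

    #[test]
    fn out_of_range_wildcard_falls_back_to_the_scraped_map() {
        let mut overrides: HashMap<&str, u8> = HashMap::new();
        overrides.insert("*", 99); // past the end of STATUS_VALUES
        let mut scraped: HashMap<&str, &str> = HashMap::new();
        scraped.insert("LHR", "Operational");
        assert_eq!(get_pop_status("LHR", &Some(scraped), &overrides), "Operational");
    }

    #[test]
    fn unknown_pop_reports_not_available() {
        let overrides: HashMap<&str, u8> = HashMap::new();
        assert_eq!(get_pop_status("AMS", &None, &overrides), "Not Available");
    }
}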
spike_generators.rs
//! This module represents neurons, generalized as "spike generators." To
//! support a wider range of abstractions, the input neurons are divided into
//! discrete and continuous implementations.
/// The general trait encapsulating a spike generator that has an output voltage
/// V.
pub trait SpikeGenerator<V> {
/// Get whether the neuron/generator has spiked at the update.
fn did_spike(&self) -> bool;
/// Gets the voltage of the neuron/generator at the current time.
fn get_voltage(&self) -> V;
}
/// An extension of a neuron that is in a hidden layer. Such a neuron will have
/// a voltage as well as a time-step as input.
pub trait InnerSpikeGenerator<V, T>: SpikeGenerator<V> {
fn handle_input(&mut self, input: V, dt: T);
}
/// An extension of a neuron for input neurons. These neurons can be advanced
/// with no inputs except the time-step.
pub trait InputSpikeGenerator<V, T>: SpikeGenerator<V> {
fn advance(&mut self, dt: T);
}
/// This module handles discrete neurons. Discrete neurons would be useful for
/// rate-encoding in SNNs and form a good basis for their continuous
/// counterparts.
pub mod discrete {
extern crate dimensioned as dim;
use dim::si;
use std::cmp::Ordering;
use super::{InputSpikeGenerator, SpikeGenerator};
/// An input neuron that spikes at a given time.
///
/// This can be used to represent simple visual inputs such as the neurons
/// that detect whether a particular area is a given color.
///
/// The timings of the spike would generally be based on the example being
/// shown to the SNN, hence is a part of feature extraction.
#[derive(Debug)]
pub struct SpikeAtTimes<T, I> {
times: Vec<T>,
time: T,
error_tolerance: T,
idx: usize,
spike_voltage: I,
}
impl<T: From<si::Second<f64>>, I> SpikeAtTimes<T, I> {
        /// Makes a new input neuron that will spike at the given times,
        /// emitting `spike_voltage` whenever it fires.
///
/// The tolerance is in case of floating-point imprecision or a
/// time-step that doesn't exactly hit a spike time. This is an
/// absolute error.
pub fn new(times: Vec<T>, tolerance: T, spike_voltage: I) -> SpikeAtTimes<T, I> {
SpikeAtTimes {
times: times,
time: (0.0 * si::S).into(),
error_tolerance: tolerance,
idx: 0,
spike_voltage: spike_voltage,
}
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
let idx = if self.idx >= self.times.len() {
self.times.len() - 1
} else {
self.idx
};
let time_diff = self.times[idx] - self.time;
return -self.error_tolerance < time_diff && time_diff < self.error_tolerance;
}
        fn get_voltage(&self) -> V {
            if self.did_spike() {
                self.spike_voltage.into()
            } else {
                (0.0 * si::V).into()
            }
        }
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.time += dt.into();
while self.idx < self.times.len() && self.times[self.idx] < self.time {
self.idx += 1;
}
}
}
/// A neuron that will spike a given number of times between certain time
/// slots. (So it only means "rate" if the slot is one unit long.) This is
/// implemented by taking slots from "rate_at_time" and spiking that many
/// times in that slot.
pub struct SpikeAtRate<T, V> {
rate_at_time: Box<dyn Fn(T) -> Option<(i32, T)>>,
time: T,
slot_start_time: T,
slot_end_time: T,
spike_voltage: V,
current_rate: i32,
num_spiked: i32,
tolerance: T,
}
impl<T, V> SpikeAtRate<T, V>
where
T: From<si::Second<f64>> + PartialOrd + Copy,
{
/// Makes a new neuron that will spike at the rate indicated by invoking
/// the rate_fn at a time-step.
///
/// Args:
        /// * `rate_fn`: Returns the rate at which the neuron should spike at a given
/// time. It also returns a deadline for when all those spikes
/// should occur. If the function returns None, it is assumed that
/// the neuron is done spiking.
/// * `slot_end_time`: When the first starting_rate spikes should occur by.
/// * `spike_voltage`: The voltage to spike at when spiking.
/// * `starting_rate`: The initial rate to spike at.
/// * `tolerance`: "tolerance" is an implementation detail, but an important one: since
/// slots are subdivided to ensure the correct number of spikes in the slot
/// the tolerance is "how far from the starting of a sub-slot should the
/// spike be within." Hence, for a tolerance t, you want to advance in a
/// step t < dt < 2t to be sure that you hit every spike exactly once.
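        ///
        /// Illustrative example (added in editing): with a 1 s slot and a rate of 4, the
        /// slot is cut into 0.25 s sub-slots. With tolerance t = 30 ms, stepping at
        /// dt = 50 ms (so t < dt < 2t) lands exactly one sample inside each sub-slot's
        /// tolerance window, and each of the 4 spikes is observed exactly once.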
pub fn new(
rate_fn: Box<dyn Fn(T) -> Option<(i32, T)>>,
slot_end_time: T,
spike_voltage: V,
starting_rate: i32,
tolerance: T,
) -> Self {
SpikeAtRate {
rate_at_time: rate_fn,
time: (0.0 * si::S).into(),
slot_start_time: (0.0 * si::S).into(),
slot_end_time: slot_end_time,
spike_voltage: spike_voltage,
current_rate: starting_rate,
num_spiked: 0,
tolerance: tolerance,
}
}
/// Makes a function that, given the vector of slot start times and
/// rates within that slot, returns a function that would serve as a
/// `rate_fn` above.
///
/// As a side-effect, the input vector is lexicographically sorted based
/// on the partial ordering on T. (So if T is a float, the incomparable
/// values are all treated as equal, so use that at your own risk.)
pub fn rate_fn_of_times<'a>(
slot_starts_to_rate: &'a mut Vec<(T, i32)>,
) -> Box<dyn Fn(T) -> Option<(i32, T)> + 'a> {
slot_starts_to_rate.sort_unstable_by(|a, b| {
let (t1, r1) = a;
let (t2, r2) = b;
match t1.partial_cmp(t2) {
Option::None | Option::Some(Ordering::Equal) => r1.cmp(r2),
Option::Some(x) => x,
}
});
Box::new(move |time: T| {
let slot: Vec<&(T, i32)> = (*slot_starts_to_rate)
.iter()
.filter(|slt| time > slt.0)
.take(1)
.collect();
if slot.len() == 0 {
return Option::None;
}
let (new_slot_end, new_rate) = slot[0];
return Option::Some((*new_rate, *new_slot_end));
})
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>> + Copy + std::ops::Sub<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
if self.current_rate <= 0 {
return false;
}
let spike_interval_len: si::Second<f64> =
((self.slot_end_time - self.slot_start_time).into()) / (self.current_rate as f64);
let adjusted_time = self.time.into()
- spike_interval_len * (self.num_spiked as f64)
- self.slot_start_time.into();
0.0 * si::S < adjusted_time && adjusted_time <= self.tolerance.into()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>>
+ Copy
+ std::ops::Sub<Output = T>
+ std::ops::AddAssign
+ PartialOrd<T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
            // Move the "spiked" counter first: callers typically read whether the
            // neuron spiked only after advancing it. If this state change came after
            // the updates below, polling "did_spike" in a loop would miss every
            // spike, because this check would have already incremented
            // self.num_spiked by the time the caller looked.
if self.did_spike() {
self.num_spiked += 1;
}
self.time += dt;
if self.time > self.slot_end_time && self.current_rate > -1 {
self.slot_start_time = self.slot_end_time;
if let Option::Some((new_rate, new_end)) = (*self.rate_at_time)(self.time) {
self.current_rate = new_rate;
self.slot_end_time = new_end;
self.num_spiked = 0;
} else {
self.current_rate = -1;
}
}
}
}
}
/// Ways of adding continuity to neuron implementations.
pub mod continuous {
extern crate dimensioned as dim;
use dim::si;
/// Adds a time-based voltage decay to the discrete neuron type D.
/// The neuron is at 0 voltage until it spikes. Then the voltage is left
/// to the spike_decay_fn. Since the spking is detected by querying the
/// wrapped discrete neuron, the precise timing of the spike may have an
/// error as large as the time step used to `advance` this neuron.
pub struct WithSpikeDecay<D, T, V> {
time_since_spike: T,
discrete_neuron: D,
spike_voltage: V,
spiked_yet: bool,
spike_decay_fn: Box<dyn Fn(T, V) -> V>,
}
impl<T, D, V> WithSpikeDecay<D, T, V>
where
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
/// Args:
/// * `discrete_neuron`: The discrete neuron to add a decay to.
/// * `spike_decay_fn`: The function to decay along. The first argument is the time of
/// the previous spike and the second is the voltage at the spike.
pub fn new(
discrete_neuron: D,
spike_decay_fn: Box<dyn Fn(T, V) -> V>) -> Self {
WithSpikeDecay {
time_since_spike: (0.0 * si::S).into(),
discrete_neuron: discrete_neuron,
spike_voltage: (0.0 * si::V).into(),
spiked_yet: false,
spike_decay_fn: spike_decay_fn,
}
}
        /// Wraps a discrete neuron into one that exponentially decays after
        /// spiking. The decay function produced is V * a * e^(-b * T), where V
        /// is the previous spike voltage, T is the time since the previous spike,
        /// * `spike_decay_scalar` is the scalar "a",
        /// * and `spike_timing_scalar` is the scalar "b" (negated in the exponent).
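        ///
        /// Illustrative (added in editing): with a = 1.0 and b = 5.0, the post-spike
        /// voltage halves roughly every ln(2)/5 ≈ 0.14 time units.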
pub fn exp_decay(
discrete_neuron: D,
spike_decay_scalar: f64,
spike_timing_scalar: f64,
) -> Self {
Self::new(
discrete_neuron,
Box::new(move |time: T, spike: V| {
((-(time.into() / si::S) * spike_timing_scalar).exp()
* (spike.into() / si::V)
* spike_decay_scalar
* si::V)
.into()
}),
)
}
}
impl<D, T, V> super::SpikeGenerator<V> for WithSpikeDecay<D, T, V>
where
D: super::SpikeGenerator<V>,
T: Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
self.discrete_neuron.did_spike()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.discrete_neuron.get_voltage()
            } else if !self.spiked_yet {
(0.0 * si::V).into()
} else {
// Haha function pointer go brr.
(*self.spike_decay_fn)(self.time_since_spike, self.spike_voltage)
}
}
}
impl<D, T, V> super::InputSpikeGenerator<V, T> for WithSpikeDecay<D, T, V>
where
D: super::InputSpikeGenerator<V, T>,
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy + std::ops::AddAssign,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.discrete_neuron.advance(dt);
if self.discrete_neuron.did_spike() {
self.spiked_yet = true;
self.time_since_spike = (0.0 * si::S).into();
self.spike_voltage = self.discrete_neuron.get_voltage();
return;
}
self.time_since_spike += dt;
}
}
}
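// --- Editor's sketch: the tests below were added in editing and are not part of the original
// source. They assume the dimensioned SI types used throughout this file.
#[cfg(test)]
mod sketch_tests {
    extern crate dimensioned as dim;
    use dim::si;

    use super::discrete::{SpikeAtRate, SpikeAtTimes};
    use super::{InputSpikeGenerator, SpikeGenerator};

    #[test]
    fn spike_at_times_fires_near_a_listed_time() {
        // One spike at t = 1 s; the tolerance (50 ms) exceeds the step (40 ms),
        // so the spike cannot fall between samples.
        let mut neuron: SpikeAtTimes<si::Second<f64>, si::Volt<f64>> =
            SpikeAtTimes::new(vec![1.0 * si::S], 0.05 * si::S, 1.0 * si::V);
        let mut fired = false;
        for _ in 0..30 {
            neuron.advance(0.04 * si::S);
            fired = fired || neuron.did_spike();
        }
        assert!(fired);
    }

    #[test]
    fn rate_fn_of_times_returns_the_first_slot_already_started() {
        let mut slots = vec![(2.0 * si::S, 5), (1.0 * si::S, 3)];
        let rate_fn =
            SpikeAtRate::<si::Second<f64>, si::Volt<f64>>::rate_fn_of_times(&mut slots);
        // At t = 1.5 s only the slot starting at 1 s has begun.
        assert_eq!(rate_fn(1.5 * si::S), Option::Some((3, 1.0 * si::S)));
        // Before any slot has started there is nothing to return.
        assert_eq!(rate_fn(0.5 * si::S), Option::None);
    }
}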
spike_generators.rs | //! This module represents neurons, generalized as "spike generators." To
//! support a wider range of abstractions, the input neurons are divided into
//! discrete and continuous implementations.
/// The general trait encapsulating a spike generator that has an output voltage
/// V.
pub trait SpikeGenerator<V> {
/// Get whether the neuron/generator has spiked at the update.
fn did_spike(&self) -> bool;
/// Gets the voltage of the neuron/generator at the current time.
fn get_voltage(&self) -> V;
}
/// An extension of a neuron that is in a hidden layer. Such a neuron will have
/// a voltage as well as a time-step as input.
pub trait InnerSpikeGenerator<V, T>: SpikeGenerator<V> {
fn handle_input(&mut self, input: V, dt: T);
}
/// An extension of a neuron for input neurons. These neurons can be advanced
/// with no inputs except the time-step.
pub trait InputSpikeGenerator<V, T>: SpikeGenerator<V> {
fn advance(&mut self, dt: T);
}
/// This module handles discrete neurons. Discrete neurons would be useful for
/// rate-encoding in SNNs and form a good basis for their continuous
/// counterparts.
pub mod discrete {
extern crate dimensioned as dim;
use dim::si;
use std::cmp::Ordering;
use super::{InputSpikeGenerator, SpikeGenerator};
/// An input neuron that spikes at a given time.
///
/// This can be used to represent simple visual inputs such as the neurons
/// that detect whether a particular area is a given color.
///
/// The timings of the spike would generally be based on the example being
/// shown to the SNN, hence is a part of feature extraction.
#[derive(Debug)]
pub struct SpikeAtTimes<T, I> {
times: Vec<T>,
time: T,
error_tolerance: T,
idx: usize,
spike_voltage: I,
}
impl<T: From<si::Second<f64>>, I> SpikeAtTimes<T, I> {
/// Makes a new input neuron that shall spike at the given times,
/// spiking at the given rate.
///
/// The tolerance is in case of floating-point imprecision or a
/// time-step that doesn't exactly hit a spike time. This is an
/// absolute error.
pub fn new(times: Vec<T>, tolerance: T, spike_voltage: I) -> SpikeAtTimes<T, I> {
SpikeAtTimes {
times: times,
time: (0.0 * si::S).into(),
error_tolerance: tolerance,
idx: 0,
spike_voltage: spike_voltage,
}
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
let idx = if self.idx >= self.times.len() {
self.times.len() - 1
} else {
self.idx
};
let time_diff = self.times[idx] - self.time;
return -self.error_tolerance < time_diff && time_diff < self.error_tolerance;
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage.into()
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.time += dt.into();
while self.idx < self.times.len() && self.times[self.idx] < self.time {
self.idx += 1;
}
}
}
/// A neuron that will spike a given number of times between certain time
/// slots. (So it only means "rate" if the slot is one unit long.) This is
/// implemented by taking slots from "rate_at_time" and spiking that many
/// times in that slot.
pub struct SpikeAtRate<T, V> {
rate_at_time: Box<dyn Fn(T) -> Option<(i32, T)>>,
time: T,
slot_start_time: T,
slot_end_time: T,
spike_voltage: V,
current_rate: i32,
num_spiked: i32,
tolerance: T,
}
impl<T, V> SpikeAtRate<T, V>
where
T: From<si::Second<f64>> + PartialOrd + Copy,
{
/// Makes a new neuron that will spike at the rate indicated by invoking
/// the rate_fn at a time-step.
///
/// Args:
/// * `rate_fn`: Returns the rate at which the neuron should spike at at a given
/// time. It also returns a deadline for when all those spikes
/// should occur. If the function returns None, it is assumed that
/// the neuron is done spiking.
/// * `slot_end_time`: When the first starting_rate spikes should occur by.
/// * `spike_voltage`: The voltage to spike at when spiking.
/// * `starting_rate`: The initial rate to spike at.
/// * `tolerance`: "tolerance" is an implementation detail, but an important one: since
/// slots are subdivided to ensure the correct number of spikes in the slot
/// the tolerance is "how far from the starting of a sub-slot should the
/// spike be within." Hence, for a tolerance t, you want to advance in a
/// step t < dt < 2t to be sure that you hit every spike exactly once.
pub fn new(
rate_fn: Box<dyn Fn(T) -> Option<(i32, T)>>,
slot_end_time: T,
spike_voltage: V,
starting_rate: i32,
tolerance: T,
) -> Self {
SpikeAtRate {
rate_at_time: rate_fn,
time: (0.0 * si::S).into(),
slot_start_time: (0.0 * si::S).into(),
slot_end_time: slot_end_time,
spike_voltage: spike_voltage,
current_rate: starting_rate,
num_spiked: 0,
tolerance: tolerance,
}
}
/// Makes a function that, given the vector of slot start times and
/// rates within that slot, returns a function that would serve as a
/// `rate_fn` above.
///
/// As a side-effect, the input vector is lexicographically sorted based
/// on the partial ordering on T. (So if T is a float, the incomparable
/// values are all treated as equal, so use that at your own risk.)
pub fn | <'a>(
slot_starts_to_rate: &'a mut Vec<(T, i32)>,
) -> Box<dyn Fn(T) -> Option<(i32, T)> + 'a> {
slot_starts_to_rate.sort_unstable_by(|a, b| {
let (t1, r1) = a;
let (t2, r2) = b;
match t1.partial_cmp(t2) {
Option::None | Option::Some(Ordering::Equal) => r1.cmp(r2),
Option::Some(x) => x,
}
});
Box::new(move |time: T| {
let slot: Vec<&(T, i32)> = (*slot_starts_to_rate)
.iter()
.filter(|slt| time > slt.0)
.take(1)
.collect();
if slot.len() == 0 {
return Option::None;
}
let (new_slot_end, new_rate) = slot[0];
return Option::Some((*new_rate, *new_slot_end));
})
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>> + Copy + std::ops::Sub<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
if self.current_rate <= 0 {
return false;
}
let spike_interval_len: si::Second<f64> =
((self.slot_end_time - self.slot_start_time).into()) / (self.current_rate as f64);
let adjusted_time = self.time.into()
- spike_interval_len * (self.num_spiked as f64)
- self.slot_start_time.into();
0.0 * si::S < adjusted_time && adjusted_time <= self.tolerance.into()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>>
+ Copy
+ std::ops::Sub<Output = T>
+ std::ops::AddAssign
+ PartialOrd<T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
// We move the "spiked" counter first since the more usual usage
// pattern would need to read whether the neuron spiked after
// advancing and doing this state change after the ones below
// would actually mean that checking "did_spike" in a loop would
// actually miss every spike since this check would incorrectly
// increment self.num_spiked.
if self.did_spike() {
self.num_spiked += 1;
}
self.time += dt;
if self.time > self.slot_end_time && self.current_rate > -1 {
self.slot_start_time = self.slot_end_time;
if let Option::Some((new_rate, new_end)) = (*self.rate_at_time)(self.time) {
self.current_rate = new_rate;
self.slot_end_time = new_end;
self.num_spiked = 0;
} else {
self.current_rate = -1;
}
}
}
}
}
/// Ways of adding continuity to neuron implementations.
pub mod continuous {
extern crate dimensioned as dim;
use dim::si;
/// Adds a time-based voltage decay to the discrete neuron type D.
/// The neuron is at 0 voltage until it spikes. Then the voltage is left
/// to the spike_decay_fn. Since the spking is detected by querying the
/// wrapped discrete neuron, the precise timing of the spike may have an
/// error as large as the time step used to `advance` this neuron.
pub struct WithSpikeDecay<D, T, V> {
time_since_spike: T,
discrete_neuron: D,
spike_voltage: V,
spiked_yet: bool,
spike_decay_fn: Box<dyn Fn(T, V) -> V>,
}
impl<T, D, V> WithSpikeDecay<D, T, V>
where
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
/// Args:
/// * `discrete_neuron`: The discrete neuron to add a decay to.
/// * `spike_decay_fn`: The function to decay along. The first argument is the time of
/// the previous spike and the second is the voltage at the spike.
pub fn new(
discrete_neuron: D,
spike_decay_fn: Box<dyn Fn(T, V) -> V>) -> Self {
WithSpikeDecay {
time_since_spike: (0.0 * si::S).into(),
discrete_neuron: discrete_neuron,
spike_voltage: (0.0 * si::V).into(),
spiked_yet: false,
spike_decay_fn: spike_decay_fn,
}
}
/// Wraps a discrete neuron into one that exponentially decays after
/// spiking. The decay function outputted is V * a * e ^ (b * T) where V
/// is the previous spike voltage, T is the time since the previous spike,
/// * `spike_decay_scalar` is the scalar "a",
/// * and `spike_timing_scalar` is the scalar "b" (in the exponent)
pub fn exp_decay(
discrete_neuron: D,
spike_decay_scalar: f64,
spike_timing_scalar: f64,
) -> Self {
Self::new(
discrete_neuron,
Box::new(move |time: T, spike: V| {
((-(time.into() / si::S) * spike_timing_scalar).exp()
* (spike.into() / si::V)
* spike_decay_scalar
* si::V)
.into()
}),
)
}
}
impl<D, T, V> super::SpikeGenerator<V> for WithSpikeDecay<D, T, V>
where
D: super::SpikeGenerator<V>,
T: Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
self.discrete_neuron.did_spike()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.discrete_neuron.get_voltage()
} else if!self.spiked_yet {
(0.0 * si::V).into()
} else {
// Haha function pointer go brr.
(*self.spike_decay_fn)(self.time_since_spike, self.spike_voltage)
}
}
}
impl<D, T, V> super::InputSpikeGenerator<V, T> for WithSpikeDecay<D, T, V>
where
D: super::InputSpikeGenerator<V, T>,
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy + std::ops::AddAssign,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.discrete_neuron.advance(dt);
if self.discrete_neuron.did_spike() {
self.spiked_yet = true;
self.time_since_spike = (0.0 * si::S).into();
self.spike_voltage = self.discrete_neuron.get_voltage();
return;
}
self.time_since_spike += dt;
}
}
}
| rate_fn_of_times | identifier_name |
spike_generators.rs | //! This module represents neurons, generalized as "spike generators." To
//! support a wider range of abstractions, the input neurons are divided into
//! discrete and continuous implementations.
/// The general trait encapsulating a spike generator that has an output voltage
/// V.
pub trait SpikeGenerator<V> {
/// Get whether the neuron/generator has spiked at the update.
fn did_spike(&self) -> bool;
/// Gets the voltage of the neuron/generator at the current time.
fn get_voltage(&self) -> V;
}
/// An extension of a neuron that is in a hidden layer. Such a neuron will have
/// a voltage as well as a time-step as input.
pub trait InnerSpikeGenerator<V, T>: SpikeGenerator<V> {
fn handle_input(&mut self, input: V, dt: T);
}
/// An extension of a neuron for input neurons. These neurons can be advanced
/// with no inputs except the time-step.
pub trait InputSpikeGenerator<V, T>: SpikeGenerator<V> {
fn advance(&mut self, dt: T);
}
/// This module handles discrete neurons. Discrete neurons would be useful for
/// rate-encoding in SNNs and form a good basis for their continuous
/// counterparts.
pub mod discrete {
extern crate dimensioned as dim;
use dim::si;
use std::cmp::Ordering;
use super::{InputSpikeGenerator, SpikeGenerator};
/// An input neuron that spikes at a given time.
///
/// This can be used to represent simple visual inputs such as the neurons
/// that detect whether a particular area is a given color.
///
/// The timings of the spike would generally be based on the example being
/// shown to the SNN, hence is a part of feature extraction.
#[derive(Debug)]
pub struct SpikeAtTimes<T, I> {
times: Vec<T>,
time: T,
error_tolerance: T,
idx: usize,
spike_voltage: I,
}
impl<T: From<si::Second<f64>>, I> SpikeAtTimes<T, I> {
/// Makes a new input neuron that shall spike at the given times,
/// spiking at the given rate.
///
/// The tolerance is in case of floating-point imprecision or a
/// time-step that doesn't exactly hit a spike time. This is an
/// absolute error.
pub fn new(times: Vec<T>, tolerance: T, spike_voltage: I) -> SpikeAtTimes<T, I> {
SpikeAtTimes {
times: times,
time: (0.0 * si::S).into(),
error_tolerance: tolerance,
idx: 0,
spike_voltage: spike_voltage,
}
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
let idx = if self.idx >= self.times.len() {
self.times.len() - 1
} else {
self.idx
};
let time_diff = self.times[idx] - self.time;
return -self.error_tolerance < time_diff && time_diff < self.error_tolerance;
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage.into()
} else |
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.time += dt.into();
while self.idx < self.times.len() && self.times[self.idx] < self.time {
self.idx += 1;
}
}
}
/// A neuron that will spike a given number of times between certain time
/// slots. (So it only means "rate" if the slot is one unit long.) This is
/// implemented by taking slots from "rate_at_time" and spiking that many
/// times in that slot.
pub struct SpikeAtRate<T, V> {
rate_at_time: Box<dyn Fn(T) -> Option<(i32, T)>>,
time: T,
slot_start_time: T,
slot_end_time: T,
spike_voltage: V,
current_rate: i32,
num_spiked: i32,
tolerance: T,
}
impl<T, V> SpikeAtRate<T, V>
where
T: From<si::Second<f64>> + PartialOrd + Copy,
{
/// Makes a new neuron that will spike at the rate indicated by invoking
/// the rate_fn at a time-step.
///
/// Args:
/// * `rate_fn`: Returns the rate at which the neuron should spike at at a given
/// time. It also returns a deadline for when all those spikes
/// should occur. If the function returns None, it is assumed that
/// the neuron is done spiking.
/// * `slot_end_time`: When the first starting_rate spikes should occur by.
/// * `spike_voltage`: The voltage to spike at when spiking.
/// * `starting_rate`: The initial rate to spike at.
/// * `tolerance`: "tolerance" is an implementation detail, but an important one: since
/// slots are subdivided to ensure the correct number of spikes in the slot
/// the tolerance is "how far from the starting of a sub-slot should the
/// spike be within." Hence, for a tolerance t, you want to advance in a
/// step t < dt < 2t to be sure that you hit every spike exactly once.
pub fn new(
rate_fn: Box<dyn Fn(T) -> Option<(i32, T)>>,
slot_end_time: T,
spike_voltage: V,
starting_rate: i32,
tolerance: T,
) -> Self {
SpikeAtRate {
rate_at_time: rate_fn,
time: (0.0 * si::S).into(),
slot_start_time: (0.0 * si::S).into(),
slot_end_time: slot_end_time,
spike_voltage: spike_voltage,
current_rate: starting_rate,
num_spiked: 0,
tolerance: tolerance,
}
}
/// Makes a function that, given the vector of slot start times and
/// rates within that slot, returns a function that would serve as a
/// `rate_fn` above.
///
/// As a side-effect, the input vector is lexicographically sorted based
/// on the partial ordering on T. (So if T is a float, the incomparable
/// values are all treated as equal, so use that at your own risk.)
pub fn rate_fn_of_times<'a>(
slot_starts_to_rate: &'a mut Vec<(T, i32)>,
) -> Box<dyn Fn(T) -> Option<(i32, T)> + 'a> {
slot_starts_to_rate.sort_unstable_by(|a, b| {
let (t1, r1) = a;
let (t2, r2) = b;
match t1.partial_cmp(t2) {
Option::None | Option::Some(Ordering::Equal) => r1.cmp(r2),
Option::Some(x) => x,
}
});
Box::new(move |time: T| {
let slot: Vec<&(T, i32)> = (*slot_starts_to_rate)
.iter()
.filter(|slt| time > slt.0)
.take(1)
.collect();
if slot.len() == 0 {
return Option::None;
}
let (new_slot_end, new_rate) = slot[0];
return Option::Some((*new_rate, *new_slot_end));
})
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>> + Copy + std::ops::Sub<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
if self.current_rate <= 0 {
return false;
}
let spike_interval_len: si::Second<f64> =
((self.slot_end_time - self.slot_start_time).into()) / (self.current_rate as f64);
let adjusted_time = self.time.into()
- spike_interval_len * (self.num_spiked as f64)
- self.slot_start_time.into();
0.0 * si::S < adjusted_time && adjusted_time <= self.tolerance.into()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>>
+ Copy
+ std::ops::Sub<Output = T>
+ std::ops::AddAssign
+ PartialOrd<T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
// We move the "spiked" counter first since the more usual usage
// pattern would need to read whether the neuron spiked after
// advancing and doing this state change after the ones below
// would actually mean that checking "did_spike" in a loop would
// actually miss every spike since this check would incorrectly
// increment self.num_spiked.
if self.did_spike() {
self.num_spiked += 1;
}
self.time += dt;
if self.time > self.slot_end_time && self.current_rate > -1 {
self.slot_start_time = self.slot_end_time;
if let Option::Some((new_rate, new_end)) = (*self.rate_at_time)(self.time) {
self.current_rate = new_rate;
self.slot_end_time = new_end;
self.num_spiked = 0;
} else {
self.current_rate = -1;
}
}
}
}
}
/// Ways of adding continuity to neuron implementations.
pub mod continuous {
extern crate dimensioned as dim;
use dim::si;
/// Adds a time-based voltage decay to the discrete neuron type D.
/// The neuron is at 0 voltage until it spikes. Then the voltage is left
/// to the spike_decay_fn. Since the spking is detected by querying the
/// wrapped discrete neuron, the precise timing of the spike may have an
/// error as large as the time step used to `advance` this neuron.
pub struct WithSpikeDecay<D, T, V> {
time_since_spike: T,
discrete_neuron: D,
spike_voltage: V,
spiked_yet: bool,
spike_decay_fn: Box<dyn Fn(T, V) -> V>,
}
impl<T, D, V> WithSpikeDecay<D, T, V>
where
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
/// Args:
/// * `discrete_neuron`: The discrete neuron to add a decay to.
/// * `spike_decay_fn`: The function to decay along. The first argument is the time of
/// the previous spike and the second is the voltage at the spike.
pub fn new(
discrete_neuron: D,
spike_decay_fn: Box<dyn Fn(T, V) -> V>) -> Self {
WithSpikeDecay {
time_since_spike: (0.0 * si::S).into(),
discrete_neuron: discrete_neuron,
spike_voltage: (0.0 * si::V).into(),
spiked_yet: false,
spike_decay_fn: spike_decay_fn,
}
}
/// Wraps a discrete neuron into one that exponentially decays after
/// spiking. The decay function outputted is V * a * e ^ (b * T) where V
/// is the previous spike voltage, T is the time since the previous spike,
/// * `spike_decay_scalar` is the scalar "a",
/// * and `spike_timing_scalar` is the scalar "b" (in the exponent)
pub fn exp_decay(
discrete_neuron: D,
spike_decay_scalar: f64,
spike_timing_scalar: f64,
) -> Self {
Self::new(
discrete_neuron,
Box::new(move |time: T, spike: V| {
((-(time.into() / si::S) * spike_timing_scalar).exp()
* (spike.into() / si::V)
* spike_decay_scalar
* si::V)
.into()
}),
)
}
}
impl<D, T, V> super::SpikeGenerator<V> for WithSpikeDecay<D, T, V>
where
D: super::SpikeGenerator<V>,
T: Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
self.discrete_neuron.did_spike()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.discrete_neuron.get_voltage()
} else if!self.spiked_yet {
(0.0 * si::V).into()
} else {
// Haha function pointer go brr.
(*self.spike_decay_fn)(self.time_since_spike, self.spike_voltage)
}
}
}
impl<D, T, V> super::InputSpikeGenerator<V, T> for WithSpikeDecay<D, T, V>
where
D: super::InputSpikeGenerator<V, T>,
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy + std::ops::AddAssign,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.discrete_neuron.advance(dt);
if self.discrete_neuron.did_spike() {
self.spiked_yet = true;
self.time_since_spike = (0.0 * si::S).into();
self.spike_voltage = self.discrete_neuron.get_voltage();
return;
}
self.time_since_spike += dt;
}
}
}
| {
(0.0 * si::V).into()
} | conditional_block |
spike_generators.rs | //! This module represents neurons, generalized as "spike generators." To
//! support a wider range of abstractions, the input neurons are divided into
//! discrete and continuous implementations.
/// The general trait encapsulating a spike generator that has an output voltage
/// V.
pub trait SpikeGenerator<V> {
/// Get whether the neuron/generator has spiked at the update.
fn did_spike(&self) -> bool;
/// Gets the voltage of the neuron/generator at the current time.
fn get_voltage(&self) -> V;
}
/// An extension of a neuron that is in a hidden layer. Such a neuron will have
/// a voltage as well as a time-step as input.
pub trait InnerSpikeGenerator<V, T>: SpikeGenerator<V> {
fn handle_input(&mut self, input: V, dt: T);
}
/// An extension of a neuron for input neurons. These neurons can be advanced
/// with no inputs except the time-step.
pub trait InputSpikeGenerator<V, T>: SpikeGenerator<V> {
fn advance(&mut self, dt: T);
}
/// This module handles discrete neurons. Discrete neurons would be useful for
/// rate-encoding in SNNs and form a good basis for their continuous
/// counterparts.
pub mod discrete {
extern crate dimensioned as dim;
use dim::si;
use std::cmp::Ordering;
use super::{InputSpikeGenerator, SpikeGenerator};
/// An input neuron that spikes at a given time.
///
/// This can be used to represent simple visual inputs such as the neurons
/// that detect whether a particular area is a given color.
///
/// The timings of the spike would generally be based on the example being
/// shown to the SNN, hence is a part of feature extraction.
#[derive(Debug)]
pub struct SpikeAtTimes<T, I> {
times: Vec<T>,
time: T,
error_tolerance: T,
idx: usize,
spike_voltage: I,
}
impl<T: From<si::Second<f64>>, I> SpikeAtTimes<T, I> {
/// Makes a new input neuron that shall spike at the given times,
/// spiking at the given rate.
///
/// The tolerance is in case of floating-point imprecision or a
/// time-step that doesn't exactly hit a spike time. This is an
/// absolute error.
pub fn new(times: Vec<T>, tolerance: T, spike_voltage: I) -> SpikeAtTimes<T, I> {
SpikeAtTimes {
times: times,
time: (0.0 * si::S).into(),
error_tolerance: tolerance,
idx: 0,
spike_voltage: spike_voltage,
}
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
let idx = if self.idx >= self.times.len() {
self.times.len() - 1
} else {
self.idx
};
let time_diff = self.times[idx] - self.time;
return -self.error_tolerance < time_diff && time_diff < self.error_tolerance;
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage.into()
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtTimes<T, V>
where
// TODO: alias this as a trait?
T: From<si::Second<f64>>
+ Copy
+ PartialOrd<T>
+ std::ops::AddAssign
+ std::ops::Sub<Output = T>
+ std::ops::Neg<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.time += dt.into();
while self.idx < self.times.len() && self.times[self.idx] < self.time {
self.idx += 1;
}
}
}
/// A neuron that will spike a given number of times between certain time
/// slots. (So it only means "rate" if the slot is one unit long.) This is
/// implemented by taking slots from "rate_at_time" and spiking that many
/// times in that slot.
pub struct SpikeAtRate<T, V> {
rate_at_time: Box<dyn Fn(T) -> Option<(i32, T)>>,
time: T,
slot_start_time: T,
slot_end_time: T,
spike_voltage: V,
current_rate: i32,
num_spiked: i32,
tolerance: T,
}
impl<T, V> SpikeAtRate<T, V>
where
T: From<si::Second<f64>> + PartialOrd + Copy,
{
/// Makes a new neuron that will spike at the rate indicated by invoking
/// the rate_fn at a time-step.
///
/// Args:
/// * `rate_fn`: Returns the rate at which the neuron should spike at at a given
/// time. It also returns a deadline for when all those spikes
/// should occur. If the function returns None, it is assumed that
/// the neuron is done spiking.
/// * `slot_end_time`: When the first starting_rate spikes should occur by.
/// * `spike_voltage`: The voltage to spike at when spiking.
/// * `starting_rate`: The initial rate to spike at.
/// * `tolerance`: "tolerance" is an implementation detail, but an important one: since
/// slots are subdivided to ensure the correct number of spikes in the slot
/// the tolerance is "how far from the starting of a sub-slot should the
/// spike be within." Hence, for a tolerance t, you want to advance in a
/// step t < dt < 2t to be sure that you hit every spike exactly once.
pub fn new(
rate_fn: Box<dyn Fn(T) -> Option<(i32, T)>>,
slot_end_time: T,
spike_voltage: V,
starting_rate: i32,
tolerance: T,
) -> Self {
SpikeAtRate {
rate_at_time: rate_fn,
time: (0.0 * si::S).into(),
slot_start_time: (0.0 * si::S).into(),
slot_end_time: slot_end_time,
spike_voltage: spike_voltage,
current_rate: starting_rate,
num_spiked: 0,
tolerance: tolerance,
}
}
/// Makes a function that, given the vector of slot start times and
/// rates within that slot, returns a function that would serve as a
/// `rate_fn` above.
///
/// As a side-effect, the input vector is lexicographically sorted based
/// on the partial ordering on T. (So if T is a float, the incomparable
/// values are all treated as equal, so use that at your own risk.)
pub fn rate_fn_of_times<'a>( | ) -> Box<dyn Fn(T) -> Option<(i32, T)> + 'a> {
slot_starts_to_rate.sort_unstable_by(|a, b| {
let (t1, r1) = a;
let (t2, r2) = b;
match t1.partial_cmp(t2) {
Option::None | Option::Some(Ordering::Equal) => r1.cmp(r2),
Option::Some(x) => x,
}
});
Box::new(move |time: T| {
let slot: Vec<&(T, i32)> = (*slot_starts_to_rate)
.iter()
.filter(|slt| time > slt.0)
.take(1)
.collect();
if slot.len() == 0 {
return Option::None;
}
let (new_slot_end, new_rate) = slot[0];
return Option::Some((*new_rate, *new_slot_end));
})
}
}
impl<T, V> SpikeGenerator<V> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>> + Copy + std::ops::Sub<Output = T>,
V: From<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
if self.current_rate <= 0 {
return false;
}
let spike_interval_len: si::Second<f64> =
((self.slot_end_time - self.slot_start_time).into()) / (self.current_rate as f64);
let adjusted_time = self.time.into()
- spike_interval_len * (self.num_spiked as f64)
- self.slot_start_time.into();
0.0 * si::S < adjusted_time && adjusted_time <= self.tolerance.into()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.spike_voltage
} else {
(0.0 * si::V).into()
}
}
}
impl<T, V> InputSpikeGenerator<V, T> for SpikeAtRate<T, V>
where
T: Into<si::Second<f64>>
+ Copy
+ std::ops::Sub<Output = T>
+ std::ops::AddAssign
+ PartialOrd<T>,
V: From<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
// We move the "spiked" counter first since the more usual usage
// pattern would need to read whether the neuron spiked after
// advancing and doing this state change after the ones below
// would actually mean that checking "did_spike" in a loop would
// actually miss every spike since this check would incorrectly
// increment self.num_spiked.
if self.did_spike() {
self.num_spiked += 1;
}
self.time += dt;
if self.time > self.slot_end_time && self.current_rate > -1 {
self.slot_start_time = self.slot_end_time;
if let Option::Some((new_rate, new_end)) = (*self.rate_at_time)(self.time) {
self.current_rate = new_rate;
self.slot_end_time = new_end;
self.num_spiked = 0;
} else {
self.current_rate = -1;
}
}
}
}
}
/// Ways of adding continuity to neuron implementations.
pub mod continuous {
extern crate dimensioned as dim;
use dim::si;
/// Adds a time-based voltage decay to the discrete neuron type D.
/// The neuron is at 0 voltage until it spikes. Then the voltage is left
/// to the spike_decay_fn. Since the spking is detected by querying the
/// wrapped discrete neuron, the precise timing of the spike may have an
/// error as large as the time step used to `advance` this neuron.
pub struct WithSpikeDecay<D, T, V> {
time_since_spike: T,
discrete_neuron: D,
spike_voltage: V,
spiked_yet: bool,
spike_decay_fn: Box<dyn Fn(T, V) -> V>,
}
impl<T, D, V> WithSpikeDecay<D, T, V>
where
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
/// Args:
/// * `discrete_neuron`: The discrete neuron to add a decay to.
/// * `spike_decay_fn`: The function to decay along. The first argument is the time of
/// the previous spike and the second is the voltage at the spike.
pub fn new(
discrete_neuron: D,
spike_decay_fn: Box<dyn Fn(T, V) -> V>) -> Self {
WithSpikeDecay {
time_since_spike: (0.0 * si::S).into(),
discrete_neuron: discrete_neuron,
spike_voltage: (0.0 * si::V).into(),
spiked_yet: false,
spike_decay_fn: spike_decay_fn,
}
}
/// Wraps a discrete neuron into one that exponentially decays after
/// spiking. The decay function outputted is V * a * e ^ (b * T) where V
/// is the previous spike voltage, T is the time since the previous spike,
/// * `spike_decay_scalar` is the scalar "a",
/// * and `spike_timing_scalar` is the scalar "b" (in the exponent)
pub fn exp_decay(
discrete_neuron: D,
spike_decay_scalar: f64,
spike_timing_scalar: f64,
) -> Self {
Self::new(
discrete_neuron,
Box::new(move |time: T, spike: V| {
((-(time.into() / si::S) * spike_timing_scalar).exp()
* (spike.into() / si::V)
* spike_decay_scalar
* si::V)
.into()
}),
)
}
}
impl<D, T, V> super::SpikeGenerator<V> for WithSpikeDecay<D, T, V>
where
D: super::SpikeGenerator<V>,
T: Into<si::Second<f64>> + Copy,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn did_spike(&self) -> bool {
self.discrete_neuron.did_spike()
}
fn get_voltage(&self) -> V {
if self.did_spike() {
self.discrete_neuron.get_voltage()
} else if!self.spiked_yet {
(0.0 * si::V).into()
} else {
// Haha function pointer go brr.
(*self.spike_decay_fn)(self.time_since_spike, self.spike_voltage)
}
}
}
impl<D, T, V> super::InputSpikeGenerator<V, T> for WithSpikeDecay<D, T, V>
where
D: super::InputSpikeGenerator<V, T>,
T: From<si::Second<f64>> + Into<si::Second<f64>> + Copy + std::ops::AddAssign,
V: From<si::Volt<f64>> + Into<si::Volt<f64>> + Copy,
{
fn advance(&mut self, dt: T) {
self.discrete_neuron.advance(dt);
if self.discrete_neuron.did_spike() {
self.spiked_yet = true;
self.time_since_spike = (0.0 * si::S).into();
self.spike_voltage = self.discrete_neuron.get_voltage();
return;
}
self.time_since_spike += dt;
}
}
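// A minimal usage sketch for `WithSpikeDecay::exp_decay` (illustrative: the
// `SpikeOnce` stand-in neuron and its trait impls are assumptions written to
// match the trait shapes used above; they are not part of the original crate).
#[cfg(test)]
mod decay_sketch {
    use super::si;
    use super::WithSpikeDecay;
    use super::super::{InputSpikeGenerator, SpikeGenerator};

    // A discrete neuron that spikes exactly once, on its first `advance`.
    struct SpikeOnce {
        fired: bool,
        just_fired: bool,
    }
    impl SpikeGenerator<si::Volt<f64>> for SpikeOnce {
        fn did_spike(&self) -> bool {
            self.just_fired
        }
        fn get_voltage(&self) -> si::Volt<f64> {
            if self.just_fired { 1.0 * si::V } else { 0.0 * si::V }
        }
    }
    impl InputSpikeGenerator<si::Volt<f64>, si::Second<f64>> for SpikeOnce {
        fn advance(&mut self, _dt: si::Second<f64>) {
            self.just_fired = !self.fired;
            self.fired = true;
        }
    }

    #[test]
    fn voltage_decays_after_the_spike() {
        let mut neuron = WithSpikeDecay::exp_decay(
            SpikeOnce { fired: false, just_fired: false },
            1.0, // "a": scale applied to the spike voltage
            2.0, // "b": decay rate (negated in the exponent)
        );
        neuron.advance(0.1 * si::S); // the wrapped neuron spikes here
        let at_spike: si::Volt<f64> = neuron.get_voltage();
        neuron.advance(0.1 * si::S); // no new spike, so the decay curve applies
        let later: si::Volt<f64> = neuron.get_voltage();
        assert!(later / si::V < at_spike / si::V);
    }
}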
} | slot_starts_to_rate: &'a mut Vec<(T, i32)>, | random_line_split |
fs.rs | use rlua::prelude::*;
use std::{
sync::Arc,
env,
fs::{self, OpenOptions},
io::{self, SeekFrom, prelude::*},
path::Path
};
use serde_json;
use rlua_serde;
use crate::bindings::system::LuaMetadata;
use regex::Regex;
//TODO: Move to having a common interface so IO can share the same binding
pub struct LuaFile(fs::File);
pub fn fs_open(_: &Lua, (path, mode): (String, Option<String>)) -> Result<LuaFile, LuaError> {
let mut option = OpenOptions::new();
if let Some(mode) = mode {
match mode.as_ref() {
"r" => option.read(true).write(false),
"w" => option.create(true).read(false).write(true),
"w+" => option.create(true).read(true).write(true).truncate(true),
"a" => option.append(true),
"rw" | _ => option.create(true).read(true).write(true),
};
} else {
option.create(true).read(true).write(true);
}
option.open(path)
.map(LuaFile)
.map_err(LuaError::external)
}
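// A small Lua-side sketch of the mode strings handled above (illustrative:
// it mirrors the `lua_fs` test at the bottom of this file and writes a
// hypothetical scratch file in the working directory).
#[cfg(test)]
mod open_sketch {
    use super::*;
    #[test]
    fn lua_fs_open_modes() {
        let lua = Lua::new();
        init(&lua).unwrap();
        lua.exec::<_, ()>(r#"
            local w = fs.open("./open_sketch.txt", "w") -- create, write-only
            w:write("hello")
            w:flush()
            local r = fs.open("./open_sketch.txt", "r") -- read-only
            assert(r:read_to_string() == "hello")
        "#, None).unwrap();
    }
}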
impl LuaUserData for LuaFile {
fn add_methods<'lua, M: LuaUserDataMethods<'lua, Self>>(methods: &mut M) {
methods.add_method_mut("read", |_, this: &mut LuaFile, len: Option<usize>|{
let bytes = match len {
Some(len) => {
let mut bytes = vec![0u8; len];
this.0.read(&mut bytes).map_err(LuaError::external)?;
bytes
},
None => {
let mut bytes = vec![];
this.0.read_to_end(&mut bytes).map_err(LuaError::external)?;
bytes
}
};
Ok(bytes)
});
methods.add_method_mut("read_to_string", |_, this: &mut LuaFile, _: ()|{
let mut data = String::new();
this.0.read_to_string(&mut data).map_err(LuaError::external)?;
Ok(data)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, bytes: Vec<u8>|{
Ok(this.0.write(bytes.as_slice()).map_err(LuaError::external)?)
});
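// NB: the second "write" registration below appears to replace this
// byte-slice overload (rlua keys methods by name), so only the string
// version ends up callable from Lua.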
methods.add_method_mut("write", |_, this: &mut LuaFile, str: String|{
Ok(this.0.write(str.as_bytes()).map_err(LuaError::external)?)
});
methods.add_method_mut("flush", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.flush().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_all", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_all().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_data", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_data().map_err(LuaError::external)?)
});
methods.add_method("metadata", |_, this: &LuaFile, _: ()| {
Ok(LuaMetadata(this.0.metadata().map_err(LuaError::external)?))
});
methods.add_method_mut("seek", |_, this: &mut LuaFile, (pos, size): (Option<String>, Option<usize>)| {
let size = size.unwrap_or(0);
let seekfrom = pos.and_then(|s_pos| {
Some(match s_pos.as_ref() {
"start" => SeekFrom::Start(size as u64),
"end" => SeekFrom::End(size as i64),
"current" | _ => SeekFrom::Current(size as i64),
})
}).unwrap_or(SeekFrom::Current(size as i64));
Ok(this.0.seek(seekfrom).map_err(LuaError::external)?)
});
}
}
pub fn init(lua: &Lua) -> crate::Result<()> {
let module = lua.create_table()?;
module.set("open", lua.create_function( fs_open)? )?;
module.set("canonicalize", lua.create_function( |lua, path: String| {
match fs::canonicalize(path).map_err(|err| LuaError::external(err)) {
Ok(i) => Ok(Some(lua.create_string(&i.to_str().unwrap()).unwrap())),
_ => Ok(None)
}
})? )?;
//Deprecated for path:create_dir
module.set("create_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
let result = match all {
Some(true) => fs::create_dir_all(path),
_ => fs::create_dir(path)
};
Ok(result.is_ok())
})? )?;
//Deprecated for path:read_dir
module.set("entries", lua.create_function( |lua, path: String| {
match fs::read_dir(path) {
Ok(iter) => {
let mut arc_iter = Arc::new(Some(iter));
let f = move |_, _: ()| {
let result = match Arc::get_mut(&mut arc_iter).expect("entries iterator is mutably borrowed") {
Some(iter) => match iter.next() {
Some(Ok(entry)) => Some(entry.file_name().into_string().unwrap()),
_ => None
},
None => None
};
if result.is_none() { *Arc::get_mut(&mut arc_iter).unwrap() = None; }
Ok(result)
};
Ok(lua.create_function_mut(f)?)
}, Err(err) => Err(LuaError::ExternalError(Arc::new(::failure::Error::from_boxed_compat(Box::new(err)))))
}
})? )?;
module.set("read_dir", lua.create_function( |lua, path: String| {
let mut _list: Vec<String> = Vec::new();
for entry in fs::read_dir(path).map_err(|err| LuaError::external(err))? {
let entry = entry.map_err(|err| LuaError::external(err))?;
_list.push(entry.path().file_name().unwrap_or_default().to_string_lossy().to_string());
}
let list_value: serde_json::Value = serde_json::to_value(_list).map_err(|err| LuaError::external(err) )?;
let lua_value = rlua_serde::to_value(lua, &list_value)?;
Ok(lua_value)
})?)?;
////Deprecated for fs:read
module.set("read_file", lua.create_function( |lua, path: String| {
let data = fs::read(path).map_err(|err| LuaError::external(err))?;
Ok(lua.create_string(&String::from_utf8_lossy(&data[..]).to_owned().to_string())?)
})?)?;
module.set("chdir", lua.create_function(|_, path: String| {
env::set_current_dir(path).map_err(LuaError::external)
})?)?;
module.set("current_dir", lua.create_function(|_, _:()| {
env::current_dir().map(|path| path.to_str().map(|s| s.to_string())).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:exists
module.set("exists", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).exists())
})?)?;
//Probably deprecate for path:is_file
module.set("is_file", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_file())
})?)?;
//Probably deprecate for path:is_dir
module.set("is_dir", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_dir())
})?)?;
module.set("symlink", lua.create_function( |_, (src_path, symlink_dest): (String, String)| {
create_symlink(src_path, symlink_dest).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:remove
module.set("remove_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
match all {
Some(true) => fs::remove_dir_all(&path).map_err(LuaError::external),
_ => fs::remove_dir(&path).map_err(LuaError::external)
}
})?)?;
//TODO: Rename to something suitable other than touch
//Probably deprecate for path:create_file
module.set("touch", lua.create_function( |_, path: String| {
fs::OpenOptions::new()
.write(true)
.create(true)
.open(&path)
.map(|_| ())
.map_err(LuaError::external)
})?)?;
module.set("copy_file", lua.create_function(|_, (src, dest): (String, String)| {
copy_file(src, dest)
})?)?;
// This binding has a known limitation: it does not copy the .git directory
module.set("copy_dir", lua.create_function(|_, (src, dest): (String, String)| {
recursive_copy(src, dest).map_err(LuaError::external)
})?)?;
//Deprecated for fs:metadata
module.set("metadata", lua.create_function( |lua, path: String| {
match fs::metadata(path) {
Ok(md) => {
let table = lua.create_table()?;
table.set("type", {
let file_type = md.file_type();
if file_type.is_file() { "file" }
else if file_type.is_dir() { "directory" }
else { unreachable!() }
})?;
table.set("size", md.len())?;
// TODO: Unix permissions when in Unix
table.set("readonly", md.permissions().readonly())?;
table.set("created", md.created().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("accessed", md.accessed().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("modified", md.modified().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
Ok(Some(table))
},
_ => Ok(None)
}
})? )?;
lua.globals().set("fs", module)?;
Ok(())
}
//TODO: Use either `symlink_file` or `symlink_dir` depending on whether the endpoint in `src_path` is a file or a directory
// Probably move functions into path binding.
#[cfg(target_family = "windows")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::windows::fs::symlink_file;
symlink_file(src_path, dest)
}
#[cfg(target_family = "unix")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::unix::fs::symlink;
symlink(src_path, dest)
}
fn copy_file<S: AsRef<Path>, D: AsRef<Path>>(src: S, dest: D) -> LuaResult<()> {
let mut dest = dest.as_ref().to_path_buf();
if dest.is_dir() {
let file_name = src.as_ref()
.file_name()
.map(|s| s.to_string_lossy().to_string())
.ok_or(LuaError::external(io::Error::from(io::ErrorKind::InvalidInput)))?;
dest.push(file_name);
};
fs::copy(src, dest).map(|_| ())
.map_err(LuaError::external)
}
fn recursive_copy<A: AsRef<Path>, B: AsRef<Path>>(src: A, dest: B) -> io::Result<()> | if src.is_file() {
fs::copy(src, &dest)?;
}
else {
fs::create_dir_all(&dest)?;
recursive_copy(src, &dest)?;
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn lua_fs () {
let lua = Lua::new();
init(&lua).unwrap();
lua.exec::<_, ()>(r#"
for entry in fs.entries("./") do
local md = fs.metadata(entry)
print(md.type .. ": " .. entry)
end
assert(fs.canonicalize("."), "expected path")
assert(fs.canonicalize("/no/such/path/here") == nil, "expected nil")
"#, None).unwrap();
}
}
| {
let path = src.as_ref();
if !src.as_ref().exists() {
return Err(io::Error::from(io::ErrorKind::NotFound));
}
if !dest.as_ref().exists() {
fs::create_dir(&dest)?;
}
for entry in path.read_dir()? {
let src = entry.map(|e| e.path())?;
let src_name = match src.file_name().map(|s| s.to_string_lossy().to_string()) {
Some(s) => s,
None => return Err(io::Error::from(io::ErrorKind::InvalidData))
};
let re = Regex::new(r"^\.git").unwrap();
// don't copy .git directory
if re.is_match(&src_name) {
continue;
}
let dest = dest.as_ref().join(src_name); | identifier_body |
fs.rs | use rlua::prelude::*;
use std::{
sync::Arc,
env,
fs::{self, OpenOptions},
io::{self, SeekFrom, prelude::*},
path::Path
};
use serde_json;
use rlua_serde;
use crate::bindings::system::LuaMetadata;
use regex::Regex;
//TODO: Move to having a common interface so IO can share the same binding
pub struct LuaFile(fs::File);
pub fn fs_open(_: &Lua, (path, mode): (String, Option<String>)) -> Result<LuaFile, LuaError> {
let mut option = OpenOptions::new();
if let Some(mode) = mode {
match mode.as_ref() {
"r" => option.read(true).write(false),
"w" => option.create(true).read(false).write(true),
"w+" => option.create(true).read(true).write(true).truncate(true),
"a" => option.append(true),
"rw" | _ => option.create(true).read(true).write(true),
};
} else {
option.create(true).read(true).write(true);
}
option.open(path)
.map(LuaFile)
.map_err(LuaError::external)
}
impl LuaUserData for LuaFile {
fn add_methods<'lua, M: LuaUserDataMethods<'lua, Self>>(methods: &mut M) {
methods.add_method_mut("read", |_, this: &mut LuaFile, len: Option<usize>|{
let bytes = match len {
Some(len) => {
let mut bytes = vec![0u8; len];
this.0.read(&mut bytes).map_err(LuaError::external)?;
bytes
},
None => {
let mut bytes = vec![];
this.0.read_to_end(&mut bytes).map_err(LuaError::external)?;
bytes
}
};
Ok(bytes)
});
methods.add_method_mut("read_to_string", |_, this: &mut LuaFile, _: ()|{
let mut data = String::new();
this.0.read_to_string(&mut data).map_err(LuaError::external)?;
Ok(data)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, bytes: Vec<u8>|{
Ok(this.0.write(bytes.as_slice()).map_err(LuaError::external)?)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, str: String|{
Ok(this.0.write(str.as_bytes()).map_err(LuaError::external)?)
});
methods.add_method_mut("flush", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.flush().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_all", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_all().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_data", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_data().map_err(LuaError::external)?)
});
methods.add_method("metadata", |_, this: &LuaFile, _: ()| {
Ok(LuaMetadata(this.0.metadata().map_err(LuaError::external)?))
});
methods.add_method_mut("seek", |_, this: &mut LuaFile, (pos, size): (Option<String>, Option<usize>)| {
let size = size.unwrap_or(0);
let seekfrom = pos.and_then(|s_pos| {
Some(match s_pos.as_ref() {
"start" => SeekFrom::Start(size as u64),
"end" => SeekFrom::End(size as i64),
"current" | _ => SeekFrom::Current(size as i64),
})
}).unwrap_or(SeekFrom::Current(size as i64));
Ok(this.0.seek(seekfrom).map_err(LuaError::external)?)
});
}
}
pub fn init(lua: &Lua) -> crate::Result<()> {
let module = lua.create_table()?;
module.set("open", lua.create_function( fs_open)? )?;
module.set("canonicalize", lua.create_function( |lua, path: String| {
match fs::canonicalize(path).map_err(|err| LuaError::external(err)) {
Ok(i) => Ok(Some(lua.create_string(&i.to_str().unwrap()).unwrap())),
_ => Ok(None)
}
})? )?;
//Deprecated for path:create_dir
module.set("create_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
let result = match all {
Some(true) => fs::create_dir_all(path),
_ => fs::create_dir(path)
};
Ok(result.is_ok())
})? )?;
//Deprecated for path:read_dir
module.set("entries", lua.create_function( |lua, path: String| {
match fs::read_dir(path) {
Ok(iter) => {
let mut arc_iter = Arc::new(Some(iter));
let f = move |_, _: ()| {
let result = match Arc::get_mut(&mut arc_iter).expect("entries iterator is mutably borrowed") {
Some(iter) => match iter.next() {
Some(Ok(entry)) => Some(entry.file_name().into_string().unwrap()),
_ => None
},
None => None
};
if result.is_none() { *Arc::get_mut(&mut arc_iter).unwrap() = None; }
Ok(result)
};
Ok(lua.create_function_mut(f)?)
}, Err(err) => Err(LuaError::ExternalError(Arc::new(::failure::Error::from_boxed_compat(Box::new(err)))))
}
})? )?;
module.set("read_dir", lua.create_function( |lua, path: String| {
let mut _list: Vec<String> = Vec::new();
for entry in fs::read_dir(path).map_err(|err| LuaError::external(err))? {
let entry = entry.map_err(|err| LuaError::external(err))?;
_list.push(entry.path().file_name().unwrap_or_default().to_string_lossy().to_string());
}
let list_value: serde_json::Value = serde_json::to_value(_list).map_err(|err| LuaError::external(err) )?;
let lua_value = rlua_serde::to_value(lua, &list_value)?;
Ok(lua_value)
})?)?;
////Deprecated for fs:read
module.set("read_file", lua.create_function( |lua, path: String| {
let data = fs::read(path).map_err(|err| LuaError::external(err))?;
Ok(lua.create_string(&String::from_utf8_lossy(&data[..]).to_owned().to_string())?)
})?)?;
module.set("chdir", lua.create_function(|_, path: String| {
env::set_current_dir(path).map_err(LuaError::external)
})?)?;
module.set("current_dir", lua.create_function(|_, _:()| {
env::current_dir().map(|path| path.to_str().map(|s| s.to_string())).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:exists
module.set("exists", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).exists())
})?)?;
//Probably deprecate for path:is_file
module.set("is_file", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_file())
})?)?;
//Probably deprecate for path:is_dir
module.set("is_dir", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_dir())
})?)?;
module.set("symlink", lua.create_function( |_, (src_path, symlink_dest): (String, String)| {
create_symlink(src_path, symlink_dest).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:remove
module.set("remove_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
match all {
Some(true) => fs::remove_dir_all(&path).map_err(LuaError::external),
_ => fs::remove_dir(&path).map_err(LuaError::external)
}
})?)?;
//TODO: Rename to something suitable other than touch
//Probably deprecate for path:create_file
module.set("touch", lua.create_function( |_, path: String| {
fs::OpenOptions::new()
.write(true)
.create(true)
.open(&path)
.map(|_| ())
.map_err(LuaError::external)
})?)?;
module.set("copy_file", lua.create_function(|_, (src, dest): (String, String)| {
copy_file(src, dest)
})?)?;
// This binding has a known limitation: it does not copy the .git directory
module.set("copy_dir", lua.create_function(|_, (src, dest): (String, String)| {
recursive_copy(src, dest).map_err(LuaError::external)
})?)?;
//Deprecated for fs:metadata
module.set("metadata", lua.create_function( |lua, path: String| {
match fs::metadata(path) {
Ok(md) => {
let table = lua.create_table()?;
table.set("type", {
let file_type = md.file_type();
if file_type.is_file() { "file" }
else if file_type.is_dir() { "directory" }
else { unreachable!() }
})?;
table.set("size", md.len())?;
// TODO: Unix permissions when in Unix
table.set("readonly", md.permissions().readonly())?;
table.set("created", md.created().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("accessed", md.accessed().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("modified", md.modified().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
Ok(Some(table))
},
_ => Ok(None)
}
})? )?;
lua.globals().set("fs", module)?;
Ok(())
}
//TODO: Use either `symlink_file` or `symlink_dir` depending on whether the endpoint in `src_path` is a file or a directory
// Probably move functions into path binding.
#[cfg(target_family = "windows")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::windows::fs::symlink_file;
symlink_file(src_path, dest)
}
#[cfg(target_family = "unix")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::unix::fs::symlink;
symlink(src_path, dest)
}
fn copy_file<S: AsRef<Path>, D: AsRef<Path>>(src: S, dest: D) -> LuaResult<()> {
let mut dest = dest.as_ref().to_path_buf();
if dest.is_dir() {
let file_name = src.as_ref()
.file_name()
.map(|s| s.to_string_lossy().to_string())
.ok_or(LuaError::external(io::Error::from(io::ErrorKind::InvalidInput)))?;
dest.push(file_name);
};
fs::copy(src, dest).map(|_| ())
.map_err(LuaError::external)
}
fn | <A: AsRef<Path>, B: AsRef<Path>>(src: A, dest: B) -> io::Result<()> {
let path = src.as_ref();
if !src.as_ref().exists() {
return Err(io::Error::from(io::ErrorKind::NotFound));
}
if !dest.as_ref().exists() {
fs::create_dir(&dest)?;
}
for entry in path.read_dir()? {
let src = entry.map(|e| e.path())?;
let src_name = match src.file_name().map(|s| s.to_string_lossy().to_string()) {
Some(s) => s,
None => return Err(io::Error::from(io::ErrorKind::InvalidData))
};
let re = Regex::new(r"^\.git").unwrap();
// don't copy the .git directory
if re.is_match(&src_name) {
continue;
}
let dest = dest.as_ref().join(src_name);
if src.is_file() {
fs::copy(src, &dest)?;
}
else {
fs::create_dir_all(&dest)?;
recursive_copy(src, &dest)?;
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn lua_fs () {
let lua = Lua::new();
init(&lua).unwrap();
lua.exec::<_, ()>(r#"
for entry in fs.entries("./") do
local md = fs.metadata(entry)
print(md.type .. ": " .. entry)
end
assert(fs.canonicalize("."), "expected path")
assert(fs.canonicalize("/no/such/path/here") == nil, "expected nil")
"#, None).unwrap();
}
}
| recursive_copy | identifier_name |
fs.rs | use rlua::prelude::*;
use std::{
sync::Arc,
env,
fs::{self, OpenOptions},
io::{self, SeekFrom, prelude::*},
path::Path
};
use serde_json;
use rlua_serde;
use crate::bindings::system::LuaMetadata;
use regex::Regex;
//TODO: Move to having a common interface so IO can share the same binding
pub struct LuaFile(fs::File);
pub fn fs_open(_: &Lua, (path, mode): (String, Option<String>)) -> Result<LuaFile, LuaError> {
let mut option = OpenOptions::new();
if let Some(mode) = mode {
match mode.as_ref() {
"r" => option.read(true).write(false),
"w" => option.create(true).read(false).write(true),
"w+" => option.create(true).read(true).write(true).truncate(true),
"a" => option.append(true),
"rw" | _ => option.create(true).read(true).write(true),
};
} else {
option.create(true).read(true).write(true);
}
option.open(path)
.map(LuaFile)
.map_err(LuaError::external)
}
impl LuaUserData for LuaFile {
fn add_methods<'lua, M: LuaUserDataMethods<'lua, Self>>(methods: &mut M) {
methods.add_method_mut("read", |_, this: &mut LuaFile, len: Option<usize>|{
let bytes = match len {
Some(len) => {
let mut bytes = vec![0u8; len];
this.0.read(&mut bytes).map_err(LuaError::external)?;
bytes
},
None => {
let mut bytes = vec![];
this.0.read_to_end(&mut bytes).map_err(LuaError::external)?;
bytes
}
};
Ok(bytes)
});
methods.add_method_mut("read_to_string", |_, this: &mut LuaFile, _: ()|{
let mut data = String::new();
this.0.read_to_string(&mut data).map_err(LuaError::external)?;
Ok(data)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, bytes: Vec<u8>|{
Ok(this.0.write(bytes.as_slice()).map_err(LuaError::external)?)
});
methods.add_method_mut("write", |_, this: &mut LuaFile, str: String|{
Ok(this.0.write(str.as_bytes()).map_err(LuaError::external)?)
});
methods.add_method_mut("flush", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.flush().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_all", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_all().map_err(LuaError::external)?)
});
methods.add_method_mut("sync_data", |_, this: &mut LuaFile, _: ()|{
Ok(this.0.sync_data().map_err(LuaError::external)?)
});
methods.add_method("metadata", |_, this: &LuaFile, _: ()| {
Ok(LuaMetadata(this.0.metadata().map_err(LuaError::external)?))
});
methods.add_method_mut("seek", |_, this: &mut LuaFile, (pos, size): (Option<String>, Option<usize>)| {
let size = size.unwrap_or(0);
let seekfrom = pos.and_then(|s_pos| {
Some(match s_pos.as_ref() {
"start" => SeekFrom::Start(size as u64),
"end" => SeekFrom::End(size as i64),
"current" | _ => SeekFrom::Current(size as i64),
})
}).unwrap_or(SeekFrom::Current(size as i64));
Ok(this.0.seek(seekfrom).map_err(LuaError::external)?)
});
}
}
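// A sketch of the `seek` positions above, driven from Lua (illustrative: it
// follows the `lua_fs` test at the bottom of this file and uses a
// hypothetical scratch file in the working directory).
#[cfg(test)]
mod seek_sketch {
    use super::*;
    #[test]
    fn lua_fs_seek() {
        let lua = Lua::new();
        init(&lua).unwrap();
        lua.exec::<_, ()>(r#"
            local f = fs.open("./seek_sketch.txt", "w+") -- truncating read/write
            f:write("abcdef")
            f:seek("start", 2)                           -- SeekFrom::Start(2)
            assert(f:read_to_string() == "cdef")
        "#, None).unwrap();
    }
}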
pub fn init(lua: &Lua) -> crate::Result<()> {
let module = lua.create_table()?;
module.set("open", lua.create_function( fs_open)? )?;
module.set("canonicalize", lua.create_function( |lua, path: String| {
match fs::canonicalize(path).map_err(|err| LuaError::external(err)) {
Ok(i) => Ok(Some(lua.create_string(&i.to_str().unwrap()).unwrap())),
_ => Ok(None)
}
})? )?;
//Deprecated for path:create_dir
module.set("create_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
let result = match all {
Some(true) => fs::create_dir_all(path),
_ => fs::create_dir(path)
};
Ok(result.is_ok())
})? )?;
//Deprecated for path:read_dir
module.set("entries", lua.create_function( |lua, path: String| {
match fs::read_dir(path) {
Ok(iter) => {
let mut arc_iter = Arc::new(Some(iter));
let f = move |_, _: ()| {
let result = match Arc::get_mut(&mut arc_iter).expect("entries iterator is mutably borrowed") {
Some(iter) => match iter.next() {
Some(Ok(entry)) => Some(entry.file_name().into_string().unwrap()),
_ => None
},
None => None
};
if result.is_none() { *Arc::get_mut(&mut arc_iter).unwrap() = None; }
Ok(result)
};
Ok(lua.create_function_mut(f)?)
}, Err(err) => Err(LuaError::ExternalError(Arc::new(::failure::Error::from_boxed_compat(Box::new(err)))))
}
})? )?;
module.set("read_dir", lua.create_function( |lua, path: String| {
let mut _list: Vec<String> = Vec::new();
for entry in fs::read_dir(path).map_err(|err| LuaError::external(err))? {
let entry = entry.map_err(|err| LuaError::external(err))?;
_list.push(entry.path().file_name().unwrap_or_default().to_string_lossy().to_string());
}
let list_value: serde_json::Value = serde_json::to_value(_list).map_err(|err| LuaError::external(err) )?;
let lua_value = rlua_serde::to_value(lua, &list_value)?;
Ok(lua_value)
})?)?;
////Deprecated for fs:read
module.set("read_file", lua.create_function( |lua, path: String| {
let data = fs::read(path).map_err(|err| LuaError::external(err))?;
Ok(lua.create_string(&String::from_utf8_lossy(&data[..]).to_owned().to_string())?)
})?)?;
module.set("chdir", lua.create_function(|_, path: String| {
env::set_current_dir(path).map_err(LuaError::external)
})?)?;
module.set("current_dir", lua.create_function(|_, _:()| {
env::current_dir().map(|path| path.to_str().map(|s| s.to_string())).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:exists
module.set("exists", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).exists())
})?)?;
//Probably deprecate for path:is_file
module.set("is_file", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_file())
})?)?;
//Probably deprecate for path:is_dir
module.set("is_dir", lua.create_function( |_, path: String| {
Ok(::std::path::Path::new(&path).is_dir())
})?)?;
module.set("symlink", lua.create_function( |_, (src_path, symlink_dest): (String, String)| {
create_symlink(src_path, symlink_dest).map_err(LuaError::external)
})?)?;
//Probably deprecate for path:remove
module.set("remove_dir", lua.create_function( |_, (path, all): (String, Option<bool>)| {
match all {
Some(true) => fs::remove_dir_all(&path).map_err(LuaError::external),
_ => fs::remove_dir(&path).map_err(LuaError::external)
}
})?)?;
| fs::OpenOptions::new()
.write(true)
.create(true)
.open(&path)
.map(|_| ())
.map_err(LuaError::external)
})?)?;
module.set("copy_file", lua.create_function(|_, (src, dest): (String, String)| {
copy_file(src, dest)
})?)?;
// This binding has a known limitation: it does not copy the .git directory
module.set("copy_dir", lua.create_function(|_, (src, dest): (String, String)| {
recursive_copy(src, dest).map_err(LuaError::external)
})?)?;
//Deprecated for fs:metadata
module.set("metadata", lua.create_function( |lua, path: String| {
match fs::metadata(path) {
Ok(md) => {
let table = lua.create_table()?;
table.set("type", {
let file_type = md.file_type();
if file_type.is_file() { "file" }
else if file_type.is_dir() { "directory" }
else { unreachable!() }
})?;
table.set("size", md.len())?;
// TODO: Unix permissions when in Unix
table.set("readonly", md.permissions().readonly())?;
table.set("created", md.created().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("accessed", md.accessed().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
table.set("modified", md.modified().map(|time| time.duration_since(::std::time::SystemTime::UNIX_EPOCH).map(|s| s.as_secs()).unwrap_or(0)).ok())?;
Ok(Some(table))
},
_ => Ok(None)
}
})? )?;
lua.globals().set("fs", module)?;
Ok(())
}
//TODO: Use either `symlink_file` or `symlink_dir` depending on whether the endpoint in `src_path` is a file or a directory
// Probably move functions into path binding.
#[cfg(target_family = "windows")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::windows::fs::symlink_file;
symlink_file(src_path, dest)
}
#[cfg(target_family = "unix")]
fn create_symlink(src_path: String, dest: String) -> std::io::Result<()> {
use std::os::unix::fs::symlink;
symlink(src_path, dest)
}
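// One possible shape for the TODO above (a sketch, not wired into the
// binding): on Windows, pick `symlink_file` or `symlink_dir` based on what
// `src_path` points at.
#[cfg(target_family = "windows")]
#[allow(dead_code)]
fn create_symlink_auto(src_path: String, dest: String) -> std::io::Result<()> {
    use std::os::windows::fs::{symlink_dir, symlink_file};
    if std::path::Path::new(&src_path).is_dir() {
        symlink_dir(src_path, dest)
    } else {
        symlink_file(src_path, dest)
    }
}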
fn copy_file<S: AsRef<Path>, D: AsRef<Path>>(src: S, dest: D) -> LuaResult<()> {
let mut dest = dest.as_ref().to_path_buf();
if dest.is_dir() {
let file_name = src.as_ref()
.file_name()
.map(|s| s.to_string_lossy().to_string())
.ok_or(LuaError::external(io::Error::from(io::ErrorKind::InvalidInput)))?;
dest.push(file_name);
};
fs::copy(src, dest).map(|_| ())
.map_err(LuaError::external)
}
fn recursive_copy<A: AsRef<Path>, B: AsRef<Path>>(src: A, dest: B) -> io::Result<()> {
let path = src.as_ref();
if !src.as_ref().exists() {
return Err(io::Error::from(io::ErrorKind::NotFound));
}
if !dest.as_ref().exists() {
fs::create_dir(&dest)?;
}
for entry in path.read_dir()? {
let src = entry.map(|e| e.path())?;
let src_name = match src.file_name().map(|s| s.to_string_lossy().to_string()) {
Some(s) => s,
None => return Err(io::Error::from(io::ErrorKind::InvalidData))
};
let re = Regex::new(r"^\.git").unwrap();
// don't copy the .git directory
if re.is_match(&src_name) {
continue;
}
let dest = dest.as_ref().join(src_name);
if src.is_file() {
fs::copy(src, &dest)?;
}
else {
fs::create_dir_all(&dest)?;
recursive_copy(src, &dest)?;
}
}
Ok(())
}
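// A sketch of the two copy helpers above (illustrative: runs against
// hypothetical scratch paths in the working directory).
#[cfg(test)]
mod copy_sketch {
    use super::*;
    #[test]
    fn file_into_dir_and_tree_copy() -> io::Result<()> {
        fs::create_dir_all("copy_sketch/sub")?;
        fs::write("copy_sketch/a.txt", b"hi")?;
        // copy_file appends the file name when `dest` is a directory:
        copy_file("copy_sketch/a.txt", "copy_sketch/sub").unwrap();
        assert!(Path::new("copy_sketch/sub/a.txt").is_file());
        // recursive_copy walks the tree, skipping `.git` entries:
        recursive_copy("copy_sketch", "copy_sketch_out")?;
        assert!(Path::new("copy_sketch_out/sub/a.txt").is_file());
        Ok(())
    }
}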
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn lua_fs () {
let lua = Lua::new();
init(&lua).unwrap();
lua.exec::<_, ()>(r#"
for entry in fs.entries("./") do
local md = fs.metadata(entry)
print(md.type .. ": " .. entry)
end
assert(fs.canonicalize("."), "expected path")
assert(fs.canonicalize("/no/such/path/here") == nil, "expected nil")
"#, None).unwrap();
}
} | //TODO: Rename to something suitable other than touch
//Probably deprecate for path:create_file
module.set("touch", lua.create_function( |_, path: String| { | random_line_split |
resolver.rs | use std::collections::HashMap;
use std::rc::Rc;
use crate::ast::{Expr, Identifier, Literal, Stmt, VisitorMut};
use crate::environment::Environment;
use crate::error::Reporter;
use crate::token::Position;
#[derive(Clone, PartialEq)]
pub enum FunctionKind {
None,
Function,
// TODO: add more kinds supposedly...
}
pub struct Resolver {
// track what things are currently in scope, for local block scopes
// (global scope is not tracked)
scopes: Vec<HashMap<String, bool>>,
// track if we are currently in a function, and if so what kind
current_fn: FunctionKind,
// for reporting errors found during this stage
err_reporter: Box<dyn Reporter>,
// keep track of errors encountered
num_errors: u64,
}
impl Resolver {
pub fn new<R: Reporter + 'static>(err_reporter: R) -> Self {
Resolver {
scopes: Vec::new(),
// start out at the top level
current_fn: FunctionKind::None,
err_reporter: Box::new(err_reporter),
num_errors: 0,
}
}
pub fn resolve(&mut self, statements: &mut Vec<Stmt>) -> Result<(), String> {
let environment = Environment::new(None);
for s in statements {
// visit all the statements, and catch any errors
match self.visit_stmt(s, &environment) {
Ok(_) => (),
Err(_) => {
self.num_errors += 1;
}
}
}
if self.num_errors > 0 {
Err(format!("resolver encountered {} error(s)", self.num_errors))
} else {
Ok(())
}
}
// report an error
pub fn error(&mut self, pos: Position, msg: &str) {
self.err_reporter.report(msg, "here", &pos);
self.num_errors += 1;
}
// start a new scope
pub fn begin_scope(&mut self) |
// exit the current scope
pub fn end_scope(&mut self) {
self.scopes.pop();
}
// declare a variable in the current scope
pub fn declare(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// check if this has already been declared
if scope.contains_key(&ident.name.to_string()) {
// report the error, but don't return it
self.error(
ident.pos.clone(),
&format!("variable `{}` re-declared in local scope", ident.name),
);
} else {
// mark that the var exists, but is not yet initialized
scope.insert(ident.name.to_string(), false);
}
}
}
}
// define a variable in the current scope
pub fn define(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// mark that the var exists, and is now initialized
scope.insert(ident.name.to_string(), true);
}
}
}
// figure out where the var will resolve, and
// store that in the interpreter
pub fn resolve_local(&mut self, name: &str, resolved_dist: &mut Option<usize>) {
// start at the innermost scope and work outwards
for (dist, scope) in self.scopes.iter().rev().enumerate() {
if scope.contains_key(name) {
// NOTE:
// For the book this info is stored in a HashMap in the interpreter,
// like HashMap<Expr, u64>,
// which I tried, but then `Eq` and `Hash` have to be derived for all kinds
// of things, and `f64` doesn't implement `Eq`, and I don't want to manually
// implement it, not to mention `Hash` (which I didn't try).
//
// So, where should I store this info?
// From the book: "One obvious place is right in the syntax tree node itself."
// (the book does not take that approach, because "it would require mucking
// around with our syntax tree generator")
//
// I'm not using their generator anyway, so that's where I'm going to store
// this info - in the AST node itself.
*resolved_dist = Some(dist);
return;
}
}
// not found, assume it's global
}
pub fn resolve_function(
&mut self,
params: &Vec<Identifier>,
body: &mut Stmt,
env: &Rc<Environment>,
kind: FunctionKind,
) -> Result<(), String> {
// use the call stack to save the enclosing function kind,
// then set the current one
let enclosing_fn = self.current_fn.clone();
self.current_fn = kind;
// create a new scope for the function body
self.begin_scope();
// bind vars for each of the function parameters
for param in params {
self.declare(param);
self.define(param);
}
self.visit_stmt(body, env)?;
self.end_scope();
// back to whatever function may be enclosing this one
self.current_fn = enclosing_fn;
Ok(())
}
}
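// The innermost-outwards search above, in isolation (an illustrative,
// standalone sketch): index 0 of the reversed walk is the innermost scope,
// so the reported distance is "how many scopes up" the name lives.
#[cfg(test)]
mod distance_sketch {
    use std::collections::HashMap;
    #[test]
    fn innermost_scope_wins() {
        let mut outer = HashMap::new();
        outer.insert("a".to_string(), true);
        let mut inner = HashMap::new();
        inner.insert("a".to_string(), true);
        let scopes = vec![outer, inner]; // last entry is the innermost scope
        let dist = scopes
            .iter()
            .rev()
            .enumerate()
            .find(|(_, scope)| scope.contains_key("a"))
            .map(|(d, _)| d);
        assert_eq!(dist, Some(0)); // shadowing: resolved in the innermost scope
    }
}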
// mut because the resolver needs to modify Expr with resolved distance
impl VisitorMut<()> for Resolver {
type Error = String;
fn visit_stmt(&mut self, s: &mut Stmt, env: &Rc<Environment>) -> Result<(), String> {
match s {
Stmt::Block(statements) => {
// blocks create the local scopes for statements
self.begin_scope();
for stmt in statements {
// just have to resolve each statement in turn
self.visit_stmt(stmt, env)?;
}
self.end_scope();
}
Stmt::Expression(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Function(name, params, ref mut body) => {
// functions bind var names and create a local scope
// first, handle the binding of the function name
// (eagerly, so the function can recursively refer to itself)
self.declare(name);
self.define(name);
// then handle the function body
self.resolve_function(params, body, env, FunctionKind::Function)?;
}
Stmt::If(ref mut if_expr, ref mut then_stmt, ref mut opt_else_stmt) => {
// resolve the condition and both branches
self.visit_expr(if_expr, env)?;
self.visit_stmt(then_stmt, env)?;
if let Some(s) = opt_else_stmt {
self.visit_stmt(s, env)?;
}
}
Stmt::Print(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Return(ref mut expr) => {
// check that we are actually in a function
// TODO: this should probably use the position of the Stmt
// (BUT, there is no Position for Stmt, so we'd have to implement that...)
if self.current_fn == FunctionKind::None {
self.error(expr.position().clone(), "cannot return from top-level code");
}
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Var(name, ref mut expr) => {
// this adds a new entry to the innermost scope
// variable binding is split into 2 steps - declaring and defining
self.declare(name);
self.visit_expr(expr, env)?;
self.define(name);
}
Stmt::While(ref mut condition_expr, ref mut body) => {
// resolve the condition and body
self.visit_expr(condition_expr, env)?;
self.visit_stmt(body, env)?;
}
}
Ok(())
}
fn visit_expr(&mut self, e: &mut Expr, env: &Rc<Environment>) -> Result<(), String> {
match e {
Expr::Assign(_pos, var_name, ref mut expr, ref mut resolved_vars) => {
// resolve the expr first in case it also contains other vars
self.visit_expr(expr, env)?;
// then resolve the var being assigned to
self.resolve_local(var_name, resolved_vars);
}
Expr::Binary(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve both operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Call(_pos, ref mut callee_expr, args) => {
// resolve the thing being called
self.visit_expr(callee_expr, env)?;
// then walk the arg list and resolve those
for arg in args {
self.visit_expr(arg, env)?;
}
}
Expr::Grouping(_pos, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Expr::Literal(_pos, _lit) => {
// nothing to do - literals don't mention vars, and don't have subexpressions
}
Expr::Logical(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve both operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Unary(_pos, _op, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the operand
}
Expr::Variable(pos, name, ref mut resolved_vars) => {
// have to check the scope maps to resolve var expressions
match self.scopes.last() {
None => (),
Some(scope) => {
// check if the var is referring to itself in its initializer
if scope.get(name) == Some(&false) {
self.error(pos.clone(), "cannot read local var in its initializer");
}
}
}
// actually resolve the var
self.resolve_local(name, resolved_vars);
}
}
Ok(())
}
fn visit_literal(&self, _l: &Literal, _env: &Rc<Environment>) -> Result<(), String> {
// nothing to do for these - not going to actually call the visit method above
Ok(())
}
}
| {
self.scopes.push(HashMap::new());
} | identifier_body |
resolver.rs | use std::collections::HashMap;
use std::rc::Rc;
use crate::ast::{Expr, Identifier, Literal, Stmt, VisitorMut};
use crate::environment::Environment;
use crate::error::Reporter;
use crate::token::Position;
#[derive(Clone, PartialEq)]
pub enum FunctionKind {
None,
Function,
// TODO: add more kinds supposedly...
}
pub struct Resolver {
// track what things are currently in scope, for local block scopes
// (global scope is not tracked)
scopes: Vec<HashMap<String, bool>>,
// track if we are currently in a function, and if so what kind
current_fn: FunctionKind,
// for reporting errors found during this stage
err_reporter: Box<dyn Reporter>,
// keep track of errors encountered
num_errors: u64,
}
impl Resolver {
pub fn new<R: Reporter + 'static>(err_reporter: R) -> Self {
Resolver {
scopes: Vec::new(),
// start out at the top level
current_fn: FunctionKind::None,
err_reporter: Box::new(err_reporter),
num_errors: 0,
}
}
pub fn resolve(&mut self, statements: &mut Vec<Stmt>) -> Result<(), String> {
let environment = Environment::new(None);
for s in statements {
// visit all the statements, and catch any errors
match self.visit_stmt(s, &environment) {
Ok(_) => (),
Err(_) => {
self.num_errors += 1;
}
}
}
if self.num_errors > 0 {
Err(format!("resolver encountered {} error(s)", self.num_errors))
} else {
Ok(())
}
}
// report an error
pub fn error(&mut self, pos: Position, msg: &str) {
self.err_reporter.report(msg, "here", &pos);
self.num_errors += 1;
}
// start a new scope
pub fn begin_scope(&mut self) {
self.scopes.push(HashMap::new());
}
// exit the current scope
pub fn end_scope(&mut self) {
self.scopes.pop();
}
// declare a variable in the current scope
pub fn | (&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// check if this has already been declared
if scope.contains_key(&ident.name.to_string()) {
// report the error, but don't return it
self.error(
ident.pos.clone(),
&format!("variable `{}` re-declared in local scope", ident.name),
);
} else {
// mark that the var exists, but is not yet initialized
scope.insert(ident.name.to_string(), false);
}
}
}
}
// define a variable in the current scope
pub fn define(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// mark that the var exists, and is now initialized
scope.insert(ident.name.to_string(), true);
}
}
}
// figure out where the var will resolve, and
// store that in the interpreter
pub fn resolve_local(&mut self, name: &str, resolved_dist: &mut Option<usize>) {
// start at the innermost scope and work outwards
for (dist, scope) in self.scopes.iter().rev().enumerate() {
if scope.contains_key(name) {
// NOTE:
// For the book this info is stored in a HashMap in the interpreter,
// like HashMap<Expr, u64>,
// which I tried, but then `Eq` and `Hash` have to be derived for all kinds
// of things, and `f64` doesn't implement `Eq`, and I don't want to manually
// implement it, not to mention `Hash` (which I didn't try).
//
// So, where should I store this info?
// From the book: "One obvious place is right in the syntax tree node itself."
// (the book does not take that approach, because "it would require mucking
// around with our syntax tree generator")
//
// I'm not using their generator anyway, so that's where I'm going to store
// this info - in the AST node itself.
*resolved_dist = Some(dist);
return;
}
}
// not found, assume it's global
}
pub fn resolve_function(
&mut self,
params: &Vec<Identifier>,
body: &mut Stmt,
env: &Rc<Environment>,
kind: FunctionKind,
) -> Result<(), String> {
// use the call stack to save the enclosing function kind,
// then set the current one
let enclosing_fn = self.current_fn.clone();
self.current_fn = kind;
// create a new scope for the function body
self.begin_scope();
// bind vars for each of the function parameters
for param in params {
self.declare(param);
self.define(param);
}
self.visit_stmt(body, env)?;
self.end_scope();
// back to whatever function may be enclosing this one
self.current_fn = enclosing_fn;
Ok(())
}
}
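// Illustrative, standalone sketch of the declare -> initialize -> define
// handshake above, using the same HashMap bookkeeping: `false` means
// "declared but not yet initialized", which is exactly the marker the
// initializer self-reference check in visit_expr below looks for.
#[cfg(test)]
mod binding_sketch {
    use std::collections::HashMap;
    #[test]
    fn initializer_sees_uninitialized_marker() {
        let mut scope: HashMap<String, bool> = HashMap::new();
        scope.insert("a".into(), false); // declare: name exists, uninitialized
        assert_eq!(scope.get("a"), Some(&false)); // reading now is the error case
        scope.insert("a".into(), true); // define: initializer finished
        assert_eq!(scope.get("a"), Some(&true)); // safe to read
    }
}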
// mut because the resolver needs to modify Expr with resolved distance
impl VisitorMut<()> for Resolver {
type Error = String;
fn visit_stmt(&mut self, s: &mut Stmt, env: &Rc<Environment>) -> Result<(), String> {
match s {
Stmt::Block(statements) => {
// blocks create the local scopes for statements
self.begin_scope();
for stmt in statements {
// just have to resolve each statement in turn
self.visit_stmt(stmt, env)?;
}
self.end_scope();
}
Stmt::Expression(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Function(name, params, ref mut body) => {
// functions bind var names and create a local scope
// first, handle the binding of the function name
// (eagerly, so the function can recursively refer to itself)
self.declare(name);
self.define(name);
// then handle the function body
self.resolve_function(params, body, env, FunctionKind::Function)?;
}
Stmt::If(ref mut if_expr, ref mut then_stmt, ref mut opt_else_stmt) => {
// resolve the condition and both branches
self.visit_expr(if_expr, env)?;
self.visit_stmt(then_stmt, env)?;
if let Some(s) = opt_else_stmt {
self.visit_stmt(s, env)?;
}
}
Stmt::Print(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Return(ref mut expr) => {
// check that we are actually in a function
// TODO: this should probably use the position of the Stmt
// (BUT, there is no Position for Stmt, so we'd have to implement that...)
if self.current_fn == FunctionKind::None {
self.error(expr.position().clone(), "cannot return from top-level code");
}
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Var(name, ref mut expr) => {
// this adds a new entry to the innermost scope
// variable binding is split into 2 steps - declaring and defining
self.declare(name);
self.visit_expr(expr, env)?;
self.define(name);
}
Stmt::While(ref mut condition_expr, ref mut body) => {
// resolve the condition and body
self.visit_expr(condition_expr, env)?;
self.visit_stmt(body, env)?;
}
}
Ok(())
}
fn visit_expr(&mut self, e: &mut Expr, env: &Rc<Environment>) -> Result<(), String> {
match e {
Expr::Assign(_pos, var_name, ref mut expr, ref mut resolved_vars) => {
// resolve the expr first in case it also contains other vars
self.visit_expr(expr, env)?;
// then resolve the var being assigned to
self.resolve_local(var_name, resolved_vars);
}
Expr::Binary(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve both operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Call(_pos, ref mut callee_expr, args) => {
// resolve the thing being called
self.visit_expr(callee_expr, env)?;
// then walk the arg list and resolve those
for arg in args {
self.visit_expr(arg, env)?;
}
}
Expr::Grouping(_pos, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Expr::Literal(_pos, _lit) => {
// nothing to do - literals don't mention vars, and don't have subexpressions
}
Expr::Logical(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve both operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Unary(_pos, _op, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the operand
}
Expr::Variable(pos, name, ref mut resolved_vars) => {
// have to check the scope maps to resolve var expressions
match self.scopes.last() {
None => (),
Some(scope) => {
// check if the var is referring to itself in its initializer
if scope.get(name) == Some(&false) {
self.error(pos.clone(), "cannot read local var in its initializer");
}
}
}
// actually resolve the var
self.resolve_local(name, resolved_vars);
}
}
Ok(())
}
fn visit_literal(&self, _l: &Literal, _env: &Rc<Environment>) -> Result<(), String> {
// nothing to do for these - not going to actually call the visit method above
Ok(())
}
}
| declare | identifier_name |
resolver.rs | use std::collections::HashMap;
use std::rc::Rc;
use crate::ast::{Expr, Identifier, Literal, Stmt, VisitorMut};
use crate::environment::Environment;
use crate::error::Reporter;
use crate::token::Position;
#[derive(Clone, PartialEq)]
pub enum FunctionKind {
None,
Function,
// TODO: add more kinds supposedly...
}
pub struct Resolver {
// track what things are currently in scope, for local block scopes
// (global scope is not tracked)
scopes: Vec<HashMap<String, bool>>,
// track if we are currently in a function, and if so what kind
current_fn: FunctionKind,
// for reporting errors found during this stage
err_reporter: Box<dyn Reporter>,
// keep track of errors encountered
num_errors: u64,
}
impl Resolver {
pub fn new<R: Reporter + 'static>(err_reporter: R) -> Self {
Resolver {
scopes: Vec::new(),
// start out at the top level
current_fn: FunctionKind::None,
err_reporter: Box::new(err_reporter),
num_errors: 0,
}
}
pub fn resolve(&mut self, statements: &mut Vec<Stmt>) -> Result<(), String> {
let environment = Environment::new(None);
for s in statements {
// visit all the statements, and catch any errors
match self.visit_stmt(s, &environment) {
Ok(_) => (),
Err(_) => {
self.num_errors += 1;
}
}
}
if self.num_errors > 0 {
Err(format!("resolver encountered {} error(s)", self.num_errors))
} else {
Ok(())
}
}
// report an error
pub fn error(&mut self, pos: Position, msg: &str) {
self.err_reporter.report(msg, "here", &pos);
self.num_errors += 1;
}
// start a new scope
pub fn begin_scope(&mut self) {
self.scopes.push(HashMap::new());
}
// exit the current scope
pub fn end_scope(&mut self) {
self.scopes.pop();
}
// declare a variable in the current scope
pub fn declare(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// check if this has already been declared
if scope.contains_key(&ident.name.to_string()) {
// report the error, but don't return it
self.error(
ident.pos.clone(),
&format!("variable `{}` re-declared in local scope", ident.name),
);
} else {
// mark that the var exists, but is not yet initialized
scope.insert(ident.name.to_string(), false);
}
}
}
}
// define a variable in the current scope
pub fn define(&mut self, ident: &Identifier) {
// try to access the top element of the stack
match self.scopes.last_mut() {
// if empty, do nothing (don't worry about global vars)
None => (),
Some(scope) => {
// mark that the var exists, and is now initialized
scope.insert(ident.name.to_string(), true);
}
}
}
// figure out where the var will resolve, and
// store that in the interpreter
pub fn resolve_local(&mut self, name: &str, resolved_dist: &mut Option<usize>) {
// start at the innermost scope and work outwards
for (dist, scope) in self.scopes.iter().rev().enumerate() {
if scope.contains_key(name) {
// NOTE:
// For the book this info is stored in a HashMap in the interpreter,
// like HashMap<Expr, u64>,
// which I tried, but then `Eq` and `Hash` have to be derived for all kinds
// of things, and `f64` doesn't implement `Eq`, and I don't want to manually
// implement it, not to mention `Hash` (which I didn't try).
//
// So, where should I store this info?
// From the book: "One obvious place is right in the syntax tree node itself."
// (the book does not take that approach, because "it would require mucking
// around with our syntax tree generator")
//
// I'm not using their generator anyway, so that's where I'm going to store
// this info - in the AST node itself.
*resolved_dist = Some(dist);
return;
}
}
// not found, assume it's global
}
pub fn resolve_function(
&mut self,
params: &Vec<Identifier>,
body: &mut Stmt,
env: &Rc<Environment>,
kind: FunctionKind,
) -> Result<(), String> {
// use the call stack to save the enclosing function kind,
// then set the current one
let enclosing_fn = self.current_fn.clone();
self.current_fn = kind;
// create a new scope for the function body
self.begin_scope();
// bind vars for each of the function parameters
for param in params {
self.declare(param);
self.define(param);
}
self.visit_stmt(body, env)?;
self.end_scope();
// back to whatever function may be enclosing this one
self.current_fn = enclosing_fn;
Ok(())
}
}
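// Standalone sketch of the save/restore discipline `resolve_function` above
// uses for `current_fn`: function kinds nest like a stack, so leaving a
// nested body restores the enclosing state (illustrative only).
#[cfg(test)]
mod fn_kind_sketch {
    use super::FunctionKind;
    #[test]
    fn nested_function_restores_kind() {
        let mut current = FunctionKind::None;
        let enclosing = current.clone(); // entering a function body
        current = FunctionKind::Function;
        assert!(current == FunctionKind::Function);
        current = enclosing; // leaving the body
        assert!(current == FunctionKind::None);
    }
}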
// mut because the resolver needs to modify Expr with resolved distance
impl VisitorMut<()> for Resolver {
type Error = String;
fn visit_stmt(&mut self, s: &mut Stmt, env: &Rc<Environment>) -> Result<(), String> {
match s {
Stmt::Block(statements) => {
// blocks create the local scopes for statements
self.begin_scope();
for stmt in statements {
// just have to resolve each statement in turn
self.visit_stmt(stmt, env)?;
}
self.end_scope();
}
Stmt::Expression(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Function(name, params, ref mut body) => {
// functions bind var names and create a local scope
// first, handle the binding of the function name
// (eagerly, so the function can recursively refer to itself)
self.declare(name);
self.define(name);
// then handle the function body
self.resolve_function(params, body, env, FunctionKind::Function)?;
}
Stmt::If(ref mut if_expr, ref mut then_stmt, ref mut opt_else_stmt) => {
// resolve the condition and both branches
self.visit_expr(if_expr, env)?;
self.visit_stmt(then_stmt, env)?;
if let Some(s) = opt_else_stmt {
self.visit_stmt(s, env)?;
}
}
Stmt::Print(ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Stmt::Return(ref mut expr) => {
// check that we are actually in a function
// TODO: this should probably use the position of the Stmt
// (BUT, there is no Position for Stmt, so we'd have to implement that...)
if self.current_fn == FunctionKind::None {
self.error(expr.position().clone(), "cannot return from top-level code");
}
self.visit_expr(expr, env)?; // resolve the parts
} | // variable binding is split into 2 steps - declaring and defining
self.declare(name);
self.visit_expr(expr, env)?;
self.define(name);
}
Stmt::While(ref mut condition_expr, ref mut body) => {
// resolve the condition and body
self.visit_expr(condition_expr, env)?;
self.visit_stmt(body, env)?;
}
}
Ok(())
}
fn visit_expr(&mut self, e: &mut Expr, env: &Rc<Environment>) -> Result<(), String> {
match e {
Expr::Assign(_pos, var_name, ref mut expr, ref mut resolved_vars) => {
// resolve the expr first in case it also contains other vars
self.visit_expr(expr, env)?;
// then resolve the var being assigned to
self.resolve_local(var_name, resolved_vars);
}
Expr::Binary(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve both operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Call(_pos, ref mut callee_expr, args) => {
// resolve the thing being called
self.visit_expr(callee_expr, env)?;
// then walk the arg list and resolve those
for arg in args {
self.visit_expr(arg, env)?;
}
}
Expr::Grouping(_pos, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the parts
}
Expr::Literal(_pos, _lit) => {
// nothing to do - literals don't mention vars, and don't have subexpressions
}
Expr::Logical(_pos, ref mut expr1, _op, ref mut expr2) => {
// resolve both operands
self.visit_expr(expr1, env)?;
self.visit_expr(expr2, env)?;
}
Expr::Unary(_pos, _op, ref mut expr) => {
self.visit_expr(expr, env)?; // resolve the operand
}
Expr::Variable(pos, name, ref mut resolved_vars) => {
// have to check the scope maps to resolve var expressions
match self.scopes.last() {
None => (),
Some(scope) => {
// check if the var is referring to itself in its initializer
if scope.get(name) == Some(&false) {
self.error(pos.clone(), "cannot read local var in its initializer");
}
}
}
// actually resolve the var
self.resolve_local(name, resolved_vars);
}
}
Ok(())
}
fn visit_literal(&self, _l: &Literal, _env: &Rc<Environment>) -> Result<(), String> {
// nothing to do for these - not going to actually call the visit method above
Ok(())
}
} | Stmt::Var(name, ref mut expr) => {
// this adds a new entry to the innermost scope | random_line_split |
youtube.rs | use anyhow::{Context, Result};
use chrono::offset::TimeZone;
use log::{debug, trace};
use crate::common::{Service, YoutubeID};
fn api_prefix() -> String |
/*
[
{
title: String,
videoId: String,
author: String,
authorId: String,
authorUrl: String,
videoThumbnails: [
{
quality: String,
url: String,
width: Int32,
height: Int32
}
],
description: String,
descriptionHtml: String,
viewCount: Int64,
published: Int64,
publishedText: String,
lengthSeconds: Int32,
paid: Bool,
premium: Bool
}
]
*/
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTVideoInfo {
title: String,
video_id: String,
video_thumbnails: Vec<YTThumbnailInfo>,
description: String,
length_seconds: i32,
paid: bool,
premium: bool,
published: i64,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTThumbnailInfo {
quality: Option<String>,
url: String,
width: i32,
height: i32,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTChannelInfo {
author: String,
author_id: String,
description: String,
author_thumbnails: Vec<YTThumbnailInfo>,
author_banners: Vec<YTThumbnailInfo>,
}
/// Important info about channel
#[derive(Debug)]
pub struct ChannelMetadata {
pub title: String,
pub thumbnail: String,
pub description: String,
}
/// Important info about a video
pub struct VideoInfo {
pub id: String,
pub url: String,
pub title: String,
pub description: String,
pub thumbnail_url: String,
pub published_at: chrono::DateTime<chrono::Utc>,
}
impl std::fmt::Debug for VideoInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"VideoInfo{{id: {:?}, title: {:?}, url: {:?}, published_at: {:?}}}",
self.id, self.title, self.url, self.published_at,
)
}
}
fn request_data<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> {
fn subreq<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> {
debug!("Retrieving URL {}", &url);
let resp = attohttpc::get(&url).send()?;
let text = resp.text()?;
trace!("Raw response: {}", &text);
let data: T = serde_json::from_str(&text)
.with_context(|| format!("Failed to parse response from {}", &url))?;
trace!("Raw deserialisation: {:?}", &data);
Ok(data)
}
let mut tries = 0;
let ret: Result<T> = loop {
let resp = subreq(url);
if let Ok(data) = resp {
break Ok(data);
}
debug!("Retrying request to {} because {:?}", &url, &resp);
if tries > 3 {
break resp;
}
tries += 1;
};
ret
}
/// Object to query data about given channel
#[derive(Debug)]
pub struct YoutubeQuery<'a> {
chan_id: &'a YoutubeID,
}
impl<'a> YoutubeQuery<'a> {
pub fn new(chan_id: &YoutubeID) -> YoutubeQuery {
YoutubeQuery { chan_id }
}
pub fn get_metadata(&self) -> Result<ChannelMetadata> {
let url = format!(
"{prefix}/api/v1/channels/{chanid}",
prefix = api_prefix(),
chanid = self.chan_id.id
);
let d: YTChannelInfo = request_data(&url)?;
Ok(ChannelMetadata {
title: d.author.clone(),
thumbnail: d.author_thumbnails[0].url.clone(),
description: d.description.clone(),
})
}
pub fn videos<'i>(&'i self) -> impl Iterator<Item = Result<VideoInfo>> + 'i {
// GET /api/v1/channels/:ucid/videos?page=1
fn get_page(chanid: &str, page: i32) -> Result<Vec<VideoInfo>> {
let url = format!(
"{prefix}/api/v1/channels/videos/{chanid}?page={page}",
prefix = api_prefix(),
chanid = chanid,
page = page,
);
let data: Vec<YTVideoInfo> = request_data(&url)?;
let ret: Vec<VideoInfo> = data
.iter()
.map(|d| VideoInfo {
id: d.video_id.clone(),
url: format!("http://youtube.com/watch?v={id}", id = d.video_id),
title: d.title.clone(),
description: d.description.clone(),
thumbnail_url: d.video_thumbnails.first().unwrap().url.clone(),
published_at: chrono::Utc.timestamp(d.published, 0),
})
.collect();
Ok(ret)
}
let mut page_num = 1;
use std::collections::VecDeque;
let mut completed = false;
let mut current_items: VecDeque<VideoInfo> = VecDeque::new();
let it = std::iter::from_fn(move || -> Option<Result<VideoInfo>> {
if completed {
return None;
}
if let Some(cur) = current_items.pop_front() {
// Iterate through previously stored items
Some(Ok(cur))
} else {
// If nothing is stored, get next page of videos
let data: Result<Vec<VideoInfo>> = get_page(&self.chan_id.id, page_num);
page_num += 1; // Increment for future
let nextup: Option<Result<VideoInfo>> = match data {
// Something went wrong, return an error item
Err(e) => {
// Error state, prevent future iteration
completed = true;
// Return error
Some(Err(e))
}
Ok(new_items) => {
                        if new_items.is_empty() {
// No more items, stop iterator
None
} else {
current_items.extend(new_items);
Some(Ok(current_items.pop_front().unwrap()))
}
}
};
nextup
}
});
it
}
}
/// Find channel ID either from a username or ID
use crate::common::ChannelID;
pub fn find_channel_id(name: &str, service: &Service) -> Result<ChannelID> {
match service {
Service::Youtube => {
debug!("Looking up by username");
let url = format!(
"{prefix}/api/v1/channels/{name}",
prefix = api_prefix(),
name = name
);
debug!("Retrieving URL {}", &url);
let resp = attohttpc::get(&url).send()?;
            let text = resp.text()?;
trace!("Raw response: {}", &text);
let data: YTChannelInfo = serde_json::from_str(&text)
.with_context(|| format!("Failed to parse response from {}", &url))?;
trace!("Raw deserialisation: {:?}", &data);
Ok(ChannelID::Youtube(YoutubeID { id: data.author_id }))
}
Service::Vimeo => Err(anyhow::anyhow!("Not yet implemented!")), // FIXME: This method belongs outside of youtube.rs
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_basic_find() -> Result<()> {
let _m1 = mockito::mock("GET", "/api/v1/channels/thegreatsd")
.with_body_from_file("testdata/channel_thegreatsd.json")
.create();
let _m2 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ")
.with_body_from_file("testdata/channel_thegreatsd.json") // Same content
.create();
let c = find_channel_id("thegreatsd", &crate::common::Service::Youtube)?;
assert_eq!(c.id_str(), "UCUBfKCp83QT19JCUekEdxOQ");
assert_eq!(c.service(), crate::common::Service::Youtube);
// Check same `ChannelID` is found by ID as by username
let by_id = find_channel_id("UCUBfKCp83QT19JCUekEdxOQ", &crate::common::Service::Youtube)?;
assert_eq!(by_id, c);
Ok(())
}
#[test]
fn test_video_list() -> Result<()> {
let mock_p1 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1",
)
.with_body_from_file("testdata/channel_climb_page1.json")
.create();
let mock_p2 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=2",
)
.with_body_from_file("testdata/channel_climb_page2.json")
.create();
let cid = crate::common::YoutubeID {
id: "UCOYYX1Ucvx87A7CSy5M99yw".into(),
};
let yt = YoutubeQuery::new(&cid);
let vids = yt.videos();
let result: Vec<super::VideoInfo> = vids
.into_iter()
            .skip(58) // 60 videos per page; we want to cross the page boundary
.take(3)
.collect::<Result<Vec<super::VideoInfo>>>()?;
assert_eq!(result[0].title, "Vlog 013 - Excommunication");
assert_eq!(result[1].title, "Vlog 012 - Only in America!");
assert_eq!(
result[2].title,
"Vlog 011 - The part of the house no-one ever sees!"
);
dbg!(result);
mock_p1.expect(1);
mock_p2.expect(1);
Ok(())
}
#[test]
fn test_video_list_error() -> Result<()> {
let mock_p1 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1",
)
.with_body("garbagenonsense")
.create();
let cid = crate::common::YoutubeID {
id: "UCOYYX1Ucvx87A7CSy5M99yw".into(),
};
let yt = YoutubeQuery::new(&cid);
let mut vids = yt.videos();
assert!(vids.next().unwrap().is_err());
mock_p1.expect(1);
assert!(vids.next().is_none());
Ok(())
}
#[test]
fn test_metadata() -> Result<()> {
let _m1 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ")
.with_body_from_file("testdata/channel_thegreatsd.json")
.create();
let cid = crate::common::YoutubeID {
id: "UCUBfKCp83QT19JCUekEdxOQ".into(),
};
let yt = YoutubeQuery::new(&cid);
let meta = yt.get_metadata()?;
assert_eq!(meta.title, "thegreatsd");
Ok(())
}
}
| {
#[cfg(test)]
let prefix: &str = &mockito::server_url();
#[cfg(not(test))]
let prefix: &str = "https://invidio.us";
prefix.into()
} | identifier_body |
youtube.rs | use anyhow::{Context, Result};
use chrono::offset::TimeZone;
use log::{debug, trace};
use crate::common::{Service, YoutubeID};
fn api_prefix() -> String {
#[cfg(test)]
let prefix: &str = &mockito::server_url();
#[cfg(not(test))]
let prefix: &str = "https://invidio.us";
prefix.into()
}
/*
[
{
title: String,
videoId: String,
author: String,
authorId: String,
authorUrl: String,
videoThumbnails: [
{
quality: String,
url: String,
width: Int32,
height: Int32
}
],
description: String,
descriptionHtml: String,
viewCount: Int64,
published: Int64,
publishedText: String,
    lengthSeconds: Int32,
paid: Bool,
premium: Bool
}
]
*/
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTVideoInfo {
title: String,
video_id: String,
video_thumbnails: Vec<YTThumbnailInfo>,
description: String,
length_seconds: i32,
paid: bool,
premium: bool,
published: i64,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTThumbnailInfo {
quality: Option<String>,
url: String,
width: i32,
height: i32,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTChannelInfo {
author: String,
author_id: String,
description: String,
author_thumbnails: Vec<YTThumbnailInfo>,
author_banners: Vec<YTThumbnailInfo>,
}
| /// Important info about channel
#[derive(Debug)]
pub struct ChannelMetadata {
pub title: String,
pub thumbnail: String,
pub description: String,
}
/// Important info about a video
pub struct VideoInfo {
pub id: String,
pub url: String,
pub title: String,
pub description: String,
pub thumbnail_url: String,
pub published_at: chrono::DateTime<chrono::Utc>,
}
impl std::fmt::Debug for VideoInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"VideoInfo{{id: {:?}, title: {:?}, url: {:?}, published_at: {:?}}}",
self.id, self.title, self.url, self.published_at,
)
}
}
fn request_data<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> {
fn subreq<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> {
debug!("Retrieving URL {}", &url);
let resp = attohttpc::get(&url).send()?;
let text = resp.text()?;
trace!("Raw response: {}", &text);
let data: T = serde_json::from_str(&text)
.with_context(|| format!("Failed to parse response from {}", &url))?;
trace!("Raw deserialisation: {:?}", &data);
Ok(data)
}
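    // Retry transient failures: the loop below makes at most five attempts
    // (the first request plus up to four retries) before handing back the
    // last error from `subreq`.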
let mut tries = 0;
let ret: Result<T> = loop {
let resp = subreq(url);
if let Ok(data) = resp {
break Ok(data);
}
debug!("Retrying request to {} because {:?}", &url, &resp);
if tries > 3 {
break resp;
}
tries += 1;
};
ret
}
/// Object to query data about given channel
#[derive(Debug)]
pub struct YoutubeQuery<'a> {
chan_id: &'a YoutubeID,
}
impl<'a> YoutubeQuery<'a> {
pub fn new(chan_id: &YoutubeID) -> YoutubeQuery {
YoutubeQuery { chan_id }
}
pub fn get_metadata(&self) -> Result<ChannelMetadata> {
let url = format!(
"{prefix}/api/v1/channels/{chanid}",
prefix = api_prefix(),
chanid = self.chan_id.id
);
let d: YTChannelInfo = request_data(&url)?;
Ok(ChannelMetadata {
title: d.author.clone(),
thumbnail: d.author_thumbnails[0].url.clone(),
description: d.description.clone(),
})
}
pub fn videos<'i>(&'i self) -> impl Iterator<Item = Result<VideoInfo>> + 'i {
// GET /api/v1/channels/:ucid/videos?page=1
fn get_page(chanid: &str, page: i32) -> Result<Vec<VideoInfo>> {
let url = format!(
"{prefix}/api/v1/channels/videos/{chanid}?page={page}",
prefix = api_prefix(),
chanid = chanid,
page = page,
);
let data: Vec<YTVideoInfo> = request_data(&url)?;
let ret: Vec<VideoInfo> = data
.iter()
.map(|d| VideoInfo {
id: d.video_id.clone(),
url: format!("http://youtube.com/watch?v={id}", id = d.video_id),
title: d.title.clone(),
description: d.description.clone(),
thumbnail_url: d.video_thumbnails.first().unwrap().url.clone(),
published_at: chrono::Utc.timestamp(d.published, 0),
})
.collect();
Ok(ret)
}
let mut page_num = 1;
use std::collections::VecDeque;
let mut completed = false;
let mut current_items: VecDeque<VideoInfo> = VecDeque::new();
let it = std::iter::from_fn(move || -> Option<Result<VideoInfo>> {
if completed {
return None;
}
if let Some(cur) = current_items.pop_front() {
// Iterate through previously stored items
Some(Ok(cur))
} else {
// If nothing is stored, get next page of videos
let data: Result<Vec<VideoInfo>> = get_page(&self.chan_id.id, page_num);
page_num += 1; // Increment for future
let nextup: Option<Result<VideoInfo>> = match data {
// Something went wrong, return an error item
Err(e) => {
// Error state, prevent future iteration
completed = true;
// Return error
Some(Err(e))
}
Ok(new_items) => {
                        if new_items.is_empty() {
// No more items, stop iterator
None
} else {
current_items.extend(new_items);
Some(Ok(current_items.pop_front().unwrap()))
}
}
};
nextup
}
});
it
}
}
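// Usage sketch (illustrative only; assumes a reachable Invidious instance, or
// the mocked `api_prefix()` used by the tests below, and a caller that returns
// `Result` so the `?` operator applies):
//
//     let chan = YoutubeID { id: "UCUBfKCp83QT19JCUekEdxOQ".into() };
//     let yt = YoutubeQuery::new(&chan);
//     for video in yt.videos().take(5) {
//         println!("{:?}", video?);
//     }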
/// Find channel ID either from a username or ID
use crate::common::ChannelID;
pub fn find_channel_id(name: &str, service: &Service) -> Result<ChannelID> {
match service {
Service::Youtube => {
debug!("Looking up by username");
let url = format!(
"{prefix}/api/v1/channels/{name}",
prefix = api_prefix(),
name = name
);
debug!("Retrieving URL {}", &url);
let resp = attohttpc::get(&url).send()?;
            let text = resp.text()?;
trace!("Raw response: {}", &text);
let data: YTChannelInfo = serde_json::from_str(&text)
.with_context(|| format!("Failed to parse response from {}", &url))?;
trace!("Raw deserialisation: {:?}", &data);
Ok(ChannelID::Youtube(YoutubeID { id: data.author_id }))
}
Service::Vimeo => Err(anyhow::anyhow!("Not yet implemented!")), // FIXME: This method belongs outside of youtube.rs
}
}
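// Illustrative call: `find_channel_id("thegreatsd", &Service::Youtube)` resolves
// a user name to its canonical channel ID; passing the `UC…`-prefixed ID itself
// works the same way (see `test_basic_find` below).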
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_basic_find() -> Result<()> {
let _m1 = mockito::mock("GET", "/api/v1/channels/thegreatsd")
.with_body_from_file("testdata/channel_thegreatsd.json")
.create();
let _m2 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ")
.with_body_from_file("testdata/channel_thegreatsd.json") // Same content
.create();
let c = find_channel_id("thegreatsd", &crate::common::Service::Youtube)?;
assert_eq!(c.id_str(), "UCUBfKCp83QT19JCUekEdxOQ");
assert_eq!(c.service(), crate::common::Service::Youtube);
// Check same `ChannelID` is found by ID as by username
let by_id = find_channel_id("UCUBfKCp83QT19JCUekEdxOQ", &crate::common::Service::Youtube)?;
assert_eq!(by_id, c);
Ok(())
}
#[test]
fn test_video_list() -> Result<()> {
let mock_p1 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1",
)
.with_body_from_file("testdata/channel_climb_page1.json")
.create();
let mock_p2 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=2",
)
.with_body_from_file("testdata/channel_climb_page2.json")
.create();
let cid = crate::common::YoutubeID {
id: "UCOYYX1Ucvx87A7CSy5M99yw".into(),
};
let yt = YoutubeQuery::new(&cid);
let vids = yt.videos();
let result: Vec<super::VideoInfo> = vids
.into_iter()
            .skip(58) // 60 videos per page; we want to cross the page boundary
.take(3)
.collect::<Result<Vec<super::VideoInfo>>>()?;
assert_eq!(result[0].title, "Vlog 013 - Excommunication");
assert_eq!(result[1].title, "Vlog 012 - Only in America!");
assert_eq!(
result[2].title,
"Vlog 011 - The part of the house no-one ever sees!"
);
dbg!(result);
mock_p1.expect(1);
mock_p2.expect(1);
Ok(())
}
#[test]
fn test_video_list_error() -> Result<()> {
let mock_p1 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1",
)
.with_body("garbagenonsense")
.create();
let cid = crate::common::YoutubeID {
id: "UCOYYX1Ucvx87A7CSy5M99yw".into(),
};
let yt = YoutubeQuery::new(&cid);
let mut vids = yt.videos();
assert!(vids.next().unwrap().is_err());
mock_p1.expect(1);
assert!(vids.next().is_none());
Ok(())
}
#[test]
fn test_metadata() -> Result<()> {
let _m1 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ")
.with_body_from_file("testdata/channel_thegreatsd.json")
.create();
let cid = crate::common::YoutubeID {
id: "UCUBfKCp83QT19JCUekEdxOQ".into(),
};
let yt = YoutubeQuery::new(&cid);
let meta = yt.get_metadata()?;
assert_eq!(meta.title, "thegreatsd");
Ok(())
}
} | random_line_split |
|
youtube.rs | use anyhow::{Context, Result};
use chrono::offset::TimeZone;
use log::{debug, trace};
use crate::common::{Service, YoutubeID};
fn api_prefix() -> String {
#[cfg(test)]
let prefix: &str = &mockito::server_url();
#[cfg(not(test))]
let prefix: &str = "https://invidio.us";
prefix.into()
}
/*
[
{
title: String,
videoId: String,
author: String,
authorId: String,
authorUrl: String,
videoThumbnails: [
{
quality: String,
url: String,
width: Int32,
height: Int32
}
],
description: String,
descriptionHtml: String,
viewCount: Int64,
published: Int64,
publishedText: String,
    lengthSeconds: Int32,
paid: Bool,
premium: Bool
}
]
*/
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTVideoInfo {
title: String,
video_id: String,
video_thumbnails: Vec<YTThumbnailInfo>,
description: String,
length_seconds: i32,
paid: bool,
premium: bool,
published: i64,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTThumbnailInfo {
quality: Option<String>,
url: String,
width: i32,
height: i32,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTChannelInfo {
author: String,
author_id: String,
description: String,
author_thumbnails: Vec<YTThumbnailInfo>,
author_banners: Vec<YTThumbnailInfo>,
}
/// Important info about channel
#[derive(Debug)]
pub struct ChannelMetadata {
pub title: String,
pub thumbnail: String,
pub description: String,
}
/// Important info about a video
pub struct VideoInfo {
pub id: String,
pub url: String,
pub title: String,
pub description: String,
pub thumbnail_url: String,
pub published_at: chrono::DateTime<chrono::Utc>,
}
impl std::fmt::Debug for VideoInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"VideoInfo{{id: {:?}, title: {:?}, url: {:?}, published_at: {:?}}}",
self.id, self.title, self.url, self.published_at,
)
}
}
fn request_data<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> {
fn subreq<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> {
debug!("Retrieving URL {}", &url);
let resp = attohttpc::get(&url).send()?;
let text = resp.text()?;
trace!("Raw response: {}", &text);
let data: T = serde_json::from_str(&text)
.with_context(|| format!("Failed to parse response from {}", &url))?;
trace!("Raw deserialisation: {:?}", &data);
Ok(data)
}
let mut tries = 0;
let ret: Result<T> = loop {
let resp = subreq(url);
if let Ok(data) = resp {
break Ok(data);
}
debug!("Retrying request to {} because {:?}", &url, &resp);
if tries > 3 {
break resp;
}
tries += 1;
};
ret
}
/// Object to query data about given channel
#[derive(Debug)]
pub struct YoutubeQuery<'a> {
chan_id: &'a YoutubeID,
}
impl<'a> YoutubeQuery<'a> {
pub fn new(chan_id: &YoutubeID) -> YoutubeQuery {
YoutubeQuery { chan_id }
}
pub fn get_metadata(&self) -> Result<ChannelMetadata> {
let url = format!(
"{prefix}/api/v1/channels/{chanid}",
prefix = api_prefix(),
chanid = self.chan_id.id
);
let d: YTChannelInfo = request_data(&url)?;
Ok(ChannelMetadata {
title: d.author.clone(),
thumbnail: d.author_thumbnails[0].url.clone(),
description: d.description.clone(),
})
}
pub fn videos<'i>(&'i self) -> impl Iterator<Item = Result<VideoInfo>> + 'i {
// GET /api/v1/channels/:ucid/videos?page=1
fn get_page(chanid: &str, page: i32) -> Result<Vec<VideoInfo>> {
let url = format!(
"{prefix}/api/v1/channels/videos/{chanid}?page={page}",
prefix = api_prefix(),
chanid = chanid,
page = page,
);
let data: Vec<YTVideoInfo> = request_data(&url)?;
let ret: Vec<VideoInfo> = data
.iter()
.map(|d| VideoInfo {
id: d.video_id.clone(),
url: format!("http://youtube.com/watch?v={id}", id = d.video_id),
title: d.title.clone(),
description: d.description.clone(),
thumbnail_url: d.video_thumbnails.first().unwrap().url.clone(),
published_at: chrono::Utc.timestamp(d.published, 0),
})
.collect();
Ok(ret)
}
let mut page_num = 1;
use std::collections::VecDeque;
let mut completed = false;
let mut current_items: VecDeque<VideoInfo> = VecDeque::new();
let it = std::iter::from_fn(move || -> Option<Result<VideoInfo>> {
if completed {
return None;
}
if let Some(cur) = current_items.pop_front() {
// Iterate through previously stored items
Some(Ok(cur))
} else {
// If nothing is stored, get next page of videos
let data: Result<Vec<VideoInfo>> = get_page(&self.chan_id.id, page_num);
page_num += 1; // Increment for future
let nextup: Option<Result<VideoInfo>> = match data {
// Something went wrong, return an error item
Err(e) => |
Ok(new_items) => {
                        if new_items.is_empty() {
// No more items, stop iterator
None
} else {
current_items.extend(new_items);
Some(Ok(current_items.pop_front().unwrap()))
}
}
};
nextup
}
});
it
}
}
/// Find channel ID either from a username or ID
use crate::common::ChannelID;
pub fn find_channel_id(name: &str, service: &Service) -> Result<ChannelID> {
match service {
Service::Youtube => {
debug!("Looking up by username");
let url = format!(
"{prefix}/api/v1/channels/{name}",
prefix = api_prefix(),
name = name
);
debug!("Retrieving URL {}", &url);
let resp = attohttpc::get(&url).send()?;
            let text = resp.text()?;
trace!("Raw response: {}", &text);
let data: YTChannelInfo = serde_json::from_str(&text)
.with_context(|| format!("Failed to parse response from {}", &url))?;
trace!("Raw deserialisation: {:?}", &data);
Ok(ChannelID::Youtube(YoutubeID { id: data.author_id }))
}
Service::Vimeo => Err(anyhow::anyhow!("Not yet implemented!")), // FIXME: This method belongs outside of youtube.rs
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_basic_find() -> Result<()> {
let _m1 = mockito::mock("GET", "/api/v1/channels/thegreatsd")
.with_body_from_file("testdata/channel_thegreatsd.json")
.create();
let _m2 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ")
.with_body_from_file("testdata/channel_thegreatsd.json") // Same content
.create();
let c = find_channel_id("thegreatsd", &crate::common::Service::Youtube)?;
assert_eq!(c.id_str(), "UCUBfKCp83QT19JCUekEdxOQ");
assert_eq!(c.service(), crate::common::Service::Youtube);
// Check same `ChannelID` is found by ID as by username
let by_id = find_channel_id("UCUBfKCp83QT19JCUekEdxOQ", &crate::common::Service::Youtube)?;
assert_eq!(by_id, c);
Ok(())
}
#[test]
fn test_video_list() -> Result<()> {
let mock_p1 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1",
)
.with_body_from_file("testdata/channel_climb_page1.json")
.create();
let mock_p2 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=2",
)
.with_body_from_file("testdata/channel_climb_page2.json")
.create();
let cid = crate::common::YoutubeID {
id: "UCOYYX1Ucvx87A7CSy5M99yw".into(),
};
let yt = YoutubeQuery::new(&cid);
let vids = yt.videos();
let result: Vec<super::VideoInfo> = vids
.into_iter()
            .skip(58) // 60 videos per page; we want to cross the page boundary
.take(3)
.collect::<Result<Vec<super::VideoInfo>>>()?;
assert_eq!(result[0].title, "Vlog 013 - Excommunication");
assert_eq!(result[1].title, "Vlog 012 - Only in America!");
assert_eq!(
result[2].title,
"Vlog 011 - The part of the house no-one ever sees!"
);
dbg!(result);
mock_p1.expect(1);
mock_p2.expect(1);
Ok(())
}
#[test]
fn test_video_list_error() -> Result<()> {
let mock_p1 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1",
)
.with_body("garbagenonsense")
.create();
let cid = crate::common::YoutubeID {
id: "UCOYYX1Ucvx87A7CSy5M99yw".into(),
};
let yt = YoutubeQuery::new(&cid);
let mut vids = yt.videos();
assert!(vids.next().unwrap().is_err());
mock_p1.expect(1);
assert!(vids.next().is_none());
Ok(())
}
#[test]
fn test_metadata() -> Result<()> {
let _m1 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ")
.with_body_from_file("testdata/channel_thegreatsd.json")
.create();
let cid = crate::common::YoutubeID {
id: "UCUBfKCp83QT19JCUekEdxOQ".into(),
};
let yt = YoutubeQuery::new(&cid);
let meta = yt.get_metadata()?;
assert_eq!(meta.title, "thegreatsd");
Ok(())
}
}
| {
// Error state, prevent future iteration
completed = true;
// Return error
Some(Err(e))
} | conditional_block |
youtube.rs | use anyhow::{Context, Result};
use chrono::offset::TimeZone;
use log::{debug, trace};
use crate::common::{Service, YoutubeID};
fn api_prefix() -> String {
#[cfg(test)]
let prefix: &str = &mockito::server_url();
#[cfg(not(test))]
let prefix: &str = "https://invidio.us";
prefix.into()
}
/*
[
{
title: String,
videoId: String,
author: String,
authorId: String,
authorUrl: String,
videoThumbnails: [
{
quality: String,
url: String,
width: Int32,
height: Int32
}
],
description: String,
descriptionHtml: String,
viewCount: Int64,
published: Int64,
publishedText: String,
    lengthSeconds: Int32,
paid: Bool,
premium: Bool
}
]
*/
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTVideoInfo {
title: String,
video_id: String,
video_thumbnails: Vec<YTThumbnailInfo>,
description: String,
length_seconds: i32,
paid: bool,
premium: bool,
published: i64,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTThumbnailInfo {
quality: Option<String>,
url: String,
width: i32,
height: i32,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
struct YTChannelInfo {
author: String,
author_id: String,
description: String,
author_thumbnails: Vec<YTThumbnailInfo>,
author_banners: Vec<YTThumbnailInfo>,
}
/// Important info about channel
#[derive(Debug)]
pub struct ChannelMetadata {
pub title: String,
pub thumbnail: String,
pub description: String,
}
/// Important info about a video
pub struct VideoInfo {
pub id: String,
pub url: String,
pub title: String,
pub description: String,
pub thumbnail_url: String,
pub published_at: chrono::DateTime<chrono::Utc>,
}
impl std::fmt::Debug for VideoInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"VideoInfo{{id: {:?}, title: {:?}, url: {:?}, published_at: {:?}}}",
self.id, self.title, self.url, self.published_at,
)
}
}
fn request_data<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> {
fn subreq<T: serde::de::DeserializeOwned + std::fmt::Debug>(url: &str) -> Result<T> {
debug!("Retrieving URL {}", &url);
let resp = attohttpc::get(&url).send()?;
let text = resp.text()?;
trace!("Raw response: {}", &text);
let data: T = serde_json::from_str(&text)
.with_context(|| format!("Failed to parse response from {}", &url))?;
trace!("Raw deserialisation: {:?}", &data);
Ok(data)
}
let mut tries = 0;
let ret: Result<T> = loop {
let resp = subreq(url);
if let Ok(data) = resp {
break Ok(data);
}
debug!("Retrying request to {} because {:?}", &url, &resp);
if tries > 3 {
break resp;
}
tries += 1;
};
ret
}
/// Object to query data about given channel
#[derive(Debug)]
pub struct YoutubeQuery<'a> {
chan_id: &'a YoutubeID,
}
impl<'a> YoutubeQuery<'a> {
pub fn new(chan_id: &YoutubeID) -> YoutubeQuery {
YoutubeQuery { chan_id }
}
pub fn | (&self) -> Result<ChannelMetadata> {
let url = format!(
"{prefix}/api/v1/channels/{chanid}",
prefix = api_prefix(),
chanid = self.chan_id.id
);
let d: YTChannelInfo = request_data(&url)?;
Ok(ChannelMetadata {
title: d.author.clone(),
thumbnail: d.author_thumbnails[0].url.clone(),
description: d.description.clone(),
})
}
pub fn videos<'i>(&'i self) -> impl Iterator<Item = Result<VideoInfo>> + 'i {
// GET /api/v1/channels/:ucid/videos?page=1
fn get_page(chanid: &str, page: i32) -> Result<Vec<VideoInfo>> {
let url = format!(
"{prefix}/api/v1/channels/videos/{chanid}?page={page}",
prefix = api_prefix(),
chanid = chanid,
page = page,
);
let data: Vec<YTVideoInfo> = request_data(&url)?;
let ret: Vec<VideoInfo> = data
.iter()
.map(|d| VideoInfo {
id: d.video_id.clone(),
url: format!("http://youtube.com/watch?v={id}", id = d.video_id),
title: d.title.clone(),
description: d.description.clone(),
thumbnail_url: d.video_thumbnails.first().unwrap().url.clone(),
published_at: chrono::Utc.timestamp(d.published, 0),
})
.collect();
Ok(ret)
}
let mut page_num = 1;
use std::collections::VecDeque;
let mut completed = false;
let mut current_items: VecDeque<VideoInfo> = VecDeque::new();
let it = std::iter::from_fn(move || -> Option<Result<VideoInfo>> {
if completed {
return None;
}
if let Some(cur) = current_items.pop_front() {
// Iterate through previously stored items
Some(Ok(cur))
} else {
// If nothing is stored, get next page of videos
let data: Result<Vec<VideoInfo>> = get_page(&self.chan_id.id, page_num);
page_num += 1; // Increment for future
let nextup: Option<Result<VideoInfo>> = match data {
// Something went wrong, return an error item
Err(e) => {
// Error state, prevent future iteration
completed = true;
// Return error
Some(Err(e))
}
Ok(new_items) => {
                        if new_items.is_empty() {
// No more items, stop iterator
None
} else {
current_items.extend(new_items);
Some(Ok(current_items.pop_front().unwrap()))
}
}
};
nextup
}
});
it
}
}
/// Find channel ID either from a username or ID
use crate::common::ChannelID;
pub fn find_channel_id(name: &str, service: &Service) -> Result<ChannelID> {
match service {
Service::Youtube => {
debug!("Looking up by username");
let url = format!(
"{prefix}/api/v1/channels/{name}",
prefix = api_prefix(),
name = name
);
debug!("Retrieving URL {}", &url);
let resp = attohttpc::get(&url).send()?;
            let text = resp.text()?;
trace!("Raw response: {}", &text);
let data: YTChannelInfo = serde_json::from_str(&text)
.with_context(|| format!("Failed to parse response from {}", &url))?;
trace!("Raw deserialisation: {:?}", &data);
Ok(ChannelID::Youtube(YoutubeID { id: data.author_id }))
}
Service::Vimeo => Err(anyhow::anyhow!("Not yet implemented!")), // FIXME: This method belongs outside of youtube.rs
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_basic_find() -> Result<()> {
let _m1 = mockito::mock("GET", "/api/v1/channels/thegreatsd")
.with_body_from_file("testdata/channel_thegreatsd.json")
.create();
let _m2 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ")
.with_body_from_file("testdata/channel_thegreatsd.json") // Same content
.create();
let c = find_channel_id("thegreatsd", &crate::common::Service::Youtube)?;
assert_eq!(c.id_str(), "UCUBfKCp83QT19JCUekEdxOQ");
assert_eq!(c.service(), crate::common::Service::Youtube);
// Check same `ChannelID` is found by ID as by username
let by_id = find_channel_id("UCUBfKCp83QT19JCUekEdxOQ", &crate::common::Service::Youtube)?;
assert_eq!(by_id, c);
Ok(())
}
#[test]
fn test_video_list() -> Result<()> {
let mock_p1 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1",
)
.with_body_from_file("testdata/channel_climb_page1.json")
.create();
let mock_p2 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=2",
)
.with_body_from_file("testdata/channel_climb_page2.json")
.create();
let cid = crate::common::YoutubeID {
id: "UCOYYX1Ucvx87A7CSy5M99yw".into(),
};
let yt = YoutubeQuery::new(&cid);
let vids = yt.videos();
let result: Vec<super::VideoInfo> = vids
.into_iter()
            .skip(58) // 60 videos per page; we want to cross the page boundary
.take(3)
.collect::<Result<Vec<super::VideoInfo>>>()?;
assert_eq!(result[0].title, "Vlog 013 - Excommunication");
assert_eq!(result[1].title, "Vlog 012 - Only in America!");
assert_eq!(
result[2].title,
"Vlog 011 - The part of the house no-one ever sees!"
);
dbg!(result);
mock_p1.expect(1);
mock_p2.expect(1);
Ok(())
}
#[test]
fn test_video_list_error() -> Result<()> {
let mock_p1 = mockito::mock(
"GET",
"/api/v1/channels/videos/UCOYYX1Ucvx87A7CSy5M99yw?page=1",
)
.with_body("garbagenonsense")
.create();
let cid = crate::common::YoutubeID {
id: "UCOYYX1Ucvx87A7CSy5M99yw".into(),
};
let yt = YoutubeQuery::new(&cid);
let mut vids = yt.videos();
assert!(vids.next().unwrap().is_err());
mock_p1.expect(1);
assert!(vids.next().is_none());
Ok(())
}
#[test]
fn test_metadata() -> Result<()> {
let _m1 = mockito::mock("GET", "/api/v1/channels/UCUBfKCp83QT19JCUekEdxOQ")
.with_body_from_file("testdata/channel_thegreatsd.json")
.create();
let cid = crate::common::YoutubeID {
id: "UCUBfKCp83QT19JCUekEdxOQ".into(),
};
let yt = YoutubeQuery::new(&cid);
let meta = yt.get_metadata()?;
assert_eq!(meta.title, "thegreatsd");
Ok(())
}
}
| get_metadata | identifier_name |
x25519.rs | // -*- mode: rust; -*-
//
// This file is part of x25519-dalek.
// Copyright (c) 2017-2019 isis lovecruft
// Copyright (c) 2019 DebugSteven
// See LICENSE for licensing information.
//
// Authors:
// - isis agora lovecruft <[email protected]>
// - DebugSteven <[email protected]>
//! x25519 Diffie-Hellman key exchange
//!
//! This implements x25519 key exchange as specified by Mike Hamburg
//! and Adam Langley in [RFC7748](https://tools.ietf.org/html/rfc7748).
use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
use curve25519_dalek::montgomery::MontgomeryPoint;
use curve25519_dalek::scalar::Scalar;
use rand_core::CryptoRng;
use rand_core::RngCore;
use zeroize::Zeroize;
/// A Diffie-Hellman public key, corresponding to an [`EphemeralSecret`] or [`StaticSecret`] key.
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))]
#[cfg_attr(
feature = "serde",
derive(our_serde::Serialize, our_serde::Deserialize)
)]
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub struct PublicKey(pub(crate) MontgomeryPoint);
impl From<[u8; 32]> for PublicKey {
    /// Given a byte array, construct an x25519 `PublicKey`.
fn from(bytes: [u8; 32]) -> PublicKey {
PublicKey(MontgomeryPoint(bytes))
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
/// View this public key as a byte array.
#[inline]
pub fn as_bytes(&self) -> &[u8; 32] |
}
/// A short-lived Diffie-Hellman secret key that can only be used to compute a single
/// [`SharedSecret`].
///
/// This type is identical to the [`StaticSecret`] type, except that the
/// [`EphemeralSecret::diffie_hellman`] method consumes and then wipes the secret key, and there
/// are no serialization methods defined. This means that [`EphemeralSecret`]s can only be
/// generated from fresh randomness by [`EphemeralSecret::new`] and the compiler statically checks
/// that the resulting secret is used at most once.
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct EphemeralSecret(pub(crate) Scalar);
impl EphemeralSecret {
/// Perform a Diffie-Hellman key agreement between `self` and
/// `their_public` key to produce a [`SharedSecret`].
pub fn diffie_hellman(self, their_public: &PublicKey) -> SharedSecret {
SharedSecret(self.0 * their_public.0)
}
/// Generate an x25519 [`EphemeralSecret`] key.
pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self {
let mut bytes = [0u8; 32];
csprng.fill_bytes(&mut bytes);
EphemeralSecret(clamp_scalar(bytes))
}
}
impl<'a> From<&'a EphemeralSecret> for PublicKey {
/// Given an x25519 [`EphemeralSecret`] key, compute its corresponding [`PublicKey`].
fn from(secret: &'a EphemeralSecret) -> PublicKey {
PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery())
}
}
/// A Diffie-Hellman secret key that can be used to compute multiple [`SharedSecret`]s.
///
/// This type is identical to the [`EphemeralSecret`] type, except that the
/// [`StaticSecret::diffie_hellman`] method does not consume the secret key, and the type provides
/// serialization methods to save and load key material. This means that the secret may be used
/// multiple times (but does not *have to be*).
///
/// Some protocols, such as Noise, already handle the static/ephemeral distinction, so the
/// additional guarantees provided by [`EphemeralSecret`] are not helpful or would cause duplicate
/// code paths. In this case, it may be useful to
/// ```rust,ignore
/// use x25519_dalek::StaticSecret as SecretKey;
/// ```
/// since the only difference between the two is that [`StaticSecret`] does not enforce at
/// compile-time that the key is only used once.
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))]
#[cfg_attr(
feature = "serde",
derive(our_serde::Serialize, our_serde::Deserialize)
)]
#[derive(Clone, Zeroize)]
#[zeroize(drop)]
pub struct StaticSecret(
#[cfg_attr(feature = "serde", serde(with = "AllowUnreducedScalarBytes"))] pub(crate) Scalar,
);
impl StaticSecret {
/// Perform a Diffie-Hellman key agreement between `self` and
/// `their_public` key to produce a `SharedSecret`.
pub fn diffie_hellman(&self, their_public: &PublicKey) -> SharedSecret {
SharedSecret(&self.0 * their_public.0)
}
/// Generate an x25519 key.
pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self {
let mut bytes = [0u8; 32];
csprng.fill_bytes(&mut bytes);
StaticSecret(clamp_scalar(bytes))
}
/// Extract this key's bytes for serialization.
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
}
impl From<[u8; 32]> for StaticSecret {
/// Load a secret key from a byte array.
fn from(bytes: [u8; 32]) -> StaticSecret {
StaticSecret(clamp_scalar(bytes))
}
}
impl<'a> From<&'a StaticSecret> for PublicKey {
/// Given an x25519 [`StaticSecret`] key, compute its corresponding [`PublicKey`].
fn from(secret: &'a StaticSecret) -> PublicKey {
PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery())
}
}
/// The result of a Diffie-Hellman key exchange.
///
/// Each party computes this using their [`EphemeralSecret`] or [`StaticSecret`] and their
/// counterparty's [`PublicKey`].
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct SharedSecret(pub(crate) MontgomeryPoint);
impl SharedSecret {
/// Convert this shared secret to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
/// View this shared secret key as a byte array.
#[inline]
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
}
/// "Decode" a scalar from a 32-byte array.
///
/// By "decode" here, what is really meant is applying key clamping by twiddling
/// some bits.
///
/// # Returns
///
/// A `Scalar`.
fn clamp_scalar(mut scalar: [u8; 32]) -> Scalar {
scalar[0] &= 248;
scalar[31] &= 127;
scalar[31] |= 64;
Scalar::from_bits(scalar)
}
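// Worked example of the clamping above (a sketch, not part of the public API):
// clearing the low three bits makes the scalar a multiple of the cofactor 8,
// and the high-bit twiddling pins it below 2^255 with bit 254 set. So an
// all-0xFF input keeps every byte except the first and last:
//
//     let clamped = clamp_scalar([0xFF; 32]);
//     assert_eq!(clamped.to_bytes()[0], 0xF8);  // 0xFF & 248
//     assert_eq!(clamped.to_bytes()[31], 0x7F); // (0xFF & 127) | 64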
/// The bare, byte-oriented x25519 function, exactly as specified in RFC7748.
///
/// This can be used with [`X25519_BASEPOINT_BYTES`] for people who
/// cannot use the better, safer, and faster DH API.
pub fn x25519(k: [u8; 32], u: [u8; 32]) -> [u8; 32] {
(clamp_scalar(k) * MontgomeryPoint(u)).to_bytes()
}
/// The X25519 basepoint, for use with the bare, byte-oriented x25519
/// function. This is provided for people who cannot use the typed
/// DH API for some reason.
pub const X25519_BASEPOINT_BYTES: [u8; 32] = [
9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
/// Derived serialization methods will not work on a StaticSecret because x25519 requires
/// non-canonical scalars which are rejected by curve25519-dalek. Thus we provide a way to convert
/// the bytes directly to a scalar using Serde's remote derive functionality.
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))]
#[cfg_attr(
feature = "serde",
derive(our_serde::Serialize, our_serde::Deserialize)
)]
#[cfg_attr(feature = "serde", serde(remote = "Scalar"))]
struct AllowUnreducedScalarBytes(
#[cfg_attr(feature = "serde", serde(getter = "Scalar::to_bytes"))] [u8; 32],
);
impl From<AllowUnreducedScalarBytes> for Scalar {
fn from(bytes: AllowUnreducedScalarBytes) -> Scalar {
clamp_scalar(bytes.0)
}
}
#[cfg(test)]
mod test {
use super::*;
use rand_core::OsRng;
#[test]
fn byte_basepoint_matches_edwards_scalar_mul() {
let mut scalar_bytes = [0x37; 32];
for i in 0..32 {
scalar_bytes[i] += 2;
let result = x25519(scalar_bytes, X25519_BASEPOINT_BYTES);
let expected = (&ED25519_BASEPOINT_TABLE * &clamp_scalar(scalar_bytes))
.to_montgomery()
.to_bytes();
assert_eq!(result, expected);
}
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_public_key_roundtrip() {
use bincode;
let public_key = PublicKey::from(X25519_BASEPOINT_BYTES);
let encoded = bincode::serialize(&public_key).unwrap();
let decoded: PublicKey = bincode::deserialize(&encoded).unwrap();
assert_eq!(encoded.len(), 32);
assert_eq!(decoded.as_bytes(), public_key.as_bytes());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_public_key_matches_from_bytes() {
use bincode;
let expected = PublicKey::from(X25519_BASEPOINT_BYTES);
let decoded: PublicKey = bincode::deserialize(&X25519_BASEPOINT_BYTES).unwrap();
assert_eq!(decoded.as_bytes(), expected.as_bytes());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_static_secret_roundtrip() {
use bincode;
let static_secret = StaticSecret(clamp_scalar([0x24; 32]));
let encoded = bincode::serialize(&static_secret).unwrap();
let decoded: StaticSecret = bincode::deserialize(&encoded).unwrap();
assert_eq!(encoded.len(), 32);
assert_eq!(decoded.to_bytes(), static_secret.to_bytes());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_static_secret_matches_from_bytes() {
use bincode;
let expected = StaticSecret(clamp_scalar([0x24; 32]));
let clamped_bytes = clamp_scalar([0x24; 32]).to_bytes();
let decoded: StaticSecret = bincode::deserialize(&clamped_bytes).unwrap();
assert_eq!(decoded.to_bytes(), expected.to_bytes());
}
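    // Illustrative addition (not part of the upstream vector suite): both
    // sides of an ephemeral Diffie-Hellman exchange must derive the same
    // shared secret.
    #[test]
    fn ephemeral_dh_agreement_matches() {
        let alice_secret = EphemeralSecret::new(OsRng);
        let alice_public = PublicKey::from(&alice_secret);
        let bob_secret = EphemeralSecret::new(OsRng);
        let bob_public = PublicKey::from(&bob_secret);
        let alice_shared = alice_secret.diffie_hellman(&bob_public);
        let bob_shared = bob_secret.diffie_hellman(&alice_public);
        assert_eq!(alice_shared.as_bytes(), bob_shared.as_bytes());
    }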
fn do_rfc7748_ladder_test1(input_scalar: [u8; 32], input_point: [u8; 32], expected: [u8; 32]) {
let result = x25519(input_scalar, input_point);
assert_eq!(result, expected);
}
#[test]
fn rfc7748_ladder_test1_vectorset1() {
let input_scalar: [u8; 32] = [
0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46,
0x5e, 0xdd, 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, 0x50, 0x6a, 0x22, 0x44,
0xba, 0x44, 0x9a, 0xc4,
];
let input_point: [u8; 32] = [
0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1,
0x5f, 0x7c, 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, 0x10, 0xa9, 0x03, 0xa6,
0xd0, 0xab, 0x1c, 0x4c,
];
let expected: [u8; 32] = [
0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d,
0x08, 0x4f, 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, 0x54, 0xb4, 0x07, 0x55,
0x77, 0xa2, 0x85, 0x52,
];
do_rfc7748_ladder_test1(input_scalar, input_point, expected);
}
#[test]
fn rfc7748_ladder_test1_vectorset2() {
let input_scalar: [u8; 32] = [
0x4b, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c, 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d,
0x6a, 0xf5, 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4, 0x2c, 0xa4, 0x16, 0x9e,
0x79, 0x18, 0xba, 0x0d,
];
let input_point: [u8; 32] = [
0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3, 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38,
0xae, 0x2c, 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e, 0xfc, 0x4c, 0xd5, 0x49,
0xc7, 0x15, 0xa4, 0x93,
];
let expected: [u8; 32] = [
0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8,
0x73, 0xf8, 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, 0xe6, 0xf8, 0xf7, 0x64,
0x7a, 0xac, 0x79, 0x57,
];
do_rfc7748_ladder_test1(input_scalar, input_point, expected);
}
#[test]
#[ignore] // Run only if you want to burn a lot of CPU doing 1,000,000 DH operations
fn rfc7748_ladder_test2() {
use curve25519_dalek::constants::X25519_BASEPOINT;
let mut k: [u8; 32] = X25519_BASEPOINT.0;
let mut u: [u8; 32] = X25519_BASEPOINT.0;
let mut result: [u8; 32];
macro_rules! do_iterations {
($n:expr) => {
for _ in 0..$n {
result = x25519(k, u);
// OBVIOUS THING THAT I'M GOING TO NOTE ANYWAY BECAUSE I'VE
// SEEN PEOPLE DO THIS WITH GOLANG'S STDLIB AND YOU SURE AS
// HELL SHOULDN'T DO HORRIBLY STUPID THINGS LIKE THIS WITH
// MY LIBRARY:
//
// NEVER EVER TREAT SCALARS AS POINTS AND/OR VICE VERSA.
//
// ↓↓ DON'T DO THIS ↓↓
u = k.clone();
k = result;
}
};
}
// After one iteration:
// 422c8e7a6227d7bca1350b3e2bb7279f7897b87bb6854b783c60e80311ae3079
// After 1,000 iterations:
// 684cf59ba83309552800ef566f2f4d3c1c3887c49360e3875f2eb94d99532c51
// After 1,000,000 iterations:
// 7c3911e0ab2586fd864497297e575e6f3bc601c0883c30df5f4dd2d24f665424
do_iterations!(1);
assert_eq!(
k,
[
0x42, 0x2c, 0x8e, 0x7a, 0x62, 0x27, 0xd7, 0xbc, 0xa1, 0x35, 0x0b, 0x3e, 0x2b, 0xb7,
0x27, 0x9f, 0x78, 0x97, 0xb8, 0x7b, 0xb6, 0x85, 0x4b, 0x78, 0x3c, 0x60, 0xe8, 0x03,
0x11, 0xae, 0x30, 0x79,
]
);
do_iterations!(999);
assert_eq!(
k,
[
0x68, 0x4c, 0xf5, 0x9b, 0xa8, 0x33, 0x09, 0x55, 0x28, 0x00, 0xef, 0x56, 0x6f, 0x2f,
0x4d, 0x3c, 0x1c, 0x38, 0x87, 0xc4, 0x93, 0x60, 0xe3, 0x87, 0x5f, 0x2e, 0xb9, 0x4d,
0x99, 0x53, 0x2c, 0x51,
]
);
do_iterations!(999_000);
assert_eq!(
k,
[
0x7c, 0x39, 0x11, 0xe0, 0xab, 0x25, 0x86, 0xfd, 0x86, 0x44, 0x97, 0x29, 0x7e, 0x57,
0x5e, 0x6f, 0x3b, 0xc6, 0x01, 0xc0, 0x88, 0x3c, 0x30, 0xdf, 0x5f, 0x4d, 0xd2, 0xd2,
0x4f, 0x66, 0x54, 0x24,
]
);
}
}
| {
self.0.as_bytes()
} | identifier_body |
x25519.rs | // -*- mode: rust; -*-
//
// This file is part of x25519-dalek.
// Copyright (c) 2017-2019 isis lovecruft
// Copyright (c) 2019 DebugSteven
// See LICENSE for licensing information.
//
// Authors:
// - isis agora lovecruft <[email protected]>
// - DebugSteven <[email protected]>
//! x25519 Diffie-Hellman key exchange
//!
//! This implements x25519 key exchange as specified by Mike Hamburg
//! and Adam Langley in [RFC7748](https://tools.ietf.org/html/rfc7748).
use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
use curve25519_dalek::montgomery::MontgomeryPoint;
use curve25519_dalek::scalar::Scalar;
use rand_core::CryptoRng;
use rand_core::RngCore;
use zeroize::Zeroize;
/// A Diffie-Hellman public key, corresponding to an [`EphemeralSecret`] or [`StaticSecret`] key.
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))]
#[cfg_attr(
feature = "serde",
derive(our_serde::Serialize, our_serde::Deserialize)
)]
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub struct PublicKey(pub(crate) MontgomeryPoint);
impl From<[u8; 32]> for PublicKey {
    /// Given a byte array, construct an x25519 `PublicKey`.
fn | (bytes: [u8; 32]) -> PublicKey {
PublicKey(MontgomeryPoint(bytes))
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
/// View this public key as a byte array.
#[inline]
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
}
/// A short-lived Diffie-Hellman secret key that can only be used to compute a single
/// [`SharedSecret`].
///
/// This type is identical to the [`StaticSecret`] type, except that the
/// [`EphemeralSecret::diffie_hellman`] method consumes and then wipes the secret key, and there
/// are no serialization methods defined. This means that [`EphemeralSecret`]s can only be
/// generated from fresh randomness by [`EphemeralSecret::new`] and the compiler statically checks
/// that the resulting secret is used at most once.
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct EphemeralSecret(pub(crate) Scalar);
impl EphemeralSecret {
/// Perform a Diffie-Hellman key agreement between `self` and
/// `their_public` key to produce a [`SharedSecret`].
pub fn diffie_hellman(self, their_public: &PublicKey) -> SharedSecret {
SharedSecret(self.0 * their_public.0)
}
/// Generate an x25519 [`EphemeralSecret`] key.
pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self {
let mut bytes = [0u8; 32];
csprng.fill_bytes(&mut bytes);
EphemeralSecret(clamp_scalar(bytes))
}
}
impl<'a> From<&'a EphemeralSecret> for PublicKey {
/// Given an x25519 [`EphemeralSecret`] key, compute its corresponding [`PublicKey`].
fn from(secret: &'a EphemeralSecret) -> PublicKey {
PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery())
}
}
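// Typed-API sketch (illustrative; `csprng` stands for any `RngCore + CryptoRng`
// source such as `rand_core::OsRng`):
//
//     let my_secret = EphemeralSecret::new(csprng);
//     let my_public = PublicKey::from(&my_secret);
//     // ...exchange public keys with the peer, then:
//     let shared = my_secret.diffie_hellman(&their_public);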
/// A Diffie-Hellman secret key that can be used to compute multiple [`SharedSecret`]s.
///
/// This type is identical to the [`EphemeralSecret`] type, except that the
/// [`StaticSecret::diffie_hellman`] method does not consume the secret key, and the type provides
/// serialization methods to save and load key material. This means that the secret may be used
/// multiple times (but does not *have to be*).
///
/// Some protocols, such as Noise, already handle the static/ephemeral distinction, so the
/// additional guarantees provided by [`EphemeralSecret`] are not helpful or would cause duplicate
/// code paths. In this case, it may be useful to
/// ```rust,ignore
/// use x25519_dalek::StaticSecret as SecretKey;
/// ```
/// since the only difference between the two is that [`StaticSecret`] does not enforce at
/// compile-time that the key is only used once.
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))]
#[cfg_attr(
feature = "serde",
derive(our_serde::Serialize, our_serde::Deserialize)
)]
#[derive(Clone, Zeroize)]
#[zeroize(drop)]
pub struct StaticSecret(
#[cfg_attr(feature = "serde", serde(with = "AllowUnreducedScalarBytes"))] pub(crate) Scalar,
);
impl StaticSecret {
/// Perform a Diffie-Hellman key agreement between `self` and
/// `their_public` key to produce a `SharedSecret`.
pub fn diffie_hellman(&self, their_public: &PublicKey) -> SharedSecret {
SharedSecret(&self.0 * their_public.0)
}
/// Generate an x25519 key.
pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self {
let mut bytes = [0u8; 32];
csprng.fill_bytes(&mut bytes);
StaticSecret(clamp_scalar(bytes))
}
/// Extract this key's bytes for serialization.
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
}
impl From<[u8; 32]> for StaticSecret {
/// Load a secret key from a byte array.
fn from(bytes: [u8; 32]) -> StaticSecret {
StaticSecret(clamp_scalar(bytes))
}
}
impl<'a> From<&'a StaticSecret> for PublicKey {
/// Given an x25519 [`StaticSecret`] key, compute its corresponding [`PublicKey`].
fn from(secret: &'a StaticSecret) -> PublicKey {
PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery())
}
}
/// The result of a Diffie-Hellman key exchange.
///
/// Each party computes this using their [`EphemeralSecret`] or [`StaticSecret`] and their
/// counterparty's [`PublicKey`].
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct SharedSecret(pub(crate) MontgomeryPoint);
impl SharedSecret {
/// Convert this shared secret to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
/// View this shared secret key as a byte array.
#[inline]
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
}
/// "Decode" a scalar from a 32-byte array.
///
/// By "decode" here, what is really meant is applying key clamping by twiddling
/// some bits.
///
/// # Returns
///
/// A `Scalar`.
fn clamp_scalar(mut scalar: [u8; 32]) -> Scalar {
scalar[0] &= 248;
scalar[31] &= 127;
scalar[31] |= 64;
Scalar::from_bits(scalar)
}
/// The bare, byte-oriented x25519 function, exactly as specified in RFC7748.
///
/// This can be used with [`X25519_BASEPOINT_BYTES`] for people who
/// cannot use the better, safer, and faster DH API.
pub fn x25519(k: [u8; 32], u: [u8; 32]) -> [u8; 32] {
(clamp_scalar(k) * MontgomeryPoint(u)).to_bytes()
}
/// The X25519 basepoint, for use with the bare, byte-oriented x25519
/// function. This is provided for people who cannot use the typed
/// DH API for some reason.
pub const X25519_BASEPOINT_BYTES: [u8; 32] = [
9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
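// Sketch of the bare API (illustrative; the `*_secret_bytes` values are assumed
// 32-byte secrets): a public key is the secret applied to the basepoint, and
// both parties reach the same shared secret:
//
//     let alice_public = x25519(alice_secret_bytes, X25519_BASEPOINT_BYTES);
//     let bob_public = x25519(bob_secret_bytes, X25519_BASEPOINT_BYTES);
//     assert_eq!(
//         x25519(alice_secret_bytes, bob_public),
//         x25519(bob_secret_bytes, alice_public),
//     );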
/// Derived serialization methods will not work on a StaticSecret because x25519 requires
/// non-canonical scalars which are rejected by curve25519-dalek. Thus we provide a way to convert
/// the bytes directly to a scalar using Serde's remote derive functionality.
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))]
#[cfg_attr(
feature = "serde",
derive(our_serde::Serialize, our_serde::Deserialize)
)]
#[cfg_attr(feature = "serde", serde(remote = "Scalar"))]
struct AllowUnreducedScalarBytes(
#[cfg_attr(feature = "serde", serde(getter = "Scalar::to_bytes"))] [u8; 32],
);
impl From<AllowUnreducedScalarBytes> for Scalar {
fn from(bytes: AllowUnreducedScalarBytes) -> Scalar {
clamp_scalar(bytes.0)
}
}
#[cfg(test)]
mod test {
use super::*;
use rand_core::OsRng;
#[test]
fn byte_basepoint_matches_edwards_scalar_mul() {
let mut scalar_bytes = [0x37; 32];
for i in 0..32 {
scalar_bytes[i] += 2;
let result = x25519(scalar_bytes, X25519_BASEPOINT_BYTES);
let expected = (&ED25519_BASEPOINT_TABLE * &clamp_scalar(scalar_bytes))
.to_montgomery()
.to_bytes();
assert_eq!(result, expected);
}
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_public_key_roundtrip() {
use bincode;
let public_key = PublicKey::from(X25519_BASEPOINT_BYTES);
let encoded = bincode::serialize(&public_key).unwrap();
let decoded: PublicKey = bincode::deserialize(&encoded).unwrap();
assert_eq!(encoded.len(), 32);
assert_eq!(decoded.as_bytes(), public_key.as_bytes());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_public_key_matches_from_bytes() {
use bincode;
let expected = PublicKey::from(X25519_BASEPOINT_BYTES);
let decoded: PublicKey = bincode::deserialize(&X25519_BASEPOINT_BYTES).unwrap();
assert_eq!(decoded.as_bytes(), expected.as_bytes());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_static_secret_roundtrip() {
use bincode;
let static_secret = StaticSecret(clamp_scalar([0x24; 32]));
let encoded = bincode::serialize(&static_secret).unwrap();
let decoded: StaticSecret = bincode::deserialize(&encoded).unwrap();
assert_eq!(encoded.len(), 32);
assert_eq!(decoded.to_bytes(), static_secret.to_bytes());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_static_secret_matches_from_bytes() {
use bincode;
let expected = StaticSecret(clamp_scalar([0x24; 32]));
let clamped_bytes = clamp_scalar([0x24; 32]).to_bytes();
let decoded: StaticSecret = bincode::deserialize(&clamped_bytes).unwrap();
assert_eq!(decoded.to_bytes(), expected.to_bytes());
}
fn do_rfc7748_ladder_test1(input_scalar: [u8; 32], input_point: [u8; 32], expected: [u8; 32]) {
let result = x25519(input_scalar, input_point);
assert_eq!(result, expected);
}
#[test]
fn rfc7748_ladder_test1_vectorset1() {
let input_scalar: [u8; 32] = [
0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46,
0x5e, 0xdd, 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, 0x50, 0x6a, 0x22, 0x44,
0xba, 0x44, 0x9a, 0xc4,
];
let input_point: [u8; 32] = [
0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1,
0x5f, 0x7c, 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, 0x10, 0xa9, 0x03, 0xa6,
0xd0, 0xab, 0x1c, 0x4c,
];
let expected: [u8; 32] = [
0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d,
0x08, 0x4f, 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, 0x54, 0xb4, 0x07, 0x55,
0x77, 0xa2, 0x85, 0x52,
];
do_rfc7748_ladder_test1(input_scalar, input_point, expected);
}
#[test]
fn rfc7748_ladder_test1_vectorset2() {
let input_scalar: [u8; 32] = [
0x4b, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c, 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d,
0x6a, 0xf5, 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4, 0x2c, 0xa4, 0x16, 0x9e,
0x79, 0x18, 0xba, 0x0d,
];
let input_point: [u8; 32] = [
0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3, 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38,
0xae, 0x2c, 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e, 0xfc, 0x4c, 0xd5, 0x49,
0xc7, 0x15, 0xa4, 0x93,
];
let expected: [u8; 32] = [
0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8,
0x73, 0xf8, 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, 0xe6, 0xf8, 0xf7, 0x64,
0x7a, 0xac, 0x79, 0x57,
];
do_rfc7748_ladder_test1(input_scalar, input_point, expected);
}
#[test]
#[ignore] // Run only if you want to burn a lot of CPU doing 1,000,000 DH operations
fn rfc7748_ladder_test2() {
use curve25519_dalek::constants::X25519_BASEPOINT;
let mut k: [u8; 32] = X25519_BASEPOINT.0;
let mut u: [u8; 32] = X25519_BASEPOINT.0;
let mut result: [u8; 32];
macro_rules! do_iterations {
($n:expr) => {
for _ in 0..$n {
result = x25519(k, u);
// OBVIOUS THING THAT I'M GOING TO NOTE ANYWAY BECAUSE I'VE
// SEEN PEOPLE DO THIS WITH GOLANG'S STDLIB AND YOU SURE AS
// HELL SHOULDN'T DO HORRIBLY STUPID THINGS LIKE THIS WITH
// MY LIBRARY:
//
// NEVER EVER TREAT SCALARS AS POINTS AND/OR VICE VERSA.
//
// ↓↓ DON'T DO THIS ↓↓
u = k.clone();
k = result;
}
};
}
// After one iteration:
// 422c8e7a6227d7bca1350b3e2bb7279f7897b87bb6854b783c60e80311ae3079
// After 1,000 iterations:
// 684cf59ba83309552800ef566f2f4d3c1c3887c49360e3875f2eb94d99532c51
// After 1,000,000 iterations:
// 7c3911e0ab2586fd864497297e575e6f3bc601c0883c30df5f4dd2d24f665424
do_iterations!(1);
assert_eq!(
k,
[
0x42, 0x2c, 0x8e, 0x7a, 0x62, 0x27, 0xd7, 0xbc, 0xa1, 0x35, 0x0b, 0x3e, 0x2b, 0xb7,
0x27, 0x9f, 0x78, 0x97, 0xb8, 0x7b, 0xb6, 0x85, 0x4b, 0x78, 0x3c, 0x60, 0xe8, 0x03,
0x11, 0xae, 0x30, 0x79,
]
);
do_iterations!(999);
assert_eq!(
k,
[
0x68, 0x4c, 0xf5, 0x9b, 0xa8, 0x33, 0x09, 0x55, 0x28, 0x00, 0xef, 0x56, 0x6f, 0x2f,
0x4d, 0x3c, 0x1c, 0x38, 0x87, 0xc4, 0x93, 0x60, 0xe3, 0x87, 0x5f, 0x2e, 0xb9, 0x4d,
0x99, 0x53, 0x2c, 0x51,
]
);
do_iterations!(999_000);
assert_eq!(
k,
[
0x7c, 0x39, 0x11, 0xe0, 0xab, 0x25, 0x86, 0xfd, 0x86, 0x44, 0x97, 0x29, 0x7e, 0x57,
0x5e, 0x6f, 0x3b, 0xc6, 0x01, 0xc0, 0x88, 0x3c, 0x30, 0xdf, 0x5f, 0x4d, 0xd2, 0xd2,
0x4f, 0x66, 0x54, 0x24,
]
);
}
}
| from | identifier_name |
x25519.rs | // -*- mode: rust; -*-
//
// This file is part of x25519-dalek.
// Copyright (c) 2017-2019 isis lovecruft
// Copyright (c) 2019 DebugSteven
// See LICENSE for licensing information.
//
// Authors:
// - isis agora lovecruft <[email protected]>
// - DebugSteven <[email protected]>
//! x25519 Diffie-Hellman key exchange
//!
//! This implements x25519 key exchange as specified by Mike Hamburg
//! and Adam Langley in [RFC7748](https://tools.ietf.org/html/rfc7748).
use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
use curve25519_dalek::montgomery::MontgomeryPoint;
use curve25519_dalek::scalar::Scalar;
use rand_core::CryptoRng;
use rand_core::RngCore;
use zeroize::Zeroize;
/// A Diffie-Hellman public key, corresponding to an [`EphemeralSecret`] or [`StaticSecret`] key.
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))]
#[cfg_attr(
feature = "serde",
derive(our_serde::Serialize, our_serde::Deserialize)
)]
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub struct PublicKey(pub(crate) MontgomeryPoint);
impl From<[u8; 32]> for PublicKey {
    /// Given a byte array, construct an x25519 `PublicKey`.
fn from(bytes: [u8; 32]) -> PublicKey {
PublicKey(MontgomeryPoint(bytes))
}
}
impl PublicKey {
/// Convert this public key to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
/// View this public key as a byte array.
#[inline]
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
}
/// A short-lived Diffie-Hellman secret key that can only be used to compute a single
/// [`SharedSecret`].
///
/// This type is identical to the [`StaticSecret`] type, except that the
/// [`EphemeralSecret::diffie_hellman`] method consumes and then wipes the secret key, and there
/// are no serialization methods defined. This means that [`EphemeralSecret`]s can only be
/// generated from fresh randomness by [`EphemeralSecret::new`] and the compiler statically checks
/// that the resulting secret is used at most once.
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct EphemeralSecret(pub(crate) Scalar);
impl EphemeralSecret {
/// Perform a Diffie-Hellman key agreement between `self` and
/// `their_public` key to produce a [`SharedSecret`].
pub fn diffie_hellman(self, their_public: &PublicKey) -> SharedSecret {
SharedSecret(self.0 * their_public.0)
}
/// Generate an x25519 [`EphemeralSecret`] key.
pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self {
let mut bytes = [0u8; 32];
csprng.fill_bytes(&mut bytes);
EphemeralSecret(clamp_scalar(bytes))
}
}
impl<'a> From<&'a EphemeralSecret> for PublicKey {
/// Given an x25519 [`EphemeralSecret`] key, compute its corresponding [`PublicKey`].
fn from(secret: &'a EphemeralSecret) -> PublicKey {
PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery())
}
}
/// A Diffie-Hellman secret key that can be used to compute multiple [`SharedSecret`]s.
///
/// This type is identical to the [`EphemeralSecret`] type, except that the
/// [`StaticSecret::diffie_hellman`] method does not consume the secret key, and the type provides
/// serialization methods to save and load key material. This means that the secret may be used
/// multiple times (but does not *have to be*).
///
/// Some protocols, such as Noise, already handle the static/ephemeral distinction, so the
/// additional guarantees provided by [`EphemeralSecret`] are not helpful or would cause duplicate
/// code paths. In this case, it may be useful to
/// ```rust,ignore
/// use x25519_dalek::StaticSecret as SecretKey;
/// ```
/// since the only difference between the two is that [`StaticSecret`] does not enforce at
/// compile-time that the key is only used once.
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))]
#[cfg_attr(
feature = "serde",
derive(our_serde::Serialize, our_serde::Deserialize)
)]
#[derive(Clone, Zeroize)]
#[zeroize(drop)]
pub struct StaticSecret(
#[cfg_attr(feature = "serde", serde(with = "AllowUnreducedScalarBytes"))] pub(crate) Scalar,
);
impl StaticSecret {
/// Perform a Diffie-Hellman key agreement between `self` and
/// `their_public` key to produce a `SharedSecret`.
pub fn diffie_hellman(&self, their_public: &PublicKey) -> SharedSecret {
SharedSecret(&self.0 * their_public.0)
}
/// Generate an x25519 key.
pub fn new<T: RngCore + CryptoRng>(mut csprng: T) -> Self {
let mut bytes = [0u8; 32];
csprng.fill_bytes(&mut bytes);
StaticSecret(clamp_scalar(bytes))
}
/// Extract this key's bytes for serialization.
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
}
impl From<[u8; 32]> for StaticSecret {
/// Load a secret key from a byte array.
fn from(bytes: [u8; 32]) -> StaticSecret {
StaticSecret(clamp_scalar(bytes))
}
}
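// Persistence sketch (illustrative, not in the original file): a static key
// survives a save/load round-trip through its byte representation, since
// clamping is idempotent on the stored bytes.
#[cfg(test)]
fn _static_secret_roundtrip_sketch() {
    use rand_core::OsRng;
    let secret = StaticSecret::new(OsRng);
    let saved: [u8; 32] = secret.to_bytes();
    let restored = StaticSecret::from(saved);
    assert_eq!(restored.to_bytes(), secret.to_bytes());
}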
impl<'a> From<&'a StaticSecret> for PublicKey {
/// Given an x25519 [`StaticSecret`] key, compute its corresponding [`PublicKey`].
fn from(secret: &'a StaticSecret) -> PublicKey {
PublicKey((&ED25519_BASEPOINT_TABLE * &secret.0).to_montgomery())
}
}
/// The result of a Diffie-Hellman key exchange.
///
/// Each party computes this using their [`EphemeralSecret`] or [`StaticSecret`] and their
/// counterparty's [`PublicKey`].
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct SharedSecret(pub(crate) MontgomeryPoint);
impl SharedSecret {
/// Convert this shared secret to a byte array.
#[inline]
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
/// View this shared secret key as a byte array.
#[inline]
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
}
/// "Decode" a scalar from a 32-byte array.
///
/// By "decode" here, what is really meant is applying key clamping by twiddling
/// some bits.
///
/// # Returns
///
/// A `Scalar`.
fn clamp_scalar(mut scalar: [u8; 32]) -> Scalar {
scalar[0] &= 248; // clear the low 3 bits, making the scalar a multiple of the cofactor 8
scalar[31] &= 127; // clear the high bit
scalar[31] |= 64; // set the second-highest bit
Scalar::from_bits(scalar)
}
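// Quick check of the clamping invariants (a sketch, not in the original file):
#[cfg(test)]
fn _clamp_invariants_sketch() {
    let s = clamp_scalar([0xff; 32]).to_bytes();
    assert_eq!(s[0] & 0b0000_0111, 0); // low 3 bits cleared (multiple of the cofactor 8)
    assert_eq!(s[31] & 0b1000_0000, 0); // high bit cleared
    assert_eq!(s[31] & 0b0100_0000, 0b0100_0000); // second-highest bit set
}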
/// The bare, byte-oriented x25519 function, exactly as specified in RFC7748.
///
/// This can be used with [`X25519_BASEPOINT_BYTES`] for people who
/// cannot use the better, safer, and faster DH API.
pub fn x25519(k: [u8; 32], u: [u8; 32]) -> [u8; 32] {
(clamp_scalar(k) * MontgomeryPoint(u)).to_bytes()
}
/// The X25519 basepoint, for use with the bare, byte-oriented x25519
/// function. This is provided for people who cannot use the typed
/// DH API for some reason.
pub const X25519_BASEPOINT_BYTES: [u8; 32] = [
9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
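// Usage sketch (illustrative): deriving a public key with the bare function.
// Both paths clamp the secret bytes, so this agrees with the typed API:
//
//     let public_bytes = x25519(secret_bytes, X25519_BASEPOINT_BYTES);
//     assert_eq!(&public_bytes, PublicKey::from(&StaticSecret::from(secret_bytes)).as_bytes());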
/// Derived serialization methods will not work on a StaticSecret because x25519 requires
/// non-canonical scalars which are rejected by curve25519-dalek. Thus we provide a way to convert
/// the bytes directly to a scalar using Serde's remote derive functionality.
#[cfg_attr(feature = "serde", serde(crate = "our_serde"))]
#[cfg_attr(
feature = "serde",
derive(our_serde::Serialize, our_serde::Deserialize)
)]
#[cfg_attr(feature = "serde", serde(remote = "Scalar"))]
struct AllowUnreducedScalarBytes(
#[cfg_attr(feature = "serde", serde(getter = "Scalar::to_bytes"))] [u8; 32],
);
impl From<AllowUnreducedScalarBytes> for Scalar {
fn from(bytes: AllowUnreducedScalarBytes) -> Scalar {
clamp_scalar(bytes.0)
}
}
#[cfg(test)]
mod test {
use super::*;
use rand_core::OsRng;
#[test]
fn byte_basepoint_matches_edwards_scalar_mul() {
let mut scalar_bytes = [0x37; 32];
for i in 0..32 {
scalar_bytes[i] += 2;
let result = x25519(scalar_bytes, X25519_BASEPOINT_BYTES);
let expected = (&ED25519_BASEPOINT_TABLE * &clamp_scalar(scalar_bytes))
.to_montgomery()
.to_bytes();
assert_eq!(result, expected);
}
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_public_key_roundtrip() {
use bincode;
let public_key = PublicKey::from(X25519_BASEPOINT_BYTES);
let encoded = bincode::serialize(&public_key).unwrap();
let decoded: PublicKey = bincode::deserialize(&encoded).unwrap();
assert_eq!(encoded.len(), 32);
assert_eq!(decoded.as_bytes(), public_key.as_bytes());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_public_key_matches_from_bytes() {
use bincode;
let expected = PublicKey::from(X25519_BASEPOINT_BYTES);
let decoded: PublicKey = bincode::deserialize(&X25519_BASEPOINT_BYTES).unwrap();
assert_eq!(decoded.as_bytes(), expected.as_bytes());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_static_secret_roundtrip() {
use bincode;
let static_secret = StaticSecret(clamp_scalar([0x24; 32]));
let encoded = bincode::serialize(&static_secret).unwrap();
let decoded: StaticSecret = bincode::deserialize(&encoded).unwrap();
assert_eq!(encoded.len(), 32);
assert_eq!(decoded.to_bytes(), static_secret.to_bytes());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_static_secret_matches_from_bytes() {
use bincode;
let expected = StaticSecret(clamp_scalar([0x24; 32]));
let clamped_bytes = clamp_scalar([0x24; 32]).to_bytes();
let decoded: StaticSecret = bincode::deserialize(&clamped_bytes).unwrap();
assert_eq!(decoded.to_bytes(), expected.to_bytes());
}
fn do_rfc7748_ladder_test1(input_scalar: [u8; 32], input_point: [u8; 32], expected: [u8; 32]) {
let result = x25519(input_scalar, input_point);
assert_eq!(result, expected);
}
#[test]
fn rfc7748_ladder_test1_vectorset1() {
let input_scalar: [u8; 32] = [
0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46,
0x5e, 0xdd, 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, 0x50, 0x6a, 0x22, 0x44,
0xba, 0x44, 0x9a, 0xc4,
];
let input_point: [u8; 32] = [
0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1,
0x5f, 0x7c, 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, 0x10, 0xa9, 0x03, 0xa6,
0xd0, 0xab, 0x1c, 0x4c,
];
let expected: [u8; 32] = [
0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d,
0x08, 0x4f, 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, 0x54, 0xb4, 0x07, 0x55,
0x77, 0xa2, 0x85, 0x52,
];
do_rfc7748_ladder_test1(input_scalar, input_point, expected);
}
#[test]
fn rfc7748_ladder_test1_vectorset2() {
let input_scalar: [u8; 32] = [
0x4b, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c, 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d,
0x6a, 0xf5, 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4, 0x2c, 0xa4, 0x16, 0x9e,
0x79, 0x18, 0xba, 0x0d,
];
let input_point: [u8; 32] = [
0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3, 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38,
0xae, 0x2c, 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e, 0xfc, 0x4c, 0xd5, 0x49,
0xc7, 0x15, 0xa4, 0x93,
];
let expected: [u8; 32] = [
0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8,
0x73, 0xf8, 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, 0xe6, 0xf8, 0xf7, 0x64,
0x7a, 0xac, 0x79, 0x57,
];
do_rfc7748_ladder_test1(input_scalar, input_point, expected);
}
#[test]
#[ignore] // Run only if you want to burn a lot of CPU doing 1,000,000 DH operations
fn rfc7748_ladder_test2() {
use curve25519_dalek::constants::X25519_BASEPOINT;
let mut k: [u8; 32] = X25519_BASEPOINT.0;
let mut u: [u8; 32] = X25519_BASEPOINT.0;
let mut result: [u8; 32];
macro_rules! do_iterations {
($n:expr) => {
for _ in 0..$n {
result = x25519(k, u);
// OBVIOUS THING THAT I'M GOING TO NOTE ANYWAY BECAUSE I'VE
// SEEN PEOPLE DO THIS WITH GOLANG'S STDLIB AND YOU SURE AS
// HELL SHOULDN'T DO HORRIBLY STUPID THINGS LIKE THIS WITH
// MY LIBRARY:
//
// NEVER EVER TREAT SCALARS AS POINTS AND/OR VICE VERSA.
//
// ↓↓ DON'T DO THIS ↓↓
u = k.clone();
k = result;
}
};
}
// After one iteration:
// 422c8e7a6227d7bca1350b3e2bb7279f7897b87bb6854b783c60e80311ae3079
// After 1,000 iterations:
// 684cf59ba83309552800ef566f2f4d3c1c3887c49360e3875f2eb94d99532c51
// After 1,000,000 iterations:
// 7c3911e0ab2586fd864497297e575e6f3bc601c0883c30df5f4dd2d24f665424
do_iterations!(1);
assert_eq!(
k,
[
0x42, 0x2c, 0x8e, 0x7a, 0x62, 0x27, 0xd7, 0xbc, 0xa1, 0x35, 0x0b, 0x3e, 0x2b, 0xb7,
0x27, 0x9f, 0x78, 0x97, 0xb8, 0x7b, 0xb6, 0x85, 0x4b, 0x78, 0x3c, 0x60, 0xe8, 0x03,
0x11, 0xae, 0x30, 0x79,
]
);
do_iterations!(999);
assert_eq!(
k,
[
0x68, 0x4c, 0xf5, 0x9b, 0xa8, 0x33, 0x09, 0x55, 0x28, 0x00, 0xef, 0x56, 0x6f, 0x2f,
0x4d, 0x3c, 0x1c, 0x38, 0x87, 0xc4, 0x93, 0x60, 0xe3, 0x87, 0x5f, 0x2e, 0xb9, 0x4d,
0x99, 0x53, 0x2c, 0x51,
]
);
do_iterations!(999_000);
assert_eq!(
k,
[
0x7c, 0x39, 0x11, 0xe0, 0xab, 0x25, 0x86, 0xfd, 0x86, 0x44, 0x97, 0x29, 0x7e, 0x57,
0x5e, 0x6f, 0x3b, 0xc6, 0x01, 0xc0, 0x88, 0x3c, 0x30, 0xdf, 0x5f, 0x4d, 0xd2, 0xd2,
0x4f, 0x66, 0x54, 0x24,
]
);
}
}
engine.rs | use std::cmp;
use std::mem;
use std::collections::HashMap;
use std::thread::{self, Builder, Thread};
use std::time::Duration;
use std::sync::mpsc::{self, Sender, Receiver};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::Mutex;
use cpal;
use cpal::UnknownTypeBuffer;
use cpal::Voice;
use cpal::Endpoint;
use conversions::Sample;
use source::Source;
use source::UniformSourceIterator;
use time;
/// Duration of a loop of the engine in milliseconds.
const FIXED_STEP_MS: u32 = 17;
/// Duration of a loop of the engine in nanoseconds.
const FIXED_STEP_NS: u64 = FIXED_STEP_MS as u64 * 1000000;
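// With a 17 ms step this gives roughly 1000 / 17 ≈ 59 engine iterations per second.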
/// The internal engine of this library.
///
/// Each `Engine` owns a thread that runs in the background and plays the audio.
pub struct Engine {
/// Communication with the background thread.
commands: Mutex<Sender<Command>>,
/// The background thread that executes commands.
thread: Option<Thread>,
/// Contains the format (channels count and samples rate) of the voice of each endpoint.
///
/// The key of the hashmap is the name of the endpoint, and the value are the channels count
/// and samples rate.
voices_formats: Mutex<HashMap<String, (u16, u32)>>,
}
impl Engine {
/// Builds the engine.
pub fn new() -> Engine {
let (tx, rx) = mpsc::channel();
// we ignore errors when creating the background thread
// the user won't get any audio, but that's better than a panic
let thread = Builder::new().name("rodio audio processing".to_string())
.spawn(move || background(rx))
.ok().map(|jg| jg.thread().clone());
Engine {
commands: Mutex::new(tx),
thread: thread,
voices_formats: Mutex::new(HashMap::with_capacity(1)),
}
}
/// Builds a new sink that targets a given endpoint.
pub fn start(&self, endpoint: &Endpoint) -> Handle {
// try looking for an existing `Voice`, or create one if there isn't one
// `new_voice` is `Some` if a new voice has been created
let (new_voice, channels_count, samples_rate) = {
let mut voices_formats = self.voices_formats.lock().unwrap();
// will contain the new voice, or `None` if no new voice is needed
let mut new_voice = None;
let &mut (c, s) = voices_formats.entry(endpoint.get_name()).or_insert_with(|| {
// TODO: handle possible errors here
// determining the format to use for the new voice
let format = endpoint.get_supported_formats_list().unwrap().fold(None, |f1, f2| {
if f1.is_none() {
return Some(f2);
}
let f1 = f1.unwrap();
// we privilege f32 formats to avoid a conversion
if f2.data_type == cpal::SampleFormat::F32 && f1.data_type != cpal::SampleFormat::F32 {
return Some(f2);
}
// do not go below 44100 if possible
if f1.samples_rate.0 < 44100 {
return Some(f2);
}
// privilege outputs with 2 channels for now
if f2.channels.len() == 2 && f1.channels.len() != 2 {
return Some(f2);
}
Some(f1)
}).expect("The endpoint doesn't support any format!?");
new_voice = Some(Voice::new(&endpoint, &format).unwrap());
(format.channels.len() as u16, format.samples_rate.0)
});
(new_voice, c, s)
};
// `next_sounds` contains a Vec that can later be used to append new iterators to the sink
let next_sounds = Arc::new(Mutex::new(Vec::new()));
// the `QueueIterator` is the main source of samples and will be read by the background
// thread
let source = QueueIterator {
current: Box::new(None.into_iter()),
next: next_sounds.clone(),
};
// we use the pointer of the `Arc` of `next_sounds` as an identifier for this sink for the
// purpose of communicating with the background thread
let source_id = &*next_sounds as *const Mutex<_> as *const u8 as usize;
// at each loop, the background thread will store the remaining time of the sound in this
// value
// the initial value is `0` since there's no sound
let remaining_duration_ms = Arc::new(AtomicUsize::new(0 as usize));
// send the play command, passing everything to the background thread
{
let command = Command::Play(endpoint.clone(), new_voice, source,
remaining_duration_ms.clone());
self.commands.lock().unwrap().send(command).unwrap();
}
Handle {
engine: self,
source_id: source_id,
remaining_duration_ms: remaining_duration_ms,
samples_rate: samples_rate,
channels: channels_count,
next_sounds: next_sounds,
}
}
}
/// A sink.
///
/// Note that dropping the handle doesn't delete the sink. You must call `stop` explicitly.
pub struct Handle<'a> {
engine: &'a Engine,
source_id: usize,
remaining_duration_ms: Arc<AtomicUsize>,
samples_rate: u32,
channels: u16,
// Holds a pointer to the list of iterators to be played after the current one has
// finished playing.
next_sounds: Arc<Mutex<Vec<Box<Iterator<Item = f32> + Send>>>>,
}
impl<'a> Handle<'a> {
/// Appends a new source of data after the current one.
#[inline]
pub fn append<S>(&self, source: S)
where S: Source + Send + 'static, S::Item: Sample + Clone + Send
{
// adding the estimated duration of the sound to `remaining_duration_ms`
if let Some(duration) = source.get_total_duration() {
let duration = duration.as_secs() as usize * 1000 +
duration.subsec_nanos() as usize / 1000000;
self.remaining_duration_ms.fetch_add(duration, Ordering::Relaxed);
} else {
let duration = source.size_hint().0 * 1000 / (source.get_samples_rate() as usize *
source.get_channels() as usize);
self.remaining_duration_ms.fetch_add(duration, Ordering::Relaxed);
}
// pushing the source to `next_sounds`
let source = UniformSourceIterator::new(source, self.channels, self.samples_rate);
let source = Box::new(source);
self.next_sounds.lock().unwrap().push(source);
}
/// Changes the volume of the sound played by this sink.
#[inline]
pub fn set_volume(&self, value: f32) {
let commands = self.engine.commands.lock().unwrap();
commands.send(Command::SetVolume(self.source_id, value)).unwrap();
}
/// Stops the sound.
// note that this method could take `self` instead of `&self`, but it makes the `Sink` object's
// life easier not to take `self`
#[inline]
pub fn stop(&self) {
let commands = self.engine.commands.lock().unwrap();
commands.send(Command::Stop(self.source_id)).unwrap();
if let Some(ref thread) = self.engine.thread {
thread.unpark();
}
}
/// Returns the minimum estimated duration of the sound being played by this sink.
#[inline]
pub fn get_min_remaining_duration(&self) -> Duration {
Duration::from_millis(self.remaining_duration_ms.load(Ordering::Relaxed) as u64)
}
}
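// Usage sketch (illustrative, not part of the original file; `get_default_endpoint`
// is assumed from the cpal API of this era, and `source` stands in for any value
// implementing `Source`):
//
//     let engine = Engine::new();
//     let endpoint = cpal::get_default_endpoint().expect("no audio output");
//     let handle = engine.start(&endpoint);
//     handle.append(source);
//     handle.set_volume(0.5);
//     handle.stop(); // must be called explicitly; dropping the handle keeps playing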
/// A command sent by the regular threads to the background thread.
pub enum Command {
/// Adds a new voice to the list of voices to process.
Play(Endpoint, Option<Voice>, QueueIterator, Arc<AtomicUsize>),
/// Stops a voice.
Stop(usize),
/// Changes the volume of a voice.
SetVolume(usize, f32),
}
fn background(rx: Receiver<Command>) {
// for each endpoint name, stores the voice and the list of sounds with their volume
let mut voices: HashMap<String, (Voice, Vec<(QueueIterator, Arc<AtomicUsize>, f32)>)> = HashMap::new();
// list of sounds to stop playing
let mut sounds_to_remove: Vec<*const Mutex<Vec<Box<Iterator<Item = f32> + Send>>>> = Vec::new();
// stores the time when the next loop must start
let mut next_loop_timer = time::precise_time_ns();
loop {
// sleeping so that we get a loop every `FIXED_STEP_MS` millisecond
{
let now = time::precise_time_ns();
if next_loop_timer > now + 1000000 /* 1ms */ {
let sleep = next_loop_timer - now;
thread::park_timeout(Duration::from_millis(sleep / 1000000));
}
next_loop_timer += FIXED_STEP_NS;
}
// polling for new commands
if let Ok(command) = rx.try_recv() {
match command {
Command::Play(endpoint, new_voice, decoder, remaining_duration_ms) => {
let mut entry = voices.entry(endpoint.get_name()).or_insert_with(|| {
(new_voice.unwrap(), Vec::new())
});
entry.1.push((decoder, remaining_duration_ms, 1.0));
},
Command::Stop(decoder) => {
for (_, &mut (_, ref mut sounds)) in voices.iter_mut() {
sounds.retain(|dec| {
&*dec.0.next as *const Mutex<_> as *const u8 as usize != decoder
})
}
},
Command::SetVolume(decoder, volume) => {
for (_, &mut (_, ref mut sounds)) in voices.iter_mut() {
if let Some(d) = sounds.iter_mut()
.find(|dec| &*dec.0.next as *const Mutex<_> as *const u8 as usize == decoder)
{
d.2 = volume;
}
}
},
}
}
// removing sounds that have finished playing
for decoder in mem::replace(&mut sounds_to_remove, Vec::new()) {
for (_, &mut (_, ref mut sounds)) in voices.iter_mut() {
sounds.retain(|dec| &*dec.0.next as *const Mutex<_> != decoder);
}
}
// updating the existing sounds
for (_, &mut (ref mut voice, ref mut sounds)) in voices.iter_mut() {
// we want the number of samples remaining to be processed by the sound to be around
// twice the number of samples that are being processed in one loop, with a minimum of 2 periods
let samples_read_per_loop = (voice.get_samples_rate().0 * voice.get_channels() as u32 * FIXED_STEP_MS / 1000) as usize;
let pending_samples = voice.get_pending_samples();
let period = cmp::max(voice.get_period(), 1);
let samples_required_in_buffer = cmp::max(samples_read_per_loop * 2, period * 2);
// writing to the output
if pending_samples < samples_required_in_buffer {
// building an iterator that produces samples from `sounds`
let samples_iter = (0..).map(|_| {
sounds.iter_mut().map(|s| s.0.next().unwrap_or(0.0) * s.2)
.fold(0.0, |a, b| { let v = a + b; if v > 1.0 { 1.0 } else if v < -1.0 { -1.0 } else { v } })
});
let mut buffer = voice.append_data(samples_required_in_buffer - pending_samples);
match buffer {
UnknownTypeBuffer::U16(ref mut buffer) => {
for (o, i) in buffer.iter_mut().zip(samples_iter) { *o = i.to_u16(); }
},
UnknownTypeBuffer::I16(ref mut buffer) => {
for (o, i) in buffer.iter_mut().zip(samples_iter) { *o = i.to_i16(); }
},
UnknownTypeBuffer::F32(ref mut buffer) => {
for (o, i) in buffer.iter_mut().zip(samples_iter) { *o = i; }
},
}
}
// updating the contents of `remaining_duration_ms`
for &(ref decoder, ref remaining_duration_ms, _) in sounds.iter() {
let (num_samples, _) = decoder.size_hint();
// TODO: differentiate sounds from this sink from sounds from other sinks
let num_samples = num_samples + voice.get_pending_samples();
let value = (num_samples as u64 * 1000 / (voice.get_channels() as u64 *
voice.get_samples_rate().0 as u64)) as u32;
remaining_duration_ms.store(value as usize, Ordering::Relaxed);
}
// TODO: do better
voice.play();
}
}
}
/// Main source of samples for a voice.
pub struct QueueIterator {
/// The current iterator that produces samples.
current: Box<Iterator<Item = f32> + Send>,
/// A `Vec` containing the next iterators to play. Shared with other threads so they can add
/// sounds to the list.
next: Arc<Mutex<Vec<Box<Iterator<Item = f32> + Send>>>>,
}
impl Iterator for QueueIterator {
type Item = f32;
#[inline]
fn next(&mut self) -> Option<f32> {
loop {
// basic situation that will happen most of the time
if let Some(sample) = self.current.next() {
return Some(sample);
}
let next = {
let mut next = self.next.lock().unwrap();
if next.len() == 0 {
// if there's no iter waiting, we create a dummy iter with 1000 null samples
// this avoids a spinlock
Box::new((0..1000).map(|_| 0.0f32)) as Box<Iterator<Item = f32> + Send>
} else {
next.remove(0)
}
};
self.current = next;
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// TODO: slow? benchmark this
let next_hints = self.next.lock().unwrap().iter()
.map(|i| i.size_hint().0).fold(0, |a, b| a + b);
(self.current.size_hint().0 + next_hints, None)
}
}
gles2.rs | use std::mem::size_of;
use std::ptr;
use crossfont::RasterizedGlyph;
use log::info;
use alacritty_terminal::term::cell::Flags;
use crate::display::content::RenderableCell;
use crate::display::SizeInfo;
use crate::gl;
use crate::gl::types::*;
use crate::renderer::shader::{ShaderProgram, ShaderVersion};
use crate::renderer::{cstr, Error, GlExtensions};
use super::atlas::{Atlas, ATLAS_SIZE};
use super::{
glsl3, Glyph, LoadGlyph, LoaderApi, RenderingGlyphFlags, RenderingPass, TextRenderApi,
TextRenderBatch, TextRenderer, TextShader,
};
// Shader source.
static TEXT_SHADER_F: &str = include_str!("../../../res/gles2/text.f.glsl");
static TEXT_SHADER_V: &str = include_str!("../../../res/gles2/text.v.glsl");
#[derive(Debug)]
pub struct Gles2Renderer {
program: TextShaderProgram,
vao: GLuint,
vbo: GLuint,
ebo: GLuint,
atlas: Vec<Atlas>,
batch: Batch,
current_atlas: usize,
active_tex: GLuint,
dual_source_blending: bool,
}
impl Gles2Renderer {
pub fn new(allow_dsb: bool, is_gles_context: bool) -> Result<Self, Error> {
info!("Using OpenGL ES 2.0 renderer");
let dual_source_blending = allow_dsb
&& (GlExtensions::contains("GL_EXT_blend_func_extended")
|| GlExtensions::contains("GL_ARB_blend_func_extended"));
if is_gles_context {
info!("Running on OpenGL ES context");
}
if dual_source_blending {
info!("Using dual source blending");
}
let program = TextShaderProgram::new(ShaderVersion::Gles2, dual_source_blending)?;
let mut vao: GLuint = 0;
let mut vbo: GLuint = 0;
let mut ebo: GLuint = 0;
let mut vertex_indices = Vec::with_capacity(BATCH_MAX / 4 * 6);
for index in 0..(BATCH_MAX / 4) as u16 {
let index = index * 4;
vertex_indices.push(index);
vertex_indices.push(index + 1);
vertex_indices.push(index + 3);
vertex_indices.push(index + 1);
vertex_indices.push(index + 2);
vertex_indices.push(index + 3);
}
unsafe {
gl::Enable(gl::BLEND);
gl::DepthMask(gl::FALSE);
gl::GenVertexArrays(1, &mut vao);
gl::GenBuffers(1, &mut ebo);
gl::GenBuffers(1, &mut vbo);
gl::BindVertexArray(vao);
// Elements buffer.
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
(vertex_indices.capacity() * size_of::<u16>()) as isize,
vertex_indices.as_ptr() as *const _,
gl::STATIC_DRAW,
);
// Vertex buffer.
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
gl::BufferData(
gl::ARRAY_BUFFER,
(BATCH_MAX * size_of::<TextVertex>()) as isize,
ptr::null(),
gl::STREAM_DRAW,
);
let mut index = 0;
let mut size = 0;
macro_rules! add_attr {
($count:expr, $gl_type:expr, $type:ty) => {
gl::VertexAttribPointer(
index,
$count,
$gl_type,
gl::FALSE,
size_of::<TextVertex>() as i32,
size as *const _,
);
gl::EnableVertexAttribArray(index);
#[allow(unused_assignments)]
{
size += $count * size_of::<$type>();
index += 1;
}
};
}
// Cell coords.
add_attr!(2, gl::SHORT, i16);
// Glyph coords.
add_attr!(2, gl::SHORT, i16);
// UV.
add_attr!(2, gl::FLOAT, u32);
// Color and bitmap color.
//
// These are packed together because of an OpenGL driver issue on macOS, where
// separate `vec3(u8)` text color and `u8` glyph color attributes caused
// performance regressions.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Background color.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Cleanup.
gl::BindVertexArray(0);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
}
Ok(Self {
program,
vao,
vbo,
ebo,
atlas: vec![Atlas::new(ATLAS_SIZE, is_gles_context)],
batch: Batch::new(),
current_atlas: 0,
active_tex: 0,
dual_source_blending,
})
}
}
impl Drop for Gles2Renderer {
fn drop(&mut self) {
unsafe {
gl::DeleteBuffers(1, &self.vbo);
gl::DeleteBuffers(1, &self.ebo);
gl::DeleteVertexArrays(1, &self.vao);
}
}
}
impl<'a> TextRenderer<'a> for Gles2Renderer {
type RenderApi = RenderApi<'a>;
type RenderBatch = Batch;
type Shader = TextShaderProgram;
fn program(&self) -> &Self::Shader {
&self.program
}
fn with_api<'b: 'a, F, T>(&'b mut self, _: &'b SizeInfo, func: F) -> T
where
F: FnOnce(Self::RenderApi) -> T,
{
unsafe {
gl::UseProgram(self.program.id());
gl::BindVertexArray(self.vao);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
gl::ActiveTexture(gl::TEXTURE0);
}
let res = func(RenderApi {
active_tex: &mut self.active_tex,
batch: &mut self.batch,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
program: &mut self.program,
dual_source_blending: self.dual_source_blending,
});
unsafe {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
}
res
}
fn loader_api(&mut self) -> LoaderApi<'_> {
LoaderApi {
active_tex: &mut self.active_tex,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
}
}
}
/// Maximum items to be drawn in a batch.
///
/// We use the closest number to `u16::MAX` divisible by 4 (the number of vertices we push per glyph),
/// since it's the maximum possible index in `glDrawElements` in GLES2.
const BATCH_MAX: usize = (u16::MAX - u16::MAX % 4) as usize;
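// Worked out: u16::MAX = 65535 and 65535 % 4 = 3, so BATCH_MAX = 65532
// vertices, i.e. 16383 glyph quads of 4 vertices each (6 indices per quad).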
#[derive(Debug)]
pub struct Batch {
tex: GLuint,
vertices: Vec<TextVertex>,
}
impl Batch {
fn new() -> Self {
Self { tex: 0, vertices: Vec::with_capacity(BATCH_MAX) }
}
#[inline]
fn len(&self) -> usize {
self.vertices.len()
}
#[inline]
fn capacity(&self) -> usize {
BATCH_MAX
}
#[inline]
fn size(&self) -> usize {
self.len() * size_of::<TextVertex>()
}
#[inline]
fn clear(&mut self) {
self.vertices.clear();
}
}
impl TextRenderBatch for Batch {
#[inline]
fn tex(&self) -> GLuint {
self.tex
}
#[inline]
fn full(&self) -> bool {
self.capacity() == self.len()
}
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, size_info: &SizeInfo) {
if self.is_empty() {
self.tex = glyph.tex_id;
}
// Calculate the cell position.
let x = cell.point.column.0 as i16 * size_info.cell_width() as i16;
let y = cell.point.line as i16 * size_info.cell_height() as i16;
// Calculate the glyph position.
let glyph_x = cell.point.column.0 as i16 * size_info.cell_width() as i16 + glyph.left;
let glyph_y = (cell.point.line + 1) as i16 * size_info.cell_height() as i16 - glyph.top;
let colored = if glyph.multicolor {
RenderingGlyphFlags::COLORED
} else {
RenderingGlyphFlags::empty()
};
let is_wide = if cell.flags.contains(Flags::WIDE_CHAR) { 2 } else { 1 };
let mut vertex = TextVertex {
x,
y: y + size_info.cell_height() as i16,
glyph_x,
glyph_y: glyph_y + glyph.height,
u: glyph.uv_left,
v: glyph.uv_bot + glyph.uv_height,
r: cell.fg.r,
g: cell.fg.g,
b: cell.fg.b,
colored,
bg_r: cell.bg.r,
bg_g: cell.bg.g,
bg_b: cell.bg.b,
bg_a: (cell.bg_alpha * 255.0) as u8,
};
self.vertices.push(vertex);
vertex.y = y;
vertex.glyph_y = glyph_y;
vertex.u = glyph.uv_left;
vertex.v = glyph.uv_bot;
self.vertices.push(vertex);
vertex.x = x + is_wide * size_info.cell_width() as i16;
vertex.glyph_x = glyph_x + glyph.width;
vertex.u = glyph.uv_left + glyph.uv_width;
vertex.v = glyph.uv_bot;
self.vertices.push(vertex);
vertex.x = x + is_wide * size_info.cell_width() as i16;
vertex.y = y + size_info.cell_height() as i16;
vertex.glyph_x = glyph_x + glyph.width;
vertex.glyph_y = glyph_y + glyph.height;
vertex.u = glyph.uv_left + glyph.uv_width;
vertex.v = glyph.uv_bot + glyph.uv_height;
self.vertices.push(vertex);
}
}
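// Layout sketch for the quad pushed by `add_item` above. With the element
// buffer built in `Gles2Renderer::new`, each quad is drawn as the triangles
// (0, 1, 3) and (1, 2, 3); `w` is the cell width (doubled for wide chars):
//
//   1 ---- 2      0: (x,     y + cell_h)   bottom-left
//   | \    |      1: (x,     y)            top-left
//   |   \  |      2: (x + w, y)            top-right
//   0 ---- 3      3: (x + w, y + cell_h)   bottom-right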
#[derive(Debug)]
pub struct RenderApi<'a> {
active_tex: &'a mut GLuint,
batch: &'a mut Batch,
atlas: &'a mut Vec<Atlas>,
current_atlas: &'a mut usize,
program: &'a mut TextShaderProgram,
dual_source_blending: bool,
}
impl<'a> Drop for RenderApi<'a> {
fn drop(&mut self) {
if !self.batch.is_empty() {
self.render_batch();
}
}
}
impl<'a> LoadGlyph for RenderApi<'a> {
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph {
Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized)
}
fn clear(&mut self) {
Atlas::clear_atlas(self.atlas, self.current_atlas)
}
}
impl<'a> TextRenderApi<Batch> for RenderApi<'a> {
fn batch(&mut self) -> &mut Batch {
self.batch
}
fn render_batch(&mut self) {
unsafe {
gl::BufferSubData(
gl::ARRAY_BUFFER,
0,
self.batch.size() as isize,
self.batch.vertices.as_ptr() as *const _,
);
}
if *self.active_tex != self.batch.tex() {
unsafe {
gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
}
*self.active_tex = self.batch.tex();
}
unsafe {
let num_indices = (self.batch.len() / 4 * 6) as i32;
// The rendering is inspired by
// https://github.com/servo/webrender/blob/master/webrender/doc/text-rendering.md.
// Draw background.
self.program.set_rendering_pass(RenderingPass::Background);
gl::BlendFunc(gl::ONE, gl::ZERO);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
self.program.set_rendering_pass(RenderingPass::SubpixelPass1);
if self.dual_source_blending {
// Text rendering pass.
gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR);
} else {
// First text rendering pass.
gl::BlendFuncSeparate(gl::ZERO, gl::ONE_MINUS_SRC_COLOR, gl::ZERO, gl::ONE);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
// Second text rendering pass.
self.program.set_rendering_pass(RenderingPass::SubpixelPass2);
gl::BlendFuncSeparate(gl::ONE_MINUS_DST_ALPHA, gl::ONE, gl::ZERO, gl::ONE);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
// Third text rendering pass.
self.program.set_rendering_pass(RenderingPass::SubpixelPass3);
gl::BlendFuncSeparate(gl::ONE, gl::ONE, gl::ONE, gl::ONE_MINUS_SRC_ALPHA);
}
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
}
self.batch.clear();
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct TextVertex {
// Cell coordinates.
x: i16,
y: i16,
// Glyph coordinates.
glyph_x: i16,
glyph_y: i16,
// Offsets into Atlas.
u: f32,
v: f32,
// Color.
r: u8,
g: u8,
b: u8,
// Whether the glyph is colored.
colored: RenderingGlyphFlags,
// Background color.
bg_r: u8,
bg_g: u8,
bg_b: u8,
bg_a: u8,
}
#[derive(Debug)]
pub struct TextShaderProgram {
/// Shader program.
program: ShaderProgram,
/// Projection scale and offset uniform.
u_projection: GLint,
/// Rendering pass.
///
/// For dual source blending, there are 2 passes; one for background, another for text,
/// similar to the GLSL3 renderer.
///
/// If GL_EXT_blend_func_extended is not available, the rendering is split into 4 passes:
/// one for the background and three for subpixel text rendering, following
/// <https://github.com/servo/webrender/blob/master/webrender/doc/text-rendering.md>.
u_rendering_pass: GLint,
}
impl TextShaderProgram {
pub fn new(shader_version: ShaderVersion, dual_source_blending: bool) -> Result<Self, Error> {
let fragment_shader =
if dual_source_blending { &glsl3::TEXT_SHADER_F } else { &TEXT_SHADER_F };
let program = ShaderProgram::new(shader_version, None, TEXT_SHADER_V, fragment_shader)?;
Ok(Self {
u_projection: program.get_uniform_location(cstr!("projection"))?,
u_rendering_pass: program.get_uniform_location(cstr!("renderingPass"))?,
program,
})
}
fn set_rendering_pass(&self, rendering_pass: RenderingPass) {
unsafe { gl::Uniform1i(self.u_rendering_pass, rendering_pass as i32) }
}
}
impl TextShader for TextShaderProgram {
fn id(&self) -> GLuint {
self.program.id()
}
fn projection_uniform(&self) -> GLint {
self.u_projection
}
}
gles2.rs | use std::mem::size_of;
use std::ptr;
use crossfont::RasterizedGlyph;
use log::info;
use alacritty_terminal::term::cell::Flags;
use crate::display::content::RenderableCell;
use crate::display::SizeInfo;
use crate::gl;
use crate::gl::types::*;
use crate::renderer::shader::{ShaderProgram, ShaderVersion};
use crate::renderer::{cstr, Error, GlExtensions};
use super::atlas::{Atlas, ATLAS_SIZE};
use super::{
glsl3, Glyph, LoadGlyph, LoaderApi, RenderingGlyphFlags, RenderingPass, TextRenderApi,
TextRenderBatch, TextRenderer, TextShader,
};
// Shader source.
static TEXT_SHADER_F: &str = include_str!("../../../res/gles2/text.f.glsl");
static TEXT_SHADER_V: &str = include_str!("../../../res/gles2/text.v.glsl");
#[derive(Debug)]
pub struct Gles2Renderer {
program: TextShaderProgram,
vao: GLuint,
vbo: GLuint,
ebo: GLuint,
atlas: Vec<Atlas>,
batch: Batch,
current_atlas: usize,
active_tex: GLuint,
dual_source_blending: bool,
}
impl Gles2Renderer {
pub fn new(allow_dsb: bool, is_gles_context: bool) -> Result<Self, Error> {
info!("Using OpenGL ES 2.0 renderer");
let dual_source_blending = allow_dsb
&& (GlExtensions::contains("GL_EXT_blend_func_extended")
|| GlExtensions::contains("GL_ARB_blend_func_extended"));
if is_gles_context {
info!("Running on OpenGL ES context");
}
if dual_source_blending {
info!("Using dual source blending");
}
let program = TextShaderProgram::new(ShaderVersion::Gles2, dual_source_blending)?;
let mut vao: GLuint = 0;
let mut vbo: GLuint = 0;
let mut ebo: GLuint = 0;
let mut vertex_indices = Vec::with_capacity(BATCH_MAX / 4 * 6);
for index in 0..(BATCH_MAX / 4) as u16 {
let index = index * 4;
vertex_indices.push(index);
vertex_indices.push(index + 1);
vertex_indices.push(index + 3);
vertex_indices.push(index + 1);
vertex_indices.push(index + 2);
vertex_indices.push(index + 3);
}
unsafe {
gl::Enable(gl::BLEND);
gl::DepthMask(gl::FALSE);
gl::GenVertexArrays(1, &mut vao);
gl::GenBuffers(1, &mut ebo);
gl::GenBuffers(1, &mut vbo);
gl::BindVertexArray(vao);
// Elements buffer.
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
(vertex_indices.capacity() * size_of::<u16>()) as isize,
vertex_indices.as_ptr() as *const _,
gl::STATIC_DRAW,
);
// Vertex buffer.
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
gl::BufferData(
gl::ARRAY_BUFFER,
(BATCH_MAX * size_of::<TextVertex>()) as isize,
ptr::null(),
gl::STREAM_DRAW,
);
let mut index = 0;
let mut size = 0;
macro_rules! add_attr {
($count:expr, $gl_type:expr, $type:ty) => {
gl::VertexAttribPointer(
index,
$count,
$gl_type,
gl::FALSE,
size_of::<TextVertex>() as i32,
size as *const _,
);
gl::EnableVertexAttribArray(index);
#[allow(unused_assignments)]
{
size += $count * size_of::<$type>();
index += 1;
}
};
}
// Cell coords.
add_attr!(2, gl::SHORT, i16);
// Glyph coords.
add_attr!(2, gl::SHORT, i16);
// UV.
add_attr!(2, gl::FLOAT, u32);
// Color and bitmap color.
//
// These are packed together because of an OpenGL driver issue on macOS, which caused a
// `vec3(u8)` text color and a `u8` for glyph color to cause performance regressions.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Background color.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Cleanup.
gl::BindVertexArray(0);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
}
Ok(Self {
program,
vao,
vbo,
ebo,
atlas: vec![Atlas::new(ATLAS_SIZE, is_gles_context)],
batch: Batch::new(),
current_atlas: 0,
active_tex: 0,
dual_source_blending,
})
}
}
impl Drop for Gles2Renderer {
fn drop(&mut self) {
unsafe {
gl::DeleteBuffers(1, &self.vbo);
gl::DeleteBuffers(1, &self.ebo);
gl::DeleteVertexArrays(1, &self.vao);
}
}
}
impl<'a> TextRenderer<'a> for Gles2Renderer {
type RenderApi = RenderApi<'a>;
type RenderBatch = Batch;
type Shader = TextShaderProgram;
fn program(&self) -> &Self::Shader {
&self.program
}
fn with_api<'b: 'a, F, T>(&'b mut self, _: &'b SizeInfo, func: F) -> T
where
F: FnOnce(Self::RenderApi) -> T,
{
unsafe {
gl::UseProgram(self.program.id());
gl::BindVertexArray(self.vao);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
gl::ActiveTexture(gl::TEXTURE0);
}
let res = func(RenderApi {
active_tex: &mut self.active_tex,
batch: &mut self.batch,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
program: &mut self.program,
dual_source_blending: self.dual_source_blending,
});
unsafe {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
}
res
}
fn loader_api(&mut self) -> LoaderApi<'_> {
LoaderApi {
active_tex: &mut self.active_tex,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
}
}
}
/// Maximum items to be drawn in a batch.
///
/// We use the largest multiple of 4 (the number of vertices we push per glyph) that fits in
/// `u16::MAX`, since that is the highest index `glDrawElements` can address with `GL_UNSIGNED_SHORT` in GLES2.
const BATCH_MAX: usize = (u16::MAX - u16::MAX % 4) as usize;
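// With u16::MAX = 65535 and 65535 % 4 = 3, BATCH_MAX is 65532 vertices, i.e. 16383 glyph
// quads and 16383 * 6 = 98298 element indices per draw call.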
#[derive(Debug)]
pub struct Batch {
tex: GLuint,
vertices: Vec<TextVertex>,
}
impl Batch {
fn new() -> Self {
Self { tex: 0, vertices: Vec::with_capacity(BATCH_MAX) }
}
#[inline]
fn len(&self) -> usize {
self.vertices.len()
}
#[inline]
fn capacity(&self) -> usize {
BATCH_MAX
}
#[inline]
fn size(&self) -> usize {
self.len() * size_of::<TextVertex>()
}
#[inline]
fn clear(&mut self) {
self.vertices.clear();
}
}
impl TextRenderBatch for Batch {
#[inline]
fn tex(&self) -> GLuint {
self.tex
}
#[inline]
fn full(&self) -> bool {
self.capacity() == self.len()
}
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, size_info: &SizeInfo) {
if self.is_empty() {
self.tex = glyph.tex_id;
}
// Calculate the cell position.
let x = cell.point.column.0 as i16 * size_info.cell_width() as i16;
let y = cell.point.line as i16 * size_info.cell_height() as i16;
// Calculate the glyph position.
let glyph_x = cell.point.column.0 as i16 * size_info.cell_width() as i16 + glyph.left;
let glyph_y = (cell.point.line + 1) as i16 * size_info.cell_height() as i16 - glyph.top;
let colored = if glyph.multicolor {
RenderingGlyphFlags::COLORED
} else | ;
let is_wide = if cell.flags.contains(Flags::WIDE_CHAR) { 2 } else { 1 };
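// Push the quad corners in the element buffer's order: bottom-left, top-left, top-right,
// bottom-right (y grows downward in cell coordinates).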
let mut vertex = TextVertex {
x,
y: y + size_info.cell_height() as i16,
glyph_x,
glyph_y: glyph_y + glyph.height,
u: glyph.uv_left,
v: glyph.uv_bot + glyph.uv_height,
r: cell.fg.r,
g: cell.fg.g,
b: cell.fg.b,
colored,
bg_r: cell.bg.r,
bg_g: cell.bg.g,
bg_b: cell.bg.b,
bg_a: (cell.bg_alpha * 255.0) as u8,
};
self.vertices.push(vertex);
vertex.y = y;
vertex.glyph_y = glyph_y;
vertex.u = glyph.uv_left;
vertex.v = glyph.uv_bot;
self.vertices.push(vertex);
vertex.x = x + is_wide * size_info.cell_width() as i16;
vertex.glyph_x = glyph_x + glyph.width;
vertex.u = glyph.uv_left + glyph.uv_width;
vertex.v = glyph.uv_bot;
self.vertices.push(vertex);
vertex.x = x + is_wide * size_info.cell_width() as i16;
vertex.y = y + size_info.cell_height() as i16;
vertex.glyph_x = glyph_x + glyph.width;
vertex.glyph_y = glyph_y + glyph.height;
vertex.u = glyph.uv_left + glyph.uv_width;
vertex.v = glyph.uv_bot + glyph.uv_height;
self.vertices.push(vertex);
}
}
#[derive(Debug)]
pub struct RenderApi<'a> {
active_tex: &'a mut GLuint,
batch: &'a mut Batch,
atlas: &'a mut Vec<Atlas>,
current_atlas: &'a mut usize,
program: &'a mut TextShaderProgram,
dual_source_blending: bool,
}
impl<'a> Drop for RenderApi<'a> {
fn drop(&mut self) {
if !self.batch.is_empty() {
self.render_batch();
}
}
}
impl<'a> LoadGlyph for RenderApi<'a> {
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph {
Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized)
}
fn clear(&mut self) {
Atlas::clear_atlas(self.atlas, self.current_atlas)
}
}
impl<'a> TextRenderApi<Batch> for RenderApi<'a> {
fn batch(&mut self) -> &mut Batch {
self.batch
}
fn render_batch(&mut self) {
unsafe {
gl::BufferSubData(
gl::ARRAY_BUFFER,
0,
self.batch.size() as isize,
self.batch.vertices.as_ptr() as *const _,
);
}
if *self.active_tex != self.batch.tex() {
unsafe {
gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
}
*self.active_tex = self.batch.tex();
}
unsafe {
let num_indices = (self.batch.len() / 4 * 6) as i32;
// The rendering is inspired by
// https://github.com/servo/webrender/blob/master/webrender/doc/text-rendering.md.
// Draw background.
self.program.set_rendering_pass(RenderingPass::Background);
gl::BlendFunc(gl::ONE, gl::ZERO);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
self.program.set_rendering_pass(RenderingPass::SubpixelPass1);
if self.dual_source_blending {
// Text rendering pass.
gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR);
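// With dual source blending the shader's second output (src1) acts as a per-channel
// coverage mask: dst = src0 * src1 + dst * (1 - src1).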
} else {
// First text rendering pass.
gl::BlendFuncSeparate(gl::ZERO, gl::ONE_MINUS_SRC_COLOR, gl::ZERO, gl::ONE);
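// Blend factors (ZERO, ONE_MINUS_SRC_COLOR) compute dst.rgb * (1 - src.rgb), erasing the
// glyph's subpixel coverage from the background while leaving dst alpha untouched.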
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
// Second text rendering pass.
self.program.set_rendering_pass(RenderingPass::SubpixelPass2);
gl::BlendFuncSeparate(gl::ONE_MINUS_DST_ALPHA, gl::ONE, gl::ZERO, gl::ONE);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
// Third text rendering pass.
self.program.set_rendering_pass(RenderingPass::SubpixelPass3);
gl::BlendFuncSeparate(gl::ONE, gl::ONE, gl::ONE, gl::ONE_MINUS_SRC_ALPHA);
}
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
}
self.batch.clear();
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct TextVertex {
// Cell coordinates.
x: i16,
y: i16,
// Glyph coordinates.
glyph_x: i16,
glyph_y: i16,
// Offsets into Atlas.
u: f32,
v: f32,
// Color.
r: u8,
g: u8,
b: u8,
// Whether the glyph is colored.
colored: RenderingGlyphFlags,
// Background color.
bg_r: u8,
bg_g: u8,
bg_b: u8,
bg_a: u8,
}
#[derive(Debug)]
pub struct TextShaderProgram {
/// Shader program.
program: ShaderProgram,
/// Projection scale and offset uniform.
u_projection: GLint,
/// Rendering pass.
///
/// For dual source blending, there are 2 passes; one for background, another for text,
/// similar to the GLSL3 renderer.
///
/// If GL_EXT_blend_func_extended is not available, the rendering is split into 4 passes.
/// One is used for the background and the rest to perform subpixel text rendering according to
/// <https://github.com/servo/webrender/blob/master/webrender/doc/text-rendering.md>.
///
/// The text itself is thus rendered in the three remaining subpixel passes.
u_rendering_pass: GLint,
}
impl TextShaderProgram {
pub fn new(shader_version: ShaderVersion, dual_source_blending: bool) -> Result<Self, Error> {
let fragment_shader =
if dual_source_blending { &glsl3::TEXT_SHADER_F } else { &TEXT_SHADER_F };
let program = ShaderProgram::new(shader_version, None, TEXT_SHADER_V, fragment_shader)?;
Ok(Self {
u_projection: program.get_uniform_location(cstr!("projection"))?,
u_rendering_pass: program.get_uniform_location(cstr!("renderingPass"))?,
program,
})
}
fn set_rendering_pass(&self, rendering_pass: RenderingPass) {
unsafe { gl::Uniform1i(self.u_rendering_pass, rendering_pass as i32) }
}
}
impl TextShader for TextShaderProgram {
fn id(&self) -> GLuint {
self.program.id()
}
fn projection_uniform(&self) -> GLint {
self.u_projection
}
}
| {
RenderingGlyphFlags::empty()
} | conditional_block |
gles2.rs | use std::mem::size_of;
use std::ptr;
use crossfont::RasterizedGlyph;
use log::info;
use alacritty_terminal::term::cell::Flags;
use crate::display::content::RenderableCell;
use crate::display::SizeInfo;
use crate::gl;
use crate::gl::types::*;
use crate::renderer::shader::{ShaderProgram, ShaderVersion};
use crate::renderer::{cstr, Error, GlExtensions};
use super::atlas::{Atlas, ATLAS_SIZE};
use super::{
glsl3, Glyph, LoadGlyph, LoaderApi, RenderingGlyphFlags, RenderingPass, TextRenderApi,
TextRenderBatch, TextRenderer, TextShader,
};
// Shader source.
static TEXT_SHADER_F: &str = include_str!("../../../res/gles2/text.f.glsl");
static TEXT_SHADER_V: &str = include_str!("../../../res/gles2/text.v.glsl");
#[derive(Debug)]
pub struct Gles2Renderer {
program: TextShaderProgram,
vao: GLuint,
vbo: GLuint,
ebo: GLuint,
atlas: Vec<Atlas>,
batch: Batch,
current_atlas: usize,
active_tex: GLuint,
dual_source_blending: bool,
}
impl Gles2Renderer {
pub fn new(allow_dsb: bool, is_gles_context: bool) -> Result<Self, Error> {
info!("Using OpenGL ES 2.0 renderer");
let dual_source_blending = allow_dsb
&& (GlExtensions::contains("GL_EXT_blend_func_extended")
|| GlExtensions::contains("GL_ARB_blend_func_extended"));
if is_gles_context {
info!("Running on OpenGL ES context");
}
if dual_source_blending {
info!("Using dual source blending");
}
let program = TextShaderProgram::new(ShaderVersion::Gles2, dual_source_blending)?;
let mut vao: GLuint = 0;
let mut vbo: GLuint = 0;
let mut ebo: GLuint = 0;
let mut vertex_indices = Vec::with_capacity(BATCH_MAX / 4 * 6);
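// Each glyph quad shares 4 vertices between 2 triangles: [i, i + 1, i + 3] and [i + 1, i + 2, i + 3].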
for index in 0..(BATCH_MAX / 4) as u16 {
let index = index * 4;
vertex_indices.push(index);
vertex_indices.push(index + 1);
vertex_indices.push(index + 3);
vertex_indices.push(index + 1);
vertex_indices.push(index + 2);
vertex_indices.push(index + 3);
}
unsafe {
gl::Enable(gl::BLEND);
gl::DepthMask(gl::FALSE);
gl::GenVertexArrays(1, &mut vao);
gl::GenBuffers(1, &mut ebo);
gl::GenBuffers(1, &mut vbo);
gl::BindVertexArray(vao);
// Elements buffer.
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
(vertex_indices.capacity() * size_of::<u16>()) as isize,
vertex_indices.as_ptr() as *const _,
gl::STATIC_DRAW,
);
// Vertex buffer.
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
gl::BufferData(
gl::ARRAY_BUFFER,
(BATCH_MAX * size_of::<TextVertex>()) as isize,
ptr::null(),
gl::STREAM_DRAW,
);
let mut index = 0;
let mut size = 0;
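// Each add_attr! invocation below registers one interleaved attribute: `size` is the running
// byte offset into `TextVertex` (passed as the attribute pointer) and `index` is the attribute
// location; both advance after every attribute.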
macro_rules! add_attr {
($count:expr, $gl_type:expr, $type:ty) => {
gl::VertexAttribPointer(
index,
$count,
$gl_type,
gl::FALSE,
size_of::<TextVertex>() as i32,
size as *const _,
);
gl::EnableVertexAttribArray(index);
#[allow(unused_assignments)]
{
size += $count * size_of::<$type>();
index += 1;
}
};
}
// Cell coords.
add_attr!(2, gl::SHORT, i16);
// Glyph coords.
add_attr!(2, gl::SHORT, i16);
// UV.
add_attr!(2, gl::FLOAT, u32);
// Color and bitmap color.
//
// These are packed together to work around an OpenGL driver issue on macOS, where passing a
// `vec3(u8)` text color and a separate `u8` glyph color caused performance regressions.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Background color.
add_attr!(4, gl::UNSIGNED_BYTE, u8);
// Cleanup.
gl::BindVertexArray(0);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
}
Ok(Self {
program,
vao,
vbo,
ebo,
atlas: vec![Atlas::new(ATLAS_SIZE, is_gles_context)],
batch: Batch::new(),
current_atlas: 0,
active_tex: 0,
dual_source_blending,
})
}
}
impl Drop for Gles2Renderer {
fn drop(&mut self) {
unsafe {
gl::DeleteBuffers(1, &self.vbo);
gl::DeleteBuffers(1, &self.ebo);
gl::DeleteVertexArrays(1, &self.vao);
}
}
}
impl<'a> TextRenderer<'a> for Gles2Renderer {
type RenderApi = RenderApi<'a>;
type RenderBatch = Batch;
type Shader = TextShaderProgram;
fn program(&self) -> &Self::Shader {
&self.program
}
fn with_api<'b: 'a, F, T>(&'b mut self, _: &'b SizeInfo, func: F) -> T
where
F: FnOnce(Self::RenderApi) -> T,
{
unsafe {
gl::UseProgram(self.program.id());
gl::BindVertexArray(self.vao);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
gl::ActiveTexture(gl::TEXTURE0);
}
let res = func(RenderApi {
active_tex: &mut self.active_tex,
batch: &mut self.batch,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
program: &mut self.program,
dual_source_blending: self.dual_source_blending,
});
unsafe {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
}
res
}
fn loader_api(&mut self) -> LoaderApi<'_> {
LoaderApi {
active_tex: &mut self.active_tex,
atlas: &mut self.atlas,
current_atlas: &mut self.current_atlas,
}
}
}
/// Maximum items to be drawn in a batch.
///
/// We use the largest multiple of 4 (the number of vertices we push per glyph) that fits in
/// `u16::MAX`, since that is the highest index `glDrawElements` can address with `GL_UNSIGNED_SHORT` in GLES2.
const BATCH_MAX: usize = (u16::MAX - u16::MAX % 4) as usize;
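// With u16::MAX = 65535 and 65535 % 4 = 3, BATCH_MAX is 65532 vertices, i.e. 16383 glyph
// quads and 16383 * 6 = 98298 element indices per draw call.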
#[derive(Debug)]
pub struct Batch {
tex: GLuint,
vertices: Vec<TextVertex>,
}
impl Batch {
fn new() -> Self {
Self { tex: 0, vertices: Vec::with_capacity(BATCH_MAX) }
}
#[inline]
fn len(&self) -> usize {
self.vertices.len()
}
#[inline]
fn capacity(&self) -> usize {
BATCH_MAX
}
#[inline]
fn size(&self) -> usize {
self.len() * size_of::<TextVertex>()
}
#[inline]
fn clear(&mut self) {
self.vertices.clear();
}
}
impl TextRenderBatch for Batch {
#[inline]
fn tex(&self) -> GLuint {
self.tex
}
#[inline]
fn full(&self) -> bool {
self.capacity() == self.len()
}
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, size_info: &SizeInfo) {
if self.is_empty() {
self.tex = glyph.tex_id;
}
// Calculate the cell position.
let x = cell.point.column.0 as i16 * size_info.cell_width() as i16;
let y = cell.point.line as i16 * size_info.cell_height() as i16;
// Calculate the glyph position.
let glyph_x = cell.point.column.0 as i16 * size_info.cell_width() as i16 + glyph.left;
let glyph_y = (cell.point.line + 1) as i16 * size_info.cell_height() as i16 - glyph.top;
let colored = if glyph.multicolor {
RenderingGlyphFlags::COLORED
} else {
RenderingGlyphFlags::empty()
};
let is_wide = if cell.flags.contains(Flags::WIDE_CHAR) { 2 } else { 1 };
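// Push the quad corners in the element buffer's order: bottom-left, top-left, top-right,
// bottom-right (y grows downward in cell coordinates).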
let mut vertex = TextVertex {
x,
y: y + size_info.cell_height() as i16,
glyph_x,
glyph_y: glyph_y + glyph.height,
u: glyph.uv_left,
v: glyph.uv_bot + glyph.uv_height,
r: cell.fg.r,
g: cell.fg.g,
b: cell.fg.b,
colored,
bg_r: cell.bg.r,
bg_g: cell.bg.g,
bg_b: cell.bg.b,
bg_a: (cell.bg_alpha * 255.0) as u8,
};
self.vertices.push(vertex);
vertex.y = y;
vertex.glyph_y = glyph_y;
vertex.u = glyph.uv_left;
vertex.v = glyph.uv_bot;
self.vertices.push(vertex);
vertex.x = x + is_wide * size_info.cell_width() as i16;
vertex.glyph_x = glyph_x + glyph.width;
vertex.u = glyph.uv_left + glyph.uv_width;
vertex.v = glyph.uv_bot;
self.vertices.push(vertex);
vertex.x = x + is_wide * size_info.cell_width() as i16;
vertex.y = y + size_info.cell_height() as i16;
vertex.glyph_x = glyph_x + glyph.width;
vertex.glyph_y = glyph_y + glyph.height;
vertex.u = glyph.uv_left + glyph.uv_width;
vertex.v = glyph.uv_bot + glyph.uv_height;
self.vertices.push(vertex);
}
}
#[derive(Debug)]
pub struct RenderApi<'a> {
active_tex: &'a mut GLuint,
batch: &'a mut Batch,
atlas: &'a mut Vec<Atlas>,
current_atlas: &'a mut usize,
program: &'a mut TextShaderProgram,
dual_source_blending: bool,
}
impl<'a> Drop for RenderApi<'a> {
fn drop(&mut self) |
}
impl<'a> LoadGlyph for RenderApi<'a> {
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph {
Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized)
}
fn clear(&mut self) {
Atlas::clear_atlas(self.atlas, self.current_atlas)
}
}
impl<'a> TextRenderApi<Batch> for RenderApi<'a> {
fn batch(&mut self) -> &mut Batch {
self.batch
}
fn render_batch(&mut self) {
unsafe {
gl::BufferSubData(
gl::ARRAY_BUFFER,
0,
self.batch.size() as isize,
self.batch.vertices.as_ptr() as *const _,
);
}
if *self.active_tex != self.batch.tex() {
unsafe {
gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
}
*self.active_tex = self.batch.tex();
}
unsafe {
let num_indices = (self.batch.len() / 4 * 6) as i32;
// The rendering is inspired by
// https://github.com/servo/webrender/blob/master/webrender/doc/text-rendering.md.
// Draw background.
self.program.set_rendering_pass(RenderingPass::Background);
gl::BlendFunc(gl::ONE, gl::ZERO);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
self.program.set_rendering_pass(RenderingPass::SubpixelPass1);
if self.dual_source_blending {
// Text rendering pass.
gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR);
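// With dual source blending the shader's second output (src1) acts as a per-channel
// coverage mask: dst = src0 * src1 + dst * (1 - src1).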
} else {
// First text rendering pass.
gl::BlendFuncSeparate(gl::ZERO, gl::ONE_MINUS_SRC_COLOR, gl::ZERO, gl::ONE);
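// Blend factors (ZERO, ONE_MINUS_SRC_COLOR) compute dst.rgb * (1 - src.rgb), erasing the
// glyph's subpixel coverage from the background while leaving dst alpha untouched.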
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
// Second text rendering pass.
self.program.set_rendering_pass(RenderingPass::SubpixelPass2);
gl::BlendFuncSeparate(gl::ONE_MINUS_DST_ALPHA, gl::ONE, gl::ZERO, gl::ONE);
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
// Third text rendering pass.
self.program.set_rendering_pass(RenderingPass::SubpixelPass3);
gl::BlendFuncSeparate(gl::ONE, gl::ONE, gl::ONE, gl::ONE_MINUS_SRC_ALPHA);
}
gl::DrawElements(gl::TRIANGLES, num_indices, gl::UNSIGNED_SHORT, ptr::null());
}
self.batch.clear();
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct TextVertex {
// Cell coordinates.
x: i16,
y: i16,
// Glyph coordinates.
glyph_x: i16,
glyph_y: i16,
// Offsets into Atlas.
u: f32,
v: f32,
// Color.
r: u8,
g: u8,
b: u8,
// Whether the glyph is colored.
colored: RenderingGlyphFlags,
// Background color.
bg_r: u8,
bg_g: u8,
bg_b: u8,
bg_a: u8,
}
#[derive(Debug)]
pub struct TextShaderProgram {
/// Shader program.
program: ShaderProgram,
/// Projection scale and offset uniform.
u_projection: GLint,
/// Rendering pass.
///
/// For dual source blending, there are 2 passes; one for background, another for text,
/// similar to the GLSL3 renderer.
///
/// If GL_EXT_blend_func_extended is not available, the rendering is split into 4 passes.
/// One is used for the background and the rest to perform subpixel text rendering according to
/// <https://github.com/servo/webrender/blob/master/webrender/doc/text-rendering.md>.
///
/// The text itself is thus rendered in the three remaining subpixel passes.
u_rendering_pass: GLint,
}
impl TextShaderProgram {
pub fn new(shader_version: ShaderVersion, dual_source_blending: bool) -> Result<Self, Error> {
let fragment_shader =
if dual_source_blending { &glsl3::TEXT_SHADER_F } else { &TEXT_SHADER_F };
let program = ShaderProgram::new(shader_version, None, TEXT_SHADER_V, fragment_shader)?;
Ok(Self {
u_projection: program.get_uniform_location(cstr!("projection"))?,
u_rendering_pass: program.get_uniform_location(cstr!("renderingPass"))?,
program,
})
}
fn set_rendering_pass(&self, rendering_pass: RenderingPass) {
unsafe { gl::Uniform1i(self.u_rendering_pass, rendering_pass as i32) }
}
}
impl TextShader for TextShaderProgram {
fn id(&self) -> GLuint {
self.program.id()
}
fn projection_uniform(&self) -> GLint {
self.u_projection
}
}
| {
if !self.batch.is_empty() {
self.render_batch();
}
} | identifier_body |