file_name (large_string, 4-69 chars) | prefix (large_string, 0-26.7k chars) | suffix (large_string, 0-24.8k chars) | middle (large_string, 0-2.12k chars) | fim_type (large_string, 4 classes)
---|---|---|---|---|
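Each row below is a single fill-in-the-middle (FIM) training example: one source file split into a `prefix`, a held-out `middle`, and a `suffix`, with `fim_type` recording how the hole was chosen. The sketch below shows how such a row is commonly assembled into a PSM-order (prefix-suffix-middle) training sequence; the sentinel strings are illustrative assumptions, since the dataset stores only the three text columns.

```rust
/// Assemble one dataset row into a PSM-order training string.
/// The sentinels are placeholders; real tokenizers define their own
/// special tokens for FIM training.
fn assemble_psm(prefix: &str, suffix: &str, middle: &str) -> String {
    const FIM_PREFIX: &str = "<|fim_prefix|>"; // assumed sentinel
    const FIM_SUFFIX: &str = "<|fim_suffix|>"; // assumed sentinel
    const FIM_MIDDLE: &str = "<|fim_middle|>"; // assumed sentinel
    format!("{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}{middle}")
}
```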
multi.rs | // Copyright 2021 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
//! The multi-threaded worker, which is used when there are multiple worker
//! threads configured. This worker parses session buffers to produce requests
//! and sends them to the storage worker. Responses from the storage worker are
//! then serialized back onto the session buffer.
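//!
//! Rough data flow for a single request:
//!
//! ```text
//! session buffer --parse--> Request --queue--> storage thread
//! session buffer <--compose-- Response <--queue-- storage thread
//! ```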
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser, | _storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
}
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
}
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.write_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
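// try to hand the request off to the storage thread, retrying a bounded
// number of times and waking the storage thread between attempts so it
// can drain the queue. if the queue is still full on the final retry,
// the session is closed and the request is dropped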
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn handle_new_sessions(&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
);
if let Ok(token) = self.poll.add_session(session) {
if pending > 0 {
// handle any pending data immediately
if self.handle_data(token).is_err() {
self.handle_error(token);
}
}
}
}
}
}
impl<Storage, Parser, Request, Response> EventLoop
for MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
fn handle_data(&mut self, token: Token) -> Result<(), std::io::Error> {
let _ = self.handle_session_read(token);
Ok(())
}
fn poll(&mut self) -> &mut Poll {
&mut self.poll
}
} | poll: Poll,
timeout: Duration, | random_line_split |
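The row above holds out two struct fields across a line boundary (`random_line_split`). The `fim_type` column takes four values in this preview; their apparent semantics, inferred from the rows and sketched as an enum (the dataset itself stores plain strings):

```rust
/// Hole-extraction strategies observed in this preview (illustrative only).
enum FimType {
    RandomLineSplit,  // a span cut at arbitrary line boundaries
    ConditionalBlock, // the `{ ... }` body of a conditional
    IdentifierBody,   // an entire function body
    IdentifierName,   // just a function's name
}
```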
multi.rs | // Copyright 2021 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
//! The multi-threaded worker, which is used when there are multiple worker
//! threads configured. This worker parses session buffers to produce requests
//! and sends them to the storage worker. Responses from the storage worker are
//! then serialized back onto the session buffer.
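//!
//! Rough data flow for a single request:
//!
//! ```text
//! session buffer --parse--> Request --queue--> storage thread
//! session buffer <--compose-- Response <--queue-- storage thread
//! ```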
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
_storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
}
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() |
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.write_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
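// try to hand the request off to the storage thread, retrying a bounded
// number of times and waking the storage thread between attempts so it
// can drain the queue. if the queue is still full on the final retry,
// the session is closed and the request is dropped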
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn handle_new_sessions(&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
);
if let Ok(token) = self.poll.add_session(session) {
if pending > 0 {
// handle any pending data immediately
if self.handle_data(token).is_err() {
self.handle_error(token);
}
}
}
}
}
}
impl<Storage, Parser, Request, Response> EventLoop
for MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
fn handle_data(&mut self, token: Token) -> Result<(), std::io::Error> {
let _ = self.handle_session_read(token);
Ok(())
}
fn poll(&mut self) -> &mut Poll {
&mut self.poll
}
}
| {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
} | conditional_block |
multi.rs | // Copyright 2021 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
//! The multi-threaded worker, which is used when there are multiple worker
//! threads configured. This worker parses session buffers to produce requests
//! and sends them to the storage worker. Responses from the storage worker are
//! then serialized back onto the session buffer.
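//!
//! Rough data flow for a single request:
//!
//! ```text
//! session buffer --parse--> Request --queue--> storage thread
//! session buffer <--compose-- Response <--queue-- storage thread
//! ```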
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
_storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> |
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
}
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.write_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
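// try to hand the request off to the storage thread, retrying a bounded
// number of times and waking the storage thread between attempts so it
// can drain the queue. if the queue is still full on the final retry,
// the session is closed and the request is dropped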
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn handle_new_sessions(&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
);
if let Ok(token) = self.poll.add_session(session) {
if pending > 0 {
// handle any pending data immediately
if self.handle_data(token).is_err() {
self.handle_error(token);
}
}
}
}
}
}
impl<Storage, Parser, Request, Response> EventLoop
for MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
fn handle_data(&mut self, token: Token) -> Result<(), std::io::Error> {
let _ = self.handle_session_read(token);
Ok(())
}
fn poll(&mut self) -> &mut Poll {
&mut self.poll
}
}
| {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
} | identifier_body |
multi.rs | // Copyright 2021 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
//! The multi-threaded worker, which is used when there are multiple worker
//! threads configured. This worker parses session buffers to produce requests
//! and sends them to the storage worker. Responses from the storage worker are
//! then serialized back onto the session buffer.
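//!
//! Rough data flow for a single request:
//!
//! ```text
//! session buffer --parse--> Request --queue--> storage thread
//! session buffer <--compose-- Response <--queue-- storage thread
//! ```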
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
_storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
}
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
}
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.write_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
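// try to hand the request off to the storage thread, retrying a bounded
// number of times and waking the storage thread between attempts so it
// can drain the queue. if the queue is still full on the final retry,
// the session is closed and the request is dropped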
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn | (&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
);
if let Ok(token) = self.poll.add_session(session) {
if pending > 0 {
// handle any pending data immediately
if self.handle_data(token).is_err() {
self.handle_error(token);
}
}
}
}
}
}
impl<Storage, Parser, Request, Response> EventLoop
for MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
fn handle_data(&mut self, token: Token) -> Result<(), std::io::Error> {
let _ = self.handle_session_read(token);
Ok(())
}
fn poll(&mut self) -> &mut Poll {
&mut self.poll
}
}
| handle_new_sessions | identifier_name |
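Whatever strategy picks the hole, the mechanical step is the same: slice the file into the text before, inside, and after the chosen span. A minimal sketch, assuming the span offsets come from the sampler or parser and fall on UTF-8 character boundaries:

```rust
/// Split a file into (prefix, middle, suffix) around a byte span.
fn carve(file: &str, span: std::ops::Range<usize>) -> (&str, &str, &str) {
    (&file[..span.start], &file[span.start..span.end], &file[span.end..])
}
```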
wasm.rs | gui_wasm";
pub const DEFAULT_TARGET_CRATE: &str = "app/gui";
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum ProfilingLevel {
#[default]
Objective,
Task,
Detail,
Debug,
}
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum LogLevel {
Error,
#[default]
Warn,
Info,
Debug,
Trace,
}
#[derive(clap::ArgEnum, Clone, Copy, Debug, PartialEq, Eq, strum::Display, strum::AsRefStr)]
#[strum(serialize_all = "kebab-case")]
pub enum Profile {
Dev,
Profile,
Release,
// Production,
}
impl From<Profile> for wasm_pack::Profile {
fn from(profile: Profile) -> Self {
match profile {
Profile::Dev => Self::Dev,
Profile::Profile => Self::Profile,
Profile::Release => Self::Release,
// Profile::Production => Self::Release,
}
}
}
impl Profile {
pub fn should_check_size(self) -> bool {
match self {
Profile::Dev => false,
Profile::Profile => false,
Profile::Release => true,
// Profile::Production => true,
}
}
pub fn extra_rust_options(self) -> Vec<String> {
match self {
// Profile::Production => ["-Clto=fat", "-Ccodegen-units=1", "-Cincremental=false"]
// .into_iter()
// .map(ToString::to_string)
// .collect(),
Profile::Dev | Profile::Profile | Profile::Release => vec![],
}
}
pub fn optimization_level(self) -> wasm_opt::OptimizationLevel {
match self {
Profile::Dev => wasm_opt::OptimizationLevel::O0,
Profile::Profile => wasm_opt::OptimizationLevel::O,
Profile::Release => wasm_opt::OptimizationLevel::O3,
}
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WASM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true);
info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true);
if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile,);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> | log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust locally installed version of shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.", ?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run first build of wasm manually, rather through cargo-watch.
// After it is completed, the cargo-watch gets spawned and this method yields the watcher.
// This forces esbuild watcher (whose setup requires the watcher artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, much neater workaround should be possible, if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn check(&self) -> Result {
Cargo
.cmd()?
.apply(&cargo::Command::Check)
.apply(&cargo::Options::Workspace)
.apply(&cargo::Options::Package(INTEGRATION_TESTS_CRATE_NAME.into()))
.apply(&cargo::Options::AllTargets)
.run_ok()
.await
}
pub async fn test(&self, repo_root: PathBuf, wasm: &[test::Browser], native: bool) -> Result {
async fn maybe_run<Fut: Future<Output = Result>>(
name: &str,
enabled: bool,
f: impl (FnOnce() -> Fut),
) -> Result {
if enabled {
info!("Will run {name} tests.");
f().await.context(format!("Running {name} tests."))
} else {
info!("Skipping {name} tests.");
Ok(())
}
}
maybe_run("native", native, async || {
Cargo
.cmd()?
.current_dir(repo_root.clone())
.apply(&cargo::Command::Test)
.apply(&cargo::Options::Workspace)
// Color needs to be passed to tests themselves separately.
// See: https://github.com/rust-lang/cargo/issues/1983
.arg("--")
.apply(&cargo::Color::Always)
.run_ok()
.await
})
.await?;
maybe_run("wasm",!wasm.is_empty(), || test::test_all(repo_root.clone(), wasm)).await?;
Ok(())
}
pub async fn integration_test(
&self,
source_root: PathBuf,
_project_manager: Option<Child>,
headless: bool,
additional_options: Vec<String>,
wasm_timeout: Option<Duration>,
) -> Result {
info!("Running Rust WASM test suite.");
use wasm_pack::TestFlags::*;
WasmPack
.cmd()?
.current_dir(source_root)
.set_env_opt(
env | {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts = ?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to the Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level, | identifier_body |
wasm.rs | "gui_wasm";
pub const DEFAULT_TARGET_CRATE: &str = "app/gui";
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum ProfilingLevel {
#[default]
Objective,
Task,
Detail,
Debug,
}
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum LogLevel {
Error,
#[default]
Warn,
Info,
Debug,
Trace,
}
#[derive(clap::ArgEnum, Clone, Copy, Debug, PartialEq, Eq, strum::Display, strum::AsRefStr)]
#[strum(serialize_all = "kebab-case")]
pub enum Profile {
Dev,
Profile,
Release,
// Production,
}
impl From<Profile> for wasm_pack::Profile {
fn from(profile: Profile) -> Self {
match profile {
Profile::Dev => Self::Dev,
Profile::Profile => Self::Profile,
Profile::Release => Self::Release,
// Profile::Production => Self::Release,
}
}
}
impl Profile {
pub fn should_check_size(self) -> bool {
match self {
Profile::Dev => false,
Profile::Profile => false,
Profile::Release => true,
// Profile::Production => true,
}
}
pub fn extra_rust_options(self) -> Vec<String> {
match self {
// Profile::Production => ["-Clto=fat", "-Ccodegen-units=1", "-Cincremental=false"]
// .into_iter()
// .map(ToString::to_string)
// .collect(),
Profile::Dev | Profile::Profile | Profile::Release => vec![],
}
}
pub fn optimization_level(self) -> wasm_opt::OptimizationLevel {
match self {
Profile::Dev => wasm_opt::OptimizationLevel::O0,
Profile::Profile => wasm_opt::OptimizationLevel::O,
Profile::Release => wasm_opt::OptimizationLevel::O3,
}
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WASM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true); |
if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile,);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts = ?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to the Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust locally installed version of shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.", ?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run first build of wasm manually, rather through cargo-watch.
// After it is completed, the cargo-watch gets spawned and this method yields the watcher.
// This forces esbuild watcher (whose setup requires the watcher artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, much neater workaround should be possible, if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn check(&self) -> Result {
Cargo
.cmd()?
.apply(&cargo::Command::Check)
.apply(&cargo::Options::Workspace)
.apply(&cargo::Options::Package(INTEGRATION_TESTS_CRATE_NAME.into()))
.apply(&cargo::Options::AllTargets)
.run_ok()
.await
}
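// Added note (assumption, not confirmed by this source): if the `cargo::Options`
// wrappers map one-to-one onto CLI flags, the call above is equivalent to
// `cargo check --workspace --package <integration-tests-crate> --all-targets`.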
pub async fn test(&self, repo_root: PathBuf, wasm: &[test::Browser], native: bool) -> Result {
async fn maybe_run<Fut: Future<Output = Result>>(
name: &str,
enabled: bool,
f: impl (FnOnce() -> Fut),
) -> Result {
if enabled {
info!("Will run {name} tests.");
f().await.context(format!("Running {name} tests."))
} else {
info!("Skipping {name} tests.");
Ok(())
}
}
maybe_run("native", native, async || {
Cargo
.cmd()?
.current_dir(repo_root.clone())
.apply(&cargo::Command::Test)
.apply(&cargo::Options::Workspace)
// Color needs to be passed to tests themselves separately.
// See: https://github.com/rust-lang/cargo/issues/1983
.arg("--")
.apply(&cargo::Color::Always)
.run_ok()
.await
})
.await?;
maybe_run("wasm",!wasm.is_empty(), || test::test_all(repo_root.clone(), wasm)).await?;
Ok(())
}
pub async fn integration_test(
&self,
source_root: PathBuf,
_project_manager: Option<Child>,
headless: bool,
additional_options: Vec<String>,
wasm_timeout: Option<Duration>,
) -> Result {
info!("Running Rust WASM test suite.");
use wasm_pack::TestFlags::*;
WasmPack
.cmd()?
.current_dir(source_root)
.set_env_opt(
env::W | info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true); | random_line_split |
wasm.rs | gui_wasm";
pub const DEFAULT_TARGET_CRATE: &str = "app/gui";
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum ProfilingLevel {
#[default]
Objective,
Task,
Detail,
Debug,
}
#[derive(
clap::ArgEnum,
Clone,
Copy,
Debug,
Default,
strum::Display,
strum::EnumString,
PartialEq,
Eq
)]
#[strum(serialize_all = "kebab-case")]
pub enum LogLevel {
Error,
#[default]
Warn,
Info,
Debug,
Trace,
}
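// Added note: with `strum(serialize_all = "kebab-case")`, these enums round-trip
// through `Display`/`FromStr` as values such as "objective" or "warn" — exactly
// the strings that the `--profiling-level` and `--wasm-log-level` CLI flags
// accept and that the watch command below passes along.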
#[derive(clap::ArgEnum, Clone, Copy, Debug, PartialEq, Eq, strum::Display, strum::AsRefStr)]
#[strum(serialize_all = "kebab-case")]
pub enum Profile {
Dev,
Profile,
Release,
// Production,
}
impl From<Profile> for wasm_pack::Profile {
fn from(profile: Profile) -> Self {
match profile {
Profile::Dev => Self::Dev,
Profile::Profile => Self::Profile,
Profile::Release => Self::Release,
// Profile::Production => Self::Release,
}
}
}
impl Profile {
pub fn should_check_size(self) -> bool {
match self {
Profile::Dev => false,
Profile::Profile => false,
Profile::Release => true,
// Profile::Production => true,
}
}
pub fn extra_rust_options(self) -> Vec<String> {
match self {
// Profile::Production => ["-Clto=fat", "-Ccodegen-units=1", "-Cincremental=false"]
// .into_iter()
// .map(ToString::to_string)
// .collect(),
Profile::Dev | Profile::Profile | Profile::Release => vec![],
}
}
pub fn optimization_level(self) -> wasm_opt::OptimizationLevel {
match self {
Profile::Dev => wasm_opt::OptimizationLevel::O0,
Profile::Profile => wasm_opt::OptimizationLevel::O,
Profile::Release => wasm_opt::OptimizationLevel::O3,
}
}
}
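// Added note: `O0`, `O`, and `O3` mirror the `wasm-opt` CLI flags `-O0`, `-O`,
// and `-O3` — no optimization for dev builds, the balanced default for
// profiling builds, and the most aggressive setting for release artifacts.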
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WASM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true);
info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true);
if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
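// Illustrative distillation (added; not part of the original source): the check
// above enforces the limit only when the profile opts into size checks AND
// profiling is at the default `Objective` level; every other path only warns.
// The same decision, as a standalone and easily testable function:
#[allow(dead_code)]
fn size_limit_verdict(
    checks_size: bool,
    profiling_is_objective: bool,
    compressed_bytes: u128,
    limit_bytes: u128,
) -> std::result::Result<(), String> {
    if !checks_size || !profiling_is_objective {
        // Warn-only paths: the limit is deliberately not enforced.
        return Ok(());
    }
    if compressed_bytes < limit_bytes {
        Ok(())
    } else {
        Err(format!("{compressed_bytes} B exceeds the {limit_bytes} B limit"))
    }
}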
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts =?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust a locally installed version of the shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.",?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run the first build of WASM manually, rather than through cargo-watch.
// Once it completes, cargo-watch is spawned and this method yields the watcher.
// This forces the esbuild watcher (whose setup requires the watcher's artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, a much neater solution should be possible if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle the verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct the object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn | (&self) -> Result {
Cargo
.cmd()?
.apply(&cargo::Command::Check)
.apply(&cargo::Options::Workspace)
.apply(&cargo::Options::Package(INTEGRATION_TESTS_CRATE_NAME.into()))
.apply(&cargo::Options::AllTargets)
.run_ok()
.await
}
pub async fn test(&self, repo_root: PathBuf, wasm: &[test::Browser], native: bool) -> Result {
async fn maybe_run<Fut: Future<Output = Result>>(
name: &str,
enabled: bool,
f: impl (FnOnce() -> Fut),
) -> Result {
if enabled {
info!("Will run {name} tests.");
f().await.context(format!("Running {name} tests."))
} else {
info!("Skipping {name} tests.");
Ok(())
}
}
maybe_run("native", native, async || {
Cargo
.cmd()?
.current_dir(repo_root.clone())
.apply(&cargo::Command::Test)
.apply(&cargo::Options::Workspace)
// Color needs to be passed to tests themselves separately.
// See: https://github.com/rust-lang/cargo/issues/1983
.arg("--")
.apply(&cargo::Color::Always)
.run_ok()
.await
})
.await?;
maybe_run("wasm",!wasm.is_empty(), || test::test_all(repo_root.clone(), wasm)).await?;
Ok(())
}
pub async fn integration_test(
&self,
source_root: PathBuf,
_project_manager: Option<Child>,
headless: bool,
additional_options: Vec<String>,
wasm_timeout: Option<Duration>,
) -> Result {
info!("Running Rust WASM test suite.");
use wasm_pack::TestFlags::*;
WasmPack
.cmd()?
.current_dir(source_root)
.set_env_opt(
env | check | identifier_name |
wasm.rs | #[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WASM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true);
info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true);
if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts =?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust a locally installed version of the shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.",?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run the first build of WASM manually, rather than through cargo-watch.
// Once it completes, cargo-watch is spawned and this method yields the watcher.
// This forces the esbuild watcher (whose setup requires the watcher's artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, a much neater solution should be possible if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle the verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct the object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn check(&self) -> Result {
Cargo
.cmd()?
.apply(&cargo::Command::Check)
.apply(&cargo::Options::Workspace)
.apply(&cargo::Options::Package(INTEGRATION_TESTS_CRATE_NAME.into()))
.apply(&cargo::Options::AllTargets)
.run_ok()
.await
}
pub async fn test(&self, repo_root: PathBuf, wasm: &[test::Browser], native: bool) -> Result {
async fn maybe_run<Fut: Future<Output = Result>>(
name: &str,
enabled: bool,
f: impl (FnOnce() -> Fut),
) -> Result {
if enabled {
info!("Will run {name} tests.");
f().await.context(format!("Running {name} tests."))
} else {
info!("Skipping {name} tests.");
Ok(())
}
}
maybe_run("native", native, async || {
Cargo
.cmd()?
.current_dir(repo_root.clone())
.apply(&cargo::Command::Test)
.apply(&cargo::Options::Workspace)
// Color needs to be passed to tests themselves separately.
// See: https://github.com/rust-lang/cargo/issues/1983
.arg("--")
.apply(&cargo::Color::Always)
.run_ok()
.await
})
.await?;
maybe_run("wasm",!wasm.is_empty(), || test::test_all(repo_root.clone(), wasm)).await?;
Ok(())
}
pub async fn integration_test(
&self,
source_root: PathBuf,
_project_manager: Option<Child>,
headless: bool,
additional_options: Vec<String>,
wasm_timeout: Option<Duration>,
) -> Result {
info!("Running Rust WASM test suite.");
use wasm_pack::TestFlags::*;
WasmPack
.cmd()?
.current_dir(source_root)
.set_env_opt(
env::WASM_BINDGEN_TEST_TIMEOUT,
wasm_timeout.map(|d| d.as_secs()).as_ref(),
)?
.test()
.apply_opt(headless.then_some(&Headless))
.apply(&test::BROWSER_FOR_WASM_TESTS)
.arg("integration-test")
.arg("--profile=integration-test")
.args(additional_options)
.run_ok()
.await
// PM will be automatically killed by dropping the handle.
}
/// Process "raw" WASM (as compiled) by optionally invoking wasm-opt.
pub async fn finalize_wasm(
wasm_opt_options: &[String],
skip_wasm_opt: bool,
profile: Profile,
temp_dist: &RepoRootDistWasm,
) -> Result {
let should_call_wasm_opt = {
if profile == Profile::Dev {
debug!("Skipping wasm-opt invocation, as it is not part of profile {profile}.");
false
} else if skip_wasm_opt {
debug!("Skipping wasm-opt invocation, as it was explicitly requested.");
false
} else {
true
}
};
if should_call_wasm_opt | {
let mut wasm_opt_command = WasmOpt.cmd()?;
let has_custom_opt_level = wasm_opt_options.iter().any(|opt| {
wasm_opt::OptimizationLevel::from_str(opt.trim_start_matches('-')).is_ok()
});
if !has_custom_opt_level {
wasm_opt_command.apply(&profile.optimization_level());
}
wasm_opt_command
.args(wasm_opt_options)
.arg(&temp_dist.pkg_wasm)
.apply(&wasm_opt::Output(&temp_dist.pkg_opt_wasm))
.run_ok()
.await?;
} | conditional_block |
|
server.rs | use std::thread;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::collections::VecDeque;
use std::io::BufReader;
// MIO
use mio::tcp::{listen, TcpListener, TcpStream};
use mio::util::Slab;
use mio::Socket;
use mio::buf::{RingBuf};
use mio::{Interest, PollOpt, NonBlock, Token, EventLoop, Handler, ReadHint};
use mio::buf::Buf;
use mio::{TryRead, TryWrite};
use rand::{self, Rng};
// Data structures.
use store::Store;
use replica::{Replica, Emit, Broadcast};
use state_machine::StateMachine;
// Cap'n Proto
use capnp::serialize_packed;
use capnp::{
MessageBuilder,
MessageReader,
ReaderOptions,
MallocMessageBuilder,
OwnedSpaceMessageReader,
};
use messages_capnp::{
rpc_request,
rpc_response,
client_request,
client_response,
request_vote_response,
append_entries_response,
append_entries_request,
};
use super::{Error, Result};
// MIO Tokens
const ELECTION_TIMEOUT: Token = Token(0);
const HEARTBEAT_TIMEOUT: Token = Token(1);
const LISTENER: Token = Token(2);
const ELECTION_MIN: u64 = 150;
const ELECTION_MAX: u64 = 300;
const HEARTBEAT_DURATION: u64 = 50;
const RINGBUF_SIZE: usize = 4096;
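// Added note: election timeouts are drawn uniformly from [ELECTION_MIN, ELECTION_MAX) ms
// so that followers rarely time out (and thus campaign) simultaneously, while the 50 ms
// heartbeat period stays well below ELECTION_MIN so a live leader always refreshes its
// followers before any of them starts an election.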
/// The Raft Distributed Consensus Algorithm requires two RPC calls to be available:
///
/// * `append_entries` which is used as both a heartbeat (with no payload) and the primary
/// interface for requests.
/// * `request_vote` which is used by candidates during campaigns to obtain a vote.
///
/// A `Server` acts as a replicated state machine. The server's role in the cluster depends on its
/// own status. It will maintain both volatile state (which can be safely lost) and persistent
/// state (which must be carefully stored and kept safe).
///
/// Currently, the `Server` API is not well defined. **We are looking for feedback and suggestions.**
pub struct Server<S, M> where S: Store, M: StateMachine {
replica: Replica<S, M>,
// Channels and Sockets
listener: NonBlock<TcpListener>,
connections: Slab<Connection>,
}
/// The implementation of the Server. In most use cases, creating a `Server` should just be
/// done via `::spawn()`.
impl<S, M> Server<S, M> where S: Store, M: StateMachine {
/// Creates a new Raft node with the cluster members specified.
///
/// # Arguments
///
/// * `addr` - The address of the new node.
/// * `peers` - The addresses of all peers in the Raft cluster.
/// * `store` - The persistent log store.
/// * `state_machine` - The client state machine to which client commands will be applied.
pub fn spawn(addr: SocketAddr,
peers: HashSet<SocketAddr>,
store: S,
state_machine: M) {
debug!("Spawning Server");
// Create an event loop
let mut event_loop = EventLoop::<Server<S, M>>::new().unwrap();
// Set up the socket and make it non-blocking.
let listener = listen(&addr).unwrap();
listener.set_reuseaddr(true).unwrap();
event_loop.register(&listener, LISTENER).unwrap();
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
event_loop.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
event_loop.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
let replica = Replica::new(addr, peers, store, state_machine);
// Fire up the thread.
thread::Builder::new().name(format!("Server {}", addr)).spawn(move || {
let mut raft_node = Server {
listener: listener,
replica: replica,
connections: Slab::new_starting_at(Token(2), 128),
};
event_loop.run(&mut raft_node).unwrap();
}).unwrap();
}
}
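// Usage sketch (added for illustration; concrete `Store`/`StateMachine`
// implementations are assumptions, so this stays generic over them):
#[allow(dead_code)]
fn example_spawn<S: Store, M: StateMachine>(store: S, state_machine: M) {
    let addr: SocketAddr = "127.0.0.1:7000".parse().unwrap();
    let peers: HashSet<SocketAddr> = ["127.0.0.1:7001", "127.0.0.1:7002"]
        .iter()
        .map(|a| a.parse().unwrap())
        .collect();
    // Fire-and-forget: `spawn` runs the event loop on its own named thread.
    Server::spawn(addr, peers, store, state_machine);
}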
impl<S, M> Handler for Server<S, M> where S: Store, M: StateMachine {
type Message = RingBuf;
type Timeout = Token;
/// A registered IoHandle has available writing space.
fn writable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Writeable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => unreachable!(),
tok => {
self.connections[tok].writable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered IoHandle has available data to read
fn readable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token, _hint: ReadHint) {
debug!("Readable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => {
let stream = match self.listener.accept().unwrap() {
Some(s) => s,
None => return, // Socket isn't quite ready.
}; // Result<Option<_>,_>
let conn = Connection::new(stream);
let tok = self.connections.insert(conn)
.ok().expect("Could not add connection to slab.");
// Register the connection
self.connections[tok].token = tok;
reactor.register_opt(&self.connections[tok].stream, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("Could not register socket with event loop.");
},
tok => {
self.connections[tok].readable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered timer has expired. This is either:
///
/// * An election timeout, when a `Follower` node has waited too long for a heartbeat and is going
/// to become a `Candidate`.
/// * A heartbeat timeout, when the `Leader` node needs to refresh its authority over the
/// followers. Initializes and sends an `AppendEntries` request to all followers.
fn timeout(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Timeout");
let mut message = MallocMessageBuilder::new_default();
let mut send_message = None;
match token {
ELECTION_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.election_timeout(request.init_request_vote());
// Set timeout.
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
reactor.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
},
HEARTBEAT_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.heartbeat_timeout(request.init_append_entries());
// Set Timeout
reactor.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
},
_ => unreachable!(),
}
// Send if necessary.
match send_message {
Some(Broadcast) => {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut message
).unwrap();
for connection in self.connections.iter_mut() {
connection.add_write(buf.clone());
}
},
None => (),
}
}
}
struct Connection {
stream: NonBlock<TcpStream>,
token: Token,
interest: Interest,
current_read: BufReader<RingBuf>,
current_write: BufReader<RingBuf>,
next_write: VecDeque<RingBuf>,
}
impl Connection {
/// Note: The caller must manually assign `token` to what is desired.
fn new(sock: NonBlock<TcpStream>) -> Connection {
Connection {
stream: sock,
token: Token(0), // Effectively a `null`. This needs to be assigned by the caller.
interest: Interest::hup(),
current_read: BufReader::new(RingBuf::new(4096)),
current_write: BufReader::new(RingBuf::new(4096)),
next_write: VecDeque::with_capacity(10),
}
}
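// Added note on the buffer pipeline: `current_write` is the buffer being drained
// into the socket right now; `next_write` is a FIFO of fully serialized messages
// waiting their turn. `writable` below rolls the queue forward one buffer at a
// time, so messages always go out in the order they were queued.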
/// A registered IoHandle has available writing space.
fn | <S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
// Attempt to write data.
// The `current_write` buffer will be advanced based on how much we wrote.
match self.stream.write(self.current_write.get_mut()) {
Ok(None) => {
// The write would block (kernel buffer is full); keep waiting for writable readiness.
self.interest.insert(Interest::writable());
},
Ok(Some(r)) => {
// We managed to write data!
match (self.current_write.get_ref().has_remaining(), self.next_write.is_empty()) {
// Need to write more of what we have.
(true, _) => (),
// Need to roll over.
(false, false) => self.current_write = BufReader::new(self.next_write.pop_front().unwrap()),
// We're done writing for now.
(false, true) => self.interest.remove(Interest::writable()),
}
},
Err(e) => return Err(Error::from(e)),
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
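// Added note: `PollOpt::oneshot()` disarms interest after each delivered event,
// so the handler must `reregister` every time (as above) or the connection would
// stop receiving readiness notifications.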
/// A registered IoHandle has available data to read.
/// This does not necessarily mean that there is an entire packed item on the stream. We could
/// get some, all of it, or none. We'll use the buffer to read in until we can find one.
fn readable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
let mut read = 0;
match self.stream.read(self.current_read.get_mut()) {
Ok(Some(r)) => {
// Just read `r` bytes.
read = r;
},
Ok(None) => panic!("We just got readable, but were unable to read from the socket?"),
Err(e) => return Err(Error::from(e)),
};
if read > 0 {
match serialize_packed::read_message(&mut self.current_read, ReaderOptions::new()) {
// We have something reasonably interesting in the buffer!
Ok(reader) => {
self.handle_reader(reader, event_loop, replica);
},
// It's not read entirely yet.
// Should roll back, pending changes to bytes upstream.
// TODO: This was fixed.
Err(_) => unimplemented!(),
}
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// This is called when there is a full reader available in the buffer.
/// It handles what to do with the data.
fn handle_reader<S, M>(&mut self, reader: OwnedSpaceMessageReader,
event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
where S: Store, M: StateMachine {
let mut builder_message = MallocMessageBuilder::new_default();
let from = self.stream.peer_addr().unwrap();
if let Ok(request) = reader.get_root::<rpc_request::Reader>() {
match request.which().unwrap() {
// TODO: Move these into replica?
rpc_request::Which::AppendEntries(Ok(call)) => {
let builder = builder_message.init_root::<append_entries_response::Builder>();
match replica.append_entries_request(from, call, builder) {
Some(Emit) => {
// Special circumstance detection
unimplemented!();
},
None => (),
}
},
rpc_request::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<request_vote_response::Builder>();
replica.request_vote_request(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
} else if let Ok(response) = reader.get_root::<rpc_response::Reader>() {
// We won't be responding. This is already a response.
match response.which().unwrap() {
rpc_response::Which::AppendEntries(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.append_entries_response(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
rpc_response::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.request_vote_response(from, call, builder)
};
match respond {
Some(Broadcast) => {
// Won an election!
self.broadcast(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
}
} else if let Ok(client_req) = reader.get_root::<client_request::Reader>() {
let mut should_die = false;
// We will be responding.
match client_req.which().unwrap() {
client_request::Which::Append(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_append(from, call, builder)
};
match respond {
Some(Emit) => {
self.emit(builder_message);
},
None => (),
}
},
client_request::Which::Die(Ok(call)) => {
should_die = true;
let mut builder = builder_message.init_root::<client_response::Builder>();
builder.set_success(());
self.interest.insert(Interest::writable());
debug!("Got a Die request from Client({}). Reason: {}", from, call);
},
client_request::Which::LeaderRefresh(()) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_leader_refresh(from, builder)
};
match respond {
Some(Emit) => {
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
// Do this here so that we can send the response.
if should_die {
panic!("Got a Die request.");
}
} else {
// It's something we don't understand.
unimplemented!();
}
}
fn broadcast(&mut self, builder: MallocMessageBuilder) {
unimplemented!();
}
/// Push the new message into `self.next_write`. This does not actually send the message, it
/// just queues it up.
pub fn emit(&mut self, mut builder: MallocMessageBuilder) {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut builder
).unwrap();
self.add_write(buf);
}
/// This queues a byte buffer into the write queue. This is used primarily when message has
/// already been packed.
pub fn add_write(&mut self, buf: RingBuf) {
self.next_write.push_back(buf);
}
}
| writable | identifier_name |
server.rs | use std::thread;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::collections::VecDeque;
use std::io::BufReader;
// MIO
use mio::tcp::{listen, TcpListener, TcpStream};
use mio::util::Slab;
use mio::Socket;
use mio::buf::{RingBuf};
use mio::{Interest, PollOpt, NonBlock, Token, EventLoop, Handler, ReadHint};
use mio::buf::Buf;
use mio::{TryRead, TryWrite};
use rand::{self, Rng};
// Data structures.
use store::Store;
use replica::{Replica, Emit, Broadcast};
use state_machine::StateMachine;
// Cap'n Proto
use capnp::serialize_packed;
use capnp::{
MessageBuilder,
MessageReader,
ReaderOptions,
MallocMessageBuilder,
OwnedSpaceMessageReader,
};
use messages_capnp::{
rpc_request,
rpc_response,
client_request,
client_response,
request_vote_response,
append_entries_response,
append_entries_request,
};
use super::{Error, Result};
// MIO Tokens
const ELECTION_TIMEOUT: Token = Token(0);
const HEARTBEAT_TIMEOUT: Token = Token(1);
const LISTENER: Token = Token(2);
const ELECTION_MIN: u64 = 150;
const ELECTION_MAX: u64 = 300;
const HEARTBEAT_DURATION: u64 = 50;
const RINGBUF_SIZE: usize = 4096;
/// The Raft Distributed Consensus Algorithm requires two RPC calls to be available:
///
/// * `append_entries` which is used as both a heartbeat (with no payload) and the primary
/// interface for requests.
/// * `request_vote` which is used by candidates during campaigns to obtain a vote.
///
/// A `Server` acts as a replicated state machine. The server's role in the cluster depends on its
/// own status. It will maintain both volatile state (which can be safely lost) and persistent
/// state (which must be carefully stored and kept safe).
///
/// Currently, the `Server` API is not well defined. **We are looking for feedback and suggestions.**
pub struct Server<S, M> where S: Store, M: StateMachine {
replica: Replica<S, M>,
// Channels and Sockets
listener: NonBlock<TcpListener>,
connections: Slab<Connection>,
}
/// The implementation of the Server. In most use cases, creating a `Server` should just be
/// done via `::spawn()`.
impl<S, M> Server<S, M> where S: Store, M: StateMachine {
/// Creates a new Raft node with the cluster members specified.
///
/// # Arguments
///
/// * `addr` - The address of the new node.
/// * `peers` - The addresses of all peers in the Raft cluster.
/// * `store` - The persistent log store.
/// * `state_machine` - The client state machine to which client commands will be applied.
pub fn spawn(addr: SocketAddr,
peers: HashSet<SocketAddr>,
store: S,
state_machine: M) {
debug!("Spawning Server");
// Create an event loop
let mut event_loop = EventLoop::<Server<S, M>>::new().unwrap();
// Set up the socket and make it non-blocking.
let listener = listen(&addr).unwrap();
listener.set_reuseaddr(true).unwrap();
event_loop.register(&listener, LISTENER).unwrap();
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
event_loop.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
event_loop.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
let replica = Replica::new(addr, peers, store, state_machine);
// Fire up the thread.
thread::Builder::new().name(format!("Server {}", addr)).spawn(move || {
let mut raft_node = Server {
listener: listener,
replica: replica,
connections: Slab::new_starting_at(Token(2), 128),
};
event_loop.run(&mut raft_node).unwrap();
}).unwrap();
}
}
impl<S, M> Handler for Server<S, M> where S: Store, M: StateMachine {
type Message = RingBuf;
type Timeout = Token;
/// A registered IoHandle has available writing space.
fn writable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Writeable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => unreachable!(),
tok => {
self.connections[tok].writable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered IoHandle has available data to read
fn readable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token, _hint: ReadHint) {
debug!("Readable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => {
let stream = match self.listener.accept().unwrap() {
Some(s) => s,
None => return, // Socket isn't quite ready.
}; // Result<Option<_>,_>
let conn = Connection::new(stream);
let tok = self.connections.insert(conn)
.ok().expect("Could not add connection to slab.");
// Register the connection
self.connections[tok].token = tok;
reactor.register_opt(&self.connections[tok].stream, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("Could not register socket with event loop.");
},
tok => {
self.connections[tok].readable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered timer has expired. This is either:
///
/// * An election timeout, when a `Follower` node has waited too long for a heartbeat and is going
/// to become a `Candidate`.
/// * A heartbeat timeout, when the `Leader` node needs to refresh its authority over the
/// followers. Initializes and sends an `AppendEntries` request to all followers.
fn timeout(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Timeout");
let mut message = MallocMessageBuilder::new_default();
let mut send_message = None;
match token {
ELECTION_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.election_timeout(request.init_request_vote());
// Set timeout.
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
reactor.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
},
HEARTBEAT_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.heartbeat_timeout(request.init_append_entries());
// Set Timeout
reactor.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
},
_ => unreachable!(),
}
// Send if necessary.
match send_message {
Some(Broadcast) => {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut message
).unwrap();
for connection in self.connections.iter_mut() {
connection.add_write(buf.clone());
}
},
None => (),
}
}
}
struct Connection {
stream: NonBlock<TcpStream>,
token: Token,
interest: Interest,
current_read: BufReader<RingBuf>,
current_write: BufReader<RingBuf>,
next_write: VecDeque<RingBuf>,
}
impl Connection {
/// Note: The caller must manually assign `token` to what is desired.
fn new(sock: NonBlock<TcpStream>) -> Connection {
Connection {
stream: sock,
token: Token(0), // Effectively a `null`. This needs to be assigned by the caller.
interest: Interest::hup(),
current_read: BufReader::new(RingBuf::new(4096)),
current_write: BufReader::new(RingBuf::new(4096)),
next_write: VecDeque::with_capacity(10),
}
}
/// A registered IoHandle has available writing space.
fn writable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
// Attempt to write data.
// The `current_write` buffer will be advanced based on how much we wrote.
match self.stream.write(self.current_write.get_mut()) {
Ok(None) => {
// The write would block (kernel buffer is full); keep waiting for writable readiness.
self.interest.insert(Interest::writable());
},
Ok(Some(r)) => {
// We managed to write data!
match (self.current_write.get_ref().has_remaining(), self.next_write.is_empty()) {
// Need to write more of what we have.
(true, _) => (),
// Need to roll over.
(false, false) => self.current_write = BufReader::new(self.next_write.pop_front().unwrap()),
// We're done writing for now.
(false, true) => self.interest.remove(Interest::writable()),
}
},
Err(e) => return Err(Error::from(e)),
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// A registered IoHandle has available data to read.
/// This does not necessarily mean that there is an entire packed item on the stream. We could
/// get some, all of it, or none. We'll use the buffer to read in until we can find one. | -> Result<()>
where S: Store, M: StateMachine {
let mut read = 0;
match self.stream.read(self.current_read.get_mut()) {
Ok(Some(r)) => {
// Just read `r` bytes.
read = r;
},
Ok(None) => panic!("We just got readable, but were unable to read from the socket?"),
Err(e) => return Err(Error::from(e)),
};
if read > 0 {
match serialize_packed::read_message(&mut self.current_read, ReaderOptions::new()) {
// We have something reasonably interesting in the buffer!
Ok(reader) => {
self.handle_reader(reader, event_loop, replica);
},
// It's not read entirely yet.
// Should roll back, pending changes to bytes upstream.
// TODO: This was fixed.
Err(_) => unimplemented!(),
}
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// This is called when there is a full reader available in the buffer.
/// It handles what to do with the data.
fn handle_reader<S, M>(&mut self, reader: OwnedSpaceMessageReader,
event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
where S: Store, M: StateMachine {
let mut builder_message = MallocMessageBuilder::new_default();
let from = self.stream.peer_addr().unwrap();
if let Ok(request) = reader.get_root::<rpc_request::Reader>() {
match request.which().unwrap() {
// TODO: Move these into replica?
rpc_request::Which::AppendEntries(Ok(call)) => {
let builder = builder_message.init_root::<append_entries_response::Builder>();
match replica.append_entries_request(from, call, builder) {
Some(Emit) => {
// Special circumstance detection
unimplemented!();
},
None => (),
}
},
rpc_request::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<request_vote_response::Builder>();
replica.request_vote_request(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
} else if let Ok(response) = reader.get_root::<rpc_response::Reader>() {
// We won't be responding. This is already a response.
match response.which().unwrap() {
rpc_response::Which::AppendEntries(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.append_entries_response(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
rpc_response::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.request_vote_response(from, call, builder)
};
match respond {
Some(Broadcast) => {
// Won an election!
self.broadcast(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
}
} else if let Ok(client_req) = reader.get_root::<client_request::Reader>() {
let mut should_die = false;
// We will be responding.
match client_req.which().unwrap() {
client_request::Which::Append(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_append(from, call, builder)
};
match respond {
Some(Emit) => {
self.emit(builder_message);
},
None => (),
}
},
client_request::Which::Die(Ok(call)) => {
should_die = true;
let mut builder = builder_message.init_root::<client_response::Builder>();
builder.set_success(());
self.interest.insert(Interest::writable());
debug!("Got a Die request from Client({}). Reason: {}", from, call);
},
client_request::Which::LeaderRefresh(()) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_leader_refresh(from, builder)
};
match respond {
Some(Emit) => {
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
// Do this here so that we can send the response.
if should_die {
panic!("Got a Die request.");
}
} else {
// It's something we don't understand.
unimplemented!();
}
}
fn broadcast(&mut self, builder: MallocMessageBuilder) {
unimplemented!();
}
/// Push the new message into `self.next_write`. This does not actually send the message, it
/// just queues it up.
pub fn emit(&mut self, mut builder: MallocMessageBuilder) {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut builder
).unwrap();
self.add_write(buf);
}
/// This queues a byte buffer into the write queue. This is used primarily when message has
/// already been packed.
pub fn add_write(&mut self, buf: RingBuf) {
self.next_write.push_back(buf);
}
} | fn readable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>) | random_line_split |
server.rs | use std::thread;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::collections::VecDeque;
use std::io::BufReader;
// MIO
use mio::tcp::{listen, TcpListener, TcpStream};
use mio::util::Slab;
use mio::Socket;
use mio::buf::{RingBuf};
use mio::{Interest, PollOpt, NonBlock, Token, EventLoop, Handler, ReadHint};
use mio::buf::Buf;
use mio::{TryRead, TryWrite};
use rand::{self, Rng};
// Data structures.
use store::Store;
use replica::{Replica, Emit, Broadcast};
use state_machine::StateMachine;
// Cap'n Proto
use capnp::serialize_packed;
use capnp::{
MessageBuilder,
MessageReader,
ReaderOptions,
MallocMessageBuilder,
OwnedSpaceMessageReader,
};
use messages_capnp::{
rpc_request,
rpc_response,
client_request,
client_response,
request_vote_response,
append_entries_response,
append_entries_request,
};
use super::{Error, Result};
// MIO Tokens
const ELECTION_TIMEOUT: Token = Token(0);
const HEARTBEAT_TIMEOUT: Token = Token(1);
const LISTENER: Token = Token(2);
const ELECTION_MIN: u64 = 150;
const ELECTION_MAX: u64 = 300;
const HEARTBEAT_DURATION: u64 = 50;
const RINGBUF_SIZE: usize = 4096;
/// The Raft Distributed Consensus Algorithm requires two RPC calls to be available:
///
/// * `append_entries` which is used as both a heartbeat (with no payload) and the primary
/// interface for requests.
/// * `request_vote` which is used by candidates during campaigns to obtain a vote.
///
/// A `Server` acts as a replicated state machine. The server's role in the cluster depends on its
/// own status. It will maintain both volatile state (which can be safely lost) and persistent
/// state (which must be carefully stored and kept safe).
///
/// Currently, the `Server` API is not well defined. **We are looking for feedback and suggestions.**
pub struct Server<S, M> where S: Store, M: StateMachine {
replica: Replica<S, M>,
// Channels and Sockets
listener: NonBlock<TcpListener>,
connections: Slab<Connection>,
}
/// The implementation of the Server. In most use cases, creating a `Server` should just be
/// done via `::spawn()`.
impl<S, M> Server<S, M> where S: Store, M: StateMachine {
/// Creates a new Raft node with the cluster members specified.
///
/// # Arguments
///
/// * `addr` - The address of the new node.
/// * `peers` - The addresses of all peers in the Raft cluster.
/// * `store` - The persistent log store.
/// * `state_machine` - The client state machine to which client commands will be applied.
pub fn spawn(addr: SocketAddr,
peers: HashSet<SocketAddr>,
store: S,
state_machine: M) {
debug!("Spawning Server");
// Create an event loop
let mut event_loop = EventLoop::<Server<S, M>>::new().unwrap();
// Set up the socket and make it non-blocking.
let listener = listen(&addr).unwrap();
listener.set_reuseaddr(true).unwrap();
event_loop.register(&listener, LISTENER).unwrap();
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
event_loop.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
event_loop.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
let replica = Replica::new(addr, peers, store, state_machine);
// Fire up the thread.
thread::Builder::new().name(format!("Server {}", addr)).spawn(move || {
let mut raft_node = Server {
listener: listener,
replica: replica,
connections: Slab::new_starting_at(Token(2), 128),
};
event_loop.run(&mut raft_node).unwrap();
}).unwrap();
}
}
impl<S, M> Handler for Server<S, M> where S: Store, M: StateMachine {
type Message = RingBuf;
type Timeout = Token;
/// A registered IoHandle has available writing space.
fn writable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Writeable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => unreachable!(),
tok => {
self.connections[tok].writable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered IoHandle has available data to read
fn readable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token, _hint: ReadHint) {
debug!("Readable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => {
let stream = match self.listener.accept().unwrap() {
Some(s) => s,
None => return, // Socket isn't quite ready.
}; // Result<Option<_>,_>
let conn = Connection::new(stream);
let tok = self.connections.insert(conn)
.ok().expect("Could not add connection to slab.");
// Register the connection
self.connections[tok].token = tok;
reactor.register_opt(&self.connections[tok].stream, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("Could not register socket with event loop.");
},
tok => {
self.connections[tok].readable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered timer has expired. This is either:
///
/// * An election timeout, when a `Follower` node has waited too long for a heartbeat and is going
/// to become a `Candidate`.
/// * A heartbeat timeout, when the `Leader` node needs to refresh it's authority over the
/// followers. Initializes and sends an `AppendEntries` request to all followers.
fn timeout(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Timeout");
let mut message = MallocMessageBuilder::new_default();
let mut send_message = None;
match token {
ELECTION_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.election_timeout(request.init_request_vote());
// Set timeout.
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
reactor.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
},
HEARTBEAT_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.heartbeat_timeout(request.init_append_entries());
// Set Timeout
reactor.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
},
_ => unreachable!(),
}
// Send if necessary.
match send_message {
Some(Broadcast) => {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut message
).unwrap();
for connection in self.connections.iter_mut() {
connection.add_write(buf.clone());
}
},
None => (),
}
}
}
struct Connection {
stream: NonBlock<TcpStream>,
token: Token,
interest: Interest,
current_read: BufReader<RingBuf>,
current_write: BufReader<RingBuf>,
next_write: VecDeque<RingBuf>,
}
impl Connection {
/// Note: The caller must manually assign `token` to what is desired.
fn new(sock: NonBlock<TcpStream>) -> Connection {
Connection {
stream: sock,
token: Token(0), // Effectively a `null`. This needs to be assigned by the caller.
interest: Interest::hup(),
current_read: BufReader::new(RingBuf::new(4096)),
current_write: BufReader::new(RingBuf::new(4096)),
next_write: VecDeque::with_capacity(10),
}
}
/// A registered IoHandle has available writing space.
fn writable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
// Attempt to write data.
// The `current_write` buffer will be advanced based on how much we wrote.
match self.stream.write(self.current_write.get_mut()) {
Ok(None) => {
// This is a buffer flush. WOULDBLOCK
self.interest.insert(Interest::writable());
},
Ok(Some(r)) => {
// We managed to write data!
match (self.current_write.get_ref().has_remaining(), self.next_write.is_empty()) {
// Need to write more of what we have.
(true, _) => (),
// Need to roll over.
(false, false) => self.current_write = BufReader::new(self.next_write.pop_front().unwrap()),
// We're done writing for now.
(false, true) => self.interest.remove(Interest::writable()),
}
},
Err(e) => return Err(Error::from(e)),
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// A registered IoHandle has available data to read.
/// This does not necessarily mean that there is an entire packed item on the stream. We could
/// get some, all of it, or none. We'll use the buffer to read in until we can find one.
fn readable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
let mut read = 0;
match self.stream.read(self.current_read.get_mut()) {
Ok(Some(r)) => {
// Just read `r` bytes.
read = r;
},
Ok(None) => panic!("We just got readable, but were unable to read from the socket?"),
Err(e) => return Err(Error::from(e)),
};
if read > 0 {
match serialize_packed::read_message(&mut self.current_read, ReaderOptions::new()) {
// We have something reasonably interesting in the buffer!
Ok(reader) => {
self.handle_reader(reader, event_loop, replica);
},
// It's not read entirely yet.
// Should roll back, pending changes to bytes upstream.
// TODO: This was fixed.
Err(_) => unimplemented!(),
}
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// This is called when there is a full reader available in the buffer.
/// It handles what to do with the data.
fn handle_reader<S, M>(&mut self, reader: OwnedSpaceMessageReader,
event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
where S: Store, M: StateMachine {
let mut builder_message = MallocMessageBuilder::new_default();
let from = self.stream.peer_addr().unwrap();
if let Ok(request) = reader.get_root::<rpc_request::Reader>() {
match request.which().unwrap() {
// TODO: Move these into replica?
rpc_request::Which::AppendEntries(Ok(call)) => {
let builder = builder_message.init_root::<append_entries_response::Builder>();
match replica.append_entries_request(from, call, builder) {
Some(Emit) => {
// Special Circumstance Detection
unimplemented!();
},
None => (),
}
},
rpc_request::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<request_vote_response::Builder>();
replica.request_vote_request(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
} else if let Ok(response) = reader.get_root::<rpc_response::Reader>() {
// We won't be responding. This is already a response.
match response.which().unwrap() {
rpc_response::Which::AppendEntries(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.append_entries_response(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
rpc_response::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.request_vote_response(from, call, builder)
};
match respond {
Some(Broadcast) => | ,
None => (),
}
},
_ => unimplemented!(),
}
} else if let Ok(client_req) = reader.get_root::<client_request::Reader>() {
let mut should_die = false;
// We will be responding.
match client_req.which().unwrap() {
client_request::Which::Append(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_append(from, call, builder)
};
match respond {
Some(Emit) => {
self.emit(builder_message);
},
None => (),
}
},
client_request::Which::Die(Ok(call)) => {
should_die = true;
let mut builder = builder_message.init_root::<client_response::Builder>();
builder.set_success(());
self.interest.insert(Interest::writable());
debug!("Got a Die request from Client({}). Reason: {}", from, call);
},
client_request::Which::LeaderRefresh(()) => {
let respond = {
let builder = builder_message.init_root::<client_response::Builder>();
replica.client_leader_refresh(from, builder)
};
match respond {
Some(Emit) => {
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
// Do this here so that we can send the response.
if should_die {
panic!("Got a Die request.");
}
} else {
// It's something we don't understand.
unimplemented!();
}
}
fn broadcast(&mut self, builder: MallocMessageBuilder) {
unimplemented!();
}
/// Push the new message into `self.next_write`. This does not actually send the message, it
/// just queues it up.
pub fn emit(&mut self, mut builder: MallocMessageBuilder) {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut builder
).unwrap();
self.add_write(buf);
}
/// This queues a byte buffer into the write queue. This is used primarily when a message has
/// already been packed.
pub fn add_write(&mut self, buf: RingBuf) {
self.next_write.push_back(buf);
}
}
| {
// Won an election!
self.broadcast(builder_message);
} | conditional_block |
group.rs |
left * q.j,
-left * q.i,
q.w,
],
))
}
/// Computes the [direct sum](https://en.wikipedia.org/wiki/Block_matrix#Direct_sum)
/// of two matrices.
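///
/// # Example
///
/// A sketch (assuming `Matrix<f64>` is nalgebra's `DMatrix<f64>`): the
/// direct sum of a 2×2 and a 1×1 matrix is a 3×3 block-diagonal matrix.
///
/// ```ignore
/// let a = Matrix::from_row_slice(2, 2, &[1.0, 2.0, 3.0, 4.0]);
/// let b = Matrix::from_row_slice(1, 1, &[5.0]);
/// let s = direct_sum(a, b);
/// // s = [1 2 0]
/// //     [3 4 0]
/// //     [0 0 5]
/// assert_eq!(s[(2, 2)], 5.0);
/// assert_eq!(s[(0, 2)], 0.0);
/// ```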
fn direct_sum(mat1: Matrix<f64>, mat2: Matrix<f64>) -> Matrix<f64> {
let dim1 = mat1.nrows();
let dim = dim1 + mat2.nrows();
Matrix::from_fn(dim, dim, |i, j| {
if i < dim1 {
if j < dim1 {
mat1[(i, j)]
} else {
0.0
}
} else if j >= dim1 {
mat2[(i - dim1, j - dim1)]
} else {
0.0
}
})
}
/// An iterator such that `dyn` objects using it can be cloned. Used to get
/// around orphan rules.
trait GroupIter: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone {}
impl<T: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone> GroupIter for T {}
dyn_clone::clone_trait_object!(GroupIter);
/// A [group](https://en.wikipedia.org/wiki/Group_(mathematics)) of matrices,
/// acting on a space of a certain dimension.
#[derive(Clone)]
pub struct Group {
/// The dimension of the matrices of the group. Stored separately so that
/// the iterator doesn't have to be peekable.
dim: usize,
/// The underlying iterator, which actually outputs the matrices.
iter: Box<dyn GroupIter>,
}
impl Iterator for Group {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
impl Group {
/// Gets all of the elements of the group. Consumes the iterator.
pub fn elements(self) -> Vec<Matrix<f64>> {
self.collect()
}
/// Gets the number of elements of the group. Consumes the iterator.
pub fn order(self) -> usize {
self.count()
}
pub fn from_gens(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
Self {
dim,
iter: Box::new(GenIter::new(dim, gens)),
}
}
/// Builds the rotation subgroup of a group.
pub fn rotations(self) -> Self {
// The determinant might not be exactly 1, so we're extra lenient and
// just test for positive determinants.
Self {
dim: self.dim,
iter: Box::new(self.filter(|el| el.determinant() > 0.0)),
}
}
/// Builds an iterator over the set of either left or a right quaternions
/// from a 3D group. **These won't actually generate a group,** as they
/// don't contain central inversion.
fn quaternions(self, left: bool) -> Box<dyn GroupIter> {
if self.dim != 3 {
panic!("Quaternions can only be generated from 3D matrices.");
}
Box::new(
self.rotations()
.map(move |el| quat_to_mat(mat_to_quat(el), left)),
)
}
/// Returns the swirl symmetry group of two 3D groups.
pub fn swirl(g: Self, h: Self) -> Self {
if g.dim != 3 {
panic!("g must be a group of 3D matrices.");
}
if h.dim != 3 {
panic!("h must be a group of 3D matrices.");
}
Self {
dim: 4,
iter: Box::new(
itertools::iproduct!(g.quaternions(true), h.quaternions(false))
.map(|(mat1, mat2)| {
let mat = mat1 * mat2;
std::iter::once(mat.clone()).chain(std::iter::once(-mat))
})
.flatten(),
),
}
}
/// Returns a new group whose elements have all been generated already, so
/// that they can be used multiple times quickly.
pub fn cache(self) -> Self {
self.elements().into()
}
/// Returns the exact same group, but now asserts that each generated
/// element has the appropriate dimension. Used for debugging purposes.
pub fn debug(self) -> Self |
/// Generates the trivial group of a certain dimension.
pub fn trivial(dim: usize) -> Self {
Self {
dim,
iter: Box::new(std::iter::once(Matrix::identity(dim, dim))),
}
}
/// Generates the group with the identity and a central inversion of a
/// certain dimension.
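///
/// # Example
///
/// A small sketch: in any dimension this group is `{I, -I}`, so its
/// order should be 2.
///
/// ```ignore
/// assert_eq!(Group::central_inv(3).order(), 2);
/// ```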
pub fn central_inv(dim: usize) -> Self {
Self {
dim,
iter: Box::new(
vec![Matrix::identity(dim, dim), -Matrix::identity(dim, dim)].into_iter(),
),
}
}
/// Generates a step prism group from a base group and a homomorphism into
/// another group.
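///
/// # Example
///
/// A sketch: with the identity map as the homomorphism, each element is
/// simply duplicated along the diagonal, so the order is unchanged.
///
/// ```ignore
/// let step = Group::step(Group::central_inv(2), |mat| mat);
/// assert_eq!(step.order(), 2);
/// ```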
pub fn step(g: Self, f: impl Fn(Matrix<f64>) -> Matrix<f64> + Clone + 'static) -> Self {
let dim = g.dim * 2;
Self {
dim,
iter: Box::new(g.map(move |mat| {
let clone = mat.clone();
direct_sum(clone, f(mat))
})),
}
}
/// Generates a Coxeter group from its [`CoxMatrix`], or returns `None` if
/// the group doesn't fit as a matrix group in spherical space.
pub fn cox_group(cox: CoxMatrix) -> Option<Self> {
Some(Self {
dim: cox.nrows(),
iter: Box::new(GenIter::from_cox_mat(cox)?),
})
}
/// Generates the direct product of two groups. Uses the specified function
/// to uniquely map the ordered pairs of matrices into other matrices.
pub fn fn_product(
g: Self,
h: Self,
dim: usize,
product: (impl Fn((Matrix<f64>, Matrix<f64>)) -> Matrix<f64> + Clone + 'static),
) -> Self {
Self {
dim,
iter: Box::new(itertools::iproduct!(g, h).map(product)),
}
}
/// Returns the group determined by all products between elements of the
/// first and the second group. **Is meant only for groups that commute with
/// one another.**
pub fn matrix_product(g: Self, h: Self) -> Option<Self> {
// The two matrices must have the same size.
if g.dim != h.dim {
return None;
}
let dim = g.dim;
Some(Self::fn_product(g, h, dim, |(mat1, mat2)| mat1 * mat2))
}
/// Calculates the direct product of two groups. Pairs of matrices are then
/// mapped to their direct sum.
pub fn direct_product(g: Self, h: Self) -> Self {
let dim = g.dim + h.dim;
Self::fn_product(g, h, dim, |(mat1, mat2)| direct_sum(mat1, mat2))
}
/// Generates the [wreath product](https://en.wikipedia.org/wiki/Wreath_product)
/// of two symmetry groups.
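///
/// # Example
///
/// A sketch of the expected order, `|G ≀ H| = |G|^|H| · |H|`: with both
/// `G` and `H` of order 2, the wreath product should yield 2² · 2 = 8
/// elements.
///
/// ```ignore
/// let g = Group::central_inv(1);
/// let h = Group::central_inv(1);
/// assert_eq!(Group::wreath(g, h).order(), 8);
/// ```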
pub fn wreath(g: Self, h: Self) -> Self {
let h = h.elements();
let h_len = h.len();
let g_dim = g.dim;
let dim = g_dim * h_len;
// Indexes each element in h.
let mut h_indices = BTreeMap::new();
for (i, h_el) in h.iter().enumerate() {
h_indices.insert(OrdMatrix::new(h_el.clone()), i);
}
// Converts h into a permutation group.
let mut permutations = Vec::with_capacity(h_len);
for h_el_1 in &h {
let mut perm = Vec::with_capacity(h.len());
for h_el_2 in &h {
perm.push(
*h_indices
.get(&OrdMatrix::new(h_el_1 * h_el_2))
.expect("h is not a valid group!"),
);
}
permutations.push(perm);
}
// Computes the direct product of g with itself |h| times.
let g_prod = vec![&g; h_len - 1]
.into_iter()
.cloned()
.fold(g.clone(), |acc, g| Group::direct_product(g, acc));
Self {
dim,
iter: Box::new(
g_prod
.map(move |g_el| {
let mut matrices = Vec::new();
for perm in &permutations {
let mut new_el = Matrix::zeros(dim, dim);
// Permutes the blocks on the diagonal of g_el.
for (i, &j) in perm.iter().enumerate() {
for x in 0..g_dim {
for y in 0..g_dim {
new_el[(i * g_dim + x, j * g_dim + y)] =
g_el[(i * g_dim + x, i * g_dim + y)];
}
}
}
matrices.push(new_el);
}
matrices.into_iter()
})
.flatten(),
),
}
}
/// Generates the orbit of a point under a given symmetry group.
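///
/// # Example
///
/// A sketch (assuming `Point` is nalgebra's `DVector<f64>`): under
/// `{I, -I}`, the orbit of a nonzero point `p` is `{p, -p}`.
///
/// ```ignore
/// let p = Point::from_column_slice(&[1.0, 0.0]);
/// assert_eq!(Group::central_inv(2).orbit(p).len(), 2);
/// ```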
pub fn orbit(self, p: Point) -> Vec<Point> {
let mut points = BTreeSet::new();
for m in self {
points.insert(OrdPoint::new(m * &p));
}
points.into_iter().map(|x| x.0).collect()
}
/// Generates a polytope as the convex hull of the orbit of a point under a
/// given symmetry group.
pub fn into_polytope(self, p: Point) -> Concrete {
convex::convex_hull(self.orbit(p))
}
}
impl From<Vec<Matrix<f64>>> for Group {
fn from(elements: Vec<Matrix<f64>>) -> Self {
Self {
dim: elements
.get(0)
.expect("Group must have at least one element.")
.nrows(),
iter: Box::new(elements.into_iter()),
}
}
}
/// The result of trying to get the next element in a group.
pub enum GroupNext {
/// We've already found all elements of the group.
None,
/// We found an element we had found previously.
Repeat,
/// We found a new element.
New(Matrix<f64>),
}
#[allow(clippy::upper_case_acronyms)]
type MatrixMN<R, C> = nalgebra::Matrix<f64, R, C, VecStorage<f64, R, C>>;
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
/// A matrix ordered by fuzzy lexicographic ordering. Used to quickly
/// determine whether an element in a [`GenIter`](GenIter) is a
/// duplicate.
pub struct OrdMatrixMN<R: Dim, C: Dim>(pub MatrixMN<R, C>)
where
VecStorage<f64, R, C>: Storage<f64, R, C>;
impl<R: Dim, C: Dim> std::ops::Deref for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
type Target = MatrixMN<R, C>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<R: Dim, C: Dim> std::ops::DerefMut for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<R: Dim, C: Dim> PartialEq for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn eq(&self, other: &Self) -> bool {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
}
impl<R: Dim, C: Dim> Eq for OrdMatrixMN<R, C> where VecStorage<f64, R, C>: Storage<f64, R, C> {}
impl<R: Dim, C: Dim> PartialOrd for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return x.partial_cmp(y);
}
}
Some(std::cmp::Ordering::Equal)
}
}
impl<R: Dim, C: Dim> Ord for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
impl<R: Dim, C: Dim> OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
pub fn new(mat: MatrixMN<R, C>) -> Self {
Self(mat)
}
}
type OrdMatrix = OrdMatrixMN<Dynamic, Dynamic>;
type OrdPoint = OrdMatrixMN<Dynamic, U1>;
/// An iterator for a `Group` [generated](https://en.wikipedia.org/wiki/Generator_(mathematics))
/// by a set of floating point matrices. Its elements are built in a BFS order.
/// It contains a lookup table, used to figure out whether an element has
/// already been found or not, as well as a queue to store the next elements.
#[derive(Clone)]
pub struct GenIter {
/// The number of dimensions the group acts on.
pub dim: usize,
/// The generators for the group.
pub gens: Vec<Matrix<f64>>,
/// Stores the elements that have been generated and that can still be
/// generated again. Is integral for the algorithm to work, as without it,
/// duplicate group elements will just keep generating forever.
elements: BTreeMap<OrdMatrix, usize>,
/// Stores the elements that haven't yet been processed.
queue: VecDeque<OrdMatrix>,
/// Stores the index in [`gens`](GenIter::gens) of the generator
/// that's being checked. All previous ones will have already been
/// multiplied to the right of the current element. Quirk of the current
/// data structure, subject to change.
gen_idx: usize,
}
impl Iterator for GenIter {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.try_next() {
GroupNext::None => return None,
GroupNext::Repeat => {}
GroupNext::New(el) => return Some(el),
};
}
}
}
/// Determines whether two matrices are "approximately equal" elementwise.
fn matrix_approx(mat1: &Matrix<f64>, mat2: &Matrix<f64>) -> bool {
const EPS: f64 = 1e-4;
let mat1 = mat1.iter();
let mut mat2 = mat2.iter();
for x in mat1 {
let y = mat2.next().expect("Matrices don't have the same size!");
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
/// Builds a reflection matrix from a given vector.
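///
/// # Example
///
/// A sketch (assuming `Vector<f64>` is nalgebra's `DVector<f64>`):
/// reflecting through the hyperplane orthogonal to the first basis vector
/// negates the first coordinate and fixes the second, i.e. the resulting
/// matrix is approximately `[[-1, 0], [0, 1]]`.
///
/// ```ignore
/// let n = Vector::from_column_slice(&[1.0, 0.0]);
/// let refl = refl_mat(n);
/// ```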
pub fn refl_mat(n: Vector<f64>) -> Matrix<f64> {
let dim = n.nrows();
let nn = n.norm_squared();
// Reflects every basis vector, builds a matrix from all of their images.
Matrix::from_columns(
&Matrix::identity(dim, dim)
.column_iter()
.map(|v| v - (2.0 * v.dot(&n) / nn) * &n)
.collect::<Vec<_>>(),
)
}
impl GenIter {
/// Builds a new group from a set of generators.
fn new(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
// Initializes the queue with only the identity matrix.
let mut queue = VecDeque::new();
queue.push_back(OrdMatrix::new(Matrix::identity(dim, dim)));
// We say that the identity has been found zero times. This is a special
// case that ensures that neither the identity is queued nor found
// twice.
let mut elements = BTreeMap::new();
elements.insert(OrdMatrix::new(Matrix::identity(dim, dim)), 0);
Self {
dim,
gens,
elements,
queue,
gen_idx: 0,
}
}
/// Inserts a new element into the group. Returns whether the element is new.
fn insert(&mut self, el: Matrix<f64>) -> bool {
let el = OrdMatrix::new(el);
// If the element has been found before.
if let Some(value) = self.elements.insert(el.clone(), 1) {
// Bumps the value by 1, or removes the element if this is the last
// time we'll find the element.
if value != self.gens.len() - 1 {
self.elements.insert(el, value + 1);
} else {
self.elements.remove(&el);
}
// The element is a repeat, except in the special case of the
// identity.
value == 0
}
// If the element is new, we add it to the queue as well.
else {
self.queue.push_back(el);
true
}
}
/// Gets the next element and the next generator to attempt to multiply.
/// Advances the iterator.
fn next_el_gen(&mut self) -> Option<[Matrix<f64>; 2]> {
let el = self.queue.front()?.0.clone();
let gen = self.gens[self.gen_idx].clone();
// Advances the indices.
self.gen_idx += 1;
if self.gen_idx == self.gens.len() {
self.gen_idx = 0;
self.queue.pop_front();
}
Some([el, gen])
}
/// Multiplies the current element times the current generator, determines
/// if it is a new element. Advances the iterator.
fn try_next(&mut self) -> GroupNext {
// If there's a next element and generator.
if let Some([el, gen]) = self.next_el_gen() {
let new_el = el * gen;
// If the group element is new.
if self.insert(new_el.clone()) {
GroupNext::New(new_el)
}
// If we found a repeat.
else {
GroupNext::Repeat
}
}
// If we already went through the entire group.
else {
GroupNext::None
}
}
pub fn from_cox_mat(cox: CoxMatrix) -> Option<Self> {
const EPS: f64 = 1e-6;
let dim = cox.nrows();
let mut generators = Vec::with_capacity(dim);
// Builds each generator from the top down as a triangular matrix, so
// that the dot products match the values in the Coxeter matrix.
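// Concretely (a sketch of the standard construction, not verified against
// the elided code below): the normals are chosen so that
// n_i · n_j = -cos(π / m_ij), where m_ij is the (i, j) entry of the
// Coxeter matrix; solving row by row gives each n_i in triangular form.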
for i in 0..dim {
let mut gen_i = Vector::from | {
let dim = self.dim;
Self {
dim,
iter: Box::new(self.map(move |x| {
let msg = "Size of matrix does not match expected dimension.";
assert_eq!(x.nrows(), dim, "{}", msg);
assert_eq!(x.ncols(), dim, "{}", msg);
x
})),
}
} | identifier_body |
group.rs | ,
left * q.j,
-left * q.i,
q.w,
],
))
}
/// Computes the [direct sum](https://en.wikipedia.org/wiki/Block_matrix#Direct_sum)
/// of two matrices.
fn direct_sum(mat1: Matrix<f64>, mat2: Matrix<f64>) -> Matrix<f64> {
let dim1 = mat1.nrows();
let dim = dim1 + mat2.nrows();
Matrix::from_fn(dim, dim, |i, j| {
if i < dim1 {
if j < dim1 {
mat1[(i, j)]
} else {
0.0
}
} else if j >= dim1 {
mat2[(i - dim1, j - dim1)]
} else {
0.0
}
})
}
/// An iterator such that `dyn` objects using it can be cloned. Used to get
/// around orphan rules.
trait GroupIter: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone {}
impl<T: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone> GroupIter for T {}
dyn_clone::clone_trait_object!(GroupIter);
/// A [group](https://en.wikipedia.org/wiki/Group_(mathematics)) of matrices,
/// acting on a space of a certain dimension.
#[derive(Clone)]
pub struct Group {
/// The dimension of the matrices of the group. Stored separately so that
/// the iterator doesn't have to be peekable.
dim: usize,
/// The underlying iterator, which actually outputs the matrices.
iter: Box<dyn GroupIter>,
}
impl Iterator for Group {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
impl Group {
/// Gets all of the elements of the group. Consumes the iterator.
pub fn elements(self) -> Vec<Matrix<f64>> {
self.collect()
}
/// Gets the number of elements of the group. Consumes the iterator.
pub fn order(self) -> usize {
self.count()
}
pub fn from_gens(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
Self {
dim,
iter: Box::new(GenIter::new(dim, gens)),
}
}
/// Builds the rotation subgroup of a group.
pub fn rotations(self) -> Self {
// The determinant might not be exactly 1, so we're extra lenient and
// just test for positive determinants.
Self {
dim: self.dim,
iter: Box::new(self.filter(|el| el.determinant() > 0.0)),
}
}
/// Builds an iterator over the set of either left or a right quaternions
/// from a 3D group. **These won't actually generate a group,** as they
/// don't contain central inversion.
fn quaternions(self, left: bool) -> Box<dyn GroupIter> {
if self.dim != 3 {
panic!("Quaternions can only be generated from 3D matrices.");
}
Box::new(
self.rotations()
.map(move |el| quat_to_mat(mat_to_quat(el), left)),
)
}
/// Returns the swirl symmetry group of two 3D groups.
pub fn swirl(g: Self, h: Self) -> Self {
if g.dim != 3 {
panic!("g must be a group of 3D matrices.");
}
if h.dim != 3 {
panic!("h must be a group of 3D matrices.");
}
Self {
dim: 4,
iter: Box::new(
itertools::iproduct!(g.quaternions(true), h.quaternions(false))
.map(|(mat1, mat2)| {
let mat = mat1 * mat2;
std::iter::once(mat.clone()).chain(std::iter::once(-mat))
})
.flatten(),
),
}
}
/// Returns a new group whose elements have all been generated already, so
/// that they can be used multiple times quickly.
pub fn cache(self) -> Self {
self.elements().into()
}
/// Returns the exact same group, but now asserts that each generated
/// element has the appropriate dimension. Used for debugging purposes.
pub fn debug(self) -> Self {
let dim = self.dim;
Self {
dim,
iter: Box::new(self.map(move |x| {
let msg = "Size of matrix does not match expected dimension.";
assert_eq!(x.nrows(), dim, "{}", msg);
assert_eq!(x.ncols(), dim, "{}", msg);
x
})),
}
}
/// Generates the trivial group of a certain dimension.
pub fn trivial(dim: usize) -> Self {
Self {
dim,
iter: Box::new(std::iter::once(Matrix::identity(dim, dim))),
}
}
/// Generates the group with the identity and a central inversion of a
/// certain dimension.
pub fn central_inv(dim: usize) -> Self {
Self {
dim,
iter: Box::new(
vec![Matrix::identity(dim, dim), -Matrix::identity(dim, dim)].into_iter(),
),
}
}
/// Generates a step prism group from a base group and a homomorphism into
/// another group.
pub fn step(g: Self, f: impl Fn(Matrix<f64>) -> Matrix<f64> + Clone + 'static) -> Self {
let dim = g.dim * 2;
Self {
dim,
iter: Box::new(g.map(move |mat| {
let clone = mat.clone();
direct_sum(clone, f(mat))
})),
}
}
/// Generates a Coxeter group from its [`CoxMatrix`], or returns `None` if
/// the group doesn't fit as a matrix group in spherical space.
pub fn cox_group(cox: CoxMatrix) -> Option<Self> {
Some(Self {
dim: cox.nrows(),
iter: Box::new(GenIter::from_cox_mat(cox)?),
})
}
/// Generates the direct product of two groups. Uses the specified function
/// to uniquely map the ordered pairs of matrices into other matrices.
pub fn fn_product(
g: Self,
h: Self,
dim: usize,
product: (impl Fn((Matrix<f64>, Matrix<f64>)) -> Matrix<f64> + Clone + 'static),
) -> Self {
Self {
dim,
iter: Box::new(itertools::iproduct!(g, h).map(product)),
}
}
/// Returns the group determined by all products between elements of the
/// first and the second group. **Is meant only for groups that commute with
/// one another.**
pub fn matrix_product(g: Self, h: Self) -> Option<Self> {
// The two matrices must have the same size.
if g.dim != h.dim {
return None;
}
let dim = g.dim;
Some(Self::fn_product(g, h, dim, |(mat1, mat2)| mat1 * mat2))
}
/// Calculates the direct product of two groups. Pairs of matrices are then
/// mapped to their direct sum.
pub fn direct_product(g: Self, h: Self) -> Self {
let dim = g.dim + h.dim;
Self::fn_product(g, h, dim, |(mat1, mat2)| direct_sum(mat1, mat2))
}
/// Generates the [wreath product](https://en.wikipedia.org/wiki/Wreath_product)
/// of two symmetry groups.
pub fn wreath(g: Self, h: Self) -> Self {
let h = h.elements();
let h_len = h.len();
let g_dim = g.dim;
let dim = g_dim * h_len;
// Indexes each element in h.
let mut h_indices = BTreeMap::new();
for (i, h_el) in h.iter().enumerate() {
h_indices.insert(OrdMatrix::new(h_el.clone()), i);
}
// Converts h into a permutation group.
let mut permutations = Vec::with_capacity(h_len);
for h_el_1 in &h {
let mut perm = Vec::with_capacity(h.len());
for h_el_2 in &h {
perm.push(
*h_indices
.get(&OrdMatrix::new(h_el_1 * h_el_2))
.expect("h is not a valid group!"),
);
}
permutations.push(perm);
}
// Computes the direct product of g with itself |h| times.
let g_prod = vec![&g; h_len - 1]
.into_iter()
.cloned()
.fold(g.clone(), |acc, g| Group::direct_product(g, acc));
Self {
dim,
iter: Box::new(
g_prod
.map(move |g_el| {
let mut matrices = Vec::new();
for perm in &permutations {
let mut new_el = Matrix::zeros(dim, dim);
// Permutes the blocks on the diagonal of g_el.
for (i, &j) in perm.iter().enumerate() {
for x in 0..g_dim {
for y in 0..g_dim {
new_el[(i * g_dim + x, j * g_dim + y)] =
g_el[(i * g_dim + x, i * g_dim + y)];
}
}
}
matrices.push(new_el);
}
matrices.into_iter()
})
.flatten(),
),
}
}
/// Generates the orbit of a point under a given symmetry group.
pub fn orbit(self, p: Point) -> Vec<Point> {
let mut points = BTreeSet::new();
for m in self {
points.insert(OrdPoint::new(m * &p));
}
points.into_iter().map(|x| x.0).collect()
}
/// Generates a polytope as the convex hull of the orbit of a point under a
/// given symmetry group.
pub fn into_polytope(self, p: Point) -> Concrete {
convex::convex_hull(self.orbit(p))
}
}
impl From<Vec<Matrix<f64>>> for Group {
fn from(elements: Vec<Matrix<f64>>) -> Self {
Self {
dim: elements
.get(0)
.expect("Group must have at least one element.")
.nrows(),
iter: Box::new(elements.into_iter()),
}
}
}
/// The result of trying to get the next element in a group.
pub enum GroupNext {
/// We've already found all elements of the group.
None,
/// We found an element we had found previously.
Repeat,
/// We found a new element.
New(Matrix<f64>),
}
#[allow(clippy::upper_case_acronyms)]
type MatrixMN<R, C> = nalgebra::Matrix<f64, R, C, VecStorage<f64, R, C>>;
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
/// A matrix ordered by fuzzy lexicographic ordering. Used to quickly
/// determine whether an element in a [`GenIter`](GenIter) is a
/// duplicate.
pub struct OrdMatrixMN<R: Dim, C: Dim>(pub MatrixMN<R, C>)
where
VecStorage<f64, R, C>: Storage<f64, R, C>;
impl<R: Dim, C: Dim> std::ops::Deref for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
type Target = MatrixMN<R, C>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<R: Dim, C: Dim> std::ops::DerefMut for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<R: Dim, C: Dim> PartialEq for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn eq(&self, other: &Self) -> bool {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
}
impl<R: Dim, C: Dim> Eq for OrdMatrixMN<R, C> where VecStorage<f64, R, C>: Storage<f64, R, C> {}
impl<R: Dim, C: Dim> PartialOrd for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return x.partial_cmp(y);
}
}
Some(std::cmp::Ordering::Equal)
}
}
impl<R: Dim, C: Dim> Ord for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
impl<R: Dim, C: Dim> OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
pub fn new(mat: MatrixMN<R, C>) -> Self {
Self(mat)
}
}
type OrdMatrix = OrdMatrixMN<Dynamic, Dynamic>;
type OrdPoint = OrdMatrixMN<Dynamic, U1>;
/// An iterator for a `Group` [generated](https://en.wikipedia.org/wiki/Generator_(mathematics))
/// by a set of floating point matrices. Its elements are built in a BFS order.
/// It contains a lookup table, used to figure out whether an element has
/// already been found or not, as well as a queue to store the next elements.
#[derive(Clone)]
pub struct GenIter {
/// The number of dimensions the group acts on.
pub dim: usize,
/// The generators for the group.
pub gens: Vec<Matrix<f64>>,
/// Stores the elements that have been generated and that can still be
/// generated again. Is integral for the algorithm to work, as without it,
/// duplicate group elements will just keep generating forever.
elements: BTreeMap<OrdMatrix, usize>,
/// Stores the elements that haven't yet been processed.
queue: VecDeque<OrdMatrix>,
/// Stores the index in [`gens`](GenIter::gens) of the generator
/// that's being checked. All previous ones will have already been
/// multiplied to the right of the current element. Quirk of the current
/// data structure, subject to change.
gen_idx: usize,
}
impl Iterator for GenIter {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.try_next() {
GroupNext::None => return None,
GroupNext::Repeat => {}
GroupNext::New(el) => return Some(el),
};
}
}
}
/// Determines whether two matrices are "approximately equal" elementwise.
fn matrix_approx(mat1: &Matrix<f64>, mat2: &Matrix<f64>) -> bool {
const EPS: f64 = 1e-4;
let mat1 = mat1.iter();
let mut mat2 = mat2.iter();
for x in mat1 {
let y = mat2.next().expect("Matrices don't have the same size!");
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
/// Builds a reflection matrix from a given vector.
pub fn refl_mat(n: Vector<f64>) -> Matrix<f64> {
let dim = n.nrows(); | let nn = n.norm_squared();
// Reflects every basis vector, builds a matrix from all of their images.
Matrix::from_columns(
&Matrix::identity(dim, dim)
.column_iter()
.map(|v| v - (2.0 * v.dot(&n) / nn) * &n)
.collect::<Vec<_>>(),
)
}
impl GenIter {
/// Builds a new group from a set of generators.
fn new(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
// Initializes the queue with only the identity matrix.
let mut queue = VecDeque::new();
queue.push_back(OrdMatrix::new(Matrix::identity(dim, dim)));
// We say that the identity has been found zero times. This is a special
// case that ensures that neither the identity is queued nor found
// twice.
let mut elements = BTreeMap::new();
elements.insert(OrdMatrix::new(Matrix::identity(dim, dim)), 0);
Self {
dim,
gens,
elements,
queue,
gen_idx: 0,
}
}
/// Inserts a new element into the group. Returns whether the element is new.
fn insert(&mut self, el: Matrix<f64>) -> bool {
let el = OrdMatrix::new(el);
// If the element has been found before.
if let Some(value) = self.elements.insert(el.clone(), 1) {
// Bumps the value by 1, or removes the element if this is the last
// time we'll find the element.
if value != self.gens.len() - 1 {
self.elements.insert(el, value + 1);
} else {
self.elements.remove(&el);
}
// The element is a repeat, except in the special case of the
// identity.
value == 0
}
// If the element is new, we add it to the queue as well.
else {
self.queue.push_back(el);
true
}
}
/// Gets the next element and the next generator to attempt to multiply.
/// Advances the iterator.
fn next_el_gen(&mut self) -> Option<[Matrix<f64>; 2]> {
let el = self.queue.front()?.0.clone();
let gen = self.gens[self.gen_idx].clone();
// Advances the indices.
self.gen_idx += 1;
if self.gen_idx == self.gens.len() {
self.gen_idx = 0;
self.queue.pop_front();
}
Some([el, gen])
}
/// Multiplies the current element times the current generator, determines
/// if it is a new element. Advances the iterator.
fn try_next(&mut self) -> GroupNext {
// If there's a next element and generator.
if let Some([el, gen]) = self.next_el_gen() {
let new_el = el * gen;
// If the group element is new.
if self.insert(new_el.clone()) {
GroupNext::New(new_el)
}
// If we found a repeat.
else {
GroupNext::Repeat
}
}
// If we already went through the entire group.
else {
GroupNext::None
}
}
pub fn from_cox_mat(cox: CoxMatrix) -> Option<Self> {
const EPS: f64 = 1e-6;
let dim = cox.nrows();
let mut generators = Vec::with_capacity(dim);
// Builds each generator from the top down as a triangular matrix, so
// that the dot products match the values in the Coxeter matrix.
for i in 0..dim {
let mut gen_i = Vector::from_ | random_line_split |
|
group.rs |
left * q.j,
-left * q.i,
q.w,
],
))
}
/// Computes the [direct sum](https://en.wikipedia.org/wiki/Block_matrix#Direct_sum)
/// of two matrices.
fn direct_sum(mat1: Matrix<f64>, mat2: Matrix<f64>) -> Matrix<f64> {
let dim1 = mat1.nrows();
let dim = dim1 + mat2.nrows();
Matrix::from_fn(dim, dim, |i, j| {
if i < dim1 {
if j < dim1 {
mat1[(i, j)]
} else {
0.0
}
} else if j >= dim1 {
mat2[(i - dim1, j - dim1)]
} else {
0.0
}
})
}
/// An iterator such that `dyn` objects using it can be cloned. Used to get
/// around orphan rules.
trait GroupIter: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone {}
impl<T: Iterator<Item = Matrix<f64>> + dyn_clone::DynClone> GroupIter for T {}
dyn_clone::clone_trait_object!(GroupIter);
/// A [group](https://en.wikipedia.org/wiki/Group_(mathematics)) of matrices,
/// acting on a space of a certain dimension.
#[derive(Clone)]
pub struct Group {
/// The dimension of the matrices of the group. Stored separately so that
/// the iterator doesn't have to be peekable.
dim: usize,
/// The underlying iterator, which actually outputs the matrices.
iter: Box<dyn GroupIter>,
}
impl Iterator for Group {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
impl Group {
/// Gets all of the elements of the group. Consumes the iterator.
pub fn elements(self) -> Vec<Matrix<f64>> {
self.collect()
}
/// Gets the number of elements of the group. Consumes the iterator.
pub fn order(self) -> usize {
self.count()
}
pub fn from_gens(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
Self {
dim,
iter: Box::new(GenIter::new(dim, gens)),
}
}
/// Builds the rotation subgroup of a group.
pub fn rotations(self) -> Self {
// The determinant might not be exactly 1, so we're extra lenient and
// just test for positive determinants.
Self {
dim: self.dim,
iter: Box::new(self.filter(|el| el.determinant() > 0.0)),
}
}
/// Builds an iterator over the set of either left or a right quaternions
/// from a 3D group. **These won't actually generate a group,** as they
/// don't contain central inversion.
fn | (self, left: bool) -> Box<dyn GroupIter> {
if self.dim != 3 {
panic!("Quaternions can only be generated from 3D matrices.");
}
Box::new(
self.rotations()
.map(move |el| quat_to_mat(mat_to_quat(el), left)),
)
}
/// Returns the swirl symmetry group of two 3D groups.
pub fn swirl(g: Self, h: Self) -> Self {
if g.dim != 3 {
panic!("g must be a group of 3D matrices.");
}
if h.dim != 3 {
panic!("h must be a group of 3D matrices.");
}
Self {
dim: 4,
iter: Box::new(
itertools::iproduct!(g.quaternions(true), h.quaternions(false))
.map(|(mat1, mat2)| {
let mat = mat1 * mat2;
std::iter::once(mat.clone()).chain(std::iter::once(-mat))
})
.flatten(),
),
}
}
/// Returns a new group whose elements have all been generated already, so
/// that they can be used multiple times quickly.
pub fn cache(self) -> Self {
self.elements().into()
}
/// Returns the exact same group, but now asserts that each generated
/// element has the appropriate dimension. Used for debugging purposes.
pub fn debug(self) -> Self {
let dim = self.dim;
Self {
dim,
iter: Box::new(self.map(move |x| {
let msg = "Size of matrix does not match expected dimension.";
assert_eq!(x.nrows(), dim, "{}", msg);
assert_eq!(x.ncols(), dim, "{}", msg);
x
})),
}
}
/// Generates the trivial group of a certain dimension.
pub fn trivial(dim: usize) -> Self {
Self {
dim,
iter: Box::new(std::iter::once(Matrix::identity(dim, dim))),
}
}
/// Generates the group with the identity and a central inversion of a
/// certain dimension.
pub fn central_inv(dim: usize) -> Self {
Self {
dim,
iter: Box::new(
vec![Matrix::identity(dim, dim), -Matrix::identity(dim, dim)].into_iter(),
),
}
}
/// Generates a step prism group from a base group and a homomorphism into
/// another group.
pub fn step(g: Self, f: impl Fn(Matrix<f64>) -> Matrix<f64> + Clone + 'static) -> Self {
let dim = g.dim * 2;
Self {
dim,
iter: Box::new(g.map(move |mat| {
let clone = mat.clone();
direct_sum(clone, f(mat))
})),
}
}
/// Generates a Coxeter group from its [`CoxMatrix`], or returns `None` if
/// the group doesn't fit as a matrix group in spherical space.
pub fn cox_group(cox: CoxMatrix) -> Option<Self> {
Some(Self {
dim: cox.nrows(),
iter: Box::new(GenIter::from_cox_mat(cox)?),
})
}
/// Generates the direct product of two groups. Uses the specified function
/// to uniquely map the ordered pairs of matrices into other matrices.
pub fn fn_product(
g: Self,
h: Self,
dim: usize,
product: (impl Fn((Matrix<f64>, Matrix<f64>)) -> Matrix<f64> + Clone + 'static),
) -> Self {
Self {
dim,
iter: Box::new(itertools::iproduct!(g, h).map(product)),
}
}
/// Returns the group determined by all products between elements of the
/// first and the second group. **Is meant only for groups that commute with
/// one another.**
pub fn matrix_product(g: Self, h: Self) -> Option<Self> {
// The two matrices must have the same size.
if g.dim != h.dim {
return None;
}
let dim = g.dim;
Some(Self::fn_product(g, h, dim, |(mat1, mat2)| mat1 * mat2))
}
/// Calculates the direct product of two groups. Pairs of matrices are then
/// mapped to their direct sum.
pub fn direct_product(g: Self, h: Self) -> Self {
let dim = g.dim + h.dim;
Self::fn_product(g, h, dim, |(mat1, mat2)| direct_sum(mat1, mat2))
}
/// Generates the [wreath product](https://en.wikipedia.org/wiki/Wreath_product)
/// of two symmetry groups.
pub fn wreath(g: Self, h: Self) -> Self {
let h = h.elements();
let h_len = h.len();
let g_dim = g.dim;
let dim = g_dim * h_len;
// Indexes each element in h.
let mut h_indices = BTreeMap::new();
for (i, h_el) in h.iter().enumerate() {
h_indices.insert(OrdMatrix::new(h_el.clone()), i);
}
// Converts h into a permutation group.
let mut permutations = Vec::with_capacity(h_len);
for h_el_1 in &h {
let mut perm = Vec::with_capacity(h.len());
for h_el_2 in &h {
perm.push(
*h_indices
.get(&OrdMatrix::new(h_el_1 * h_el_2))
.expect("h is not a valid group!"),
);
}
permutations.push(perm);
}
// Computes the direct product of g with itself |h| times.
let g_prod = vec![&g; h_len - 1]
.into_iter()
.cloned()
.fold(g.clone(), |acc, g| Group::direct_product(g, acc));
Self {
dim,
iter: Box::new(
g_prod
.map(move |g_el| {
let mut matrices = Vec::new();
for perm in &permutations {
let mut new_el = Matrix::zeros(dim, dim);
// Permutes the blocks on the diagonal of g_el.
for (i, &j) in perm.iter().enumerate() {
for x in 0..g_dim {
for y in 0..g_dim {
new_el[(i * g_dim + x, j * g_dim + y)] =
g_el[(i * g_dim + x, i * g_dim + y)];
}
}
}
matrices.push(new_el);
}
matrices.into_iter()
})
.flatten(),
),
}
}
/// Generates the orbit of a point under a given symmetry group.
pub fn orbit(self, p: Point) -> Vec<Point> {
let mut points = BTreeSet::new();
for m in self {
points.insert(OrdPoint::new(m * &p));
}
points.into_iter().map(|x| x.0).collect()
}
/// Generates a polytope as the convex hull of the orbit of a point under a
/// given symmetry group.
pub fn into_polytope(self, p: Point) -> Concrete {
convex::convex_hull(self.orbit(p))
}
}
impl From<Vec<Matrix<f64>>> for Group {
fn from(elements: Vec<Matrix<f64>>) -> Self {
Self {
dim: elements
.get(0)
.expect("Group must have at least one element.")
.nrows(),
iter: Box::new(elements.into_iter()),
}
}
}
/// The result of trying to get the next element in a group.
pub enum GroupNext {
/// We've already found all elements of the group.
None,
/// We found an element we had found previously.
Repeat,
/// We found a new element.
New(Matrix<f64>),
}
#[allow(clippy::upper_case_acronyms)]
type MatrixMN<R, C> = nalgebra::Matrix<f64, R, C, VecStorage<f64, R, C>>;
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
/// A matrix ordered by fuzzy lexicographic ordering. Used to quickly
/// determine whether an element in a [`GenIter`](GenIter) is a
/// duplicate.
pub struct OrdMatrixMN<R: Dim, C: Dim>(pub MatrixMN<R, C>)
where
VecStorage<f64, R, C>: Storage<f64, R, C>;
impl<R: Dim, C: Dim> std::ops::Deref for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
type Target = MatrixMN<R, C>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<R: Dim, C: Dim> std::ops::DerefMut for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<R: Dim, C: Dim> PartialEq for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn eq(&self, other: &Self) -> bool {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
}
impl<R: Dim, C: Dim> Eq for OrdMatrixMN<R, C> where VecStorage<f64, R, C>: Storage<f64, R, C> {}
impl<R: Dim, C: Dim> PartialOrd for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return x.partial_cmp(y);
}
}
Some(std::cmp::Ordering::Equal)
}
}
impl<R: Dim, C: Dim> Ord for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
impl<R: Dim, C: Dim> OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
pub fn new(mat: MatrixMN<R, C>) -> Self {
Self(mat)
}
}
type OrdMatrix = OrdMatrixMN<Dynamic, Dynamic>;
type OrdPoint = OrdMatrixMN<Dynamic, U1>;
/// An iterator for a `Group` [generated](https://en.wikipedia.org/wiki/Generator_(mathematics))
/// by a set of floating point matrices. Its elements are built in a BFS order.
/// It contains a lookup table, used to figure out whether an element has
/// already been found or not, as well as a queue to store the next elements.
#[derive(Clone)]
pub struct GenIter {
/// The number of dimensions the group acts on.
pub dim: usize,
/// The generators for the group.
pub gens: Vec<Matrix<f64>>,
/// Stores the elements that have been generated and that can still be
/// generated again. Is integral for the algorithm to work, as without it,
/// duplicate group elements will just keep generating forever.
elements: BTreeMap<OrdMatrix, usize>,
/// Stores the elements that haven't yet been processed.
queue: VecDeque<OrdMatrix>,
/// Stores the index in [`gens`](GenIter::gens) of the generator
/// that's being checked. All previous ones will have already been
/// multiplied to the right of the current element. Quirk of the current
/// data structure, subject to change.
gen_idx: usize,
}
impl Iterator for GenIter {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.try_next() {
GroupNext::None => return None,
GroupNext::Repeat => {}
GroupNext::New(el) => return Some(el),
};
}
}
}
/// Determines whether two matrices are "approximately equal" elementwise.
fn matrix_approx(mat1: &Matrix<f64>, mat2: &Matrix<f64>) -> bool {
const EPS: f64 = 1e-4;
let mat1 = mat1.iter();
let mut mat2 = mat2.iter();
for x in mat1 {
let y = mat2.next().expect("Matrices don't have the same size!");
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
/// Builds a reflection matrix from a given vector.
pub fn refl_mat(n: Vector<f64>) -> Matrix<f64> {
let dim = n.nrows();
let nn = n.norm_squared();
// Reflects every basis vector, builds a matrix from all of their images.
Matrix::from_columns(
&Matrix::identity(dim, dim)
.column_iter()
.map(|v| v - (2.0 * v.dot(&n) / nn) * &n)
.collect::<Vec<_>>(),
)
}
impl GenIter {
/// Builds a new group from a set of generators.
fn new(dim: usize, gens: Vec<Matrix<f64>>) -> Self {
// Initializes the queue with only the identity matrix.
let mut queue = VecDeque::new();
queue.push_back(OrdMatrix::new(Matrix::identity(dim, dim)));
// We say that the identity has been found zero times. This is a special
// case that ensures that neither the identity is queued nor found
// twice.
let mut elements = BTreeMap::new();
elements.insert(OrdMatrix::new(Matrix::identity(dim, dim)), 0);
Self {
dim,
gens,
elements,
queue,
gen_idx: 0,
}
}
/// Inserts a new element into the group. Returns whether the element is new.
fn insert(&mut self, el: Matrix<f64>) -> bool {
let el = OrdMatrix::new(el);
// If the element has been found before.
if let Some(value) = self.elements.insert(el.clone(), 1) {
// Bumps the value by 1, or removes the element if this is the last
// time we'll find the element.
if value != self.gens.len() - 1 {
self.elements.insert(el, value + 1);
} else {
self.elements.remove(&el);
}
// The element is a repeat, except in the special case of the
// identity.
value == 0
}
// If the element is new, we add it to the queue as well.
else {
self.queue.push_back(el);
true
}
}
/// Gets the next element and the next generator to attempt to multiply.
/// Advances the iterator.
fn next_el_gen(&mut self) -> Option<[Matrix<f64>; 2]> {
let el = self.queue.front()?.0.clone();
let gen = self.gens[self.gen_idx].clone();
// Advances the indices.
self.gen_idx += 1;
if self.gen_idx == self.gens.len() {
self.gen_idx = 0;
self.queue.pop_front();
}
Some([el, gen])
}
/// Multiplies the current element times the current generator, determines
/// if it is a new element. Advances the iterator.
fn try_next(&mut self) -> GroupNext {
// If there's a next element and generator.
if let Some([el, gen]) = self.next_el_gen() {
let new_el = el * gen;
// If the group element is new.
if self.insert(new_el.clone()) {
GroupNext::New(new_el)
}
// If we found a repeat.
else {
GroupNext::Repeat
}
}
// If we already went through the entire group.
else {
GroupNext::None
}
}
pub fn from_cox_mat(cox: CoxMatrix) -> Option<Self> {
const EPS: f64 = 1e-6;
let dim = cox.nrows();
let mut generators = Vec::with_capacity(dim);
// Builds each generator from the top down as a triangular matrix, so
// that the dot products match the values in the Coxeter matrix.
for i in 0..dim {
let mut gen_i = Vector::from | quaternions | identifier_name |
task.rs | use notifier::Notifier;
use sender::Sender;
use futures::{self, future, Future, Async};
use futures::executor::{self, Spawn};
use std::{fmt, mem, panic, ptr};
use std::cell::Cell;
use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
use std::sync::atomic::Ordering::{AcqRel, Acquire, Release, Relaxed};
#[cfg(feature = "unstable-futures")]
use futures2;
pub(crate) struct Task {
ptr: *mut Inner,
}
#[derive(Debug)]
pub(crate) struct Queue {
head: AtomicPtr<Inner>,
tail: Cell<*mut Inner>,
stub: Box<Inner>,
}
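// A sketch of the intended usage (any number of producers may `push`;
// only the single consumer thread may call the unsafe `poll`, which can
// report `Inconsistent` when a producer is mid-push and should be retried):
//
// let queue = Queue::new();
// queue.push(task);
// match unsafe { queue.poll() } {
//     Poll::Data(task) => { /* run it */ }
//     Poll::Empty => { /* nothing queued */ }
//     Poll::Inconsistent => { /* retry shortly */ }
// }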
#[derive(Debug)]
pub(crate) enum Poll {
Empty,
Inconsistent,
Data(Task),
}
#[derive(Debug)]
pub(crate) enum Run {
Idle,
Schedule,
Complete,
}
type BoxFuture = Box<Future<Item = (), Error = ()> + Send + 'static>;
#[cfg(feature = "unstable-futures")]
type BoxFuture2 = Box<futures2::Future<Item = (), Error = futures2::Never> + Send>;
enum TaskFuture {
Futures1(Spawn<BoxFuture>),
#[cfg(feature = "unstable-futures")]
Futures2 {
tls: futures2::task::LocalMap,
waker: futures2::task::Waker,
fut: BoxFuture2,
}
}
struct Inner {
// Next pointer in the queue that submits tasks to a worker.
next: AtomicPtr<Inner>,
// Task state
state: AtomicUsize,
// Number of outstanding references to the task
ref_count: AtomicUsize,
// Store the future at the head of the struct
//
// The future is dropped immediately when it transitions to Complete
future: Option<TaskFuture>,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum State {
/// Task is currently idle
Idle,
/// Task is currently running
Running,
/// Task is currently running, but has been notified that it must run again.
Notified,
/// Task has been scheduled
Scheduled,
/// Task is complete
Complete,
}
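// A sketch of the transitions driven by `run` and `schedule` below:
//
// Scheduled --run--> Running --(poll: NotReady)--> Idle --schedule--> Scheduled
// Running --(notified while running)--> Notified --> Scheduled
// Running --(poll: Ready / Err / panic)--> Complete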
// ===== impl Task =====
impl Task {
/// Create a new task handle
pub fn new(future: BoxFuture) -> Task {
let task_fut = TaskFuture::Futures1(executor::spawn(future));
let inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: Some(task_fut),
});
Task { ptr: Box::into_raw(inner) }
}
/// Create a new task handle for a futures 0.2 future
#[cfg(feature = "unstable-futures")]
pub fn new2<F>(fut: BoxFuture2, make_waker: F) -> Task
where F: FnOnce(usize) -> futures2::task::Waker
{
let mut inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: None,
});
let waker = make_waker((&*inner) as *const _ as usize);
let tls = futures2::task::LocalMap::new();
inner.future = Some(TaskFuture::Futures2 { waker, tls, fut });
Task { ptr: Box::into_raw(inner) }
}
/// Transmutes a `usize` to a `Task`
pub unsafe fn from_notify_id(unpark_id: usize) -> Task {
mem::transmute(unpark_id)
}
/// Transmutes a `usize` reference to a task ref
pub unsafe fn from_notify_id_ref<'a>(unpark_id: &'a usize) -> &'a Task {
mem::transmute(unpark_id)
}
/// Execute the task returning `Run::Schedule` if the task needs to be
/// scheduled again.
pub fn run(&self, unpark: &Arc<Notifier>, exec: &mut Sender) -> Run {
use self::State::*;
// Transition task to running state. At this point, the task must be
// scheduled.
let actual: State = self.inner().state.compare_and_swap(
Scheduled.into(), Running.into(), AcqRel).into();
trace!("running; state={:?}", actual);
match actual {
Scheduled => {},
_ => panic!("unexpected task state; {:?}", actual),
}
trace!("Task::run; state={:?}", State::from(self.inner().state.load(Relaxed)));
let fut = &mut self.inner_mut().future;
// This block deals with the future panicking while being polled.
//
// If the future panics, then the drop handler must be called such that
// `thread::panicking() -> true`. To do this, the future is dropped from
// within the catch_unwind block.
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
struct Guard<'a>(&'a mut Option<TaskFuture>, bool);
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
// This drops the future
if self.1 {
let _ = self.0.take();
}
}
}
let mut g = Guard(fut, true);
let ret = g.0.as_mut().unwrap()
.poll(unpark, self.ptr as usize, exec);
g.1 = false;
ret
}));
match res {
Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => {
trace!(" -> task complete");
// Drop the future
self.inner_mut().drop_future();
// Transition to the completed state
self.inner().state.store(State::Complete.into(), Release);
Run::Complete
}
Ok(Ok(Async::NotReady)) => {
trace!(" -> not ready");
// Attempt to transition from Running -> Idle, if successful,
// then the task does not need to be scheduled again. If the CAS
// fails, then the task has been unparked concurrent to running,
// in which case it transitions immediately back to scheduled
// and we return `true`.
let prev: State = self.inner().state.compare_and_swap(
Running.into(), Idle.into(), AcqRel).into();
match prev {
Running => Run::Idle,
Notified => {
self.inner().state.store(Scheduled.into(), Release);
Run::Schedule
}
_ => unreachable!(),
}
}
}
}
/// Transition the task state to scheduled.
///
/// Returns `true` if the caller is permitted to schedule the task.
pub fn schedule(&self) -> bool {
use self::State::*;
loop {
let actual = self.inner().state.compare_and_swap(
Idle.into(),
Scheduled.into(),
AcqRel).into();
match actual {
Idle => return true,
Running => {
let actual = self.inner().state.compare_and_swap(
Running.into(), Notified.into(), AcqRel).into();
match actual {
Idle => continue,
_ => return false,
}
}
Complete | Notified | Scheduled => return false,
}
}
}
#[inline]
fn inner(&self) -> &Inner {
unsafe { &*self.ptr }
}
#[inline]
fn inner_mut(&self) -> &mut Inner {
unsafe { &mut *self.ptr }
}
}
impl fmt::Debug for Task {
fn | (&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Task")
.field("inner", self.inner())
.finish()
}
}
impl Clone for Task {
fn clone(&self) -> Task {
use std::isize;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().ref_count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// TODO: abort
panic!();
}
Task { ptr: self.ptr }
}
}
impl Drop for Task {
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().ref_count.fetch_sub(1, Release) != 1 {
return;
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
let _ = Box::from_raw(self.ptr);
}
}
}
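// The ordering pattern above (Relaxed increment in `clone`, Release
// decrement plus an Acquire fence in `drop`) is the same one used by
// `std::sync::Arc`. A minimal, self-contained sketch of the pattern, with
// a hypothetical `RefCounted` type that is not part of this crate:
//
//     use std::sync::atomic::{fence, AtomicUsize, Ordering};
//
//     struct RefCounted {
//         count: AtomicUsize,
//     }
//
//     impl RefCounted {
//         fn acquire_ref(&self) {
//             // New refs only come from existing refs, so Relaxed suffices.
//             self.count.fetch_add(1, Ordering::Relaxed);
//         }
//
//         // Returns true when the caller must free the data.
//         fn release_ref(&self) -> bool {
//             if self.count.fetch_sub(1, Ordering::Release) != 1 {
//                 return false;
//             }
//             // Pairs with the Release decrements of the other handles.
//             fence(Ordering::Acquire);
//             true
//         }
//     }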
unsafe impl Send for Task {}
// ===== impl Inner =====
impl Inner {
fn stub() -> Inner {
Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::stub().into()),
ref_count: AtomicUsize::new(0),
future: Some(TaskFuture::Futures1(executor::spawn(Box::new(future::empty())))),
}
}
fn drop_future(&mut self) {
let _ = self.future.take();
}
}
impl Drop for Inner {
fn drop(&mut self) {
self.drop_future();
}
}
impl fmt::Debug for Inner {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Inner")
.field("next", &self.next)
.field("state", &self.state)
.field("ref_count", &self.ref_count)
.field("future", &"Spawn<BoxFuture>")
.finish()
}
}
// ===== impl Queue =====
impl Queue {
pub fn new() -> Queue {
let stub = Box::new(Inner::stub());
let ptr = &*stub as *const _ as *mut _;
Queue {
head: AtomicPtr::new(ptr),
tail: Cell::new(ptr),
stub: stub,
}
}
pub fn push(&self, handle: Task) {
unsafe {
self.push2(handle.ptr);
// Forgetting the handle is necessary to avoid the ref dec
mem::forget(handle);
}
}
unsafe fn push2(&self, handle: *mut Inner) {
// Set the next pointer. This does not require an atomic operation as
// this node is not accessible. The write will be flushed with the next
// operation
(*handle).next = AtomicPtr::new(ptr::null_mut());
// Update the head to point to the new node. We need to see the previous
// node in order to update the next pointer as well as release `handle`
// to any other threads calling `push`.
let prev = self.head.swap(handle, AcqRel);
// Release `handle` to the consume end.
(*prev).next.store(handle, Release);
}
pub unsafe fn poll(&self) -> Poll {
let mut tail = self.tail.get();
let mut next = (*tail).next.load(Acquire);
let stub = &*self.stub as *const _ as *mut _;
if tail == stub {
if next.is_null() {
return Poll::Empty;
}
self.tail.set(next);
tail = next;
next = (*next).next.load(Acquire);
}
        if !next.is_null() {
self.tail.set(next);
// No ref_count inc is necessary here as this poll is paired
// with a `push` which "forgets" the handle.
return Poll::Data(Task {
ptr: tail,
});
}
        if self.head.load(Acquire) != tail {
return Poll::Inconsistent;
}
self.push2(stub);
next = (*tail).next.load(Acquire);
        if !next.is_null() {
self.tail.set(next);
return Poll::Data(Task {
ptr: tail,
});
}
Poll::Inconsistent
}
}
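// `poll` above is the consumer half of a Vyukov-style intrusive MPSC
// queue: `Poll::Inconsistent` means some producer has swapped `head` in
// `push2` but not yet stored the `next` pointer. A hedged consumer-loop
// sketch; `backoff()` stands in for whatever yield/spin primitive the
// caller has:
//
//     unsafe fn pop_blocking(queue: &Queue) -> Option<Task> {
//         loop {
//             match queue.poll() {
//                 Poll::Data(task) => return Some(task),
//                 Poll::Empty => return None,
//                 // A producer is mid-push; retry shortly.
//                 Poll::Inconsistent => backoff(),
//             }
//         }
//     }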
// ===== impl State =====
impl State {
/// Returns the initial task state.
///
/// Tasks start in the scheduled state as they are immediately scheduled on
/// creation.
fn new() -> State {
State::Scheduled
}
fn stub() -> State {
State::Idle
}
}
impl From<usize> for State {
fn from(src: usize) -> Self {
use self::State::*;
match src {
0 => Idle,
1 => Running,
2 => Notified,
3 => Scheduled,
4 => Complete,
_ => unreachable!(),
}
}
}
impl From<State> for usize {
fn from(src: State) -> Self {
use self::State::*;
match src {
Idle => 0,
Running => 1,
Notified => 2,
Scheduled => 3,
Complete => 4,
}
}
}
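// These `From` impls exist so the five-variant state machine can live in
// the `AtomicUsize` inside `Inner`. A hedged sketch of the round trip as
// the code above uses it:
//
//     let state = AtomicUsize::new(State::Scheduled.into());
//     let prev: State = state.compare_and_swap(
//         State::Scheduled.into(), State::Running.into(), AcqRel).into();
//     assert_eq!(prev, State::Scheduled);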
// ===== impl TaskFuture =====
impl TaskFuture {
#[allow(unused_variables)]
fn poll(&mut self, unpark: &Arc<Notifier>, id: usize, exec: &mut Sender) -> futures::Poll<(), ()> {
match *self {
TaskFuture::Futures1(ref mut fut) => fut.poll_future_notify(unpark, id),
#[cfg(feature = "unstable-futures")]
TaskFuture::Futures2 { ref mut fut, ref waker, ref mut tls } => {
let mut cx = futures2::task::Context::new(tls, waker, exec);
match fut.poll(&mut cx).unwrap() {
futures2::Async::Pending => Ok(Async::NotReady),
futures2::Async::Ready(x) => Ok(Async::Ready(x)),
}
}
}
}
}
| fmt | identifier_name |
task.rs | use notifier::Notifier;
use sender::Sender;
use futures::{self, future, Future, Async};
use futures::executor::{self, Spawn};
use std::{fmt, mem, panic, ptr};
use std::cell::Cell;
use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
use std::sync::atomic::Ordering::{AcqRel, Acquire, Release, Relaxed};
#[cfg(feature = "unstable-futures")]
use futures2;
pub(crate) struct Task {
ptr: *mut Inner,
}
#[derive(Debug)]
pub(crate) struct Queue {
head: AtomicPtr<Inner>,
tail: Cell<*mut Inner>,
stub: Box<Inner>,
}
#[derive(Debug)]
pub(crate) enum Poll {
Empty,
Inconsistent,
Data(Task),
}
#[derive(Debug)]
pub(crate) enum Run {
Idle,
Schedule,
Complete,
}
type BoxFuture = Box<Future<Item = (), Error = ()> + Send + 'static>;
#[cfg(feature = "unstable-futures")]
type BoxFuture2 = Box<futures2::Future<Item = (), Error = futures2::Never> + Send>;
enum TaskFuture {
Futures1(Spawn<BoxFuture>),
#[cfg(feature = "unstable-futures")]
Futures2 {
tls: futures2::task::LocalMap,
waker: futures2::task::Waker,
fut: BoxFuture2,
}
}
struct Inner {
// Next pointer in the queue that submits tasks to a worker.
next: AtomicPtr<Inner>,
// Task state
state: AtomicUsize,
// Number of outstanding references to the task
ref_count: AtomicUsize,
// Store the future at the head of the struct
//
// The future is dropped immediately when it transitions to Complete
future: Option<TaskFuture>,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum State {
/// Task is currently idle
Idle,
/// Task is currently running
Running,
/// Task is currently running, but has been notified that it must run again.
Notified,
/// Task has been scheduled
Scheduled,
/// Task is complete
Complete,
}
// ===== impl Task =====
impl Task {
/// Create a new task handle
pub fn new(future: BoxFuture) -> Task {
let task_fut = TaskFuture::Futures1(executor::spawn(future));
let inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: Some(task_fut),
});
Task { ptr: Box::into_raw(inner) }
}
/// Create a new task handle for a futures 0.2 future
#[cfg(feature = "unstable-futures")]
pub fn new2<F>(fut: BoxFuture2, make_waker: F) -> Task
where F: FnOnce(usize) -> futures2::task::Waker
{
let mut inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: None,
});
let waker = make_waker((&*inner) as *const _ as usize);
let tls = futures2::task::LocalMap::new();
inner.future = Some(TaskFuture::Futures2 { waker, tls, fut });
Task { ptr: Box::into_raw(inner) }
}
/// Transmute a u64 to a Task
pub unsafe fn from_notify_id(unpark_id: usize) -> Task {
mem::transmute(unpark_id)
}
/// Transmute a u64 to a task ref
pub unsafe fn from_notify_id_ref<'a>(unpark_id: &'a usize) -> &'a Task {
mem::transmute(unpark_id)
}
/// Execute the task returning `Run::Schedule` if the task needs to be
/// scheduled again.
pub fn run(&self, unpark: &Arc<Notifier>, exec: &mut Sender) -> Run {
use self::State::*;
// Transition task to running state. At this point, the task must be
// scheduled.
let actual: State = self.inner().state.compare_and_swap(
Scheduled.into(), Running.into(), AcqRel).into();
trace!("running; state={:?}", actual);
match actual {
Scheduled => {},
_ => panic!("unexpected task state; {:?}", actual),
}
trace!("Task::run; state={:?}", State::from(self.inner().state.load(Relaxed)));
let fut = &mut self.inner_mut().future;
// This block deals with the future panicking while being polled.
//
// If the future panics, then the drop handler must be called such that
// `thread::panicking() -> true`. To do this, the future is dropped from
// within the catch_unwind block.
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
struct Guard<'a>(&'a mut Option<TaskFuture>, bool);
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
// This drops the future
if self.1 {
let _ = self.0.take();
}
}
}
let mut g = Guard(fut, true);
let ret = g.0.as_mut().unwrap()
.poll(unpark, self.ptr as usize, exec);
g.1 = false;
ret
}));
match res {
Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => {
trace!(" -> task complete");
// Drop the future
self.inner_mut().drop_future();
// Transition to the completed state
self.inner().state.store(State::Complete.into(), Release);
Run::Complete
}
Ok(Ok(Async::NotReady)) => {
trace!(" -> not ready");
                // Attempt to transition from Running -> Idle. If the CAS
                // succeeds, the task does not need to be scheduled again. If
                // the CAS fails, the task was notified concurrently with
                // running, in which case it transitions immediately back to
                // Scheduled and we return `Run::Schedule`.
let prev: State = self.inner().state.compare_and_swap(
Running.into(), Idle.into(), AcqRel).into();
match prev {
Running => Run::Idle,
Notified => {
self.inner().state.store(Scheduled.into(), Release);
Run::Schedule
}
_ => unreachable!(),
}
}
}
}
/// Transition the task state to scheduled.
///
/// Returns `true` if the caller is permitted to schedule the task.
pub fn schedule(&self) -> bool {
use self::State::*;
loop {
let actual = self.inner().state.compare_and_swap(
Idle.into(),
Scheduled.into(),
AcqRel).into();
match actual {
Idle => return true,
Running => {
let actual = self.inner().state.compare_and_swap(
Running.into(), Notified.into(), AcqRel).into();
match actual {
Idle => continue,
_ => return false,
}
}
Complete | Notified | Scheduled => return false,
}
}
}
#[inline]
fn inner(&self) -> &Inner {
unsafe { &*self.ptr }
}
#[inline]
fn inner_mut(&self) -> &mut Inner {
unsafe { &mut *self.ptr }
}
}
impl fmt::Debug for Task {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Task")
.field("inner", self.inner())
.finish()
}
}
impl Clone for Task {
fn clone(&self) -> Task {
use std::isize;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().ref_count.fetch_add(1, Relaxed);
        // However, we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// TODO: abort
panic!();
}
Task { ptr: self.ptr }
}
}
impl Drop for Task {
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().ref_count.fetch_sub(1, Release) != 1 {
return;
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
let _ = Box::from_raw(self.ptr);
}
}
}
unsafe impl Send for Task {}
// ===== impl Inner =====
impl Inner {
fn stub() -> Inner {
Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::stub().into()),
ref_count: AtomicUsize::new(0),
future: Some(TaskFuture::Futures1(executor::spawn(Box::new(future::empty())))),
}
}
fn drop_future(&mut self) {
let _ = self.future.take();
}
}
impl Drop for Inner {
fn drop(&mut self) {
self.drop_future();
}
}
impl fmt::Debug for Inner {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Inner")
.field("next", &self.next)
.field("state", &self.state)
.field("ref_count", &self.ref_count)
.field("future", &"Spawn<BoxFuture>")
.finish()
}
}
// ===== impl Queue =====
impl Queue {
pub fn new() -> Queue {
let stub = Box::new(Inner::stub());
let ptr = &*stub as *const _ as *mut _;
Queue {
head: AtomicPtr::new(ptr),
tail: Cell::new(ptr),
stub: stub,
}
}
pub fn push(&self, handle: Task) {
unsafe {
self.push2(handle.ptr);
// Forgetting the handle is necessary to avoid the ref dec
mem::forget(handle);
}
}
unsafe fn push2(&self, handle: *mut Inner) {
// Set the next pointer. This does not require an atomic operation as
// this node is not accessible. The write will be flushed with the next
// operation
(*handle).next = AtomicPtr::new(ptr::null_mut());
// Update the head to point to the new node. We need to see the previous
// node in order to update the next pointer as well as release `handle`
// to any other threads calling `push`.
let prev = self.head.swap(handle, AcqRel);
// Release `handle` to the consume end.
(*prev).next.store(handle, Release);
}
pub unsafe fn poll(&self) -> Poll {
let mut tail = self.tail.get();
let mut next = (*tail).next.load(Acquire);
let stub = &*self.stub as *const _ as *mut _;
if tail == stub {
if next.is_null() {
return Poll::Empty;
}
self.tail.set(next);
tail = next;
next = (*next).next.load(Acquire);
}
        if !next.is_null() {
self.tail.set(next);
// No ref_count inc is necessary here as this poll is paired
// with a `push` which "forgets" the handle.
return Poll::Data(Task {
ptr: tail,
});
}
        if self.head.load(Acquire) != tail {
return Poll::Inconsistent;
}
self.push2(stub);
next = (*tail).next.load(Acquire);
        if !next.is_null() {
self.tail.set(next);
return Poll::Data(Task {
ptr: tail,
});
}
Poll::Inconsistent
}
}
// ===== impl State =====
impl State {
/// Returns the initial task state.
///
/// Tasks start in the scheduled state as they are immediately scheduled on
/// creation.
fn new() -> State {
State::Scheduled
}
fn stub() -> State |
}
impl From<usize> for State {
fn from(src: usize) -> Self {
use self::State::*;
match src {
0 => Idle,
1 => Running,
2 => Notified,
3 => Scheduled,
4 => Complete,
_ => unreachable!(),
}
}
}
impl From<State> for usize {
fn from(src: State) -> Self {
use self::State::*;
match src {
Idle => 0,
Running => 1,
Notified => 2,
Scheduled => 3,
Complete => 4,
}
}
}
// ===== impl TaskFuture =====
impl TaskFuture {
#[allow(unused_variables)]
fn poll(&mut self, unpark: &Arc<Notifier>, id: usize, exec: &mut Sender) -> futures::Poll<(), ()> {
match *self {
TaskFuture::Futures1(ref mut fut) => fut.poll_future_notify(unpark, id),
#[cfg(feature = "unstable-futures")]
TaskFuture::Futures2 { ref mut fut, ref waker, ref mut tls } => {
let mut cx = futures2::task::Context::new(tls, waker, exec);
match fut.poll(&mut cx).unwrap() {
futures2::Async::Pending => Ok(Async::NotReady),
futures2::Async::Ready(x) => Ok(Async::Ready(x)),
}
}
}
}
}
| {
State::Idle
} | identifier_body |
task.rs | use notifier::Notifier;
use sender::Sender;
use futures::{self, future, Future, Async};
use futures::executor::{self, Spawn};
use std::{fmt, mem, panic, ptr};
use std::cell::Cell;
use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
use std::sync::atomic::Ordering::{AcqRel, Acquire, Release, Relaxed};
#[cfg(feature = "unstable-futures")]
use futures2;
pub(crate) struct Task {
ptr: *mut Inner,
}
#[derive(Debug)]
pub(crate) struct Queue {
head: AtomicPtr<Inner>,
tail: Cell<*mut Inner>,
stub: Box<Inner>,
}
#[derive(Debug)]
pub(crate) enum Poll {
Empty,
Inconsistent,
Data(Task),
}
#[derive(Debug)]
pub(crate) enum Run {
Idle,
Schedule,
Complete,
}
type BoxFuture = Box<Future<Item = (), Error = ()> + Send + 'static>;
#[cfg(feature = "unstable-futures")]
type BoxFuture2 = Box<futures2::Future<Item = (), Error = futures2::Never> + Send>;
enum TaskFuture {
Futures1(Spawn<BoxFuture>),
#[cfg(feature = "unstable-futures")]
Futures2 {
tls: futures2::task::LocalMap,
waker: futures2::task::Waker,
fut: BoxFuture2,
}
}
struct Inner {
// Next pointer in the queue that submits tasks to a worker.
next: AtomicPtr<Inner>,
// Task state
state: AtomicUsize,
// Number of outstanding references to the task
ref_count: AtomicUsize,
// Store the future at the head of the struct
//
// The future is dropped immediately when it transitions to Complete
future: Option<TaskFuture>,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum State {
/// Task is currently idle
Idle,
/// Task is currently running
Running,
/// Task is currently running, but has been notified that it must run again.
Notified,
/// Task has been scheduled
Scheduled,
/// Task is complete
Complete,
}
// ===== impl Task =====
impl Task {
/// Create a new task handle
pub fn new(future: BoxFuture) -> Task {
let task_fut = TaskFuture::Futures1(executor::spawn(future));
let inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: Some(task_fut),
});
Task { ptr: Box::into_raw(inner) }
}
/// Create a new task handle for a futures 0.2 future
#[cfg(feature = "unstable-futures")]
pub fn new2<F>(fut: BoxFuture2, make_waker: F) -> Task
where F: FnOnce(usize) -> futures2::task::Waker
{
let mut inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: None,
});
let waker = make_waker((&*inner) as *const _ as usize);
let tls = futures2::task::LocalMap::new();
inner.future = Some(TaskFuture::Futures2 { waker, tls, fut });
Task { ptr: Box::into_raw(inner) }
}
/// Transmute a u64 to a Task
pub unsafe fn from_notify_id(unpark_id: usize) -> Task {
mem::transmute(unpark_id)
}
/// Transmute a u64 to a task ref
pub unsafe fn from_notify_id_ref<'a>(unpark_id: &'a usize) -> &'a Task {
mem::transmute(unpark_id)
}
/// Execute the task returning `Run::Schedule` if the task needs to be
/// scheduled again.
pub fn run(&self, unpark: &Arc<Notifier>, exec: &mut Sender) -> Run {
use self::State::*;
// Transition task to running state. At this point, the task must be
// scheduled.
let actual: State = self.inner().state.compare_and_swap(
Scheduled.into(), Running.into(), AcqRel).into();
trace!("running; state={:?}", actual);
match actual {
Scheduled => {},
_ => panic!("unexpected task state; {:?}", actual),
}
trace!("Task::run; state={:?}", State::from(self.inner().state.load(Relaxed)));
let fut = &mut self.inner_mut().future;
// This block deals with the future panicking while being polled.
//
// If the future panics, then the drop handler must be called such that
// `thread::panicking() -> true`. To do this, the future is dropped from
// within the catch_unwind block.
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
struct Guard<'a>(&'a mut Option<TaskFuture>, bool);
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
// This drops the future
if self.1 {
let _ = self.0.take();
}
}
}
let mut g = Guard(fut, true);
let ret = g.0.as_mut().unwrap()
.poll(unpark, self.ptr as usize, exec);
g.1 = false;
ret
}));
match res {
Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => {
trace!(" -> task complete");
// Drop the future
self.inner_mut().drop_future();
// Transition to the completed state
self.inner().state.store(State::Complete.into(), Release);
Run::Complete
}
Ok(Ok(Async::NotReady)) => {
trace!(" -> not ready");
                // Attempt to transition from Running -> Idle. If the CAS
                // succeeds, the task does not need to be scheduled again. If
                // the CAS fails, the task was notified concurrently with
                // running, in which case it transitions immediately back to
                // Scheduled and we return `Run::Schedule`.
let prev: State = self.inner().state.compare_and_swap(
Running.into(), Idle.into(), AcqRel).into();
match prev {
Running => Run::Idle,
Notified => {
self.inner().state.store(Scheduled.into(), Release);
Run::Schedule
}
_ => unreachable!(),
}
}
}
}
/// Transition the task state to scheduled.
///
/// Returns `true` if the caller is permitted to schedule the task.
pub fn schedule(&self) -> bool {
use self::State::*;
loop {
let actual = self.inner().state.compare_and_swap(
Idle.into(),
Scheduled.into(),
AcqRel).into();
match actual {
Idle => return true,
Running => {
let actual = self.inner().state.compare_and_swap(
Running.into(), Notified.into(), AcqRel).into();
match actual {
Idle => continue,
_ => return false,
}
}
Complete | Notified | Scheduled => return false,
}
}
}
#[inline]
fn inner(&self) -> &Inner {
unsafe { &*self.ptr }
}
#[inline]
fn inner_mut(&self) -> &mut Inner {
unsafe { &mut *self.ptr }
}
}
impl fmt::Debug for Task {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Task")
.field("inner", self.inner())
.finish()
}
}
impl Clone for Task {
fn clone(&self) -> Task {
use std::isize;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().ref_count.fetch_add(1, Relaxed);
        // However, we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// TODO: abort
panic!();
}
Task { ptr: self.ptr }
}
}
impl Drop for Task {
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().ref_count.fetch_sub(1, Release) != 1 {
return;
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
let _ = Box::from_raw(self.ptr);
}
}
}
unsafe impl Send for Task {} |
impl Inner {
fn stub() -> Inner {
Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::stub().into()),
ref_count: AtomicUsize::new(0),
future: Some(TaskFuture::Futures1(executor::spawn(Box::new(future::empty())))),
}
}
fn drop_future(&mut self) {
let _ = self.future.take();
}
}
impl Drop for Inner {
fn drop(&mut self) {
self.drop_future();
}
}
impl fmt::Debug for Inner {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Inner")
.field("next", &self.next)
.field("state", &self.state)
.field("ref_count", &self.ref_count)
.field("future", &"Spawn<BoxFuture>")
.finish()
}
}
// ===== impl Queue =====
impl Queue {
pub fn new() -> Queue {
let stub = Box::new(Inner::stub());
let ptr = &*stub as *const _ as *mut _;
Queue {
head: AtomicPtr::new(ptr),
tail: Cell::new(ptr),
stub: stub,
}
}
pub fn push(&self, handle: Task) {
unsafe {
self.push2(handle.ptr);
// Forgetting the handle is necessary to avoid the ref dec
mem::forget(handle);
}
}
unsafe fn push2(&self, handle: *mut Inner) {
// Set the next pointer. This does not require an atomic operation as
// this node is not accessible. The write will be flushed with the next
// operation
(*handle).next = AtomicPtr::new(ptr::null_mut());
// Update the head to point to the new node. We need to see the previous
// node in order to update the next pointer as well as release `handle`
// to any other threads calling `push`.
let prev = self.head.swap(handle, AcqRel);
// Release `handle` to the consume end.
(*prev).next.store(handle, Release);
}
pub unsafe fn poll(&self) -> Poll {
let mut tail = self.tail.get();
let mut next = (*tail).next.load(Acquire);
let stub = &*self.stub as *const _ as *mut _;
if tail == stub {
if next.is_null() {
return Poll::Empty;
}
self.tail.set(next);
tail = next;
next = (*next).next.load(Acquire);
}
        if !next.is_null() {
self.tail.set(next);
// No ref_count inc is necessary here as this poll is paired
// with a `push` which "forgets" the handle.
return Poll::Data(Task {
ptr: tail,
});
}
        if self.head.load(Acquire) != tail {
return Poll::Inconsistent;
}
self.push2(stub);
next = (*tail).next.load(Acquire);
        if !next.is_null() {
self.tail.set(next);
return Poll::Data(Task {
ptr: tail,
});
}
Poll::Inconsistent
}
}
// ===== impl State =====
impl State {
/// Returns the initial task state.
///
/// Tasks start in the scheduled state as they are immediately scheduled on
/// creation.
fn new() -> State {
State::Scheduled
}
fn stub() -> State {
State::Idle
}
}
impl From<usize> for State {
fn from(src: usize) -> Self {
use self::State::*;
match src {
0 => Idle,
1 => Running,
2 => Notified,
3 => Scheduled,
4 => Complete,
_ => unreachable!(),
}
}
}
impl From<State> for usize {
fn from(src: State) -> Self {
use self::State::*;
match src {
Idle => 0,
Running => 1,
Notified => 2,
Scheduled => 3,
Complete => 4,
}
}
}
// ===== impl TaskFuture =====
impl TaskFuture {
#[allow(unused_variables)]
fn poll(&mut self, unpark: &Arc<Notifier>, id: usize, exec: &mut Sender) -> futures::Poll<(), ()> {
match *self {
TaskFuture::Futures1(ref mut fut) => fut.poll_future_notify(unpark, id),
#[cfg(feature = "unstable-futures")]
TaskFuture::Futures2 { ref mut fut, ref waker, ref mut tls } => {
let mut cx = futures2::task::Context::new(tls, waker, exec);
match fut.poll(&mut cx).unwrap() {
futures2::Async::Pending => Ok(Async::NotReady),
futures2::Async::Ready(x) => Ok(Async::Ready(x)),
}
}
}
}
} |
// ===== impl Inner ===== | random_line_split |
file_hook.rs | use std::ffi::CStr;
use std::ptr::null_mut;
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use libc::c_void;
use super::scr;
use super::thiscall::Thiscall;
pub fn open_file_hook(
out: *mut scr::FileHandle,
path: *const u8,
params: *const scr::OpenParams,
orig: unsafe extern fn(
*mut scr::FileHandle, *const u8, *const scr::OpenParams,
) -> *mut scr::FileHandle,
) -> *mut scr::FileHandle {
unsafe {
let mut buffer = ArrayVec::new();
let real = real_path(path, params, &mut buffer);
if let Some(path) = real {
let is_sd = (*params).file_type == 1;
            if !is_sd {
if let Some(patched) = check_dummied_out_hd(path) {
memory_buffer_to_bw_file_handle(patched, out);
return out;
}
}
}
orig(out, path, params)
}
}
static DUMMY_ANIM: &[u8] = include_bytes!("../../files/dummy.anim");
static DUMMY_DDSGRP: &[u8] = &[0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x10];
static DUMMY_SKINS: &[u8] = br#"{"skins":[]}"#;
fn check_dummied_out_hd(path: &[u8]) -> Option<&'static [u8]> {
if path.ends_with(b".anim") {
// Avoid touching tileset/foliage.anim
if path.starts_with(b"anim/") {
return Some(DUMMY_ANIM);
}
} else if path.ends_with(b".dds") {
// Font dds files are used (only) in SD, but they aren't loaded
// on file param SD.
if!path.starts_with(b"font/") {
// Anim happens to have a dds inside it :)
let dummy_dds = &DUMMY_ANIM[0x174..];
return Some(dummy_dds);
}
} else if path.ends_with(b".dds.vr4") {
return Some(DUMMY_DDSGRP);
} else if path.ends_with(b".dds.grp") {
// Avoid tileset.dds.grps, they need their frames
if path.starts_with(b"unit/") || path.starts_with(b"effect/") {
return Some(DUMMY_DDSGRP);
}
} else if path == b"anim/skins.json" {
return Some(DUMMY_SKINS);
}
None
}
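// A few illustrative probes of the rules above (hypothetical paths, not
// taken from the game's real file list):
//
//     check_dummied_out_hd(b"anim/main_049.anim");      // Some(DUMMY_ANIM)
//     check_dummied_out_hd(b"tileset/foliage.anim");    // None: only anim/*
//     check_dummied_out_hd(b"unit/cmdicons.dds.grp");   // Some(DUMMY_DDSGRP)
//     check_dummied_out_hd(b"font/font16.dds");         // None: fonts kept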
/// If `params` has a file extension set, it will override whatever
/// extension `path` has.
///
/// Why it is done like that, I have no idea.
///
/// This function also normalizes to ascii lowercase and replaces any '\\' with '/'
unsafe fn real_path<'a>(
path: *const u8,
params: *const scr::OpenParams,
buffer: &'a mut ArrayVec<[u8; 256]>,
) -> Option<&'a [u8]> {
let c_path = CStr::from_ptr(path as *const i8);
let c_path = c_path.to_bytes();
let alt_extension = if (*params).extension.is_null() {
None
} else {
Some(CStr::from_ptr((*params).extension as *const i8))
};
let c_path_for_switched_extension = match alt_extension.is_some() {
true => match c_path.iter().rev().position(|&x| x == b'.') {
Some(period) => &c_path[..c_path.len() - period - 1],
None => c_path,
},
false => c_path,
};
if let Err(_) = buffer.try_extend_from_slice(c_path_for_switched_extension) {
return None;
}
if let Some(ext) = alt_extension {
if let Err(_) = buffer.try_extend_from_slice(ext.to_bytes()) {
return None;
}
}
let slice = &mut buffer[..];
for val in slice.iter_mut() {
match *val {
            b'A'..=b'Z' => {
*val = b'a' + (*val - b'A');
}
b'\\' => {
*val = b'/';
}
_ => (),
}
}
Some(slice)
}
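// Illustrative input/output pairs for `real_path` (hypothetical values):
//
//     path = "Anim\\Main_049.grp", (*params).extension = ".anim"
//         -> Some(b"anim/main_049.anim")  // extension swapped, lowercased,
//                                         // '\\' replaced with '/'
//     path = "font/Font16.dds", (*params).extension = null
//         -> Some(b"font/font16.dds")     // only normalized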
unsafe fn memory_buffer_to_bw_file_handle(buffer: &'static [u8], handle: *mut scr::FileHandle) {
let inner = Box::new(FileAllocation {
file: FileState {
buffer,
pos: 0,
},
read: scr::FileRead {
vtable: &*FILE_READ_VTABLE,
inner: null_mut(),
},
peek: scr::FilePeek {
vtable: &*FILE_PEEK_VTABLE,
inner: null_mut(),
},
metadata: scr::FileMetadata {
vtable: &*FILE_METADATA_VTABLE,
inner: null_mut(),
},
});
let inner_ptr = Box::into_raw(inner);
(*inner_ptr).metadata.inner = inner_ptr as *mut c_void;
(*inner_ptr).peek.inner = inner_ptr as *mut c_void;
(*inner_ptr).read.inner = inner_ptr as *mut c_void;
let close_callback = scr::Function {
vtable: &*FUNCTION_VTABLE,
inner: inner_ptr as *mut c_void,
};
*handle = scr::FileHandle {
vtable: &*FILE_HANDLE_VTABLE1,
vtable2: &*FILE_HANDLE_VTABLE2,
vtable3: &*FILE_HANDLE_VTABLE3,
metadata: &mut (*inner_ptr).metadata,
peek: &mut (*inner_ptr).peek,
read: &mut (*inner_ptr).read,
file_ok: 1,
close_callback,
};
}
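// Rough lifecycle of the fake handle built above. The `inner`
// back-pointers are filled in only after `Box::into_raw`, once the
// `FileAllocation` has a stable address:
//
//     memory_buffer_to_bw_file_handle(DUMMY_SKINS, out);
//     // BW now reads through the `read`/`peek`/`metadata` vtables as if
//     // the buffer were a real file, and eventually invokes
//     // `close_callback`, whose `close_file` reclaims the allocation
//     // with `Box::from_raw`.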
struct FileAllocation {
file: FileState,
read: scr::FileRead,
peek: scr::FilePeek,
metadata: scr::FileMetadata,
}
struct FileState {
buffer: &'static [u8],
pos: u32,
}
lazy_static! {
static ref FILE_HANDLE_VTABLE1: scr::V_FileHandle1 = scr::V_FileHandle1 {
destroy: Thiscall::new(file_handle_destroy_nop),
read: Thiscall::new(read_file_wrap),
skip: Thiscall::new(skip_wrap),
safety_padding: [0; 0x20],
};
static ref FILE_HANDLE_VTABLE2: scr::V_FileHandle2 = scr::V_FileHandle2 {
unk0: [0; 1],
peek: Thiscall::new(peek_wrap),
safety_padding: [0; 0x20],
};
static ref FILE_HANDLE_VTABLE3: scr::V_FileHandle3 = scr::V_FileHandle3 {
unk0: [0; 1],
tell: Thiscall::new(tell_wrap),
seek: Thiscall::new(seek_wrap),
file_size: Thiscall::new(file_size_wrap),
safety_padding: [0; 0x20],
};
static ref FILE_METADATA_VTABLE: scr::V_FileMetadata = scr::V_FileMetadata {
unk0: [0; 1],
tell: Thiscall::new(tell),
seek: Thiscall::new(seek),
file_size: Thiscall::new(file_size),
safety_padding: [0; 0x20],
};
static ref FILE_READ_VTABLE: scr::V_FileRead = scr::V_FileRead {
destroy: 0,
read: Thiscall::new(read_file),
skip: Thiscall::new(skip),
safety_padding: [0; 0x20],
};
static ref FILE_PEEK_VTABLE: scr::V_FilePeek = scr::V_FilePeek {
destroy: 0,
peek: Thiscall::new(peek),
safety_padding: [0; 0x20],
};
static ref FUNCTION_VTABLE: scr::V_Function = scr::V_Function {
destroy_inner: Thiscall::new(function_nop_destory),
invoke: Thiscall::new(close_file),
get_sizes: Thiscall::new(function_object_size),
copy: Thiscall::new(function_copy),
copy2: Thiscall::new(function_copy),
safety_padding: [0; 0x20],
};
}
unsafe extern fn file_handle_destroy_nop(_file: *mut scr::FileHandle, _dyn_free: u32) {
}
unsafe extern fn function_nop_destory(_file: *mut scr::Function, _unk: u32) {
}
unsafe extern fn function_object_size(
_file: *mut scr::Function,
size: *mut u32,
) {
*size = 0xc;
*size.add(1) = 0x4;
*(size.add(2) as *mut u8) = 0x1;
}
unsafe extern fn function_copy(this: *mut scr::Function, other: *mut scr::Function) {
*other = *this;
}
unsafe extern fn read_file_wrap(file: *mut scr::FileHandle, out: *mut u8, size: u32) -> u32 {
let read = (*file).read;
let vtable = (*read).vtable;
(*vtable).read.call3(read, out, size)
}
unsafe extern fn skip_wrap(file: *mut scr::FileHandle, size: u32) |
unsafe extern fn read_file(file: *mut scr::FileRead, out: *mut u8, size: u32) -> u32 {
let file = (*file).inner as *mut FileAllocation;
let buf = std::slice::from_raw_parts_mut(out, size as usize);
(*file).file.read(buf)
}
unsafe extern fn skip(file: *mut scr::FileRead, size: u32) {
let file = (*file).inner as *mut FileAllocation;
let pos = (*file).file.tell();
(*file).file.seek(pos.saturating_add(size));
}
unsafe extern fn peek_wrap(file: *mut c_void, out: *mut u8, size: u32) -> u32 {
let file = (file as usize - 4) as *mut scr::FileHandle;
let peek = (*file).peek;
let vtable = (*peek).vtable;
(*vtable).peek.call3(peek, out, size)
}
unsafe extern fn peek(file: *mut scr::FilePeek, out: *mut u8, size: u32) -> u32 {
let file = (*file).inner as *mut FileAllocation;
let buf = std::slice::from_raw_parts_mut(out, size as usize);
let old_pos = (*file).file.tell();
let result = (*file).file.read(buf);
(*file).file.seek(old_pos);
result
}
unsafe extern fn tell_wrap(file: *mut c_void) -> u32 {
let file = (file as usize - 8) as *mut scr::FileHandle;
let metadata = (*file).metadata;
let vtable = (*metadata).vtable;
(*vtable).tell.call1(metadata)
}
unsafe extern fn seek_wrap(file: *mut c_void, pos: u32) {
let file = (file as usize - 8) as *mut scr::FileHandle;
let metadata = (*file).metadata;
let vtable = (*metadata).vtable;
(*vtable).seek.call2(metadata, pos)
}
unsafe extern fn file_size_wrap(file: *mut c_void) -> u32 {
let file = (file as usize - 8) as *mut scr::FileHandle;
let metadata = (*file).metadata;
let vtable = (*metadata).vtable;
(*vtable).file_size.call1(metadata)
}
unsafe extern fn tell(file: *mut scr::FileMetadata) -> u32 {
let file = (*file).inner as *mut FileAllocation;
(*file).file.tell()
}
unsafe extern fn seek(file: *mut scr::FileMetadata, pos: u32) {
let file = (*file).inner as *mut FileAllocation;
(*file).file.seek(pos);
}
unsafe extern fn file_size(file: *mut scr::FileMetadata) -> u32 {
let file = (*file).inner as *mut FileAllocation;
(*file).file.size()
}
unsafe extern fn close_file(this: *mut scr::Function) {
let file = (*this).inner as *mut FileAllocation;
// Hopefully ok?
Box::from_raw(file);
}
impl FileState {
pub fn tell(&self) -> u32 {
self.pos
}
pub fn seek(&mut self, pos: u32) {
self.pos = pos;
}
pub fn size(&self) -> u32 {
self.buffer.len() as u32
}
pub fn read(&mut self, out: &mut [u8]) -> u32 {
let buffer = &self.buffer[self.pos as usize..];
let read_len = out.len().min(buffer.len());
(&mut out[..read_len]).copy_from_slice(&buffer[..read_len]);
self.pos += read_len as u32;
read_len as u32
}
}
| {
let read = (*file).read;
let vtable = (*read).vtable;
(*vtable).skip.call2(read, size)
} | identifier_body |
file_hook.rs | use std::ffi::CStr;
use std::ptr::null_mut;
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use libc::c_void;
use super::scr;
use super::thiscall::Thiscall;
pub fn open_file_hook(
out: *mut scr::FileHandle,
path: *const u8,
params: *const scr::OpenParams,
orig: unsafe extern fn(
*mut scr::FileHandle, *const u8, *const scr::OpenParams,
) -> *mut scr::FileHandle,
) -> *mut scr::FileHandle {
unsafe {
let mut buffer = ArrayVec::new();
let real = real_path(path, params, &mut buffer);
if let Some(path) = real {
let is_sd = (*params).file_type == 1;
            if !is_sd {
if let Some(patched) = check_dummied_out_hd(path) {
memory_buffer_to_bw_file_handle(patched, out);
return out;
}
}
}
orig(out, path, params)
}
}
static DUMMY_ANIM: &[u8] = include_bytes!("../../files/dummy.anim");
static DUMMY_DDSGRP: &[u8] = &[0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x10];
static DUMMY_SKINS: &[u8] = br#"{"skins":[]}"#;
fn check_dummied_out_hd(path: &[u8]) -> Option<&'static [u8]> {
if path.ends_with(b".anim") {
// Avoid touching tileset/foliage.anim
if path.starts_with(b"anim/") {
return Some(DUMMY_ANIM);
}
} else if path.ends_with(b".dds") {
// Font dds files are used (only) in SD, but they aren't loaded
// on file param SD.
if!path.starts_with(b"font/") {
// Anim happens to have a dds inside it :)
let dummy_dds = &DUMMY_ANIM[0x174..];
return Some(dummy_dds);
}
} else if path.ends_with(b".dds.vr4") {
return Some(DUMMY_DDSGRP);
} else if path.ends_with(b".dds.grp") {
// Avoid tileset.dds.grps, they need their frames
if path.starts_with(b"unit/") || path.starts_with(b"effect/") {
return Some(DUMMY_DDSGRP);
}
} else if path == b"anim/skins.json" {
return Some(DUMMY_SKINS);
}
None
}
/// If `params` has a file extension set, it will override whatever
/// extension `path` has.
///
/// Why it is done like that, I have no idea.
///
/// This function also normalizes to ascii lowercase and replaces any '\\' with '/'
unsafe fn real_path<'a>(
path: *const u8,
params: *const scr::OpenParams,
buffer: &'a mut ArrayVec<[u8; 256]>,
) -> Option<&'a [u8]> {
let c_path = CStr::from_ptr(path as *const i8);
let c_path = c_path.to_bytes();
let alt_extension = if (*params).extension.is_null() {
None
} else {
Some(CStr::from_ptr((*params).extension as *const i8))
};
| true => match c_path.iter().rev().position(|&x| x == b'.') {
Some(period) => &c_path[..c_path.len() - period - 1],
None => c_path,
},
false => c_path,
};
if let Err(_) = buffer.try_extend_from_slice(c_path_for_switched_extension) {
return None;
}
if let Some(ext) = alt_extension {
if let Err(_) = buffer.try_extend_from_slice(ext.to_bytes()) {
return None;
}
}
let slice = &mut buffer[..];
for val in slice.iter_mut() {
match *val {
            b'A'..=b'Z' => {
*val = b'a' + (*val - b'A');
}
b'\\' => {
*val = b'/';
}
_ => (),
}
}
Some(slice)
}
unsafe fn memory_buffer_to_bw_file_handle(buffer: &'static [u8], handle: *mut scr::FileHandle) {
let inner = Box::new(FileAllocation {
file: FileState {
buffer,
pos: 0,
},
read: scr::FileRead {
vtable: &*FILE_READ_VTABLE,
inner: null_mut(),
},
peek: scr::FilePeek {
vtable: &*FILE_PEEK_VTABLE,
inner: null_mut(),
},
metadata: scr::FileMetadata {
vtable: &*FILE_METADATA_VTABLE,
inner: null_mut(),
},
});
let inner_ptr = Box::into_raw(inner);
(*inner_ptr).metadata.inner = inner_ptr as *mut c_void;
(*inner_ptr).peek.inner = inner_ptr as *mut c_void;
(*inner_ptr).read.inner = inner_ptr as *mut c_void;
let close_callback = scr::Function {
vtable: &*FUNCTION_VTABLE,
inner: inner_ptr as *mut c_void,
};
*handle = scr::FileHandle {
vtable: &*FILE_HANDLE_VTABLE1,
vtable2: &*FILE_HANDLE_VTABLE2,
vtable3: &*FILE_HANDLE_VTABLE3,
metadata: &mut (*inner_ptr).metadata,
peek: &mut (*inner_ptr).peek,
read: &mut (*inner_ptr).read,
file_ok: 1,
close_callback,
};
}
struct FileAllocation {
file: FileState,
read: scr::FileRead,
peek: scr::FilePeek,
metadata: scr::FileMetadata,
}
struct FileState {
buffer: &'static [u8],
pos: u32,
}
lazy_static! {
static ref FILE_HANDLE_VTABLE1: scr::V_FileHandle1 = scr::V_FileHandle1 {
destroy: Thiscall::new(file_handle_destroy_nop),
read: Thiscall::new(read_file_wrap),
skip: Thiscall::new(skip_wrap),
safety_padding: [0; 0x20],
};
static ref FILE_HANDLE_VTABLE2: scr::V_FileHandle2 = scr::V_FileHandle2 {
unk0: [0; 1],
peek: Thiscall::new(peek_wrap),
safety_padding: [0; 0x20],
};
static ref FILE_HANDLE_VTABLE3: scr::V_FileHandle3 = scr::V_FileHandle3 {
unk0: [0; 1],
tell: Thiscall::new(tell_wrap),
seek: Thiscall::new(seek_wrap),
file_size: Thiscall::new(file_size_wrap),
safety_padding: [0; 0x20],
};
static ref FILE_METADATA_VTABLE: scr::V_FileMetadata = scr::V_FileMetadata {
unk0: [0; 1],
tell: Thiscall::new(tell),
seek: Thiscall::new(seek),
file_size: Thiscall::new(file_size),
safety_padding: [0; 0x20],
};
static ref FILE_READ_VTABLE: scr::V_FileRead = scr::V_FileRead {
destroy: 0,
read: Thiscall::new(read_file),
skip: Thiscall::new(skip),
safety_padding: [0; 0x20],
};
static ref FILE_PEEK_VTABLE: scr::V_FilePeek = scr::V_FilePeek {
destroy: 0,
peek: Thiscall::new(peek),
safety_padding: [0; 0x20],
};
static ref FUNCTION_VTABLE: scr::V_Function = scr::V_Function {
destroy_inner: Thiscall::new(function_nop_destory),
invoke: Thiscall::new(close_file),
get_sizes: Thiscall::new(function_object_size),
copy: Thiscall::new(function_copy),
copy2: Thiscall::new(function_copy),
safety_padding: [0; 0x20],
};
}
unsafe extern fn file_handle_destroy_nop(_file: *mut scr::FileHandle, _dyn_free: u32) {
}
unsafe extern fn function_nop_destory(_file: *mut scr::Function, _unk: u32) {
}
unsafe extern fn function_object_size(
_file: *mut scr::Function,
size: *mut u32,
) {
*size = 0xc;
*size.add(1) = 0x4;
*(size.add(2) as *mut u8) = 0x1;
}
unsafe extern fn function_copy(this: *mut scr::Function, other: *mut scr::Function) {
*other = *this;
}
unsafe extern fn read_file_wrap(file: *mut scr::FileHandle, out: *mut u8, size: u32) -> u32 {
let read = (*file).read;
let vtable = (*read).vtable;
(*vtable).read.call3(read, out, size)
}
unsafe extern fn skip_wrap(file: *mut scr::FileHandle, size: u32) {
let read = (*file).read;
let vtable = (*read).vtable;
(*vtable).skip.call2(read, size)
}
unsafe extern fn read_file(file: *mut scr::FileRead, out: *mut u8, size: u32) -> u32 {
let file = (*file).inner as *mut FileAllocation;
let buf = std::slice::from_raw_parts_mut(out, size as usize);
(*file).file.read(buf)
}
unsafe extern fn skip(file: *mut scr::FileRead, size: u32) {
let file = (*file).inner as *mut FileAllocation;
let pos = (*file).file.tell();
(*file).file.seek(pos.saturating_add(size));
}
unsafe extern fn peek_wrap(file: *mut c_void, out: *mut u8, size: u32) -> u32 {
let file = (file as usize - 4) as *mut scr::FileHandle;
let peek = (*file).peek;
let vtable = (*peek).vtable;
(*vtable).peek.call3(peek, out, size)
}
unsafe extern fn peek(file: *mut scr::FilePeek, out: *mut u8, size: u32) -> u32 {
let file = (*file).inner as *mut FileAllocation;
let buf = std::slice::from_raw_parts_mut(out, size as usize);
let old_pos = (*file).file.tell();
let result = (*file).file.read(buf);
(*file).file.seek(old_pos);
result
}
unsafe extern fn tell_wrap(file: *mut c_void) -> u32 {
let file = (file as usize - 8) as *mut scr::FileHandle;
let metadata = (*file).metadata;
let vtable = (*metadata).vtable;
(*vtable).tell.call1(metadata)
}
unsafe extern fn seek_wrap(file: *mut c_void, pos: u32) {
let file = (file as usize - 8) as *mut scr::FileHandle;
let metadata = (*file).metadata;
let vtable = (*metadata).vtable;
(*vtable).seek.call2(metadata, pos)
}
unsafe extern fn file_size_wrap(file: *mut c_void) -> u32 {
let file = (file as usize - 8) as *mut scr::FileHandle;
let metadata = (*file).metadata;
let vtable = (*metadata).vtable;
(*vtable).file_size.call1(metadata)
}
unsafe extern fn tell(file: *mut scr::FileMetadata) -> u32 {
let file = (*file).inner as *mut FileAllocation;
(*file).file.tell()
}
unsafe extern fn seek(file: *mut scr::FileMetadata, pos: u32) {
let file = (*file).inner as *mut FileAllocation;
(*file).file.seek(pos);
}
unsafe extern fn file_size(file: *mut scr::FileMetadata) -> u32 {
let file = (*file).inner as *mut FileAllocation;
(*file).file.size()
}
unsafe extern fn close_file(this: *mut scr::Function) {
let file = (*this).inner as *mut FileAllocation;
// Hopefully ok?
Box::from_raw(file);
}
impl FileState {
pub fn tell(&self) -> u32 {
self.pos
}
pub fn seek(&mut self, pos: u32) {
self.pos = pos;
}
pub fn size(&self) -> u32 {
self.buffer.len() as u32
}
pub fn read(&mut self, out: &mut [u8]) -> u32 {
let buffer = &self.buffer[self.pos as usize..];
let read_len = out.len().min(buffer.len());
(&mut out[..read_len]).copy_from_slice(&buffer[..read_len]);
self.pos += read_len as u32;
read_len as u32
}
} | let c_path_for_switched_extension = match alt_extension.is_some() { | random_line_split |
file_hook.rs | use std::ffi::CStr;
use std::ptr::null_mut;
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use libc::c_void;
use super::scr;
use super::thiscall::Thiscall;
pub fn open_file_hook(
out: *mut scr::FileHandle,
path: *const u8,
params: *const scr::OpenParams,
orig: unsafe extern fn(
*mut scr::FileHandle, *const u8, *const scr::OpenParams,
) -> *mut scr::FileHandle,
) -> *mut scr::FileHandle {
unsafe {
let mut buffer = ArrayVec::new();
let real = real_path(path, params, &mut buffer);
if let Some(path) = real {
let is_sd = (*params).file_type == 1;
            if !is_sd {
if let Some(patched) = check_dummied_out_hd(path) {
memory_buffer_to_bw_file_handle(patched, out);
return out;
}
}
}
orig(out, path, params)
}
}
static DUMMY_ANIM: &[u8] = include_bytes!("../../files/dummy.anim");
static DUMMY_DDSGRP: &[u8] = &[0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x10];
static DUMMY_SKINS: &[u8] = br#"{"skins":[]}"#;
fn check_dummied_out_hd(path: &[u8]) -> Option<&'static [u8]> {
if path.ends_with(b".anim") {
// Avoid touching tileset/foliage.anim
if path.starts_with(b"anim/") {
return Some(DUMMY_ANIM);
}
} else if path.ends_with(b".dds") {
// Font dds files are used (only) in SD, but they aren't loaded
// on file param SD.
if!path.starts_with(b"font/") {
// Anim happens to have a dds inside it :)
let dummy_dds = &DUMMY_ANIM[0x174..];
return Some(dummy_dds);
}
} else if path.ends_with(b".dds.vr4") {
return Some(DUMMY_DDSGRP);
} else if path.ends_with(b".dds.grp") {
// Avoid tileset.dds.grps, they need their frames
if path.starts_with(b"unit/") || path.starts_with(b"effect/") {
return Some(DUMMY_DDSGRP);
}
} else if path == b"anim/skins.json" {
return Some(DUMMY_SKINS);
}
None
}
/// If `params` has a file extension set, it will override whatever
/// extension `path` has.
///
/// Why it is done like that, I have no idea.
///
/// This function also normalizes to ascii lowercase and replaces any '\\' with '/'
unsafe fn real_path<'a>(
path: *const u8,
params: *const scr::OpenParams,
buffer: &'a mut ArrayVec<[u8; 256]>,
) -> Option<&'a [u8]> {
let c_path = CStr::from_ptr(path as *const i8);
let c_path = c_path.to_bytes();
let alt_extension = if (*params).extension.is_null() {
None
} else {
Some(CStr::from_ptr((*params).extension as *const i8))
};
let c_path_for_switched_extension = match alt_extension.is_some() {
true => match c_path.iter().rev().position(|&x| x == b'.') {
Some(period) => &c_path[..c_path.len() - period - 1],
None => c_path,
},
false => c_path,
};
if let Err(_) = buffer.try_extend_from_slice(c_path_for_switched_extension) {
return None;
}
if let Some(ext) = alt_extension {
if let Err(_) = buffer.try_extend_from_slice(ext.to_bytes()) {
return None;
}
}
let slice = &mut buffer[..];
for val in slice.iter_mut() {
match *val {
            b'A'..=b'Z' => {
*val = b'a' + (*val - b'A');
}
b'\\' => {
*val = b'/';
}
_ => (),
}
}
Some(slice)
}
unsafe fn | (buffer: &'static [u8], handle: *mut scr::FileHandle) {
let inner = Box::new(FileAllocation {
file: FileState {
buffer,
pos: 0,
},
read: scr::FileRead {
vtable: &*FILE_READ_VTABLE,
inner: null_mut(),
},
peek: scr::FilePeek {
vtable: &*FILE_PEEK_VTABLE,
inner: null_mut(),
},
metadata: scr::FileMetadata {
vtable: &*FILE_METADATA_VTABLE,
inner: null_mut(),
},
});
let inner_ptr = Box::into_raw(inner);
(*inner_ptr).metadata.inner = inner_ptr as *mut c_void;
(*inner_ptr).peek.inner = inner_ptr as *mut c_void;
(*inner_ptr).read.inner = inner_ptr as *mut c_void;
let close_callback = scr::Function {
vtable: &*FUNCTION_VTABLE,
inner: inner_ptr as *mut c_void,
};
*handle = scr::FileHandle {
vtable: &*FILE_HANDLE_VTABLE1,
vtable2: &*FILE_HANDLE_VTABLE2,
vtable3: &*FILE_HANDLE_VTABLE3,
metadata: &mut (*inner_ptr).metadata,
peek: &mut (*inner_ptr).peek,
read: &mut (*inner_ptr).read,
file_ok: 1,
close_callback,
};
}
struct FileAllocation {
file: FileState,
read: scr::FileRead,
peek: scr::FilePeek,
metadata: scr::FileMetadata,
}
struct FileState {
buffer: &'static [u8],
pos: u32,
}
lazy_static! {
static ref FILE_HANDLE_VTABLE1: scr::V_FileHandle1 = scr::V_FileHandle1 {
destroy: Thiscall::new(file_handle_destroy_nop),
read: Thiscall::new(read_file_wrap),
skip: Thiscall::new(skip_wrap),
safety_padding: [0; 0x20],
};
static ref FILE_HANDLE_VTABLE2: scr::V_FileHandle2 = scr::V_FileHandle2 {
unk0: [0; 1],
peek: Thiscall::new(peek_wrap),
safety_padding: [0; 0x20],
};
static ref FILE_HANDLE_VTABLE3: scr::V_FileHandle3 = scr::V_FileHandle3 {
unk0: [0; 1],
tell: Thiscall::new(tell_wrap),
seek: Thiscall::new(seek_wrap),
file_size: Thiscall::new(file_size_wrap),
safety_padding: [0; 0x20],
};
static ref FILE_METADATA_VTABLE: scr::V_FileMetadata = scr::V_FileMetadata {
unk0: [0; 1],
tell: Thiscall::new(tell),
seek: Thiscall::new(seek),
file_size: Thiscall::new(file_size),
safety_padding: [0; 0x20],
};
static ref FILE_READ_VTABLE: scr::V_FileRead = scr::V_FileRead {
destroy: 0,
read: Thiscall::new(read_file),
skip: Thiscall::new(skip),
safety_padding: [0; 0x20],
};
static ref FILE_PEEK_VTABLE: scr::V_FilePeek = scr::V_FilePeek {
destroy: 0,
peek: Thiscall::new(peek),
safety_padding: [0; 0x20],
};
static ref FUNCTION_VTABLE: scr::V_Function = scr::V_Function {
destroy_inner: Thiscall::new(function_nop_destory),
invoke: Thiscall::new(close_file),
get_sizes: Thiscall::new(function_object_size),
copy: Thiscall::new(function_copy),
copy2: Thiscall::new(function_copy),
safety_padding: [0; 0x20],
};
}
unsafe extern fn file_handle_destroy_nop(_file: *mut scr::FileHandle, _dyn_free: u32) {
}
unsafe extern fn function_nop_destory(_file: *mut scr::Function, _unk: u32) {
}
unsafe extern fn function_object_size(
_file: *mut scr::Function,
size: *mut u32,
) {
*size = 0xc;
*size.add(1) = 0x4;
*(size.add(2) as *mut u8) = 0x1;
}
unsafe extern fn function_copy(this: *mut scr::Function, other: *mut scr::Function) {
*other = *this;
}
unsafe extern fn read_file_wrap(file: *mut scr::FileHandle, out: *mut u8, size: u32) -> u32 {
let read = (*file).read;
let vtable = (*read).vtable;
(*vtable).read.call3(read, out, size)
}
unsafe extern fn skip_wrap(file: *mut scr::FileHandle, size: u32) {
let read = (*file).read;
let vtable = (*read).vtable;
(*vtable).skip.call2(read, size)
}
unsafe extern fn read_file(file: *mut scr::FileRead, out: *mut u8, size: u32) -> u32 {
let file = (*file).inner as *mut FileAllocation;
let buf = std::slice::from_raw_parts_mut(out, size as usize);
(*file).file.read(buf)
}
unsafe extern fn skip(file: *mut scr::FileRead, size: u32) {
let file = (*file).inner as *mut FileAllocation;
let pos = (*file).file.tell();
(*file).file.seek(pos.saturating_add(size));
}
unsafe extern fn peek_wrap(file: *mut c_void, out: *mut u8, size: u32) -> u32 {
let file = (file as usize - 4) as *mut scr::FileHandle;
let peek = (*file).peek;
let vtable = (*peek).vtable;
(*vtable).peek.call3(peek, out, size)
}
unsafe extern fn peek(file: *mut scr::FilePeek, out: *mut u8, size: u32) -> u32 {
let file = (*file).inner as *mut FileAllocation;
let buf = std::slice::from_raw_parts_mut(out, size as usize);
let old_pos = (*file).file.tell();
let result = (*file).file.read(buf);
(*file).file.seek(old_pos);
result
}
unsafe extern fn tell_wrap(file: *mut c_void) -> u32 {
let file = (file as usize - 8) as *mut scr::FileHandle;
let metadata = (*file).metadata;
let vtable = (*metadata).vtable;
(*vtable).tell.call1(metadata)
}
unsafe extern fn seek_wrap(file: *mut c_void, pos: u32) {
let file = (file as usize - 8) as *mut scr::FileHandle;
let metadata = (*file).metadata;
let vtable = (*metadata).vtable;
(*vtable).seek.call2(metadata, pos)
}
unsafe extern fn file_size_wrap(file: *mut c_void) -> u32 {
let file = (file as usize - 8) as *mut scr::FileHandle;
let metadata = (*file).metadata;
let vtable = (*metadata).vtable;
(*vtable).file_size.call1(metadata)
}
unsafe extern fn tell(file: *mut scr::FileMetadata) -> u32 {
let file = (*file).inner as *mut FileAllocation;
(*file).file.tell()
}
unsafe extern fn seek(file: *mut scr::FileMetadata, pos: u32) {
let file = (*file).inner as *mut FileAllocation;
(*file).file.seek(pos);
}
unsafe extern fn file_size(file: *mut scr::FileMetadata) -> u32 {
let file = (*file).inner as *mut FileAllocation;
(*file).file.size()
}
unsafe extern fn close_file(this: *mut scr::Function) {
let file = (*this).inner as *mut FileAllocation;
// Hopefully ok?
Box::from_raw(file);
}
impl FileState {
pub fn tell(&self) -> u32 {
self.pos
}
pub fn seek(&mut self, pos: u32) {
self.pos = pos;
}
pub fn size(&self) -> u32 {
self.buffer.len() as u32
}
pub fn read(&mut self, out: &mut [u8]) -> u32 {
let buffer = &self.buffer[self.pos as usize..];
let read_len = out.len().min(buffer.len());
(&mut out[..read_len]).copy_from_slice(&buffer[..read_len]);
self.pos += read_len as u32;
read_len as u32
}
}
| memory_buffer_to_bw_file_handle | identifier_name |
net.rs | // Copyright 2020 - developers of the `grammers` project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use super::updates::UpdateIter;
use super::{Client, ClientHandle, Config, Request, Step};
use futures::future::FutureExt as _;
use futures::{future, pin_mut};
use grammers_mtproto::{mtp, transport};
use grammers_mtsender::{self as sender, AuthorizationError, InvocationError, Sender};
use grammers_tl_types::{self as tl, Deserializable};
use log::info;
use std::net::Ipv4Addr;
use tokio::sync::{mpsc, oneshot};
/// Socket addresses to Telegram datacenters, where the index into this array
/// represents the data center ID.
///
/// The addresses were obtained from the `static` addresses through a call to
/// `functions::help::GetConfig`.
const DC_ADDRESSES: [(Ipv4Addr, u16); 6] = [
(Ipv4Addr::new(149, 154, 167, 51), 443), // default (2)
(Ipv4Addr::new(149, 154, 175, 53), 443),
(Ipv4Addr::new(149, 154, 167, 51), 443),
(Ipv4Addr::new(149, 154, 175, 100), 443),
(Ipv4Addr::new(149, 154, 167, 92), 443),
(Ipv4Addr::new(91, 108, 56, 190), 443),
];
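// `connect_sender` below indexes this table directly with the session's
// saved DC ID, so a fresh session (no `user_dc` yet) falls back to index
// 0, which holds a copy of the DC 2 address:
//
//     let addr = DC_ADDRESSES[dc_id as usize]; // dc_id == 0 -> 149.154.167.51:443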
pub(crate) async fn connect_sender(
dc_id: i32,
config: &mut Config,
) -> Result<Sender<transport::Full, mtp::Encrypted>, AuthorizationError> {
let transport = transport::Full::new();
let addr = DC_ADDRESSES[dc_id as usize];
let mut sender = if let Some(auth_key) = config.session.auth_key.as_ref() {
info!(
"creating a new sender with existing auth key to dc {} {:?}",
dc_id, addr
);
sender::connect_with_auth(transport, addr, auth_key.clone()).await?
} else {
info!(
"creating a new sender and auth key in dc {} {:?}",
dc_id, addr
);
let sender = sender::connect(transport, addr).await?;
config.session.auth_key = Some(sender.auth_key().clone());
config.session.save()?;
sender
};
// TODO handle -404 (we had a previously-valid authkey, but server no longer knows about it)
// TODO all up-to-date server addresses should be stored in the session for future initial connections
let _remote_config = sender
.invoke(&tl::functions::InvokeWithLayer {
layer: tl::LAYER,
query: tl::functions::InitConnection {
api_id: config.api_id,
device_model: config.params.device_model.clone(),
system_version: config.params.system_version.clone(),
app_version: config.params.app_version.clone(),
system_lang_code: config.params.system_lang_code.clone(),
lang_pack: "".into(),
lang_code: config.params.lang_code.clone(),
proxy: None,
params: None,
query: tl::functions::help::GetConfig {},
},
})
.await?;
// TODO use the dc id from the config as "this dc", not the input dc id
config.session.user_dc = Some(dc_id);
config.session.save()?;
Ok(sender)
}
/// Method implementations directly related with network connectivity.
impl Client {
/// Creates and returns a new client instance upon successful connection to Telegram.
///
/// If the session in the configuration did not have an authorization key, a new one
/// will be created and the session will be saved with it.
///
/// The connection will be initialized with the data from the input configuration.
///
/// # Examples
///
/// ```
/// use grammers_client::{Client, Config};
/// use grammers_session::Session;
///
/// // Note: these are example values and are not actually valid.
/// // Obtain your own with the developer's phone at https://my.telegram.org.
/// const API_ID: i32 = 932939;
/// const API_HASH: &str = "514727c32270b9eb8cc16daf17e21e57";
///
/// # async fn f(mut client: Client) -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::connect(Config {
/// session: Session::load_or_create("hello-world.session")?,
/// api_id: API_ID,
/// api_hash: API_HASH.to_string(),
/// params: Default::default(),
/// }).await?;
/// # Ok(())
/// # }
/// ```
pub async fn connect(mut config: Config) -> Result<Self, AuthorizationError> {
let sender = connect_sender(config.session.user_dc.unwrap_or(0), &mut config).await?;
// TODO Sender doesn't have a way to handle backpressure yet
let (handle_tx, handle_rx) = mpsc::unbounded_channel();
Ok(Self {
sender,
config,
handle_tx,
handle_rx,
})
}
/// Invoke a raw API call without the need to use a [`Client::handle`] or having to repeatedly
/// call [`Client::step`]. This directly sends the request to Telegram's servers.
///
/// Using function definitions corresponding to a different layer is likely to cause the
/// responses to the request to not be understood.
///
/// <div class="stab unstable">
///
/// **Warning**: this method is **not** part of the stability guarantees of semantic
/// versioning. It **may** break during *minor* version changes (but not on patch version
/// changes). Use with care.
///
/// </div>
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_tl_types as tl;
///
/// dbg!(client.invoke(&tl::functions::Ping { ping_id: 0 }).await?);
/// # Ok(())
/// # }
/// ```
pub async fn invoke<R: tl::RemoteCall>(
&mut self,
request: &R,
) -> Result<R::Return, InvocationError> {
self.sender.invoke(request).await
}
/// Return a new [`ClientHandle`] that can be used to invoke remote procedure calls.
///
/// # Examples
///
/// ```
/// use tokio::task;
///
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// // Obtain a handle. After this you can obtain more by using `client_handle.clone()`.
/// let mut client_handle = client.handle();
///
/// // Run the network loop. This is necessary, or no network events will be processed!
/// let network_handle = task::spawn(async move { client.run_until_disconnected().await });
///
/// // Use the `client_handle` to your heart's content, maybe you just want to disconnect:
/// client_handle.disconnect().await;
///
/// // Joining on the spawned task lets us access the result from `run_until_disconnected`,
/// // so we can verify everything went fine. You could also just drop this though.
/// network_handle.await?;
/// # Ok(())
/// # }
    /// ```
pub fn handle(&self) -> ClientHandle {
ClientHandle {
tx: self.handle_tx.clone(),
}
}
/// Perform a single network step or processing of incoming requests via handles.
///
/// If a server message is received, requests enqueued via the [`ClientHandle`]s may have
/// their result delivered via a channel, and a (possibly empty) list of updates will be
/// returned.
///
/// The other return values are graceful disconnection, or a read error.
///
/// Most commonly, you will want to use the higher-level abstraction [`Client::next_updates`]
/// instead.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_client::NetworkStep;
///
/// loop {
/// // Process network events forever until we gracefully disconnect or get an error.
/// match client.step().await? {
    ///         NetworkStep::Connected { .. } => continue,
/// NetworkStep::Disconnected => break,
/// }
/// }
/// # Ok(())
/// # }
/// ```
pub async fn step(&mut self) -> Result<Step, sender::ReadError> {
let (network, request) = {
let network = self.sender.step();
let request = self.handle_rx.recv();
pin_mut!(network);
pin_mut!(request);
match future::select(network, request).await {
future::Either::Left((network, request)) => {
let request = request.now_or_never();
(Some(network), request)
}
future::Either::Right((request, network)) => {
let network = network.now_or_never();
(network, Some(request))
}
}
};
if let Some(request) = request {
let request = request.expect("mpsc returned None");
match request {
Request::Rpc { request, response } => {
// Channel will return `Err` if the `ClientHandle` lost interest, just drop the error.
drop(response.send(self.sender.enqueue_body(request)));
}
Request::Disconnect { response } => {
// Channel will return `Err` if the `ClientHandle` lost interest, just drop the error.
drop(response.send(()));
return Ok(Step::Disconnected);
}
}
}
// TODO request cancellation if this is Err
// (perhaps a method on the sender to cancel_all)
Ok(Step::Connected {
updates: if let Some(updates) = network {
updates?
} else {
Vec::new()
},
})
}
/// Run the client by repeatedly calling [`Client::step`] until a graceful disconnection
    /// occurs, or a network error occurs. Incoming updates are ignored and simply dropped instead.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// client.run_until_disconnected().await?;
/// # Ok(())
/// # }
/// ```
pub async fn run_until_disconnected(mut self) -> Result<(), sender::ReadError> {
loop {
match self.step().await? {
                Step::Connected { .. } => continue,
Step::Disconnected => break Ok(()),
}
}
}
}
/// Method implementations directly related with network connectivity.
impl ClientHandle {
/// Invoke a raw API call.
///
/// Using function definitions corresponding to a different layer is likely to cause the
/// responses to the request to not be understood.
///
/// <div class="stab unstable">
///
/// **Warning**: this method is **not** part of the stability guarantees of semantic
/// versioning. It **may** break during *minor* version changes (but not on patch version
/// changes). Use with care.
///
/// </div>
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::ClientHandle) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_tl_types as tl;
///
/// dbg!(client.invoke(&tl::functions::Ping { ping_id: 0 }).await?);
/// # Ok(())
/// # }
/// ```
pub async fn invoke<R: tl::RemoteCall>(
&mut self,
request: &R,
) -> Result<R::Return, InvocationError> {
let (response, rx) = oneshot::channel();
        // TODO add a test for this (using handle with client dropped)
if let Err(_) = self.tx.send(Request::Rpc {
request: request.to_bytes(),
response,
}) {
// `Client` was dropped, can no longer send requests
return Err(InvocationError::Dropped);
}
        // First receive the `oneshot::Receiver` from the `Client`,
// then `await` on that to receive the response body for the request.
if let Ok(response) = rx.await {
if let Ok(result) = response.await {
match result {
Ok(body) => R::Return::from_bytes(&body).map_err(|e| e.into()),
Err(e) => Err(e),
}
} else {
// `Sender` dropped, won't be receiving a response for this
Err(InvocationError::Dropped)
}
} else {
// `Client` dropped, won't be receiving a response for this
Err(InvocationError::Dropped)
}
}
/// Gracefully tell the [`Client`] that created this handle to disconnect and stop receiving
/// things from the network.
///
/// If the client has already been dropped (and thus disconnected), this method does nothing.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::ClientHandle) {
/// client.disconnect().await;
/// # }
/// ```
pub async fn disconnect(&mut self) {
let (response, rx) = oneshot::channel();
if let Ok(_) = self.tx.send(Request::Disconnect { response }) {
// It's fine to drop errors here, it means the channel was dropped by the `Client`.
drop(rx.await);
} else |
}
}
| {
// `Client` is already dropped, no need to disconnect again.
} | conditional_block |
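The masked `conditional_block` above is the `else` arm of `disconnect`; the surrounding code is the handle/actor pattern: a `ClientHandle` pushes a request plus a `oneshot::Sender` down an unbounded mpsc channel, and the owning `Client` task replies through the oneshot. A hedged, self-contained sketch of that round trip (the `Ping` request type is illustrative, not part of the grammers API; assumes the `tokio` crate with its sync and macros features):

use tokio::sync::{mpsc, oneshot};

enum Request {
    Ping { response: oneshot::Sender<&'static str> },
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();

    // The spawned task plays the `Client` role: it answers queued requests.
    let client = tokio::spawn(async move {
        while let Some(Request::Ping { response }) = rx.recv().await {
            // A dropped receiver just yields Err here; ignore it like the code above.
            drop(response.send("pong"));
        }
    });

    let (response, rx_once) = oneshot::channel();
    assert!(tx.send(Request::Ping { response }).is_ok());
    assert_eq!(rx_once.await.unwrap(), "pong");

    drop(tx); // closing the channel lets the client loop end
    client.await.unwrap();
}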
net.rs | // Copyright 2020 - developers of the `grammers` project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use super::updates::UpdateIter;
use super::{Client, ClientHandle, Config, Request, Step};
use futures::future::FutureExt as _;
use futures::{future, pin_mut};
use grammers_mtproto::{mtp, transport};
use grammers_mtsender::{self as sender, AuthorizationError, InvocationError, Sender};
use grammers_tl_types::{self as tl, Deserializable};
use log::info;
use std::net::Ipv4Addr;
use tokio::sync::{mpsc, oneshot};
/// Socket addresses to Telegram datacenters, where the index into this array
/// represents the data center ID.
///
/// The addresses were obtained from the `static` addresses through a call to
/// `functions::help::GetConfig`.
const DC_ADDRESSES: [(Ipv4Addr, u16); 6] = [
(Ipv4Addr::new(149, 154, 167, 51), 443), // default (2)
(Ipv4Addr::new(149, 154, 175, 53), 443),
(Ipv4Addr::new(149, 154, 167, 51), 443),
(Ipv4Addr::new(149, 154, 175, 100), 443),
(Ipv4Addr::new(149, 154, 167, 92), 443),
(Ipv4Addr::new(91, 108, 56, 190), 443),
];
pub(crate) async fn connect_sender(
dc_id: i32,
config: &mut Config,
) -> Result<Sender<transport::Full, mtp::Encrypted>, AuthorizationError> {
let transport = transport::Full::new();
let addr = DC_ADDRESSES[dc_id as usize];
let mut sender = if let Some(auth_key) = config.session.auth_key.as_ref() {
info!(
"creating a new sender with existing auth key to dc {} {:?}",
dc_id, addr
);
sender::connect_with_auth(transport, addr, auth_key.clone()).await?
} else {
info!(
"creating a new sender and auth key in dc {} {:?}",
dc_id, addr
);
let sender = sender::connect(transport, addr).await?;
config.session.auth_key = Some(sender.auth_key().clone());
config.session.save()?;
sender
};
// TODO handle -404 (we had a previously-valid authkey, but server no longer knows about it)
// TODO all up-to-date server addresses should be stored in the session for future initial connections
let _remote_config = sender
.invoke(&tl::functions::InvokeWithLayer {
layer: tl::LAYER,
query: tl::functions::InitConnection {
api_id: config.api_id,
device_model: config.params.device_model.clone(),
system_version: config.params.system_version.clone(),
app_version: config.params.app_version.clone(),
system_lang_code: config.params.system_lang_code.clone(),
lang_pack: "".into(),
lang_code: config.params.lang_code.clone(),
proxy: None,
params: None,
query: tl::functions::help::GetConfig {},
},
})
.await?;
// TODO use the dc id from the config as "this dc", not the input dc id
config.session.user_dc = Some(dc_id);
config.session.save()?;
Ok(sender)
}
/// Method implementations directly related with network connectivity.
impl Client {
/// Creates and returns a new client instance upon successful connection to Telegram.
///
/// If the session in the configuration did not have an authorization key, a new one
/// will be created and the session will be saved with it.
///
/// The connection will be initialized with the data from the input configuration.
///
/// # Examples
///
/// ```
/// use grammers_client::{Client, Config};
/// use grammers_session::Session;
///
/// // Note: these are example values and are not actually valid.
/// // Obtain your own with the developer's phone at https://my.telegram.org.
/// const API_ID: i32 = 932939;
/// const API_HASH: &str = "514727c32270b9eb8cc16daf17e21e57";
///
/// # async fn f(mut client: Client) -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::connect(Config {
/// session: Session::load_or_create("hello-world.session")?,
/// api_id: API_ID,
/// api_hash: API_HASH.to_string(),
/// params: Default::default(),
/// }).await?;
/// # Ok(())
/// # }
/// ```
pub async fn connect(mut config: Config) -> Result<Self, AuthorizationError> {
let sender = connect_sender(config.session.user_dc.unwrap_or(0), &mut config).await?;
// TODO Sender doesn't have a way to handle backpressure yet
let (handle_tx, handle_rx) = mpsc::unbounded_channel();
Ok(Self {
sender,
config,
handle_tx,
handle_rx,
})
}
/// Invoke a raw API call without the need to use a [`Client::handle`] or having to repeatedly
/// call [`Client::step`]. This directly sends the request to Telegram's servers.
///
/// Using function definitions corresponding to a different layer is likely to cause the
/// responses to the request to not be understood.
///
/// <div class="stab unstable">
///
/// **Warning**: this method is **not** part of the stability guarantees of semantic
/// versioning. It **may** break during *minor* version changes (but not on patch version
/// changes). Use with care.
///
/// </div>
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_tl_types as tl;
///
/// dbg!(client.invoke(&tl::functions::Ping { ping_id: 0 }).await?);
/// # Ok(())
/// # }
/// ```
pub async fn invoke<R: tl::RemoteCall>(
&mut self,
request: &R,
) -> Result<R::Return, InvocationError> {
self.sender.invoke(request).await
}
/// Return a new [`ClientHandle`] that can be used to invoke remote procedure calls.
///
/// # Examples
///
/// ```
/// use tokio::task;
///
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// // Obtain a handle. After this you can obtain more by using `client_handle.clone()`.
/// let mut client_handle = client.handle();
///
/// // Run the network loop. This is necessary, or no network events will be processed!
/// let network_handle = task::spawn(async move { client.run_until_disconnected().await });
///
/// // Use the `client_handle` to your heart's content, maybe you just want to disconnect:
/// client_handle.disconnect().await;
///
/// // Joining on the spawned task lets us access the result from `run_until_disconnected`,
/// // so we can verify everything went fine. You could also just drop this though.
/// network_handle.await?;
/// # Ok(())
/// # }
    /// ```
pub fn handle(&self) -> ClientHandle {
ClientHandle {
tx: self.handle_tx.clone(),
}
}
/// Perform a single network step or processing of incoming requests via handles.
///
/// If a server message is received, requests enqueued via the [`ClientHandle`]s may have
/// their result delivered via a channel, and a (possibly empty) list of updates will be
/// returned.
///
/// The other return values are graceful disconnection, or a read error.
///
/// Most commonly, you will want to use the higher-level abstraction [`Client::next_updates`]
/// instead.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_client::NetworkStep;
///
/// loop {
/// // Process network events forever until we gracefully disconnect or get an error.
/// match client.step().await? {
    ///         NetworkStep::Connected { .. } => continue,
/// NetworkStep::Disconnected => break,
/// }
/// }
/// # Ok(())
/// # }
/// ```
pub async fn step(&mut self) -> Result<Step, sender::ReadError> {
let (network, request) = {
let network = self.sender.step();
let request = self.handle_rx.recv();
pin_mut!(network);
pin_mut!(request);
match future::select(network, request).await {
future::Either::Left((network, request)) => {
let request = request.now_or_never();
(Some(network), request)
}
future::Either::Right((request, network)) => {
let network = network.now_or_never();
(network, Some(request))
}
}
};
if let Some(request) = request {
let request = request.expect("mpsc returned None");
match request {
Request::Rpc { request, response } => {
// Channel will return `Err` if the `ClientHandle` lost interest, just drop the error.
drop(response.send(self.sender.enqueue_body(request)));
}
Request::Disconnect { response } => {
// Channel will return `Err` if the `ClientHandle` lost interest, just drop the error.
drop(response.send(()));
return Ok(Step::Disconnected);
}
}
}
// TODO request cancellation if this is Err
// (perhaps a method on the sender to cancel_all)
Ok(Step::Connected {
updates: if let Some(updates) = network {
updates?
} else {
Vec::new()
},
})
}
/// Run the client by repeatedly calling [`Client::step`] until a graceful disconnection
    /// occurs, or a network error occurs. Incoming updates are ignored and simply dropped instead.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// client.run_until_disconnected().await?;
/// # Ok(())
/// # }
/// ```
pub async fn run_until_disconnected(mut self) -> Result<(), sender::ReadError> {
loop {
match self.step().await? {
                Step::Connected { .. } => continue,
Step::Disconnected => break Ok(()),
}
}
}
}
/// Method implementations directly related with network connectivity.
impl ClientHandle {
/// Invoke a raw API call.
///
/// Using function definitions corresponding to a different layer is likely to cause the
/// responses to the request to not be understood.
///
/// <div class="stab unstable">
///
/// **Warning**: this method is **not** part of the stability guarantees of semantic
/// versioning. It **may** break during *minor* version changes (but not on patch version
/// changes). Use with care.
///
/// </div>
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::ClientHandle) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_tl_types as tl;
///
/// dbg!(client.invoke(&tl::functions::Ping { ping_id: 0 }).await?);
/// # Ok(())
/// # }
/// ```
pub async fn | <R: tl::RemoteCall>(
&mut self,
request: &R,
) -> Result<R::Return, InvocationError> {
let (response, rx) = oneshot::channel();
        // TODO add a test for this (using handle with client dropped)
if let Err(_) = self.tx.send(Request::Rpc {
request: request.to_bytes(),
response,
}) {
// `Client` was dropped, can no longer send requests
return Err(InvocationError::Dropped);
}
        // First receive the `oneshot::Receiver` from the `Client`,
// then `await` on that to receive the response body for the request.
if let Ok(response) = rx.await {
if let Ok(result) = response.await {
match result {
Ok(body) => R::Return::from_bytes(&body).map_err(|e| e.into()),
Err(e) => Err(e),
}
} else {
// `Sender` dropped, won't be receiving a response for this
Err(InvocationError::Dropped)
}
} else {
// `Client` dropped, won't be receiving a response for this
Err(InvocationError::Dropped)
}
}
/// Gracefully tell the [`Client`] that created this handle to disconnect and stop receiving
/// things from the network.
///
/// If the client has already been dropped (and thus disconnected), this method does nothing.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::ClientHandle) {
/// client.disconnect().await;
/// # }
/// ```
pub async fn disconnect(&mut self) {
let (response, rx) = oneshot::channel();
if let Ok(_) = self.tx.send(Request::Disconnect { response }) {
// It's fine to drop errors here, it means the channel was dropped by the `Client`.
drop(rx.await);
} else {
// `Client` is already dropped, no need to disconnect again.
}
}
}
| invoke | identifier_name |
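The masked identifier above is `invoke`, whose body awaits twice: the first oneshot delivers the enqueued request's own response receiver, and awaiting that in turn yields the serialized response body. A self-contained sketch of that nested-oneshot shape (the byte values are illustrative; assumes the `tokio` crate):

use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    // Stage 1: the client acknowledges the RPC with an inner receiver.
    let (ack_tx, ack_rx) = oneshot::channel::<oneshot::Receiver<Vec<u8>>>();
    // Stage 2: the network loop later fulfills the inner channel.
    let (body_tx, body_rx) = oneshot::channel::<Vec<u8>>();

    let _ = ack_tx.send(body_rx);
    let _ = body_tx.send(b"response bytes".to_vec());

    let inner = ack_rx.await.expect("client alive"); // first await
    let body = inner.await.expect("sender alive");   // second await
    assert_eq!(body, b"response bytes");
}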
net.rs | // Copyright 2020 - developers of the `grammers` project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use super::updates::UpdateIter;
use super::{Client, ClientHandle, Config, Request, Step};
use futures::future::FutureExt as _;
use futures::{future, pin_mut};
use grammers_mtproto::{mtp, transport};
use grammers_mtsender::{self as sender, AuthorizationError, InvocationError, Sender};
use grammers_tl_types::{self as tl, Deserializable};
use log::info;
use std::net::Ipv4Addr;
use tokio::sync::{mpsc, oneshot};
/// Socket addresses to Telegram datacenters, where the index into this array
/// represents the data center ID.
///
/// The addresses were obtained from the `static` addresses through a call to
/// `functions::help::GetConfig`.
const DC_ADDRESSES: [(Ipv4Addr, u16); 6] = [
(Ipv4Addr::new(149, 154, 167, 51), 443), // default (2)
(Ipv4Addr::new(149, 154, 175, 53), 443),
(Ipv4Addr::new(149, 154, 167, 51), 443),
(Ipv4Addr::new(149, 154, 175, 100), 443),
(Ipv4Addr::new(149, 154, 167, 92), 443),
(Ipv4Addr::new(91, 108, 56, 190), 443),
];
pub(crate) async fn connect_sender(
dc_id: i32,
config: &mut Config,
) -> Result<Sender<transport::Full, mtp::Encrypted>, AuthorizationError> {
let transport = transport::Full::new();
let addr = DC_ADDRESSES[dc_id as usize];
let mut sender = if let Some(auth_key) = config.session.auth_key.as_ref() {
info!(
"creating a new sender with existing auth key to dc {} {:?}",
dc_id, addr
);
sender::connect_with_auth(transport, addr, auth_key.clone()).await?
} else {
info!(
"creating a new sender and auth key in dc {} {:?}",
dc_id, addr
);
let sender = sender::connect(transport, addr).await?;
config.session.auth_key = Some(sender.auth_key().clone());
config.session.save()?;
sender
};
// TODO handle -404 (we had a previously-valid authkey, but server no longer knows about it)
// TODO all up-to-date server addresses should be stored in the session for future initial connections
let _remote_config = sender
.invoke(&tl::functions::InvokeWithLayer {
layer: tl::LAYER,
query: tl::functions::InitConnection {
api_id: config.api_id,
device_model: config.params.device_model.clone(),
system_version: config.params.system_version.clone(),
app_version: config.params.app_version.clone(),
system_lang_code: config.params.system_lang_code.clone(),
lang_pack: "".into(),
lang_code: config.params.lang_code.clone(),
proxy: None,
params: None,
query: tl::functions::help::GetConfig {},
},
})
.await?;
// TODO use the dc id from the config as "this dc", not the input dc id
config.session.user_dc = Some(dc_id);
config.session.save()?;
Ok(sender)
}
/// Method implementations directly related with network connectivity.
impl Client {
/// Creates and returns a new client instance upon successful connection to Telegram.
///
/// If the session in the configuration did not have an authorization key, a new one
/// will be created and the session will be saved with it.
///
/// The connection will be initialized with the data from the input configuration.
///
/// # Examples
///
/// ```
/// use grammers_client::{Client, Config};
/// use grammers_session::Session;
///
/// // Note: these are example values and are not actually valid.
/// // Obtain your own with the developer's phone at https://my.telegram.org.
/// const API_ID: i32 = 932939;
/// const API_HASH: &str = "514727c32270b9eb8cc16daf17e21e57";
///
/// # async fn f(mut client: Client) -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::connect(Config {
/// session: Session::load_or_create("hello-world.session")?,
/// api_id: API_ID,
/// api_hash: API_HASH.to_string(),
/// params: Default::default(),
/// }).await?;
/// # Ok(())
/// # }
/// ```
pub async fn connect(mut config: Config) -> Result<Self, AuthorizationError> {
let sender = connect_sender(config.session.user_dc.unwrap_or(0), &mut config).await?;
// TODO Sender doesn't have a way to handle backpressure yet
let (handle_tx, handle_rx) = mpsc::unbounded_channel();
Ok(Self {
sender,
config,
handle_tx,
handle_rx,
})
}
/// Invoke a raw API call without the need to use a [`Client::handle`] or having to repeatedly
/// call [`Client::step`]. This directly sends the request to Telegram's servers.
///
/// Using function definitions corresponding to a different layer is likely to cause the
/// responses to the request to not be understood.
///
/// <div class="stab unstable">
///
/// **Warning**: this method is **not** part of the stability guarantees of semantic
/// versioning. It **may** break during *minor* version changes (but not on patch version
/// changes). Use with care.
///
/// </div>
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_tl_types as tl;
///
/// dbg!(client.invoke(&tl::functions::Ping { ping_id: 0 }).await?);
/// # Ok(())
/// # }
/// ```
pub async fn invoke<R: tl::RemoteCall>(
&mut self,
request: &R,
) -> Result<R::Return, InvocationError> {
self.sender.invoke(request).await
}
/// Return a new [`ClientHandle`] that can be used to invoke remote procedure calls.
///
/// # Examples
///
/// ```
/// use tokio::task;
///
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// // Obtain a handle. After this you can obtain more by using `client_handle.clone()`.
/// let mut client_handle = client.handle();
///
/// // Run the network loop. This is necessary, or no network events will be processed!
/// let network_handle = task::spawn(async move { client.run_until_disconnected().await });
///
/// // Use the `client_handle` to your heart's content, maybe you just want to disconnect:
/// client_handle.disconnect().await;
///
/// // Joining on the spawned task lets us access the result from `run_until_disconnected`,
/// // so we can verify everything went fine. You could also just drop this though.
/// network_handle.await?;
/// # Ok(())
/// # }
    /// ```
pub fn handle(&self) -> ClientHandle {
ClientHandle {
tx: self.handle_tx.clone(),
}
}
/// Perform a single network step or processing of incoming requests via handles.
///
/// If a server message is received, requests enqueued via the [`ClientHandle`]s may have
/// their result delivered via a channel, and a (possibly empty) list of updates will be
/// returned.
///
/// The other return values are graceful disconnection, or a read error.
///
/// Most commonly, you will want to use the higher-level abstraction [`Client::next_updates`]
/// instead.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_client::NetworkStep;
///
/// loop {
/// // Process network events forever until we gracefully disconnect or get an error.
/// match client.step().await? {
    ///         NetworkStep::Connected { .. } => continue,
/// NetworkStep::Disconnected => break,
/// }
/// }
/// # Ok(())
/// # }
/// ```
pub async fn step(&mut self) -> Result<Step, sender::ReadError> {
let (network, request) = {
let network = self.sender.step();
let request = self.handle_rx.recv();
pin_mut!(network);
pin_mut!(request);
match future::select(network, request).await {
future::Either::Left((network, request)) => {
let request = request.now_or_never();
(Some(network), request)
}
future::Either::Right((request, network)) => {
let network = network.now_or_never();
(network, Some(request))
}
}
};
if let Some(request) = request {
let request = request.expect("mpsc returned None");
match request {
Request::Rpc { request, response } => {
// Channel will return `Err` if the `ClientHandle` lost interest, just drop the error.
drop(response.send(self.sender.enqueue_body(request)));
}
Request::Disconnect { response } => {
// Channel will return `Err` if the `ClientHandle` lost interest, just drop the error.
drop(response.send(()));
return Ok(Step::Disconnected);
}
}
}
// TODO request cancellation if this is Err
// (perhaps a method on the sender to cancel_all)
Ok(Step::Connected {
updates: if let Some(updates) = network {
updates?
} else {
Vec::new()
},
})
}
/// Run the client by repeatedly calling [`Client::step`] until a graceful disconnection
    /// occurs, or a network error occurs. Incoming updates are ignored and simply dropped instead.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// client.run_until_disconnected().await?;
/// # Ok(())
/// # }
/// ```
pub async fn run_until_disconnected(mut self) -> Result<(), sender::ReadError> {
loop {
match self.step().await? {
                Step::Connected { .. } => continue,
Step::Disconnected => break Ok(()),
}
}
}
}
/// Method implementations directly related with network connectivity.
impl ClientHandle {
/// Invoke a raw API call.
///
/// Using function definitions corresponding to a different layer is likely to cause the
/// responses to the request to not be understood.
///
/// <div class="stab unstable">
///
/// **Warning**: this method is **not** part of the stability guarantees of semantic
/// versioning. It **may** break during *minor* version changes (but not on patch version
/// changes). Use with care.
///
/// </div>
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::ClientHandle) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_tl_types as tl;
///
/// dbg!(client.invoke(&tl::functions::Ping { ping_id: 0 }).await?);
/// # Ok(())
/// # }
/// ```
pub async fn invoke<R: tl::RemoteCall>(
&mut self,
request: &R,
) -> Result<R::Return, InvocationError> {
let (response, rx) = oneshot::channel();
        // TODO add a test for this (using handle with client dropped)
if let Err(_) = self.tx.send(Request::Rpc {
request: request.to_bytes(),
response,
}) {
// `Client` was dropped, can no longer send requests
return Err(InvocationError::Dropped);
}
        // First receive the `oneshot::Receiver` from the `Client`,
// then `await` on that to receive the response body for the request.
if let Ok(response) = rx.await {
if let Ok(result) = response.await {
match result {
Ok(body) => R::Return::from_bytes(&body).map_err(|e| e.into()),
Err(e) => Err(e),
}
} else {
// `Sender` dropped, won't be receiving a response for this
Err(InvocationError::Dropped)
}
} else {
// `Client` dropped, won't be receiving a response for this
Err(InvocationError::Dropped)
}
}
/// Gracefully tell the [`Client`] that created this handle to disconnect and stop receiving
/// things from the network.
///
/// If the client has already been dropped (and thus disconnected), this method does nothing.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::ClientHandle) {
/// client.disconnect().await;
/// # }
/// ```
pub async fn disconnect(&mut self) {
let (response, rx) = oneshot::channel();
if let Ok(_) = self.tx.send(Request::Disconnect { response }) {
// It's fine to drop errors here, it means the channel was dropped by the `Client`.
drop(rx.await);
} else {
// `Client` is already dropped, no need to disconnect again. | }
} | } | random_line_split |
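This copy of the file is split at a random line; the most instructive machinery in it is `step`, which races the network future against the handle channel and then polls the loser once with `now_or_never`, so a doubly-ready pair is drained in a single pass. A self-contained sketch of that select shape (assumes the `futures` and `tokio` crates; the string futures are stand-ins for the real network and request futures):

use futures::future::{self, Either, FutureExt as _};
use futures::pin_mut;

#[tokio::main]
async fn main() {
    let network = async { "network event" };
    let request = async { "handle request" };
    pin_mut!(network);
    pin_mut!(request);

    let (net, req) = match future::select(network, request).await {
        Either::Left((net, request)) => (Some(net), request.now_or_never()),
        Either::Right((req, network)) => (network.now_or_never(), Some(req)),
    };
    // Both futures were immediately ready, so both sides come back Some.
    assert_eq!(net, Some("network event"));
    assert_eq!(req, Some("handle request"));
}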
net.rs | // Copyright 2020 - developers of the `grammers` project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use super::updates::UpdateIter;
use super::{Client, ClientHandle, Config, Request, Step};
use futures::future::FutureExt as _;
use futures::{future, pin_mut};
use grammers_mtproto::{mtp, transport};
use grammers_mtsender::{self as sender, AuthorizationError, InvocationError, Sender};
use grammers_tl_types::{self as tl, Deserializable};
use log::info;
use std::net::Ipv4Addr;
use tokio::sync::{mpsc, oneshot};
/// Socket addresses to Telegram datacenters, where the index into this array
/// represents the data center ID.
///
/// The addresses were obtained from the `static` addresses through a call to
/// `functions::help::GetConfig`.
const DC_ADDRESSES: [(Ipv4Addr, u16); 6] = [
(Ipv4Addr::new(149, 154, 167, 51), 443), // default (2)
(Ipv4Addr::new(149, 154, 175, 53), 443),
(Ipv4Addr::new(149, 154, 167, 51), 443),
(Ipv4Addr::new(149, 154, 175, 100), 443),
(Ipv4Addr::new(149, 154, 167, 92), 443),
(Ipv4Addr::new(91, 108, 56, 190), 443),
];
pub(crate) async fn connect_sender(
dc_id: i32,
config: &mut Config,
) -> Result<Sender<transport::Full, mtp::Encrypted>, AuthorizationError> | sender
};
// TODO handle -404 (we had a previously-valid authkey, but server no longer knows about it)
// TODO all up-to-date server addresses should be stored in the session for future initial connections
let _remote_config = sender
.invoke(&tl::functions::InvokeWithLayer {
layer: tl::LAYER,
query: tl::functions::InitConnection {
api_id: config.api_id,
device_model: config.params.device_model.clone(),
system_version: config.params.system_version.clone(),
app_version: config.params.app_version.clone(),
system_lang_code: config.params.system_lang_code.clone(),
lang_pack: "".into(),
lang_code: config.params.lang_code.clone(),
proxy: None,
params: None,
query: tl::functions::help::GetConfig {},
},
})
.await?;
// TODO use the dc id from the config as "this dc", not the input dc id
config.session.user_dc = Some(dc_id);
config.session.save()?;
Ok(sender)
}
/// Method implementations directly related with network connectivity.
impl Client {
/// Creates and returns a new client instance upon successful connection to Telegram.
///
/// If the session in the configuration did not have an authorization key, a new one
/// will be created and the session will be saved with it.
///
/// The connection will be initialized with the data from the input configuration.
///
/// # Examples
///
/// ```
/// use grammers_client::{Client, Config};
/// use grammers_session::Session;
///
/// // Note: these are example values and are not actually valid.
/// // Obtain your own with the developer's phone at https://my.telegram.org.
/// const API_ID: i32 = 932939;
/// const API_HASH: &str = "514727c32270b9eb8cc16daf17e21e57";
///
/// # async fn f(mut client: Client) -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::connect(Config {
/// session: Session::load_or_create("hello-world.session")?,
/// api_id: API_ID,
/// api_hash: API_HASH.to_string(),
/// params: Default::default(),
/// }).await?;
/// # Ok(())
/// # }
/// ```
pub async fn connect(mut config: Config) -> Result<Self, AuthorizationError> {
let sender = connect_sender(config.session.user_dc.unwrap_or(0), &mut config).await?;
// TODO Sender doesn't have a way to handle backpressure yet
let (handle_tx, handle_rx) = mpsc::unbounded_channel();
Ok(Self {
sender,
config,
handle_tx,
handle_rx,
})
}
/// Invoke a raw API call without the need to use a [`Client::handle`] or having to repeatedly
/// call [`Client::step`]. This directly sends the request to Telegram's servers.
///
/// Using function definitions corresponding to a different layer is likely to cause the
/// responses to the request to not be understood.
///
/// <div class="stab unstable">
///
/// **Warning**: this method is **not** part of the stability guarantees of semantic
/// versioning. It **may** break during *minor* version changes (but not on patch version
/// changes). Use with care.
///
/// </div>
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_tl_types as tl;
///
/// dbg!(client.invoke(&tl::functions::Ping { ping_id: 0 }).await?);
/// # Ok(())
/// # }
/// ```
pub async fn invoke<R: tl::RemoteCall>(
&mut self,
request: &R,
) -> Result<R::Return, InvocationError> {
self.sender.invoke(request).await
}
/// Return a new [`ClientHandle`] that can be used to invoke remote procedure calls.
///
/// # Examples
///
/// ```
/// use tokio::task;
///
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// // Obtain a handle. After this you can obtain more by using `client_handle.clone()`.
/// let mut client_handle = client.handle();
///
/// // Run the network loop. This is necessary, or no network events will be processed!
/// let network_handle = task::spawn(async move { client.run_until_disconnected().await });
///
/// // Use the `client_handle` to your heart's content, maybe you just want to disconnect:
/// client_handle.disconnect().await;
///
/// // Joining on the spawned task lets us access the result from `run_until_disconnected`,
/// // so we can verify everything went fine. You could also just drop this though.
/// network_handle.await?;
/// # Ok(())
/// # }
    /// ```
pub fn handle(&self) -> ClientHandle {
ClientHandle {
tx: self.handle_tx.clone(),
}
}
/// Perform a single network step or processing of incoming requests via handles.
///
/// If a server message is received, requests enqueued via the [`ClientHandle`]s may have
/// their result delivered via a channel, and a (possibly empty) list of updates will be
/// returned.
///
/// The other return values are graceful disconnection, or a read error.
///
/// Most commonly, you will want to use the higher-level abstraction [`Client::next_updates`]
/// instead.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_client::NetworkStep;
///
/// loop {
/// // Process network events forever until we gracefully disconnect or get an error.
/// match client.step().await? {
    ///         NetworkStep::Connected { .. } => continue,
/// NetworkStep::Disconnected => break,
/// }
/// }
/// # Ok(())
/// # }
/// ```
pub async fn step(&mut self) -> Result<Step, sender::ReadError> {
let (network, request) = {
let network = self.sender.step();
let request = self.handle_rx.recv();
pin_mut!(network);
pin_mut!(request);
match future::select(network, request).await {
future::Either::Left((network, request)) => {
let request = request.now_or_never();
(Some(network), request)
}
future::Either::Right((request, network)) => {
let network = network.now_or_never();
(network, Some(request))
}
}
};
if let Some(request) = request {
let request = request.expect("mpsc returned None");
match request {
Request::Rpc { request, response } => {
// Channel will return `Err` if the `ClientHandle` lost interest, just drop the error.
drop(response.send(self.sender.enqueue_body(request)));
}
Request::Disconnect { response } => {
// Channel will return `Err` if the `ClientHandle` lost interest, just drop the error.
drop(response.send(()));
return Ok(Step::Disconnected);
}
}
}
// TODO request cancellation if this is Err
// (perhaps a method on the sender to cancel_all)
Ok(Step::Connected {
updates: if let Some(updates) = network {
updates?
} else {
Vec::new()
},
})
}
/// Run the client by repeatedly calling [`Client::step`] until a graceful disconnection
    /// occurs, or a network error occurs. Incoming updates are ignored and simply dropped instead.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// client.run_until_disconnected().await?;
/// # Ok(())
/// # }
/// ```
pub async fn run_until_disconnected(mut self) -> Result<(), sender::ReadError> {
loop {
match self.step().await? {
                Step::Connected { .. } => continue,
Step::Disconnected => break Ok(()),
}
}
}
}
/// Method implementations directly related with network connectivity.
impl ClientHandle {
/// Invoke a raw API call.
///
/// Using function definitions corresponding to a different layer is likely to cause the
/// responses to the request to not be understood.
///
/// <div class="stab unstable">
///
/// **Warning**: this method is **not** part of the stability guarantees of semantic
/// versioning. It **may** break during *minor* version changes (but not on patch version
/// changes). Use with care.
///
/// </div>
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::ClientHandle) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_tl_types as tl;
///
/// dbg!(client.invoke(&tl::functions::Ping { ping_id: 0 }).await?);
/// # Ok(())
/// # }
/// ```
pub async fn invoke<R: tl::RemoteCall>(
&mut self,
request: &R,
) -> Result<R::Return, InvocationError> {
let (response, rx) = oneshot::channel();
        // TODO add a test for this (using handle with client dropped)
if let Err(_) = self.tx.send(Request::Rpc {
request: request.to_bytes(),
response,
}) {
// `Client` was dropped, can no longer send requests
return Err(InvocationError::Dropped);
}
        // First receive the `oneshot::Receiver` from the `Client`,
// then `await` on that to receive the response body for the request.
if let Ok(response) = rx.await {
if let Ok(result) = response.await {
match result {
Ok(body) => R::Return::from_bytes(&body).map_err(|e| e.into()),
Err(e) => Err(e),
}
} else {
// `Sender` dropped, won't be receiving a response for this
Err(InvocationError::Dropped)
}
} else {
// `Client` dropped, won't be receiving a response for this
Err(InvocationError::Dropped)
}
}
/// Gracefully tell the [`Client`] that created this handle to disconnect and stop receiving
/// things from the network.
///
/// If the client has already been dropped (and thus disconnected), this method does nothing.
///
/// # Examples
///
/// ```
/// # async fn f(mut client: grammers_client::ClientHandle) {
/// client.disconnect().await;
/// # }
/// ```
pub async fn disconnect(&mut self) {
let (response, rx) = oneshot::channel();
if let Ok(_) = self.tx.send(Request::Disconnect { response }) {
// It's fine to drop errors here, it means the channel was dropped by the `Client`.
drop(rx.await);
} else {
// `Client` is already dropped, no need to disconnect again.
}
}
}
| {
let transport = transport::Full::new();
let addr = DC_ADDRESSES[dc_id as usize];
let mut sender = if let Some(auth_key) = config.session.auth_key.as_ref() {
info!(
"creating a new sender with existing auth key to dc {} {:?}",
dc_id, addr
);
sender::connect_with_auth(transport, addr, auth_key.clone()).await?
} else {
info!(
"creating a new sender and auth key in dc {} {:?}",
dc_id, addr
);
let sender = sender::connect(transport, addr).await?;
config.session.auth_key = Some(sender.auth_key().clone());
config.session.save()?; | identifier_body |
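The masked `identifier_body` above is the opening of `connect_sender`; before anything else it indexes `DC_ADDRESSES` directly with `dc_id`, and `connect` feeds it `user_dc.unwrap_or(0)`, so a fresh session lands on entry 0. A self-contained sketch of that lookup (table truncated to three entries for brevity, addresses copied from the row above):

use std::net::Ipv4Addr;

const DC_ADDRESSES: [(Ipv4Addr, u16); 3] = [
    (Ipv4Addr::new(149, 154, 167, 51), 443), // default (2)
    (Ipv4Addr::new(149, 154, 175, 53), 443),
    (Ipv4Addr::new(149, 154, 167, 51), 443),
];

fn main() {
    let user_dc: Option<i32> = None; // fresh session: no datacenter saved yet
    let dc_id = user_dc.unwrap_or(0);
    let (ip, port) = DC_ADDRESSES[dc_id as usize];
    println!("connecting to dc {} at {}:{}", dc_id, ip, port);
}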
main.rs | use crate::{
config::{save_config, Config},
repo::Repo,
};
use anyhow::{anyhow, bail, Context, Result};
use clap::{AppSettings, Clap};
use lazy_static::lazy_static;
use regex::Regex;
use std::env;
use std::io::{self, Read};
use url::Url;
mod config;
mod git;
mod github;
mod gitlab;
mod repo;
lazy_static! {
static ref API_SOURCE_REGEX: Regex =
Regex::new(r"(?P<alias>^\w+)(@(?P<ref>\w+))?:(?P<script>.+)$").unwrap();
static ref GIT_SOURCE_REGEX: Regex =
Regex::new(r"^(?P<repo>((git|ssh|http(s)?)|(git@[\w\.]+))(:(//)?)([\w\./\-~]+)(\.git)?(/)?)(@(?P<ref>\w+))?:(?P<script>.+)$")
.unwrap();
}
#[derive(Clap, Debug)]
#[clap(author, about, version)]
#[clap(global_setting = AppSettings::ColoredHelp)]
#[clap(setting = AppSettings::DeriveDisplayOrder)]
#[clap(setting = AppSettings::SubcommandRequiredElseHelp)]
struct Opts {
#[clap(subcommand)]
command: Command,
}
const SCRIPT_HELP: &'static str = r"Script identifier for a script from a repository
For saved repos: `<repo>[@ref]:<script_path>`
Example: `myscripts:hello.bash`
Example (w/ ref): `[email protected]:hello.bash`
For git repos: `git@<repo_url>[@ref]:<script_path>`
Example: `[email protected]:user/myscripts:hello.bash`
Example (w/ ref): `[email protected]:user/myscripts@main:hello.bash`
";
#[derive(Clap, Debug)]
enum Command {
/// Read and modify locally saved repositories
Repo {
#[clap(subcommand)]
command: RepoCommand,
},
/// Run a script using the locally installed bash shell
Run {
/// Force a fresh download of the script (only for raw git repositories)
#[clap(short, long)]
fresh: bool,
#[clap(about = "Script to run", long_about = SCRIPT_HELP)]
script: String,
/// Args to be passed to the script
#[clap(about = "Args to be passed to the script")]
args: Vec<String>,
},
/// Import a script and print it to stdout
Import {
#[clap(short, long)]
fresh: bool,
#[clap(about = "Script to import", long_about = SCRIPT_HELP)]
script: String,
},
}
#[derive(Clap, Debug)]
enum RepoCommand {
/// List all locally saved repositories
#[clap(alias = "ls")]
List,
/// Add a repository to the local repository list
Add {
/// Local alias for the repository to add
name: String,
/// URI of the repository to add
uri: String,
/// Username for the repository (if required)
#[clap(long, short)]
username: Option<String>,
/// Password or token for the repository (if required)
#[clap(long, short)]
password: Option<String>,
/// Reads the password from the given environment variable when the repo is used
#[clap(long)]
password_env: Option<String>,
/// Reads the password or token from stdin
#[clap(long)]
password_stdin: bool,
},
/// Remove a repository from the local repository list
#[clap(alias = "rm")]
Remove {
/// Local alias for the repository to remove
name: String,
},
}
#[derive(PartialEq)]
pub enum Password {
Saved(String),
FromEnv(String, String),
None,
}
#[tokio::main]
async fn main() -> Result<()> {
openssl_probe::init_ssl_cert_env_vars();
let mut config = config::load_config().await?;
match Opts::parse().command {
Command::Repo { command } => match command {
RepoCommand::List => {
if config.repo.is_empty() {
println!("No Saved repositories.");
return Ok(());
}
println!("Saved repositories:");
for (k, v) in config.repo {
println!(" {} ({} | {})", k, v.provider(), v.readable());
}
}
RepoCommand::Add {
name,
uri,
username,
password,
password_env,
password_stdin,
} => {
if config.repo.contains_key(&name) {
bail!("A repository with the name `{}` already exists", &name);
}
let password_for_parse = match (password, password_env, password_stdin) {
(Some(pass), _, _) => Password::Saved(pass),
(_, Some(var), _) => Password::FromEnv(var.clone(), env::var(var)?),
(_, _, true) => {
let mut buf = String::new();
io::stdin().read_to_string(&mut buf)?;
Password::Saved(buf)
}
_ => Password::None,
};
let repo = validate_api_repo(&uri, username, password_for_parse).await?;
config.repo.insert(name.clone(), repo);
save_config(&config)
.await
.context("Failed to save updated config")?;
println!("Repo `{}` was successfully added", &name);
}
RepoCommand::Remove { name } => {
                if !config.repo.contains_key(&name) {
bail!("Repo `{}` was not found", &name);
}
config.repo.remove(&name);
save_config(&config)
.await
.context("Failed to save updated config")?;
println!("Repo `{}` was removed", &name);
}
},
Command::Run {
script,
args,
fresh,
} => {
let src = ScriptSource::parse(&script, ScriptAction::Run)?;
src.validate_script_name(&config)?;
let contents = src.fetch_script_contents(&config, fresh).await?;
let args = args.iter().map(|s| &**s).collect();
// TODO(happens): Find a way to propagate the actual exit code
// instead of simply returning 0/1 depending on the script.
// This should cover most use cases if you just want to know
// if the script failed, but until `std::process::Termination`
// is stabilized, it seems unsafe to use `std::process::exit`
// since we're using a tokio main.
let exit = repo::run_script(&contents, args).await?;
            if !exit.success() {
bail!("");
}
}
Command::Import { script, fresh } => {
let src = ScriptSource::parse(&script, ScriptAction::Import)?;
src.validate_script_name(&config)?;
let contents = src.fetch_script_contents(&config, fresh).await?;
repo::import_script(&contents).await?;
}
};
Ok(())
}
enum ScriptAction {
Run,
Import,
}
pub struct ScriptSource {
repo: String,
source_type: SourceType,
script_name: String,
rref: Option<String>,
action: ScriptAction,
}
enum | {
Git,
Saved,
}
impl ScriptSource {
fn parse(script: &str, action: ScriptAction) -> Result<ScriptSource> {
if let Some(matches) = API_SOURCE_REGEX.captures(script) {
let repo = matches
.name("alias")
.expect("No alias matched")
.as_str()
.to_owned();
let script_name = matches
.name("script")
.expect("No script name matched")
.as_str()
.to_owned();
let rref = matches.name("ref").map(|rref| rref.as_str().to_owned());
return Ok(Self {
source_type: SourceType::Saved,
repo,
script_name,
rref,
action,
});
}
if let Some(matches) = GIT_SOURCE_REGEX.captures(script) {
let repo = matches
.name("repo")
.expect("No repo matched")
.as_str()
.to_owned();
let script_name = matches
.name("script")
.expect("No script name matched")
.as_str()
.to_owned();
let rref = matches.name("ref").map(|rref| rref.as_str().to_owned());
return Ok(Self {
source_type: SourceType::Git,
repo,
script_name,
rref,
action,
});
}
bail!("Script source could not be parsed")
}
fn validate_script_name(&self, config: &Config) -> Result<()> {
if config.require_bash_extension.is_none() && config.require_lib_extension.is_none() {
return Ok(());
}
let expected = match (
&config.require_bash_extension,
&config.require_lib_extension,
&self.action,
) {
(Some(ref ext), _, &ScriptAction::Run) => ext,
(_, Some(ext), &ScriptAction::Import) => ext,
_ => unreachable!(),
};
        if !self.script_name.ends_with(expected) {
bail!("Expected script name to end with `{}`", expected);
}
Ok(())
}
async fn fetch_script_contents(&self, config: &config::Config, fresh: bool) -> Result<String> {
let repo = match self.source_type {
SourceType::Saved => config
.repo
.get(&self.repo)
.ok_or(anyhow!("Repo `{}` was not found", &self.repo))?
.box_clone(),
SourceType::Git => git::GitRepo::from_src(&self),
};
let rref = self.rref.clone().unwrap_or("HEAD".to_owned());
Ok(repo.fetch_script(&self.script_name, &rref, fresh).await?)
}
}
async fn validate_api_repo(
uri: &str,
username: Option<String>,
password: Password,
) -> Result<Box<dyn Repo>> {
let mut maybe_parsed: Option<Url> = None;
// Check if we've been given a raw gitlab or github url without scheme
if uri.starts_with("gitlab.com") || uri.starts_with("github.com") {
let with_scheme = format!("https://{}", uri);
maybe_parsed = Some(Url::parse(&with_scheme)?);
}
// Try parsing the url manually otherwise
let mut parsed = match maybe_parsed {
Some(parsed) => parsed,
None => Url::parse(uri)?,
};
if parsed.cannot_be_a_base() {
bail!("Repo URI was not recognized");
}
// Enforce https
let _ = parsed.set_scheme("https");
match parsed.host_str() {
Some("gitlab.com") => Ok(gitlab::fetch_project(&parsed, password).await?),
Some("github.com") => Ok(github::fetch_project(&parsed, username, password).await?),
Some(_) => bail!("No provider recognized for passed URI"),
None => bail!("No host on passed URI"),
}
}
| SourceType | identifier_name |
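The masked identifier above is `SourceType`; the real parsing work happens in `ScriptSource::parse` via `API_SOURCE_REGEX`, which splits a saved-repo id of the form `<repo>[@ref]:<script_path>` into named capture groups. A self-contained sketch of those captures (assumes the `regex` crate; note that `\w+` does not match dots, so a ref such as `v1.0` from the help text would not parse):

use regex::Regex;

fn main() {
    let re = Regex::new(r"(?P<alias>^\w+)(@(?P<ref>\w+))?:(?P<script>.+)$").unwrap();

    let caps = re.captures("myscripts@main:hello.bash").unwrap();
    assert_eq!(&caps["alias"], "myscripts");
    assert_eq!(caps.name("ref").map(|m| m.as_str()), Some("main"));
    assert_eq!(&caps["script"], "hello.bash");

    // The ref segment is optional.
    let caps = re.captures("myscripts:hello.bash").unwrap();
    assert!(caps.name("ref").is_none());
}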
main.rs | use crate::{
config::{save_config, Config},
repo::Repo,
};
use anyhow::{anyhow, bail, Context, Result};
use clap::{AppSettings, Clap};
use lazy_static::lazy_static;
use regex::Regex;
use std::env;
use std::io::{self, Read};
use url::Url;
mod config;
mod git;
mod github;
mod gitlab;
mod repo;
lazy_static! {
static ref API_SOURCE_REGEX: Regex =
Regex::new(r"(?P<alias>^\w+)(@(?P<ref>\w+))?:(?P<script>.+)$").unwrap();
static ref GIT_SOURCE_REGEX: Regex =
Regex::new(r"^(?P<repo>((git|ssh|http(s)?)|(git@[\w\.]+))(:(//)?)([\w\./\-~]+)(\.git)?(/)?)(@(?P<ref>\w+))?:(?P<script>.+)$")
.unwrap();
}
#[derive(Clap, Debug)]
#[clap(author, about, version)]
#[clap(global_setting = AppSettings::ColoredHelp)]
#[clap(setting = AppSettings::DeriveDisplayOrder)]
#[clap(setting = AppSettings::SubcommandRequiredElseHelp)]
struct Opts {
#[clap(subcommand)]
command: Command,
}
const SCRIPT_HELP: &'static str = r"Script identifier for a script from a repository
For saved repos: `<repo>[@ref]:<script_path>`
Example: `myscripts:hello.bash`
Example (w/ ref): `[email protected]:hello.bash`
For git repos: `git@<repo_url>[@ref]:<script_path>`
Example: `[email protected]:user/myscripts:hello.bash`
Example (w/ ref): `[email protected]:user/myscripts@main:hello.bash`
";
#[derive(Clap, Debug)]
enum Command {
/// Read and modify locally saved repositories
Repo {
#[clap(subcommand)]
command: RepoCommand,
},
/// Run a script using the locally installed bash shell
Run {
/// Force a fresh download of the script (only for raw git repositories)
#[clap(short, long)]
fresh: bool,
#[clap(about = "Script to run", long_about = SCRIPT_HELP)]
script: String,
/// Args to be passed to the script
#[clap(about = "Args to be passed to the script")]
args: Vec<String>,
},
/// Import a script and print it to stdout
Import {
#[clap(short, long)]
fresh: bool,
#[clap(about = "Script to import", long_about = SCRIPT_HELP)]
script: String,
},
}
#[derive(Clap, Debug)]
enum RepoCommand {
/// List all locally saved repositories
#[clap(alias = "ls")]
List,
/// Add a repository to the local repository list
Add {
/// Local alias for the repository to add
name: String,
/// URI of the repository to add
uri: String,
/// Username for the repository (if required)
#[clap(long, short)]
username: Option<String>,
/// Password or token for the repository (if required)
#[clap(long, short)]
password: Option<String>,
/// Reads the password from the given environment variable when the repo is used
#[clap(long)]
password_env: Option<String>,
/// Reads the password or token from stdin
#[clap(long)]
password_stdin: bool,
},
/// Remove a repository from the local repository list
#[clap(alias = "rm")]
Remove {
/// Local alias for the repository to remove
name: String,
},
}
#[derive(PartialEq)]
pub enum Password {
Saved(String),
FromEnv(String, String),
None,
}
#[tokio::main]
async fn main() -> Result<()> {
openssl_probe::init_ssl_cert_env_vars();
let mut config = config::load_config().await?;
match Opts::parse().command {
Command::Repo { command } => match command {
RepoCommand::List => {
if config.repo.is_empty() {
println!("No Saved repositories.");
return Ok(());
}
println!("Saved repositories:");
for (k, v) in config.repo {
println!(" {} ({} | {})", k, v.provider(), v.readable());
}
}
RepoCommand::Add {
name,
uri,
username,
password,
password_env,
password_stdin,
} => {
if config.repo.contains_key(&name) {
bail!("A repository with the name `{}` already exists", &name);
}
let password_for_parse = match (password, password_env, password_stdin) {
(Some(pass), _, _) => Password::Saved(pass),
(_, Some(var), _) => Password::FromEnv(var.clone(), env::var(var)?),
(_, _, true) => {
let mut buf = String::new();
io::stdin().read_to_string(&mut buf)?;
Password::Saved(buf)
}
_ => Password::None,
};
let repo = validate_api_repo(&uri, username, password_for_parse).await?;
config.repo.insert(name.clone(), repo);
save_config(&config)
.await
.context("Failed to save updated config")?;
println!("Repo `{}` was successfully added", &name);
}
RepoCommand::Remove { name } => {
                if !config.repo.contains_key(&name) {
bail!("Repo `{}` was not found", &name);
}
config.repo.remove(&name);
save_config(&config)
.await
.context("Failed to save updated config")?;
println!("Repo `{}` was removed", &name);
}
},
Command::Run {
script,
args,
fresh,
} => {
let src = ScriptSource::parse(&script, ScriptAction::Run)?;
src.validate_script_name(&config)?;
let contents = src.fetch_script_contents(&config, fresh).await?;
let args = args.iter().map(|s| &**s).collect();
// TODO(happens): Find a way to propagate the actual exit code
// instead of simply returning 0/1 depending on the script.
// This should cover most use cases if you just want to know
// if the script failed, but until `std::process::Termination`
// is stabilized, it seems unsafe to use `std::process::exit`
// since we're using a tokio main.
let exit = repo::run_script(&contents, args).await?;
            if !exit.success() {
bail!("");
}
}
Command::Import { script, fresh } => {
let src = ScriptSource::parse(&script, ScriptAction::Import)?;
src.validate_script_name(&config)?;
let contents = src.fetch_script_contents(&config, fresh).await?;
repo::import_script(&contents).await?;
}
};
Ok(())
}
enum ScriptAction {
Run,
Import,
}
pub struct ScriptSource {
repo: String,
source_type: SourceType,
script_name: String,
rref: Option<String>,
action: ScriptAction,
}
enum SourceType {
Git,
Saved,
}
impl ScriptSource {
fn parse(script: &str, action: ScriptAction) -> Result<ScriptSource> {
if let Some(matches) = API_SOURCE_REGEX.captures(script) {
let repo = matches
.name("alias")
.expect("No alias matched")
.as_str()
.to_owned();
let script_name = matches
.name("script")
.expect("No script name matched")
.as_str()
.to_owned();
let rref = matches.name("ref").map(|rref| rref.as_str().to_owned());
return Ok(Self {
source_type: SourceType::Saved,
repo,
script_name,
rref,
action,
});
}
if let Some(matches) = GIT_SOURCE_REGEX.captures(script) {
let repo = matches
.name("repo")
.expect("No repo matched")
.as_str()
.to_owned();
let script_name = matches
.name("script")
.expect("No script name matched")
.as_str()
.to_owned();
let rref = matches.name("ref").map(|rref| rref.as_str().to_owned());
return Ok(Self {
source_type: SourceType::Git,
repo,
script_name,
rref,
action,
});
}
bail!("Script source could not be parsed")
}
fn validate_script_name(&self, config: &Config) -> Result<()> {
if config.require_bash_extension.is_none() && config.require_lib_extension.is_none() {
return Ok(());
}
let expected = match (
&config.require_bash_extension,
&config.require_lib_extension,
&self.action,
) {
(Some(ref ext), _, &ScriptAction::Run) => ext,
(_, Some(ext), &ScriptAction::Import) => ext,
_ => unreachable!(),
};
if !self.script_name.ends_with(expected) {
bail!("Expected script name to end with `{}`", expected);
}
Ok(())
}
async fn fetch_script_contents(&self, config: &config::Config, fresh: bool) -> Result<String> {
let repo = match self.source_type {
            SourceType::Saved => config
                .repo
                .get(&self.repo)
                .ok_or(anyhow!("Repo `{}` was not found", &self.repo))?
                .box_clone(),
            SourceType::Git => git::GitRepo::from_src(&self),
};
let rref = self.rref.clone().unwrap_or("HEAD".to_owned());
Ok(repo.fetch_script(&self.script_name, &rref, fresh).await?)
}
}
async fn validate_api_repo(
uri: &str,
username: Option<String>,
password: Password,
) -> Result<Box<dyn Repo>> {
let mut maybe_parsed: Option<Url> = None;
// Check if we've been given a raw gitlab or github url without scheme
if uri.starts_with("gitlab.com") || uri.starts_with("github.com") {
let with_scheme = format!("https://{}", uri);
maybe_parsed = Some(Url::parse(&with_scheme)?);
}
// Try parsing the url manually otherwise
let mut parsed = match maybe_parsed {
Some(parsed) => parsed,
None => Url::parse(uri)?,
};
if parsed.cannot_be_a_base() {
bail!("Repo URI was not recognized");
}
// Enforce https
let _ = parsed.set_scheme("https");
match parsed.host_str() {
Some("gitlab.com") => Ok(gitlab::fetch_project(&parsed, password).await?),
Some("github.com") => Ok(github::fetch_project(&parsed, username, password).await?),
Some(_) => bail!("No provider recognized for passed URI"),
None => bail!("No host on passed URI"),
}
}
main.rs | use crate::{
config::{save_config, Config},
repo::Repo,
};
use anyhow::{anyhow, bail, Context, Result};
use clap::{AppSettings, Clap};
use lazy_static::lazy_static;
use regex::Regex;
use std::env;
use std::io::{self, Read};
use url::Url;
mod config;
mod git;
mod github;
mod gitlab;
mod repo;
lazy_static! {
static ref API_SOURCE_REGEX: Regex =
Regex::new(r"(?P<alias>^\w+)(@(?P<ref>\w+))?:(?P<script>.+)$").unwrap();
static ref GIT_SOURCE_REGEX: Regex =
Regex::new(r"^(?P<repo>((git|ssh|http(s)?)|(git@[\w\.]+))(:(//)?)([\w\./\-~]+)(\.git)?(/)?)(@(?P<ref>\w+))?:(?P<script>.+)$")
.unwrap();
}
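// Editor's illustration (added example, not in the original file): what the two
// patterns above capture for the formats documented in SCRIPT_HELP below. The
// group names (`alias`, `ref`, `script`) come straight from API_SOURCE_REGEX.
#[cfg(test)]
mod source_regex_examples {
    use super::*;

    #[test]
    fn saved_repo_source_with_and_without_ref() {
        let caps = API_SOURCE_REGEX.captures("myscripts:hello.bash").unwrap();
        assert_eq!(caps.name("alias").unwrap().as_str(), "myscripts");
        assert_eq!(caps.name("script").unwrap().as_str(), "hello.bash");
        assert!(caps.name("ref").is_none());

        let caps = API_SOURCE_REGEX.captures("myscripts@main:hello.bash").unwrap();
        assert_eq!(caps.name("ref").unwrap().as_str(), "main");
    }
}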
#[derive(Clap, Debug)]
#[clap(author, about, version)]
#[clap(global_setting = AppSettings::ColoredHelp)]
#[clap(setting = AppSettings::DeriveDisplayOrder)]
#[clap(setting = AppSettings::SubcommandRequiredElseHelp)]
struct Opts {
#[clap(subcommand)]
command: Command,
}
const SCRIPT_HELP: &'static str = r"Script identifier for a script from a repository
For saved repos: `<repo>[@ref]:<script_path>`
Example: `myscripts:hello.bash`
Example (w/ ref): `[email protected]:hello.bash`
For git repos: `git@<repo_url>[@ref]:<script_path>`
Example: `[email protected]:user/myscripts:hello.bash`
Example (w/ ref): `[email protected]:user/myscripts@main:hello.bash`
";
#[derive(Clap, Debug)]
enum Command {
/// Read and modify locally saved repositories
Repo {
#[clap(subcommand)]
command: RepoCommand,
},
/// Run a script using the locally installed bash shell
Run {
/// Force a fresh download of the script (only for raw git repositories)
#[clap(short, long)]
fresh: bool,
#[clap(about = "Script to run", long_about = SCRIPT_HELP)]
script: String,
/// Args to be passed to the script
#[clap(about = "Args to be passed to the script")]
args: Vec<String>,
},
/// Import a script and print it to stdout
Import {
#[clap(short, long)]
fresh: bool,
#[clap(about = "Script to import", long_about = SCRIPT_HELP)]
script: String,
},
}
#[derive(Clap, Debug)]
enum RepoCommand {
/// List all locally saved repositories
#[clap(alias = "ls")]
List,
/// Add a repository to the local repository list
Add {
/// Local alias for the repository to add
name: String,
/// URI of the repository to add
uri: String,
/// Username for the repository (if required)
#[clap(long, short)]
username: Option<String>,
/// Password or token for the repository (if required)
#[clap(long, short)]
password: Option<String>,
/// Reads the password from the given environment variable when the repo is used
#[clap(long)]
password_env: Option<String>,
/// Reads the password or token from stdin
#[clap(long)]
password_stdin: bool,
},
/// Remove a repository from the local repository list
#[clap(alias = "rm")]
Remove {
/// Local alias for the repository to remove
name: String,
},
}
#[derive(PartialEq)]
pub enum Password {
Saved(String),
FromEnv(String, String),
None,
}
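// Editor's note (added): the `repo add` handler below matches these variants in
// precedence order -- an explicit --password wins over --password-env, which wins
// over --password-stdin; with none of them given, `Password::None` is used.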
#[tokio::main]
async fn main() -> Result<()> {
openssl_probe::init_ssl_cert_env_vars();
let mut config = config::load_config().await?;
match Opts::parse().command {
Command::Repo { command } => match command {
RepoCommand::List => {
if config.repo.is_empty() {
println!("No Saved repositories.");
return Ok(());
}
println!("Saved repositories:");
for (k, v) in config.repo {
println!(" {} ({} | {})", k, v.provider(), v.readable());
}
}
RepoCommand::Add {
name,
uri,
username,
password,
password_env,
password_stdin,
        } => {
            if config.repo.contains_key(&name) {
                bail!("A repository with the name `{}` already exists", &name);
            }
            let password_for_parse = match (password, password_env, password_stdin) {
                (Some(pass), _, _) => Password::Saved(pass),
                (_, Some(var), _) => Password::FromEnv(var.clone(), env::var(var)?),
                (_, _, true) => {
                    let mut buf = String::new();
                    io::stdin().read_to_string(&mut buf)?;
                    Password::Saved(buf)
                }
                _ => Password::None,
            };
            let repo = validate_api_repo(&uri, username, password_for_parse).await?;
            config.repo.insert(name.clone(), repo);
            save_config(&config)
                .await
                .context("Failed to save updated config")?;
println!("Repo `{}` was successfully added", &name);
}
RepoCommand::Remove { name } => {
if !config.repo.contains_key(&name) {
bail!("Repo `{}` was not found", &name);
}
config.repo.remove(&name);
save_config(&config)
.await
.context("Failed to save updated config")?;
println!("Repo `{}` was removed", &name);
}
},
Command::Run {
script,
args,
fresh,
} => {
let src = ScriptSource::parse(&script, ScriptAction::Run)?;
src.validate_script_name(&config)?;
let contents = src.fetch_script_contents(&config, fresh).await?;
let args = args.iter().map(|s| &**s).collect();
// TODO(happens): Find a way to propagate the actual exit code
// instead of simply returning 0/1 depending on the script.
// This should cover most use cases if you just want to know
// if the script failed, but until `std::process::Termination`
// is stabilized, it seems unsafe to use `std::process::exit`
// since we're using a tokio main.
let exit = repo::run_script(&contents, args).await?;
if !exit.success() {
bail!("");
}
}
Command::Import { script, fresh } => {
let src = ScriptSource::parse(&script, ScriptAction::Import)?;
src.validate_script_name(&config)?;
let contents = src.fetch_script_contents(&config, fresh).await?;
repo::import_script(&contents).await?;
}
};
Ok(())
}
enum ScriptAction {
Run,
Import,
}
pub struct ScriptSource {
repo: String,
source_type: SourceType,
script_name: String,
rref: Option<String>,
action: ScriptAction,
}
enum SourceType {
Git,
Saved,
}
impl ScriptSource {
fn parse(script: &str, action: ScriptAction) -> Result<ScriptSource> {
if let Some(matches) = API_SOURCE_REGEX.captures(script) {
let repo = matches
.name("alias")
.expect("No alias matched")
.as_str()
.to_owned();
let script_name = matches
.name("script")
.expect("No script name matched")
.as_str()
.to_owned();
let rref = matches.name("ref").map(|rref| rref.as_str().to_owned());
return Ok(Self {
source_type: SourceType::Saved,
repo,
script_name,
rref,
action,
});
}
if let Some(matches) = GIT_SOURCE_REGEX.captures(script) {
let repo = matches
.name("repo")
.expect("No repo matched")
.as_str()
.to_owned();
let script_name = matches
.name("script")
.expect("No script name matched")
.as_str()
.to_owned();
let rref = matches.name("ref").map(|rref| rref.as_str().to_owned());
return Ok(Self {
source_type: SourceType::Git,
repo,
script_name,
rref,
action,
});
}
bail!("Script source could not be parsed")
}
fn validate_script_name(&self, config: &Config) -> Result<()> {
if config.require_bash_extension.is_none() && config.require_lib_extension.is_none() {
return Ok(());
}
let expected = match (
&config.require_bash_extension,
&config.require_lib_extension,
&self.action,
) {
(Some(ref ext), _, &ScriptAction::Run) => ext,
(_, Some(ext), &ScriptAction::Import) => ext,
_ => unreachable!(),
};
if !self.script_name.ends_with(expected) {
bail!("Expected script name to end with `{}`", expected);
}
Ok(())
}
async fn fetch_script_contents(&self, config: &config::Config, fresh: bool) -> Result<String> {
let repo = match self.source_type {
SourceType::Saved => config
.repo
.get(&self.repo)
.ok_or(anyhow!("Repo `{}` was not found", &self.repo))?
.box_clone(),
SourceType::Git => git::GitRepo::from_src(&self),
};
let rref = self.rref.clone().unwrap_or("HEAD".to_owned());
Ok(repo.fetch_script(&self.script_name, &rref, fresh).await?)
}
}
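// Editor's illustration (added example, not in the original file): parsing a saved
// repo source end to end. The assertions rely on this test module living in the
// same file as the private fields.
#[cfg(test)]
mod script_source_examples {
    use super::*;

    #[test]
    fn parse_saved_source() {
        let src = ScriptSource::parse("myscripts:hello.bash", ScriptAction::Run).unwrap();
        assert!(matches!(src.source_type, SourceType::Saved));
        assert_eq!(src.repo, "myscripts");
        assert_eq!(src.script_name, "hello.bash");
        assert!(src.rref.is_none());
    }
}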
async fn validate_api_repo(
uri: &str,
username: Option<String>,
password: Password,
) -> Result<Box<dyn Repo>> {
let mut maybe_parsed: Option<Url> = None;
// Check if we've been given a raw gitlab or github url without scheme
if uri.starts_with("gitlab.com") || uri.starts_with("github.com") {
let with_scheme = format!("https://{}", uri);
maybe_parsed = Some(Url::parse(&with_scheme)?);
}
// Try parsing the url manually otherwise
let mut parsed = match maybe_parsed {
Some(parsed) => parsed,
None => Url::parse(uri)?,
};
if parsed.cannot_be_a_base() {
bail!("Repo URI was not recognized");
}
// Enforce https
let _ = parsed.set_scheme("https");
    match parsed.host_str() {
        Some("gitlab.com") => Ok(gitlab::fetch_project(&parsed, password).await?),
        Some("github.com") => Ok(github::fetch_project(&parsed, username, password).await?),
        Some(_) => bail!("No provider recognized for passed URI"),
        None => bail!("No host on passed URI"),
    }
}
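// Editor's illustration (added example): the scheme fixup in `validate_api_repo`
// above, exercised with the `url` crate directly. The repo path is hypothetical.
#[cfg(test)]
mod uri_scheme_examples {
    use url::Url;

    #[test]
    fn bare_host_is_prefixed_and_forced_to_https() {
        let with_scheme = format!("https://{}", "github.com/user/myscripts");
        let mut parsed = Url::parse(&with_scheme).unwrap();
        let _ = parsed.set_scheme("https");
        assert_eq!(parsed.host_str(), Some("github.com"));
        assert_eq!(parsed.scheme(), "https");
    }
}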
ledger_manager.rs | use crate::crypto::hash::{H256, Hashable};
use crate::blockchain::Blockchain;
use crate::block::Content;
use crate::transaction::SignedTransaction;
use crate::utxo::UtxoState;
use std::collections::{HashMap, HashSet};
use std::thread;
use std::time::{SystemTime, UNIX_EPOCH, Duration};
use std::sync::{Arc, Mutex};
use statrs::distribution::{Discrete, Poisson, Univariate};
use log::debug;
//state required by ledger-manager
pub struct LedgerManagerState {
pub last_level_processed: u32,
pub leader_sequence: Vec<H256>,
pub proposer_blocks_processed: HashSet<H256>,
pub tx_confirmed: HashSet<H256>,
pub tx_count: usize,
}
//ledger-manager will periodically loop and confirm the transactions
pub struct LedgerManager {
pub ledger_manager_state: LedgerManagerState,
pub blockchain: Arc<Mutex<Blockchain>>,
pub utxo_state: Arc<Mutex<UtxoState>>,
pub voter_depth_k: u32,
}
impl LedgerManager {
pub fn new(blockchain: &Arc<Mutex<Blockchain>>, utxo_state: &Arc<Mutex<UtxoState>>, k: u32) -> Self {
let ledger_manager_state = LedgerManagerState{
last_level_processed: 1,
proposer_blocks_processed: HashSet::new(),
leader_sequence: Vec::new(),
tx_confirmed: HashSet::new(),
tx_count: 0,
};
LedgerManager {
ledger_manager_state: ledger_manager_state,
blockchain: Arc::clone(blockchain),
utxo_state: Arc::clone(utxo_state),
voter_depth_k: k,
}
}
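    // Editor's sketch (hypothetical wiring, added for illustration): callers are
    // expected to construct and launch the manager roughly as
    //   LedgerManager::new(&blockchain, &utxo_state, 2).start();
    // where k = 2 matches the "2-deep" confirmation mentioned in confirm_leader.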
pub fn start(mut self) {
thread::Builder::new()
.name("ledger_manager".to_string())
.spawn(move || {
self.ledger_manager_loop();
})
.unwrap();
}
//Three Steps
//1. Get the leader sequence
//2. Get Transaction sequence
//3. Sanitize Tx and update UTXO state
//All 3 steps are done in the loop
//
fn ledger_manager_loop(&mut self) {
loop {
//Step 1
//let leader_sequence = self.get_leader_sequence();
//This one uses the algorithm described in Prism Paper
let leader_sequence = self.get_confirmed_leader_sequence();
//Step 2
let tx_sequence = self.get_transaction_sequence(&leader_sequence);
//Step 3
self.confirm_transactions(&tx_sequence);
thread::sleep(Duration::from_secs(1));
}
}
fn get_leader_sequence(&mut self) -> Vec<H256> {
let locked_blockchain = self.blockchain.lock().unwrap();
let mut leader_sequence: Vec<H256> = vec![];
//TODO: This is a workaround for now till we have some DS which asserts that
//all voter chains at a particular level have voted
// level2votes: how many votes have been casted at level i
let level_start = self.ledger_manager_state.last_level_processed + 1;
let level_end = locked_blockchain.proposer_depth + 1;
for level in level_start..level_end {
let proposers = &locked_blockchain.level2allproposers[&level];
let mut max_vote_count = 0;
let mut leader: H256 = [0; 32].into();
//Assumption: if a vote for proposer not present, assumed to be 0
//When above TODO is done, then this will not be needed or modified accordingly
for proposer in proposers {
if locked_blockchain.proposer2votecount.contains_key(proposer) {
let vote_count = locked_blockchain.proposer2votecount[proposer];
if vote_count > max_vote_count {
max_vote_count = vote_count;
leader = *proposer;
}
}
}
//break out as there is no point going forward as no leader found at this level
if max_vote_count == 0 {
break;
}
println!("Adding leader at level {}, leader hash: {:?}, max votes: {}", level, leader, max_vote_count);
leader_sequence.push(leader);
self.ledger_manager_state.leader_sequence.push(leader);
println!("Leader sequence: {:?}", self.ledger_manager_state.leader_sequence);
self.ledger_manager_state.last_level_processed = level;
}
leader_sequence
}
fn get_confirmed_leader_sequence(&mut self) -> Vec<H256> {
let mut leader_sequence: Vec<H256> = vec![];
//Locking Blockchain to get proposer_depth currently. Then dropping the lock
//Will be holding the lock for each level's processing inside the subroutine
let locked_blockchain = self.blockchain.lock().unwrap();
let level_start = self.ledger_manager_state.last_level_processed + 1;
let level_end = locked_blockchain.proposer_depth + 1;
drop(locked_blockchain);
for level in level_start..level_end {
let leader: Option<H256> = self.confirm_leader(level);
match leader {
Some(leader_hash) => {
println!("Adding leader at level {}, leader hash: {:?}", level, leader_hash);
leader_sequence.push(leader_hash);
// self.ledger_manager_state.leader_sequence.push(leader_hash);
// println!("Leader sequence: {:?}", self.ledger_manager_state.leader_sequence);
self.ledger_manager_state.last_level_processed = level;
}
None => {
println!("Unable to confirm leader at level {}", level);
println!("Returning from get_confirmed_leader_sequence func");
break; // TODO: Will this break out of loop??
}
}
}
leader_sequence
}
//we use the confirmation policy from https://arxiv.org/abs/1810.08092
//This function is heavily borrowed from implementation provided in the actual Prism codebase
//https://github.com/yangl1996/prism-rust/
fn confirm_leader(&mut self, level: u32) -> Option<H256> {
let locked_blockchain = self.blockchain.lock().unwrap();
let proposer_blocks = &locked_blockchain.level2allproposers[&level];
let mut new_leader: Option<H256> = None;
let num_voter_chains: u32 = locked_blockchain.num_voter_chains;
// for each proposer count the number of confirmed votes i.e. votes that are k-deep (2-deep).
let mut num_confirmed_votes: HashMap<H256, u32> = HashMap::new();
for block in proposer_blocks {
            if locked_blockchain.proposer2voterinfo.contains_key(block) {
                //TODO: We might also need number of voter blocks at a particular level of a voter chain
                //This is not urgent as we can **assume**, there is one block at each level
                let voters_info = &locked_blockchain.proposer2voterinfo[block];
                if voters_info.len() < (num_voter_chains as usize / 2) {
                    println!("number of votes for {:?} is {}", block, voters_info.len());
                    continue;
                }
                let mut total_k_deep_votes: u32 = 0;
                for (voter_chain, voter_block) in voters_info {
                    let voter_block_level = locked_blockchain.voter_chains[(*voter_chain-1) as usize][voter_block].level;
                    let voter_chain_level = locked_blockchain.voter_depths[(*voter_chain-1) as usize];
                    let this_vote_depth = voter_chain_level - voter_block_level;
                    if this_vote_depth >= self.voter_depth_k {
                        total_k_deep_votes += 1;
                    }
                }
num_confirmed_votes.insert(*block, total_k_deep_votes);
}
}
for (proposer, votes) in num_confirmed_votes.iter() {
println!("proposer {:?} votes {}", proposer, *votes);
if *votes > (num_voter_chains / 2) {
new_leader = Some(*proposer);
break;
}
}
new_leader
}
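    // Editor's illustration (added helper, not in the original code): the per-vote
    // depth test used in the loop above, isolated as a pure function. A vote is
    // confirmed once its voter block is buried at least `k` levels deep.
    #[allow(dead_code)]
    fn vote_is_k_deep(chain_depth: u32, vote_level: u32, k: u32) -> bool {
        chain_depth.saturating_sub(vote_level) >= k
    }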
// needs to process parent as well
fn get_transaction_sequence(&mut self, leader_sequence: &Vec<H256>) -> Vec<SignedTransaction> {
let locked_blockchain = self.blockchain.lock().unwrap();
let mut tx_sequence: Vec<SignedTransaction> = Vec::new();
//TODO: Should we do it recursively? Like should we also see references to
//proposer references of leader?
//TODO: Also we should refactor it later
for leader in leader_sequence {
let leader_block = &locked_blockchain.proposer_chain[leader].block;
//processing parent and proposer refs
let mut proposer_refs_to_process: Vec<H256> = Vec::new();
let mut leader_txs: Vec<SignedTransaction> = Vec::new();
match &leader_block.content {
Content::Proposer(content) => {
// parent and proposer_refs of leader
let parent = &content.parent_hash;
let proposer_refs = &content.proposer_refs;
if !self.ledger_manager_state.proposer_blocks_processed.contains(parent) {
proposer_refs_to_process.push(*parent);
}
for proposer_ref in proposer_refs {
if !self.ledger_manager_state.proposer_blocks_processed.contains(proposer_ref) {
proposer_refs_to_process.push(*proposer_ref);
}
}
//txs of leader
leader_txs = content.transactions.clone();
}
_ => {
}
}
//TODO: Do we have to do match in this and previous loop as we know it will always
//match to Proposer(content). Can we unwrap??
for proposer_ref in &proposer_refs_to_process {
let proposer_block = &locked_blockchain.proposer_chain[proposer_ref].block;
match &proposer_block.content {
Content::Proposer(content) => {
tx_sequence.append(&mut content.transactions.clone());
}
_ => {
}
}
self.ledger_manager_state.proposer_blocks_processed.insert(*proposer_ref);
}
//appending leader txs finally
//adding leader to proposer_blocks_processed
tx_sequence.append(&mut leader_txs);
self.ledger_manager_state.proposer_blocks_processed.insert(*leader);
}
tx_sequence
}
fn confirm_transactions(&mut self, tx_sequence: &Vec<SignedTransaction>) {
self.ledger_manager_state.tx_count += tx_sequence.len();
// println!("Number of transactions considered yet {}", self.ledger_manager_state.tx_count);
let mut locked_utxostate = self.utxo_state.lock().unwrap();
for tx in tx_sequence {
//if already processed continue
if self.ledger_manager_state.tx_confirmed.contains(&tx.hash()) {
println!("DUPLICATE TXS! Already confirmed");
continue;
}
//check for validity
//if valid, update utxo_state and add to confirmed transactions
if locked_utxostate.is_tx_valid(tx) {
locked_utxostate.update_state(tx);
self.ledger_manager_state.tx_confirmed.insert(tx.hash());
println!("Confirmed trans hash {} at {}", tx.hash(), SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_micros());
// Print UTXO state
// locked_utxostate.print();
}
}
drop(locked_utxostate);
    }
}
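// Editor's illustration (added test, not in the original file): confirm_leader
// requires a strict majority of k-deep votes, i.e. `votes > num_voter_chains / 2`.
#[cfg(test)]
mod confirmation_threshold_examples {
    #[test]
    fn strict_majority_is_required() {
        let num_voter_chains: u32 = 10; // assumed deployment size
        assert!(!(5 > num_voter_chains / 2)); // exactly half is not enough
        assert!(6 > num_voter_chains / 2); // one more than half confirms
    }
}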
rtc_api.rs | #![allow(dead_code)]
use bitflags::*;
#[derive(Debug, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, Copy, Clone)]
pub enum Weekday {
Sunday,
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
}
impl Default for Weekday {
fn default() -> Self { Weekday::Sunday }
}
#[derive(Debug, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)]
pub enum TimeUnits {
Seconds,
Minutes,
Hours,
}
#[derive(Debug, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, Copy, Clone, Default)]
pub struct DateTime {
pub seconds: u8,
pub minutes: u8,
pub hours: u8,
pub days: u8,
pub months: u8,
pub years: u8,
pub weekday: Weekday,
}
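// Editor's illustration (assumed values, added): fields hold plain binary values;
// BCD packing only happens at the register boundary (see `to_binary` below).
#[allow(dead_code)]
fn example_datetime() -> DateTime {
    DateTime { hours: 12, days: 1, months: 1, years: 21, ..Default::default() }
}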
#[derive(Debug, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, Copy, Clone, Default)]
pub struct RtcSessionOffset {
pub rtc_seconds: u64,
pub ticktimer_ms: u64,
}
pub const BLOCKING_I2C_TIMEOUT_MS: u64 = 50;
pub const ABRTCMC_I2C_ADR: u8 = 0x68;
pub const ABRTCMC_CONTROL1: u8 = 0x00;
bitflags! {
pub struct Control1: u8 {
const CORRECTION_INT = 0b0000_0001;
const ALARM_INT = 0b0000_0010;
const SECONDS_INT = 0b0000_0100;
const HR_MODE_12 = 0b0000_1000;
const SOFT_RESET = 0b0001_0000;
const STOP = 0b0010_0000;
}
}
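// Editor's illustration (added; assumes the bitflags 1.x API used above): register
// values compose with bitwise-or and are queried with `contains`.
#[cfg(test)]
mod control1_examples {
    use super::Control1;

    #[test]
    fn flags_compose_with_bitor() {
        let stopped_reset = Control1::SOFT_RESET | Control1::STOP;
        assert!(stopped_reset.contains(Control1::SOFT_RESET));
        assert_eq!(stopped_reset.bits(), 0b0011_0000);
    }
}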
pub const ABRTCMC_CONTROL2: u8 = 0x01;
bitflags! {
pub struct Control2: u8 {
const COUNTDOWN_B_INT = 0b0000_0001;
const COUNTDOWN_A_INT = 0b0000_0010;
const WATCHDOG_A_INT = 0b0000_0100;
const ALARM_HAPPENED = 0b0000_1000;
const SECONDS_HAPPENED= 0b0001_0000;
const COUNTB_HAPPENED = 0b0010_0000;
const COUNTA_HAPPENED = 0b0100_0000;
const WATCHA_HAPPENED = 0b1000_0000;
}
}
pub const ABRTCMC_CONTROL3: u8 = 0x02;
bitflags! {
pub struct Control3: u8 {
const BATTLOW_INT = 0b0000_0001;
const BATTSWITCH_INT = 0b0000_0010;
const BATTLOW_STAT = 0b0000_0100;
const BATTSW_HAPPENED = 0b0000_1000;
const BATT_STD_BL_EN = 0b0000_0000;
const BATT_DIR_BL_EN = 0b0010_0000;
const BATT_DIS_BL_EN = 0b0100_0000;
const BATT_STD_BL_DIS = 0b1000_0000;
const BATT_DIR_BL_DIS = 0b1010_0000;
const BATT_DI_BL_DIS = 0b1110_0000;
}
}
pub const ABRTCMC_SECONDS: u8 = 0x3;
bitflags! {
pub struct Seconds: u8 {
const SECONDS_BCD = 0b0111_1111;
const CORRUPTED = 0b1000_0000;
}
}
pub const ABRTCMC_MINUTES: u8 = 0x4;
// no bitflags, minutes are BCD whole register
pub const ABRTCMC_HOURS: u8 = 0x5;
bitflags! {
pub struct Hours: u8 {
const HR12_HOURS_BCD = 0b0001_1111;
const HR12_PM_FLAG = 0b0010_0000;
const HR24_HOURS_BCD = 0b0011_1111;
}
}
pub const ABRTCMC_DAYS: u8 = 0x6;
// no bitflags, days are BCD whole register
pub const ABRTCMC_WEEKDAYS: u8 = 0x7;
bitflags! {
pub struct Weekdays: u8 {
const SUNDAY = 0b000;
const MONDAY = 0b001;
const TUESDAY = 0b010;
const WEDNESDAY= 0b011;
const THURSDAY = 0b100;
const FRIDAY = 0b101;
const SATURDAY = 0b110;
}
}
pub const ABRTCMC_MONTHS: u8 = 0x8;
bitflags! {
pub struct Months: u8 { // BCD "months"
const JANUARY = 0b0_0001;
const FEBRUARY = 0b0_0010;
const MARCH = 0b0_0011;
const APRIL = 0b0_0100;
const MAY = 0b0_0101;
const JUNE = 0b0_0110;
const JULY = 0b0_0111;
const AUGUST = 0b0_1000;
const SEPTEMBER = 0b0_1001;
const OCTOBER = 0b1_0000;
const NOVEMBER = 0b1_0001;
const DECEMBER = 0b1_0010;
}
}
pub const ABRTCMC_YEARS: u8 = 0x9;
// no bitflags, years are 00-99 in BCD format
pub const ABRTCMC_MINUTE_ALARM: u8 = 0xA;
pub const ABRTCMC_HOUR_ALARM: u8 = 0xB;
pub const ABRTCMC_DAY_ALARM: u8 = 0xC;
pub const ABRTCMC_WEEKDAY_ALARM: u8 = 0xD;
bitflags! {
pub struct Alarm: u8 {
const ENABLE = 0b1000_0000;
// all others code minute/hour/day/weekday in BCD LSBs
const HR12_PM_FLAG = 0b0010_0000; // only used in hours alarm, 12-hour mode
}
}
pub const ABRTCMC_CONFIG: u8 = 0xF;
bitflags! {
pub struct Config: u8 {
const TIMER_B_ENABLE = 0b0000_0001;
const TIMER_A_WATCHDOG = 0b0000_0100;
const TIMER_A_COUNTDWN = 0b0000_0010;
const TIMER_A_DISABLE = 0b0000_0000;
const TIMER_A_DISABLE2 = 0b0000_0110;
const CLKOUT_32768_HZ = 0b0000_0000;
const CLKOUT_16384_HZ = 0b0000_1000;
const CLKOUT_8192_HZ = 0b0001_0000;
const CLKOUT_4096_HZ = 0b0001_1000;
const CLKOUT_1024_HZ = 0b0010_0000;
const CLKOUT_32_HZ = 0b0010_1000;
const CLKOUT_1_HZ = 0b0011_0000;
const CLKOUT_DISABLE = 0b0011_1000;
const TIMERB_INT_PULSED = 0b0100_0000;
const TIMERA_SECONDS_INT_PULSED = 0b1000_0000;
}
}
pub const ABRTCMC_TIMERA_CLK: u8 = 0x10;
pub const ABRTCMC_TIMERB_CLK: u8 = 0x12;
bitflags! {
pub struct TimerClk: u8 {
const CLK_3600_S = 0b0000_0100;
const CLK_60_S = 0b0000_0011;
const CLK_1_S = 0b0000_0010;
const CLK_64_HZ = 0b0000_0001; // 15.625ms
const CLK_4096_HZ = 0b0000_0000; // 0.2441ms
const PULSE_46_MS = 0b0000_0000;
const PULSE_62_MS = 0b0001_0000;
const PULSE_78_MS = 0b0010_0000;
const PULSE_93_MS = 0b0011_0000;
const PULSE_125_MS = 0b0100_0000;
const PULSE_156_MS = 0b0101_0000;
const PULSE_187_MS = 0b0110_0000;
const PULSE_218_MS = 0b0111_0000;
}
}
pub const ABRTCMC_TIMERA: u8 = 0x11;
// no bitflags, register is timer period in seconds, and the period is N / (source clock frequency)
pub const ABRTCMC_TIMERB: u8 = 0x13;
// no bitflags, register is timer period in seconds, and the period is N / (source clock frequency)
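// Editor's sketch (hypothetical helper, added): per the comment above, a countdown
// register value N gives a period of N / f_src. E.g. N = 10 at CLK_64_HZ (64 Hz)
// is 156.25 ms.
#[allow(dead_code)]
pub fn timer_period_ms(n: u8, source_clock_hz: f32) -> f32 {
    (n as f32) * 1000.0 / source_clock_hz
}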
/// This function takes the raw &[u8] as returned by the RTC I2C low level read function
/// and converts it to a number of seconds. All hardware RTC readings are based off of the
/// BCD equivalent of Jan 1 2000, 00:00:00, but keep in mind this is just an internal representation.
/// We turn this into a u64 number of seconds because what we really want out of the hardware RTC
/// is _just_ a count of seconds from some arbitrary but fixed start point, that we anchor through other
/// algorithms to UTC.
pub fn rtc_to_seconds(settings: &[u8]) -> Option<u64> {
const CTL3: usize = 0;
const SECS: usize = 1;
const MINS: usize = 2;
const HOURS: usize = 3;
const DAYS: usize = 4;
// note 5 is skipped - this is weekdays, and is unused
const MONTHS: usize = 6;
const YEARS: usize = 7;
    if ((settings[CTL3] & 0xE0) != crate::RTC_PWR_MODE) // power switchover setting should be initialized
    || (settings[SECS] & 0x80 != 0) { // clock integrity should be guaranteed
log::error!("RTC is in an uninitialized state!, {:x?}", settings);
return None;
}
// this is a secondary check -- I have seen RTC return nonsense time results before
// so this is an extra check above and beyond what's in the datasheet
if (to_binary(settings[SECS]) > 59)
|| (to_binary(settings[MINS]) > 59)
|| (to_binary(settings[HOURS]) > 23) // 24 hour mode is default and assumed
|| (to_binary(settings[DAYS]) > 31) || (to_binary(settings[DAYS]) == 0)
|| (to_binary(settings[MONTHS]) > 12) || (to_binary(settings[MONTHS]) == 0)
|| (to_binary(settings[YEARS]) > 99) {
log::error!("RTC has invalid digits!: {:?}", settings);
return None;
}
let mut total_secs: u64 = 0;
total_secs += to_binary(settings[SECS]) as u64;
total_secs += to_binary(settings[MINS]) as u64 * 60;
total_secs += to_binary(settings[HOURS]) as u64 * 3600;
const SECS_PER_DAY: u64 = 86400;
// DAYS is checked to be 1-31, so, it's safe to subtract 1 here
total_secs += (to_binary(settings[DAYS]) as u64 - 1) * SECS_PER_DAY;
// this will iterate from 0 through 11; december never has an offset added, because its contribution is directly measured in DAYS
for month in 0..to_binary(settings[MONTHS]) {
match month {
0 => total_secs += 0u64,
1 => total_secs += 31u64 * SECS_PER_DAY,
2 => {
// per spec sheet: 1) If the year counter contains a value which is exactly divisible by 4 (including the year 00),
// the AB-RTCMC-32.768kHz-B5ZE-S3 compensates for leap years by adding a 29th day to February.
if (to_binary(settings[YEARS]) % 4) == 0 {
total_secs += 29u64 * SECS_PER_DAY;
} else {
total_secs += 28u64 * SECS_PER_DAY;
};
},
3 => total_secs += 31u64 * SECS_PER_DAY,
4 => total_secs += 30u64 * SECS_PER_DAY,
5 => total_secs += 31u64 * SECS_PER_DAY,
6 => total_secs += 30u64 * SECS_PER_DAY,
7 => total_secs += 31u64 * SECS_PER_DAY,
8 => total_secs += 31u64 * SECS_PER_DAY,
9 => total_secs += 30u64 * SECS_PER_DAY,
10 => total_secs += 31u64 * SECS_PER_DAY,
11 => total_secs += 30u64 * SECS_PER_DAY,
// December shoud never be encountered in this loop since it's right-exclusive
_ => panic!("RTC code has an internal error, months encountered an 'impossible' value"),
}
}
// figure out what the last round multiple of leap years was before the current time
let last_leap = (to_binary(settings[YEARS]) - to_binary(settings[YEARS]) % 4) as u64;
// now add the contributions of all these prior years
total_secs += (last_leap / 4) * (365 * 3 + 366) * SECS_PER_DAY;
// now add the contributions of any years since the last round multiple of leap years
if to_binary(settings[YEARS]) % 4 != 0 {
// account for the leap year
total_secs += 366 * SECS_PER_DAY;
// now account for successive years
total_secs += 365 * (((to_binary(settings[YEARS]) % 4) - 1) as u64) * SECS_PER_DAY;
}
Some(total_secs)
}
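// Editor's examples (added tests, not in the original file): spot checks for the
// BCD decode that underpins every field read above. `rtc_to_seconds` itself is not
// exercised here because it depends on `crate::RTC_PWR_MODE` and live registers.
#[cfg(test)]
mod bcd_examples {
    use super::to_binary;

    #[test]
    fn bcd_decodes_tens_and_ones() {
        assert_eq!(to_binary(0x00), 0);
        assert_eq!(to_binary(0x09), 9);
        assert_eq!(to_binary(0x10), 10);
        assert_eq!(to_binary(0x59), 59); // largest valid seconds/minutes field
    }
}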
pub fn to_binary(bcd: u8) -> u8 {
    (bcd & 0xf) + ((bcd >> 4) * 10)
}
rtc_api.rs | #![allow(dead_code)]
use bitflags::*;
#[derive(Debug, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, Copy, Clone)]
pub enum Weekday {
Sunday,
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
}
impl Default for Weekday {
fn default() -> Self { Weekday::Sunday }
}
#[derive(Debug, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)]
pub enum TimeUnits {
Seconds,
Minutes,
Hours,
}
#[derive(Debug, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, Copy, Clone, Default)]
pub struct DateTime {
pub seconds: u8,
pub minutes: u8,
pub hours: u8,
pub days: u8,
pub months: u8,
pub years: u8,
pub weekday: Weekday,
}
#[derive(Debug, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, Copy, Clone, Default)]
pub struct RtcSessionOffset {
pub rtc_seconds: u64,
pub ticktimer_ms: u64,
}
pub const BLOCKING_I2C_TIMEOUT_MS: u64 = 50;
pub const ABRTCMC_I2C_ADR: u8 = 0x68;
pub const ABRTCMC_CONTROL1: u8 = 0x00;
bitflags! {
pub struct Control1: u8 {
const CORRECTION_INT = 0b0000_0001;
const ALARM_INT = 0b0000_0010;
const SECONDS_INT = 0b0000_0100;
const HR_MODE_12 = 0b0000_1000;
const SOFT_RESET = 0b0001_0000;
const STOP = 0b0010_0000;
}
}
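// Illustrative use (assumption: these flags are or-ed into a single register
// byte, and some `i2c_write(addr, reg, value)` helper exists for the bus):
//
//     let ctl1 = Control1::SOFT_RESET | Control1::ALARM_INT;
//     i2c_write(ABRTCMC_I2C_ADR, ABRTCMC_CONTROL1, ctl1.bits());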
pub const ABRTCMC_CONTROL2: u8 = 0x01;
bitflags! {
pub struct Control2: u8 {
const COUNTDOWN_B_INT = 0b0000_0001;
const COUNTDOWN_A_INT = 0b0000_0010;
const WATCHDOG_A_INT = 0b0000_0100;
const ALARM_HAPPENED = 0b0000_1000;
const SECONDS_HAPPENED= 0b0001_0000;
const COUNTB_HAPPENED = 0b0010_0000;
const COUNTA_HAPPENED = 0b0100_0000;
const WATCHA_HAPPENED = 0b1000_0000;
}
}
pub const ABRTCMC_CONTROL3: u8 = 0x02;
bitflags! {
pub struct Control3: u8 {
const BATTLOW_INT = 0b0000_0001;
const BATTSWITCH_INT = 0b0000_0010;
const BATTLOW_STAT = 0b0000_0100;
const BATTSW_HAPPENED = 0b0000_1000;
const BATT_STD_BL_EN = 0b0000_0000;
const BATT_DIR_BL_EN = 0b0010_0000;
const BATT_DIS_BL_EN = 0b0100_0000;
const BATT_STD_BL_DIS = 0b1000_0000;
const BATT_DIR_BL_DIS = 0b1010_0000;
const BATT_DI_BL_DIS = 0b1110_0000;
}
}
pub const ABRTCMC_SECONDS: u8 = 0x3;
bitflags! {
pub struct Seconds: u8 {
const SECONDS_BCD = 0b0111_1111;
const CORRUPTED = 0b1000_0000;
}
}
pub const ABRTCMC_MINUTES: u8 = 0x4;
// no bitflags, minutes are BCD whole register
pub const ABRTCMC_HOURS: u8 = 0x5;
bitflags! {
pub struct Hours: u8 {
const HR12_HOURS_BCD = 0b0001_1111;
const HR12_PM_FLAG = 0b0010_0000;
const HR24_HOURS_BCD = 0b0011_1111;
}
}
pub const ABRTCMC_DAYS: u8 = 0x6;
// no bitflags, days are BCD whole register
pub const ABRTCMC_WEEKDAYS: u8 = 0x7;
bitflags! {
pub struct Weekdays: u8 {
const SUNDAY = 0b000;
const MONDAY = 0b001;
const TUESDAY = 0b010;
const WEDNESDAY= 0b011;
const THURSDAY = 0b100;
const FRIDAY = 0b101;
const SATURDAY = 0b110;
}
}
pub const ABRTCMC_MONTHS: u8 = 0x8;
bitflags! {
pub struct Months: u8 { // BCD "months"
const JANUARY = 0b0_0001;
const FEBRUARY = 0b0_0010;
const MARCH = 0b0_0011;
const APRIL = 0b0_0100;
const MAY = 0b0_0101;
const JUNE = 0b0_0110;
const JULY = 0b0_0111;
const AUGUST = 0b0_1000;
const SEPTEMBER = 0b0_1001;
const OCTOBER = 0b1_0000;
const NOVEMBER = 0b1_0001;
const DECEMBER = 0b1_0010;
}
}
pub const ABRTCMC_YEARS: u8 = 0x9;
// no bitflags, years are 00-99 in BCD format
pub const ABRTCMC_MINUTE_ALARM: u8 = 0xA;
pub const ABRTCMC_HOUR_ALARM: u8 = 0xB;
pub const ABRTCMC_DAY_ALARM: u8 = 0xC;
pub const ABRTCMC_WEEKDAY_ALARM: u8 = 0xD;
bitflags! {
pub struct Alarm: u8 {
const ENABLE = 0b1000_0000;
// all others code minute/hour/day/weekday in BCD LSBs
const HR12_PM_FLAG = 0b0010_0000; // only used in hours alarm, 12-hour mode
}
}
pub const ABRTCMC_CONFIG: u8 = 0xF;
bitflags! {
pub struct Config: u8 {
const TIMER_B_ENABLE = 0b0000_0001;
const TIMER_A_WATCHDOG = 0b0000_0100;
const TIMER_A_COUNTDWN = 0b0000_0010;
const TIMER_A_DISABLE = 0b0000_0000;
const TIMER_A_DISABLE2 = 0b0000_0110;
const CLKOUT_32768_HZ = 0b0000_0000;
const CLKOUT_16384_HZ = 0b0000_1000;
const CLKOUT_8192_HZ = 0b0001_0000;
const CLKOUT_4096_HZ = 0b0001_1000;
const CLKOUT_1024_HZ = 0b0010_0000;
const CLKOUT_32_HZ = 0b0010_1000;
const CLKOUT_1_HZ = 0b0011_0000;
const CLKOUT_DISABLE = 0b0011_1000;
const TIMERB_INT_PULSED = 0b0100_0000;
const TIMERA_SECONDS_INT_PULSED = 0b1000_0000;
}
}
pub const ABRTCMC_TIMERA_CLK: u8 = 0x10;
pub const ABRTCMC_TIMERB_CLK: u8 = 0x12;
bitflags! {
pub struct TimerClk: u8 {
const CLK_3600_S = 0b0000_0100;
const CLK_60_S = 0b0000_0011;
const CLK_1_S = 0b0000_0010;
const CLK_64_HZ = 0b0000_0001; // 15.625ms
const CLK_4096_HZ = 0b0000_0000; // 0.2441ms
const PULSE_46_MS = 0b0000_0000;
const PULSE_62_MS = 0b0001_0000;
const PULSE_78_MS = 0b0010_0000;
const PULSE_93_MS = 0b0011_0000;
const PULSE_125_MS = 0b0100_0000;
const PULSE_156_MS = 0b0101_0000;
const PULSE_187_MS = 0b0110_0000;
const PULSE_218_MS = 0b0111_0000;
}
}
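// Illustrative combination (assumption: the clock-source bits in the low
// nibble and the pulse-width bits in the high nibble are or-ed into one
// register value, which would then be written to ABRTCMC_TIMERA_CLK or
// ABRTCMC_TIMERB_CLK):
//
//     let clk = TimerClk::CLK_1_S | TimerClk::PULSE_125_MS;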
pub const ABRTCMC_TIMERA: u8 = 0x11;
// no bitflags, register is timer period in seconds, and the period is N / (source clock frequency)
pub const ABRTCMC_TIMERB: u8 = 0x13;
// no bitflags, register is timer period in seconds, and the period is N / (source clock frequency)
/// This function takes the raw &[u8] as returned by the RTC I2C low level read function
/// and converts it to a number of seconds. All hardware RTC readings are based off of the
/// BCD equivalent of Jan 1 2000, 00:00:00, but keep in mind this is just an internal representation.
/// We turn this into a u64 number of seconds because what we really want out of the hardware RTC
/// is _just_ a count of seconds from some arbitrary but fixed start point, that we anchor through other
/// algorithms to UTC.
pub fn rtc_to_seconds(settings: &[u8]) -> Option<u64> {
const CTL3: usize = 0;
const SECS: usize = 1;
const MINS: usize = 2;
const HOURS: usize = 3;
const DAYS: usize = 4;
// note 5 is skipped - this is weekdays, and is unused
const MONTHS: usize = 6;
const YEARS: usize = 7;
    if ((settings[CTL3] & 0xE0) != crate::RTC_PWR_MODE) // power switchover setting should be initialized
    || (settings[SECS] & 0x80 != 0) { // clock integrity should be guaranteed
log::error!("RTC is in an uninitialized state!, {:x?}", settings);
return None;
}
// this is a secondary check -- I have seen RTC return nonsense time results before
// so this is an extra check above and beyond what's in the datasheet
if (to_binary(settings[SECS]) > 59)
|| (to_binary(settings[MINS]) > 59)
|| (to_binary(settings[HOURS]) > 23) // 24 hour mode is default and assumed
|| (to_binary(settings[DAYS]) > 31) || (to_binary(settings[DAYS]) == 0)
|| (to_binary(settings[MONTHS]) > 12) || (to_binary(settings[MONTHS]) == 0)
|| (to_binary(settings[YEARS]) > 99) {
log::error!("RTC has invalid digits!: {:?}", settings);
return None;
}
let mut total_secs: u64 = 0;
total_secs += to_binary(settings[SECS]) as u64;
total_secs += to_binary(settings[MINS]) as u64 * 60;
total_secs += to_binary(settings[HOURS]) as u64 * 3600;
const SECS_PER_DAY: u64 = 86400;
// DAYS is checked to be 1-31, so, it's safe to subtract 1 here
total_secs += (to_binary(settings[DAYS]) as u64 - 1) * SECS_PER_DAY;
// this will iterate from 0 through 11; december never has an offset added, because its contribution is directly measured in DAYS
for month in 0..to_binary(settings[MONTHS]) {
match month {
0 => total_secs += 0u64,
1 => total_secs += 31u64 * SECS_PER_DAY,
2 => {
// per spec sheet: 1) If the year counter contains a value which is exactly divisible by 4 (including the year 00),
// the AB-RTCMC-32.768kHz-B5ZE-S3 compensates for leap years by adding a 29th day to February.
if (to_binary(settings[YEARS]) % 4) == 0 {
total_secs += 29u64 * SECS_PER_DAY;
} else {
total_secs += 28u64 * SECS_PER_DAY;
};
},
3 => total_secs += 31u64 * SECS_PER_DAY,
4 => total_secs += 30u64 * SECS_PER_DAY,
5 => total_secs += 31u64 * SECS_PER_DAY,
6 => total_secs += 30u64 * SECS_PER_DAY,
7 => total_secs += 31u64 * SECS_PER_DAY,
8 => total_secs += 31u64 * SECS_PER_DAY,
9 => total_secs += 30u64 * SECS_PER_DAY,
10 => total_secs += 31u64 * SECS_PER_DAY,
11 => total_secs += 30u64 * SECS_PER_DAY,
// December should never be encountered in this loop since it's right-exclusive
_ => panic!("RTC code has an internal error, months encountered an 'impossible' value"),
}
}
// figure out what the last round multiple of leap years was before the current time
let last_leap = (to_binary(settings[YEARS]) - to_binary(settings[YEARS]) % 4) as u64;
// now add the contributions of all these prior years
total_secs += (last_leap / 4) * (365 * 3 + 366) * SECS_PER_DAY;
// now add the contributions of any years since the last round multiple of leap years
    if to_binary(settings[YEARS]) % 4 != 0 {
// account for the leap year
total_secs += 366 * SECS_PER_DAY;
// now account for successive years
total_secs += 365 * (((to_binary(settings[YEARS]) % 4) - 1) as u64) * SECS_PER_DAY;
}
Some(total_secs) |
pub fn to_binary(bcd: u8) -> u8 {
(bcd & 0xf) + ((bcd >> 4) * 10)
} | } | random_line_split |
mod.rs | //! Extensions to [`Target`](super::Target) which add support for various
//! subsets of the GDB Remote Serial Protocol.
//!
//! ### Note: Missing Protocol Extensions
//!
//! `gdbstub`'s development is guided by the needs of its contributors, with
//! new features being added on an "as-needed" basis.
//!
//! If there's a GDB protocol extension you're interested in that hasn't been
//! implemented in `gdbstub` yet (e.g: remote filesystem access, tracepoint
//! support, etc...), consider opening an issue / filing a PR on GitHub!
//!
//! Check out the [GDB Remote Configuration Docs](https://sourceware.org/gdb/onlinedocs/gdb/Remote-Configuration.html)
//! for a table of GDB commands + their corresponding Remote Serial Protocol
//! packets.
//!
//! ## How Protocol Extensions Work - Inlineable Dyn Extension Traits (IDETs)
//!
//! The GDB protocol is massive, and contains all sorts of optional
//! functionality. In the early versions of `gdbstub`, the `Target` trait
//! directly had a method for _every single protocol extension_, which if taken | //! methods!
//!
//! Aside from the cognitive complexity of having so many methods on a single
//! trait, this approach had numerous other drawbacks as well:
//!
//! - Implementations that did not implement all available protocol extensions
//! still had to "pay" for the unused packet parsing/handler code, resulting
//! in substantial code bloat, even on `no_std` platforms.
//! - `GdbStub`'s internal implementation needed to include _runtime_ checks to
//! deal with incorrectly implemented `Target`s.
//! - No way to enforce "mutually-dependent" trait methods at compile-time.
//! - e.g: When implementing hardware breakpoint extensions, targets
//! _must_ implement both the `add_breakpoint` and
//! `remove_breakpoints` methods.
//! - No way to enforce "mutually-exclusive" trait methods at compile-time.
//! - e.g: The `resume` method for single-threaded targets has a much
//! simpler API than for multi-threaded targets, but it would be
//! incorrect for a target to implement both.
//!
//! At first blush, it seems the solution to all these issues is obvious:
//! simply tie each protocol extension to a `cargo` feature! And yes, while
//! this would indeed work, there would be several serious ergonomic drawbacks:
//!
//! - There would be _hundreds_ of individual feature flags that would need to
//! be toggled by end users.
//! - It would be functionally impossible to _test_ all permutations of
//! enabled/disabled cargo features.
//! - A single binary would need to rely on some [non-trivial `cargo`-fu](https://github.com/rust-lang/cargo/issues/674)
//! in order to have multiple `Target` implementations in a single binary.
//!
//! After much experimentation and iteration, `gdbstub` ended up taking a
//! radically different approach to implementing and enumerating available
//! features, using a technique called **Inlineable Dyn Extension Traits**.
//!
//! > _Author's note:_ As far as I can tell, this isn't a very well-known trick,
//! or at the very least, I've personally never encountered any library that
//! uses this sort of API. As such, I've decided to be a bit cheeky and give it
//! a name! At some point, I'm hoping to write a standalone blog post which
//! further explores this technique, comparing it to other/existing approaches,
//! and diving into the details of how the compiler optimizes this sort of code.
//! In fact, I've already got a [very rough github repo](https://github.com/daniel5151/optional-trait-methods) with some of my
//! findings.
//!
//! So, what are "Inlineable Dyn Extension Traits"? Well, let's break it down:
//!
//! - **Extension Traits** - A common [Rust convention](https://rust-lang.github.io/rfcs/0445-extension-trait-conventions.html#what-is-an-extension-trait)
//! to extend the functionality of a Trait, _without_ modifying the original
//! trait.
//! - **Dyn** - Alludes to the use of Dynamic Dispatch via [Trait Objects](https://doc.rust-lang.org/book/ch17-02-trait-objects.html).
//! - **Inlineable** - Alludes to the fact that this approach can be easily
//! inlined, making it a truly zero-cost abstraction.
//!
//! In a nutshell, Inlineable Dyn Extension Traits (or IDETs) are an abuse of
//! the Rust trait system + modern compiler optimizations to emulate zero-cost,
//! runtime-enumerable optional trait methods!
//!
//! #### Technical overview
//!
//! The basic principles behind Inlineable Dyn Extension Traits are best
//! explained through example:
//!
//! Lets say we want to add an optional protocol extension described by an
//! `ProtocolExt` trait to a base `Protocol` trait. How would we do that using
//! IDETs?
//!
//! - (library) Define a `trait ProtocolExt: Protocol {... }` which includes
//! all the methods required by the protocol extension:
//! - _Note:_ Making `ProtocolExt` a subtrait of `Protocol` is not strictly
//! required, but it does enable transparently using `Protocol`'s
//! associated types as part of `ProtocolExt`'s method definitions.
//!
//! ```rust,ignore
//! /// `foo` and `bar` are mutually-dependent methods.
//! trait ProtocolExt: Protocol {
//! fn foo(&self);
//! // can use associated types in method signature!
//! fn bar(&mut self) -> Result<(), Self::Error>;
//! }
//! ```
//!
//! - (library) "Associate" the `ProtocolExt` extension trait to the original
//! `Protocol` trait by adding a new `Protocol` method that "downcasts" `self`
//! into a `&mut dyn ProtocolExt`.
//!
//! ```rust,ignore
//! trait Protocol {
//! //... other methods...
//!
//! // Optional extension
//! #[inline(always)]
//! fn get_protocol_ext(&mut self) -> Option<ProtocolExtOps<Self>> {
//! // disabled by default
//! None
//! }
//!
//! // Mutually-exclusive extensions
//! fn get_ext_a_or_b(&mut self) -> EitherOrExt<Self::Arch, Self::Error>;
//! }
//!
//! // Using a typedef for readability
//! type ProtocolExtOps<'a, T> =
//! &'a mut dyn ProtocolExt<Arch = <T as Protocol>::Arch, Error = <T as Protocol>::Error>;
//!
//! enum EitherOrExt<'a, A, E> {
//! ProtocolExtA(&'a mut dyn ProtocolExtA<Arch = A, Error = E>),
//! ProtocolExtB(&'a mut dyn ProtocolExtB<Arch = A, Error = E>),
//! }
//! ```
//!
//! - (user) Implements the `ProtocolExt` extension for their target (just like
//! a normal trait).
//!
//! ```rust,ignore
//! impl ProtocolExt for MyTarget {
//! fn foo(&self) {... }
//! fn bar(&mut self) -> Result<(), Self::Error> {... }
//! }
//! ```
//!
//! - (user) Implements the base `Protocol` trait, overriding the
//! `get_protocol_ext` method to return `Some(self)`, which will effectively
//! "enable" the extension.
//!
//! ```rust,ignore
//! impl Protocol for MyTarget {
//! // Optional extension
//! #[inline(always)]
//! fn get_protocol_ext(&mut self) -> Option<ProtocolExtOps<Self>> {
//! Some(self) // will not compile unless `MyTarget` also implements `ProtocolExt`
//! }
//!
//! // Mutually-exclusive extensions
//! #[inline(always)]
//! fn get_ext_a_or_b(&mut self) -> EitherOrExt<Self::Arch, Self::Error> {
//! EitherOrExt::ProtocolExtA(self)
//! }
//! }
//! ```
//!
//! > Please note the use of `#[inline(always)]` when enabling IDET methods.
//! While LLVM is usually smart enough to inline single-level IDETs (such as in
//! the example above), nested IDETs will often require a bit of "help" from the
//! `inline` directive to be correctly optimized.
//!
//! Now, here's where IDETs really shine: If the user didn't implement
//! `ProtocolExt`, but _did_ try to enable the feature by overriding
//! `get_protocol_ext` to return `Some(self)`, they'll get a compile-time error
//! that looks something like this:
//!
//! ```text
//! error[E0277]: the trait bound `MyTarget: ProtocolExt` is not satisfied
//! --> path/to/implementation.rs:44:14
//! |
//! 44 | Some(self)
//! | ^^^^ the trait `ProtocolExt` is not implemented for `MyTarget`
//! |
//! = note: required for the cast to the object type `dyn ProtocolExt<Arch =..., Error =...>`
//! ```
//!
//! The Rust compiler is preventing you from enabling a feature you haven't
//! implemented _at compile time!_
//!
//! - (library) Is able to _query_ whether or not an extension is available,
//! _without_ having to actually invoke any method on the target!
//!
//! ```rust,ignore
//! fn execute_protocol(mut target: impl Target) {
//! match target.get_protocol_ext() {
//! Some(ops) => ops.foo(),
//! None => { /* fallback when not enabled */ }
//! }
//! }
//! ```
//!
//! This is already pretty cool, but what's _even cooler_ is that if you take a
//! look at the generated assembly of a monomorphized `execute_protocol` method
//! (e.g: using godbolt.org), you'll find that the compiler is able to
//! efficiently inline and devirtualize _all_ the calls to `get_protocol_ext`
//! method, which in-turn allows the dead-code-eliminator to work its magic, and
//! remove the unused branches from the generated code! i.e: If a target
//! implementation didn't implement the `ProtocolExt` extension, then that `match`
//! statement in `execute_protocol` would simply turn into a noop!
//!
//! If IDETs are something you're interested in, consider checking out
//! [daniel5151/optional-trait-methods](https://github.com/daniel5151/optional-trait-methods)
//! for some sample code that shows off the power of IDETs. It's not
//! particularly polished, but it does include code snippets which can be
//! pasted into godbolt.org directly to confirm the optimizations described
//! above, and a brief writeup which compares / contrasts alternatives to IDETs.
//!
//! Long story short: Optimizing compilers really are magic!
//!
//! #### Summary: The Benefits of IDETs
//!
//! IDETs solve the numerous issues and shortcomings that arise from the
//! traditional single trait + "optional" methods approach:
//!
//! - **Compile-time enforcement of mutually-dependent methods**
//! - By grouping mutually-dependent methods behind a single extension trait
//! and marking them all as required methods, the Rust compiler is able to
//! catch missing mutually-dependent methods at compile time, with no need
//! for any runtime checks!
//! - **Compile-time enforcement of mutually-exclusive methods**
//! - By grouping mutually-exclusive methods behind two extension traits, and
//! wrapping those in an `enum`, the API is able to document
//! mutually-exclusive functions _at the type-level_, in-turn enabling the
//! library to omit any runtime checks!
//! - _Note:_ Strictly speaking, this isn't really compile time
//! "enforcement", as there's nothing stopping an "adversarial"
//! implementation from implementing both sets of methods, and then
//! "flipping" between the two at runtime. Nonetheless, it serves as a good
//! guardrail.
//! - **Enforce dead-code-elimination _without_ `cargo` feature flags**
//! - This is a really awesome trick: by wrapping code in a `if
//! target.get_protocol_ext().is_some()` block, it's possible to specify
//! _arbitrary_ blocks of code to be feature-dependent!
//! - This is used to great effect in `gdbstub` to optimize-out any packet
//! parsing / handler code for unimplemented protocol extensions (see the sketch below).
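//!
//! A minimal sketch of that pattern (illustrative only, reusing the
//! hypothetical `get_protocol_ext` method from the examples above):
//!
//! ```rust,ignore
//! fn handle_packets(mut target: impl Protocol) {
//!     if target.get_protocol_ext().is_some() {
//!         // packet parsing + handler code for this extension...
//!     }
//!     // ...otherwise the entire block above is optimized away.
//! }
//! ```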
macro_rules! doc_comment {
($x:expr, $($tt:tt)*) => {
#[doc = $x]
$($tt)*
};
}
macro_rules! define_ext {
($extname:ident, $exttrait:ident) => {
doc_comment! {
concat!("See [`", stringify!($exttrait), "`](trait.", stringify!($exttrait), ".html)."),
pub type $extname<'a, T> =
&'a mut dyn $exttrait<Arch = <T as Target>::Arch, Error = <T as Target>::Error>;
}
};
}
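// For instance (hypothetical invocation, shown for illustration only),
// `define_ext!(MonitorCmdOps, MonitorCmd);` would expand to a documented
// type alias roughly equivalent to:
//
//     pub type MonitorCmdOps<'a, T> =
//         &'a mut dyn MonitorCmd<Arch = <T as Target>::Arch, Error = <T as Target>::Error>;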
pub mod base;
pub mod breakpoints;
pub mod catch_syscalls;
pub mod extended_mode;
pub mod memory_map;
pub mod monitor_cmd;
pub mod section_offsets;
pub mod target_description_xml_override; | //! to the extreme, would have resulted in literally _hundreds_ of associated | random_line_split |
main.rs | extern crate cgmath;
extern crate euclid;
extern crate gleam;
extern crate glutin;
extern crate image;
extern crate lodepng;
extern crate offscreen_gl_context;
#[macro_use]
extern crate vulkano;
extern crate vulkano_win;
extern crate winit;
use euclid::Size2D;
use gleam::gl;
use offscreen_gl_context::{ColorAttachmentType, GLContext, GLContextAttributes, NativeGLContext};
use std::time::Duration;
use vulkano_win::VkSurfaceBuild;
use std::cell::Cell;
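/// Raw GL object handles used by the render loop; `Cell` lets them be
/// updated through a shared reference.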
struct GlHandles {
pub vao: Cell<gl::GLuint>,
pub vbos: Cell<[gl::GLuint; 2]>,
pub program: Cell<gl::GLuint>,
pub scale: Cell<f32>,
}
impl GlHandles {
fn new() -> GlHandles {
GlHandles {
vao: Cell::new(0 as gl::GLuint),
vbos: Cell::new([0,0]),
program: Cell::new(0 as gl::GLuint),
scale: Cell::new(0.0),
}
}
}
const CLEAR_COLOR: (f32, f32, f32, f32) = (0.0, 0.2, 0.3, 1.0);
const WIN_WIDTH: i32 = 256;
const WIN_HEIGTH: i32 = 256;
const VS_SHADER: &'static str = "
#version 150 core
in vec2 a_Pos;
in vec3 a_Color;
uniform mat4 scale;
out vec4 v_Color;
void main() { | gl_Position = scale * vec4(0.5 * a_Pos, 0.0, 1.0);
}
";
const FS_SHADER: &'static str = "
#version 150 core
in vec4 v_Color;
out vec4 Target0;
void main() {
Target0 = v_Color;
}
";
const VERTICES: &'static [[f32;2];3] = &[
[-1.0, -0.57],
[ 1.0, -0.57],
[ 0.0, 1.15]
];
const COLORS: &'static [[f32;3];3] = &[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]
];
fn add_shader(program: gl::GLuint, src: &str, ty: gl::GLenum) {
let id = unsafe { gl::CreateShader(ty) };
if id == (0 as gl::GLuint) {
panic!("Failed to create shader type: {:?}", ty);
}
let mut source = Vec::new();
source.extend_from_slice(src.as_bytes());
gl::shader_source(id, &[&source[..]]);
gl::compile_shader(id);
let log = gl::get_shader_info_log(id);
if gl::get_shader_iv(id, gl::COMPILE_STATUS) == (0 as gl::GLint) {
panic!("Failed to compile shader:\n{}", log);
} else {
        if !log.is_empty() {
println!("Warnings detected on shader:\n{}", log);
}
gl::attach_shader(program, id);
}
}
fn compile_shaders(handles: &GlHandles) {
handles.program.set(gl::create_program());
if handles.program.get() == (0 as gl::GLuint) {
panic!("Failed to create shader program");
}
add_shader(handles.program.get(), VS_SHADER, gl::VERTEX_SHADER);
add_shader(handles.program.get(), FS_SHADER, gl::FRAGMENT_SHADER);
gl::link_program(handles.program.get());
if gl::get_program_iv(handles.program.get(), gl::LINK_STATUS) == (0 as gl::GLint) {
let error_log = gl::get_program_info_log(handles.program.get());
panic!("Failed to link shader program: \n{}", error_log);
}
gl::validate_program(handles.program.get());
if gl::get_program_iv(handles.program.get(), gl::VALIDATE_STATUS) == (0 as gl::GLint) {
let error_log = gl::get_program_info_log(handles.program.get());
panic!("Failed to validate shader program: \n{}", error_log);
}
gl::use_program(handles.program.get());
}
fn gl_draw(handles: &GlHandles) {
gl::clear_color(CLEAR_COLOR.0, CLEAR_COLOR.1, CLEAR_COLOR.2, CLEAR_COLOR.3);
gl::clear(gl::COLOR_BUFFER_BIT);
gl::bind_buffer(gl::ARRAY_BUFFER, handles.vbos.get()[0]);
gl::buffer_data(gl::ARRAY_BUFFER, VERTICES, gl::STATIC_DRAW);
gl::vertex_attrib_pointer(0 as gl::GLuint,
2,
gl::FLOAT,
false,
0 as gl::GLint,
0);
gl::enable_vertex_attrib_array(0 as gl::GLuint);
gl::bind_buffer(gl::ARRAY_BUFFER, handles.vbos.get()[1]);
gl::buffer_data(gl::ARRAY_BUFFER, COLORS, gl::STATIC_DRAW);
gl::vertex_attrib_pointer(1 as gl::GLuint,
3,
gl::FLOAT,
false,
0 as gl::GLint,
0);
gl::enable_vertex_attrib_array(1 as gl::GLuint);
handles.scale.set(handles.scale.get() + 0.01);
let scale = handles.scale.get();
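    // `scale` actually holds a rotation angle in radians here; `rot_matrix`
    // below is a rotation about the Z axis (uploaded with `transpose = false`).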
let rot_matrix = [
scale.cos(), -1.0 * scale.sin(), 0.0, 0.0,
scale.sin(), scale.cos(), 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0
];
let scale_pos = gl::get_uniform_location(handles.program.get(), "scale");
gl::uniform_matrix_4fv(scale_pos, false, &rot_matrix);
gl::draw_arrays(gl::TRIANGLES, 0, 3);
}
pub fn main() {
gl::load_with(|s| GLContext::<NativeGLContext>::get_proc_address(s) as *const _);
let offscreen_ctx = GLContext::<NativeGLContext>::new(Size2D::new(256, 256),
GLContextAttributes::default(),
ColorAttachmentType::Renderbuffer,
None).unwrap();
offscreen_ctx.make_current().unwrap();
let handles: GlHandles = GlHandles::new();
// Create VAO and VBOs
handles.vao.set(gl::gen_vertex_arrays(1)[0]);
gl::bind_vertex_array(handles.vao.get());
let buffer_ids = gl::gen_buffers(2);
handles.vbos.set([buffer_ids[0], buffer_ids[1]]);
compile_shaders(&handles);
// Create FBO and bind it
let fbo = gl::gen_framebuffers(1)[0];
gl::bind_framebuffer(gl::FRAMEBUFFER, fbo);
let extensions = vulkano_win::required_extensions();
let instance = vulkano::instance::Instance::new(None, &extensions, &[]).expect("failed to create instance");
let physical = vulkano::instance::PhysicalDevice::enumerate(&instance)
.next().expect("no device available");
println!("Using device: {} (type: {:?})", physical.name(), physical.ty());
let window = winit::WindowBuilder::new().build_vk_surface(&instance).unwrap();
let queue = physical.queue_families().find(|q| q.supports_graphics() &&
window.surface().is_supported(q).unwrap_or(false))
.expect("couldn't find a graphical queue family");
let device_ext = vulkano::device::DeviceExtensions {
khr_swapchain: true,
.. vulkano::device::DeviceExtensions::none()
};
let (device, mut queues) = vulkano::device::Device::new(&physical, physical.supported_features(),
&device_ext, [(queue, 0.5)].iter().cloned())
.expect("failed to create device");
let queue = queues.next().unwrap();
let (swapchain, images) = {
let caps = window.surface().get_capabilities(&physical).expect("failed to get surface capabilities");
let dimensions = caps.current_extent.unwrap_or([WIN_WIDTH as u32, WIN_HEIGTH as u32]);
let present = caps.present_modes.iter().next().unwrap();
let usage = caps.supported_usage_flags;
vulkano::swapchain::Swapchain::new(&device, &window.surface(), caps.min_image_count,
vulkano::format::B8G8R8A8Srgb, dimensions, 1,
&usage, &queue, vulkano::swapchain::SurfaceTransform::Identity,
vulkano::swapchain::CompositeAlpha::Opaque,
present, true, None).expect("failed to create swapchain")
};
#[derive(Debug, Clone)]
struct Vertex { position: [f32; 2] }
impl_vertex!(Vertex, position);
let vertex_buffer = vulkano::buffer::cpu_access::CpuAccessibleBuffer::<[Vertex]>
::from_iter(&device, &vulkano::buffer::BufferUsage::all(),
Some(queue.family()), [
Vertex { position: [-0.5, -0.5 ] },
Vertex { position: [-0.5, 0.5 ] },
Vertex { position: [ 0.5, -0.5 ] },
Vertex { position: [ 0.5, 0.5 ] },
].iter().cloned()).expect("failed to create buffer");
mod vs { include!{concat!(env!("OUT_DIR"), "/shaders/src/shader/image_vs.glsl")} }
let vs = vs::Shader::load(&device).expect("failed to create shader module");
mod fs { include!{concat!(env!("OUT_DIR"), "/shaders/src/shader/image_fs.glsl")} }
let fs = fs::Shader::load(&device).expect("failed to create shader module");
mod renderpass {
single_pass_renderpass!{
attachments: {
color: {
load: Clear,
store: Store,
format: ::vulkano::format::B8G8R8A8Srgb,
}
},
pass: {
color: [color],
depth_stencil: {}
}
}
}
let renderpass = renderpass::CustomRenderPass::new(&device, &renderpass::Formats {
color: (vulkano::format::B8G8R8A8Srgb, 1)
}).unwrap();
let texture = vulkano::image::immutable::ImmutableImage::new(&device, vulkano::image::Dimensions::Dim2d { width: WIN_WIDTH as u32, height: WIN_HEIGTH as u32 },
vulkano::format::R8G8B8A8Unorm, Some(queue.family())).unwrap();
let sampler = vulkano::sampler::Sampler::new(&device, vulkano::sampler::Filter::Linear,
vulkano::sampler::Filter::Linear, vulkano::sampler::MipmapMode::Nearest,
vulkano::sampler::SamplerAddressMode::Repeat,
vulkano::sampler::SamplerAddressMode::Repeat,
vulkano::sampler::SamplerAddressMode::Repeat,
0.0, 1.0, 0.0, 0.0).unwrap();
let descriptor_pool = vulkano::descriptor::descriptor_set::DescriptorPool::new(&device);
mod pipeline_layout {
pipeline_layout!{
set0: {
tex: CombinedImageSampler
}
}
}
let pipeline_layout = pipeline_layout::CustomPipeline::new(&device).unwrap();
let set = pipeline_layout::set0::Set::new(&descriptor_pool, &pipeline_layout, &pipeline_layout::set0::Descriptors {
tex: (&sampler, &texture)
});
let pipeline = vulkano::pipeline::GraphicsPipeline::new(&device, vulkano::pipeline::GraphicsPipelineParams {
vertex_input: vulkano::pipeline::vertex::SingleBufferDefinition::new(),
vertex_shader: vs.main_entry_point(),
input_assembly: vulkano::pipeline::input_assembly::InputAssembly {
topology: vulkano::pipeline::input_assembly::PrimitiveTopology::TriangleStrip,
primitive_restart_enable: false,
},
tessellation: None,
geometry_shader: None,
viewport: vulkano::pipeline::viewport::ViewportsState::Fixed {
data: vec![(
vulkano::pipeline::viewport::Viewport {
origin: [0.0, 0.0],
                depth_range: 0.0..1.0,
dimensions: [images[0].dimensions()[0] as f32, images[0].dimensions()[1] as f32],
},
vulkano::pipeline::viewport::Scissor::irrelevant()
)],
},
raster: Default::default(),
multisample: vulkano::pipeline::multisample::Multisample::disabled(),
fragment_shader: fs.main_entry_point(),
depth_stencil: vulkano::pipeline::depth_stencil::DepthStencil::disabled(),
blend: vulkano::pipeline::blend::Blend::pass_through(),
layout: &pipeline_layout,
render_pass: vulkano::framebuffer::Subpass::from(&renderpass, 0).unwrap(),
}).unwrap();
let framebuffers = images.iter().map(|image| {
let attachments = renderpass::AList {
color: &image,
};
vulkano::framebuffer::Framebuffer::new(&renderpass, [images[0].dimensions()[0], images[0].dimensions()[1], 1], attachments).unwrap()
}).collect::<Vec<_>>();
'main: loop {
// Draw in the FBO and read the pixel data from it
gl_draw(&handles);
// Create renderbuffer for color data
let color_render_buffer = gl::gen_renderbuffers(1)[0];
gl::bind_renderbuffer(gl::RENDERBUFFER, color_render_buffer);
gl::renderbuffer_storage(gl::RENDERBUFFER, gl::RGBA, WIN_WIDTH, WIN_HEIGTH);
gl::framebuffer_renderbuffer(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::RENDERBUFFER, color_render_buffer);
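        // NOTE: a new renderbuffer is generated on every iteration and never
        // deleted, so as written this loop leaks one GL object per frame.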
        if gl::check_frame_buffer_status(gl::FRAMEBUFFER) != gl::FRAMEBUFFER_COMPLETE {
            panic!("Offscreen framebuffer is incomplete.");
}
let pixel_data = gl::read_pixels(0, 0, WIN_WIDTH, WIN_HEIGTH, gl::RGBA, gl::UNSIGNED_BYTE);
let pixel_buffer = {
let image_data_chunks = pixel_data.chunks(4).map(|c| [c[0], c[1], c[2], c[3]]);
vulkano::buffer::cpu_access::CpuAccessibleBuffer::<[[u8;4]]>
::from_iter(&device, &vulkano::buffer::BufferUsage::all(),
Some(queue.family()), image_data_chunks)
.expect("failed to create buffer")
};
let command_buffers = framebuffers.iter().map(|framebuffer| {
vulkano::command_buffer::PrimaryCommandBufferBuilder::new(&device, queue.family())
            .copy_buffer_to_color_image(&pixel_buffer, &texture, 0, 0..1, [0, 0, 0],
[texture.dimensions().width(), texture.dimensions().height(), 1])
//.clear_color_image(&texture, [0.0, 1.0, 0.0, 1.0])
.draw_inline(&renderpass, &framebuffer, renderpass::ClearValues {
color: [0.0, 0.0, 1.0, 1.0]
})
.draw(&pipeline, &vertex_buffer, &vulkano::command_buffer::DynamicState::none(),
&set, &())
.draw_end()
.build()
}).collect::<Vec<_>>();
let image_num = swapchain.acquire_next_image(Duration::new(10, 0)).unwrap();
vulkano::command_buffer::submit(&command_buffers[image_num], &queue).unwrap();
swapchain.present(&queue, image_num).unwrap();
for ev in window.window().poll_events() {
match ev {
                winit::Event::Closed => break 'main,
_ => ()
}
}
}
// Free gl resources
gl::use_program(0);
gl::disable_vertex_attrib_array(0);
gl::disable_vertex_attrib_array(1);
gl::disable_vertex_attrib_array(2);
// gl::detach_shader(handles.program.get(), vertexshader);
// gl::detach_shader(handles.program.get(), fragmentshader);
// gl::delete_shader(vertexshader);
// gl::delete_shader(fragmentshader);
gl::delete_program(handles.program.get());
gl::delete_buffers(&handles.vbos.get());
gl::delete_vertex_arrays(&[handles.vao.get()]);
} | v_Color = vec4(a_Color, 1.0); | random_line_split |
main.rs | extern crate cgmath;
extern crate euclid;
extern crate gleam;
extern crate glutin;
extern crate image;
extern crate lodepng;
extern crate offscreen_gl_context;
#[macro_use]
extern crate vulkano;
extern crate vulkano_win;
extern crate winit;
use euclid::Size2D;
use gleam::gl;
use offscreen_gl_context::{ColorAttachmentType, GLContext, GLContextAttributes, NativeGLContext};
use std::time::Duration;
use vulkano_win::VkSurfaceBuild;
use std::cell::Cell;
struct GlHandles {
pub vao: Cell<gl::GLuint>,
pub vbos: Cell<[gl::GLuint; 2]>,
pub program: Cell<gl::GLuint>,
pub scale: Cell<f32>,
}
impl GlHandles {
fn | () -> GlHandles {
GlHandles {
vao: Cell::new(0 as gl::GLuint),
vbos: Cell::new([0,0]),
program: Cell::new(0 as gl::GLuint),
scale: Cell::new(0.0),
}
}
}
const CLEAR_COLOR: (f32, f32, f32, f32) = (0.0, 0.2, 0.3, 1.0);
const WIN_WIDTH: i32 = 256;
const WIN_HEIGTH: i32 = 256;
const VS_SHADER: &'static str = "
#version 150 core
in vec2 a_Pos;
in vec3 a_Color;
uniform mat4 scale;
out vec4 v_Color;
void main() {
v_Color = vec4(a_Color, 1.0);
gl_Position = scale * vec4(0.5 * a_Pos, 0.0, 1.0);
}
";
const FS_SHADER: &'static str = "
#version 150 core
in vec4 v_Color;
out vec4 Target0;
void main() {
Target0 = v_Color;
}
";
const VERTICES: &'static [[f32;2];3] = &[
[-1.0, -0.57],
[ 1.0, -0.57],
[ 0.0, 1.15]
];
const COLORS: &'static [[f32;3];3] = &[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]
];
fn add_shader(program: gl::GLuint, src: &str, ty: gl::GLenum) {
let id = unsafe { gl::CreateShader(ty) };
if id == (0 as gl::GLuint) {
panic!("Failed to create shader type: {:?}", ty);
}
let mut source = Vec::new();
source.extend_from_slice(src.as_bytes());
gl::shader_source(id, &[&source[..]]);
gl::compile_shader(id);
let log = gl::get_shader_info_log(id);
if gl::get_shader_iv(id, gl::COMPILE_STATUS) == (0 as gl::GLint) {
panic!("Failed to compile shader:\n{}", log);
} else {
        if !log.is_empty() {
println!("Warnings detected on shader:\n{}", log);
}
gl::attach_shader(program, id);
}
}
fn compile_shaders(handles: &GlHandles) {
handles.program.set(gl::create_program());
if handles.program.get() == (0 as gl::GLuint) {
panic!("Failed to create shader program");
}
add_shader(handles.program.get(), VS_SHADER, gl::VERTEX_SHADER);
add_shader(handles.program.get(), FS_SHADER, gl::FRAGMENT_SHADER);
gl::link_program(handles.program.get());
if gl::get_program_iv(handles.program.get(), gl::LINK_STATUS) == (0 as gl::GLint) {
let error_log = gl::get_program_info_log(handles.program.get());
panic!("Failed to link shader program: \n{}", error_log);
}
gl::validate_program(handles.program.get());
if gl::get_program_iv(handles.program.get(), gl::VALIDATE_STATUS) == (0 as gl::GLint) {
let error_log = gl::get_program_info_log(handles.program.get());
panic!("Failed to validate shader program: \n{}", error_log);
}
gl::use_program(handles.program.get());
}
fn gl_draw(handles: &GlHandles) {
gl::clear_color(CLEAR_COLOR.0, CLEAR_COLOR.1, CLEAR_COLOR.2, CLEAR_COLOR.3);
gl::clear(gl::COLOR_BUFFER_BIT);
gl::bind_buffer(gl::ARRAY_BUFFER, handles.vbos.get()[0]);
gl::buffer_data(gl::ARRAY_BUFFER, VERTICES, gl::STATIC_DRAW);
gl::vertex_attrib_pointer(0 as gl::GLuint,
2,
gl::FLOAT,
false,
0 as gl::GLint,
0);
gl::enable_vertex_attrib_array(0 as gl::GLuint);
gl::bind_buffer(gl::ARRAY_BUFFER, handles.vbos.get()[1]);
gl::buffer_data(gl::ARRAY_BUFFER, COLORS, gl::STATIC_DRAW);
gl::vertex_attrib_pointer(1 as gl::GLuint,
3,
gl::FLOAT,
false,
0 as gl::GLint,
0);
gl::enable_vertex_attrib_array(1 as gl::GLuint);
handles.scale.set(handles.scale.get() + 0.01);
let scale = handles.scale.get();
let rot_matrix = [
scale.cos(), -1.0 * scale.sin(), 0.0, 0.0,
scale.sin(), scale.cos(), 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0
];
let scale_pos = gl::get_uniform_location(handles.program.get(), "scale");
gl::uniform_matrix_4fv(scale_pos, false, &rot_matrix);
gl::draw_arrays(gl::TRIANGLES, 0, 3);
}
pub fn main() {
gl::load_with(|s| GLContext::<NativeGLContext>::get_proc_address(s) as *const _);
let offscreen_ctx = GLContext::<NativeGLContext>::new(Size2D::new(256, 256),
GLContextAttributes::default(),
ColorAttachmentType::Renderbuffer,
None).unwrap();
offscreen_ctx.make_current().unwrap();
let handles: GlHandles = GlHandles::new();
// Create VAO and VBOs
handles.vao.set(gl::gen_vertex_arrays(1)[0]);
gl::bind_vertex_array(handles.vao.get());
let buffer_ids = gl::gen_buffers(2);
handles.vbos.set([buffer_ids[0], buffer_ids[1]]);
compile_shaders(&handles);
// Create FBO and bind it
let fbo = gl::gen_framebuffers(1)[0];
gl::bind_framebuffer(gl::FRAMEBUFFER, fbo);
let extensions = vulkano_win::required_extensions();
let instance = vulkano::instance::Instance::new(None, &extensions, &[]).expect("failed to create instance");
let physical = vulkano::instance::PhysicalDevice::enumerate(&instance)
.next().expect("no device available");
println!("Using device: {} (type: {:?})", physical.name(), physical.ty());
let window = winit::WindowBuilder::new().build_vk_surface(&instance).unwrap();
let queue = physical.queue_families().find(|q| q.supports_graphics() &&
window.surface().is_supported(q).unwrap_or(false))
.expect("couldn't find a graphical queue family");
let device_ext = vulkano::device::DeviceExtensions {
khr_swapchain: true,
.. vulkano::device::DeviceExtensions::none()
};
let (device, mut queues) = vulkano::device::Device::new(&physical, physical.supported_features(),
&device_ext, [(queue, 0.5)].iter().cloned())
.expect("failed to create device");
let queue = queues.next().unwrap();
let (swapchain, images) = {
let caps = window.surface().get_capabilities(&physical).expect("failed to get surface capabilities");
let dimensions = caps.current_extent.unwrap_or([WIN_WIDTH as u32, WIN_HEIGTH as u32]);
let present = caps.present_modes.iter().next().unwrap();
let usage = caps.supported_usage_flags;
vulkano::swapchain::Swapchain::new(&device, &window.surface(), caps.min_image_count,
vulkano::format::B8G8R8A8Srgb, dimensions, 1,
&usage, &queue, vulkano::swapchain::SurfaceTransform::Identity,
vulkano::swapchain::CompositeAlpha::Opaque,
present, true, None).expect("failed to create swapchain")
};
#[derive(Debug, Clone)]
struct Vertex { position: [f32; 2] }
impl_vertex!(Vertex, position);
let vertex_buffer = vulkano::buffer::cpu_access::CpuAccessibleBuffer::<[Vertex]>
::from_iter(&device, &vulkano::buffer::BufferUsage::all(),
Some(queue.family()), [
Vertex { position: [-0.5, -0.5 ] },
Vertex { position: [-0.5, 0.5 ] },
Vertex { position: [ 0.5, -0.5 ] },
Vertex { position: [ 0.5, 0.5 ] },
].iter().cloned()).expect("failed to create buffer");
mod vs { include!{concat!(env!("OUT_DIR"), "/shaders/src/shader/image_vs.glsl")} }
let vs = vs::Shader::load(&device).expect("failed to create shader module");
mod fs { include!{concat!(env!("OUT_DIR"), "/shaders/src/shader/image_fs.glsl")} }
let fs = fs::Shader::load(&device).expect("failed to create shader module");
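    // The `vs`/`fs` modules pull in Rust code generated into OUT_DIR at build
    // time (presumably by vulkano's shader codegen); each exposes a `Shader`
    // whose `main_entry_point()` feeds the pipeline definition below.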
mod renderpass {
single_pass_renderpass!{
attachments: {
color: {
load: Clear,
store: Store,
format: ::vulkano::format::B8G8R8A8Srgb,
}
},
pass: {
color: [color],
depth_stencil: {}
}
}
}
let renderpass = renderpass::CustomRenderPass::new(&device, &renderpass::Formats {
color: (vulkano::format::B8G8R8A8Srgb, 1)
}).unwrap();
let texture = vulkano::image::immutable::ImmutableImage::new(&device, vulkano::image::Dimensions::Dim2d { width: WIN_WIDTH as u32, height: WIN_HEIGTH as u32 },
vulkano::format::R8G8B8A8Unorm, Some(queue.family())).unwrap();
let sampler = vulkano::sampler::Sampler::new(&device, vulkano::sampler::Filter::Linear,
vulkano::sampler::Filter::Linear, vulkano::sampler::MipmapMode::Nearest,
vulkano::sampler::SamplerAddressMode::Repeat,
vulkano::sampler::SamplerAddressMode::Repeat,
vulkano::sampler::SamplerAddressMode::Repeat,
0.0, 1.0, 0.0, 0.0).unwrap();
let descriptor_pool = vulkano::descriptor::descriptor_set::DescriptorPool::new(&device);
mod pipeline_layout {
pipeline_layout!{
set0: {
tex: CombinedImageSampler
}
}
}
let pipeline_layout = pipeline_layout::CustomPipeline::new(&device).unwrap();
let set = pipeline_layout::set0::Set::new(&descriptor_pool, &pipeline_layout, &pipeline_layout::set0::Descriptors {
tex: (&sampler, &texture)
});
let pipeline = vulkano::pipeline::GraphicsPipeline::new(&device, vulkano::pipeline::GraphicsPipelineParams {
vertex_input: vulkano::pipeline::vertex::SingleBufferDefinition::new(),
vertex_shader: vs.main_entry_point(),
input_assembly: vulkano::pipeline::input_assembly::InputAssembly {
topology: vulkano::pipeline::input_assembly::PrimitiveTopology::TriangleStrip,
primitive_restart_enable: false,
},
tessellation: None,
geometry_shader: None,
viewport: vulkano::pipeline::viewport::ViewportsState::Fixed {
data: vec![(
vulkano::pipeline::viewport::Viewport {
origin: [0.0, 0.0],
                depth_range: 0.0..1.0,
dimensions: [images[0].dimensions()[0] as f32, images[0].dimensions()[1] as f32],
},
vulkano::pipeline::viewport::Scissor::irrelevant()
)],
},
raster: Default::default(),
multisample: vulkano::pipeline::multisample::Multisample::disabled(),
fragment_shader: fs.main_entry_point(),
depth_stencil: vulkano::pipeline::depth_stencil::DepthStencil::disabled(),
blend: vulkano::pipeline::blend::Blend::pass_through(),
layout: &pipeline_layout,
render_pass: vulkano::framebuffer::Subpass::from(&renderpass, 0).unwrap(),
}).unwrap();
let framebuffers = images.iter().map(|image| {
let attachments = renderpass::AList {
color: &image,
};
vulkano::framebuffer::Framebuffer::new(&renderpass, [images[0].dimensions()[0], images[0].dimensions()[1], 1], attachments).unwrap()
}).collect::<Vec<_>>();
'main: loop {
// Draw in the FBO and read the pixel data from it
gl_draw(&handles);
// Create renderbuffer for color data
let color_render_buffer = gl::gen_renderbuffers(1)[0];
gl::bind_renderbuffer(gl::RENDERBUFFER, color_render_buffer);
gl::renderbuffer_storage(gl::RENDERBUFFER, gl::RGBA, WIN_WIDTH, WIN_HEIGTH);
gl::framebuffer_renderbuffer(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::RENDERBUFFER, color_render_buffer);
        if gl::check_frame_buffer_status(gl::FRAMEBUFFER) != gl::FRAMEBUFFER_COMPLETE {
            panic!("Offscreen framebuffer is incomplete.");
}
let pixel_data = gl::read_pixels(0, 0, WIN_WIDTH, WIN_HEIGTH, gl::RGBA, gl::UNSIGNED_BYTE);
let pixel_buffer = {
let image_data_chunks = pixel_data.chunks(4).map(|c| [c[0], c[1], c[2], c[3]]);
vulkano::buffer::cpu_access::CpuAccessibleBuffer::<[[u8;4]]>
::from_iter(&device, &vulkano::buffer::BufferUsage::all(),
Some(queue.family()), image_data_chunks)
.expect("failed to create buffer")
};
let command_buffers = framebuffers.iter().map(|framebuffer| {
vulkano::command_buffer::PrimaryCommandBufferBuilder::new(&device, queue.family())
            .copy_buffer_to_color_image(&pixel_buffer, &texture, 0, 0..1, [0, 0, 0],
[texture.dimensions().width(), texture.dimensions().height(), 1])
//.clear_color_image(&texture, [0.0, 1.0, 0.0, 1.0])
.draw_inline(&renderpass, &framebuffer, renderpass::ClearValues {
color: [0.0, 0.0, 1.0, 1.0]
})
.draw(&pipeline, &vertex_buffer, &vulkano::command_buffer::DynamicState::none(),
&set, &())
.draw_end()
.build()
}).collect::<Vec<_>>();
let image_num = swapchain.acquire_next_image(Duration::new(10, 0)).unwrap();
vulkano::command_buffer::submit(&command_buffers[image_num], &queue).unwrap();
swapchain.present(&queue, image_num).unwrap();
for ev in window.window().poll_events() {
match ev {
                winit::Event::Closed => break 'main,
_ => ()
}
}
}
// Free gl resources
gl::use_program(0);
gl::disable_vertex_attrib_array(0);
gl::disable_vertex_attrib_array(1);
gl::disable_vertex_attrib_array(2);
// gl::detach_shader(handles.program.get(), vertexshader);
// gl::detach_shader(handles.program.get(), fragmentshader);
// gl::delete_shader(vertexshader);
// gl::delete_shader(fragmentshader);
gl::delete_program(handles.program.get());
gl::delete_buffers(&handles.vbos.get());
gl::delete_vertex_arrays(&[handles.vao.get()]);
}
| new | identifier_name |
main.rs | extern crate cgmath;
extern crate euclid;
extern crate gleam;
extern crate glutin;
extern crate image;
extern crate lodepng;
extern crate offscreen_gl_context;
#[macro_use]
extern crate vulkano;
extern crate vulkano_win;
extern crate winit;
use euclid::Size2D;
use gleam::gl;
use offscreen_gl_context::{ColorAttachmentType, GLContext, GLContextAttributes, NativeGLContext};
use std::time::Duration;
use vulkano_win::VkSurfaceBuild;
use std::cell::Cell;
struct GlHandles {
pub vao: Cell<gl::GLuint>,
pub vbos: Cell<[gl::GLuint; 2]>,
pub program: Cell<gl::GLuint>,
pub scale: Cell<f32>,
}
impl GlHandles {
fn new() -> GlHandles {
GlHandles {
vao: Cell::new(0 as gl::GLuint),
vbos: Cell::new([0,0]),
program: Cell::new(0 as gl::GLuint),
scale: Cell::new(0.0),
}
}
}
const CLEAR_COLOR: (f32, f32, f32, f32) = (0.0, 0.2, 0.3, 1.0);
const WIN_WIDTH: i32 = 256;
const WIN_HEIGTH: i32 = 256;
const VS_SHADER: &'static str = "
#version 150 core
in vec2 a_Pos;
in vec3 a_Color;
uniform mat4 scale;
out vec4 v_Color;
void main() {
v_Color = vec4(a_Color, 1.0);
gl_Position = scale * vec4(0.5 * a_Pos, 0.0, 1.0);
}
";
const FS_SHADER: &'static str = "
#version 150 core
in vec4 v_Color;
out vec4 Target0;
void main() {
Target0 = v_Color;
}
";
const VERTICES: &'static [[f32;2];3] = &[
[-1.0, -0.57],
[ 1.0, -0.57],
[ 0.0, 1.15]
];
const COLORS: &'static [[f32;3];3] = &[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]
];
fn add_shader(program: gl::GLuint, src: &str, ty: gl::GLenum) {
let id = unsafe { gl::CreateShader(ty) };
if id == (0 as gl::GLuint) {
panic!("Failed to create shader type: {:?}", ty);
}
let mut source = Vec::new();
source.extend_from_slice(src.as_bytes());
gl::shader_source(id, &[&source[..]]);
gl::compile_shader(id);
let log = gl::get_shader_info_log(id);
if gl::get_shader_iv(id, gl::COMPILE_STATUS) == (0 as gl::GLint) {
panic!("Failed to compile shader:\n{}", log);
} else |
}
fn compile_shaders(handles: &GlHandles) {
handles.program.set(gl::create_program());
if handles.program.get() == (0 as gl::GLuint) {
panic!("Failed to create shader program");
}
add_shader(handles.program.get(), VS_SHADER, gl::VERTEX_SHADER);
add_shader(handles.program.get(), FS_SHADER, gl::FRAGMENT_SHADER);
gl::link_program(handles.program.get());
if gl::get_program_iv(handles.program.get(), gl::LINK_STATUS) == (0 as gl::GLint) {
let error_log = gl::get_program_info_log(handles.program.get());
panic!("Failed to link shader program: \n{}", error_log);
}
gl::validate_program(handles.program.get());
if gl::get_program_iv(handles.program.get(), gl::VALIDATE_STATUS) == (0 as gl::GLint) {
let error_log = gl::get_program_info_log(handles.program.get());
panic!("Failed to validate shader program: \n{}", error_log);
}
gl::use_program(handles.program.get());
}
fn gl_draw(handles: &GlHandles) {
gl::clear_color(CLEAR_COLOR.0, CLEAR_COLOR.1, CLEAR_COLOR.2, CLEAR_COLOR.3);
gl::clear(gl::COLOR_BUFFER_BIT);
gl::bind_buffer(gl::ARRAY_BUFFER, handles.vbos.get()[0]);
gl::buffer_data(gl::ARRAY_BUFFER, VERTICES, gl::STATIC_DRAW);
gl::vertex_attrib_pointer(0 as gl::GLuint,
2,
gl::FLOAT,
false,
0 as gl::GLint,
0);
gl::enable_vertex_attrib_array(0 as gl::GLuint);
gl::bind_buffer(gl::ARRAY_BUFFER, handles.vbos.get()[1]);
gl::buffer_data(gl::ARRAY_BUFFER, COLORS, gl::STATIC_DRAW);
gl::vertex_attrib_pointer(1 as gl::GLuint,
3,
gl::FLOAT,
false,
0 as gl::GLint,
0);
gl::enable_vertex_attrib_array(1 as gl::GLuint);
handles.scale.set(handles.scale.get() + 0.01);
let scale = handles.scale.get();
let rot_matrix = [
scale.cos(), -1.0 * scale.sin(), 0.0, 0.0,
scale.sin(), scale.cos(), 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0
];
let scale_pos = gl::get_uniform_location(handles.program.get(), "scale");
gl::uniform_matrix_4fv(scale_pos, false, &rot_matrix);
gl::draw_arrays(gl::TRIANGLES, 0, 3);
}
pub fn main() {
gl::load_with(|s| GLContext::<NativeGLContext>::get_proc_address(s) as *const _);
let offscreen_ctx = GLContext::<NativeGLContext>::new(Size2D::new(256, 256),
GLContextAttributes::default(),
ColorAttachmentType::Renderbuffer,
None).unwrap();
offscreen_ctx.make_current().unwrap();
let handles: GlHandles = GlHandles::new();
// Create VAO and VBOs
handles.vao.set(gl::gen_vertex_arrays(1)[0]);
gl::bind_vertex_array(handles.vao.get());
let buffer_ids = gl::gen_buffers(2);
handles.vbos.set([buffer_ids[0], buffer_ids[1]]);
compile_shaders(&handles);
// Create FBO and bind it
let fbo = gl::gen_framebuffers(1)[0];
gl::bind_framebuffer(gl::FRAMEBUFFER, fbo);
let extensions = vulkano_win::required_extensions();
let instance = vulkano::instance::Instance::new(None, &extensions, &[]).expect("failed to create instance");
let physical = vulkano::instance::PhysicalDevice::enumerate(&instance)
.next().expect("no device available");
println!("Using device: {} (type: {:?})", physical.name(), physical.ty());
let window = winit::WindowBuilder::new().build_vk_surface(&instance).unwrap();
let queue = physical.queue_families().find(|q| q.supports_graphics() &&
window.surface().is_supported(q).unwrap_or(false))
.expect("couldn't find a graphical queue family");
let device_ext = vulkano::device::DeviceExtensions {
khr_swapchain: true,
.. vulkano::device::DeviceExtensions::none()
};
let (device, mut queues) = vulkano::device::Device::new(&physical, physical.supported_features(),
&device_ext, [(queue, 0.5)].iter().cloned())
.expect("failed to create device");
let queue = queues.next().unwrap();
let (swapchain, images) = {
let caps = window.surface().get_capabilities(&physical).expect("failed to get surface capabilities");
let dimensions = caps.current_extent.unwrap_or([WIN_WIDTH as u32, WIN_HEIGTH as u32]);
let present = caps.present_modes.iter().next().unwrap();
let usage = caps.supported_usage_flags;
vulkano::swapchain::Swapchain::new(&device, &window.surface(), caps.min_image_count,
vulkano::format::B8G8R8A8Srgb, dimensions, 1,
&usage, &queue, vulkano::swapchain::SurfaceTransform::Identity,
vulkano::swapchain::CompositeAlpha::Opaque,
present, true, None).expect("failed to create swapchain")
};
#[derive(Debug, Clone)]
struct Vertex { position: [f32; 2] }
impl_vertex!(Vertex, position);
let vertex_buffer = vulkano::buffer::cpu_access::CpuAccessibleBuffer::<[Vertex]>
::from_iter(&device, &vulkano::buffer::BufferUsage::all(),
Some(queue.family()), [
Vertex { position: [-0.5, -0.5 ] },
Vertex { position: [-0.5, 0.5 ] },
Vertex { position: [ 0.5, -0.5 ] },
Vertex { position: [ 0.5, 0.5 ] },
].iter().cloned()).expect("failed to create buffer");
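    // Four vertices forming a quad; the pipeline below draws them as a
    // TriangleStrip, so no index buffer is required.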
mod vs { include!{concat!(env!("OUT_DIR"), "/shaders/src/shader/image_vs.glsl")} }
let vs = vs::Shader::load(&device).expect("failed to create shader module");
mod fs { include!{concat!(env!("OUT_DIR"), "/shaders/src/shader/image_fs.glsl")} }
let fs = fs::Shader::load(&device).expect("failed to create shader module");
mod renderpass {
single_pass_renderpass!{
attachments: {
color: {
load: Clear,
store: Store,
format: ::vulkano::format::B8G8R8A8Srgb,
}
},
pass: {
color: [color],
depth_stencil: {}
}
}
}
let renderpass = renderpass::CustomRenderPass::new(&device, &renderpass::Formats {
color: (vulkano::format::B8G8R8A8Srgb, 1)
}).unwrap();
let texture = vulkano::image::immutable::ImmutableImage::new(&device, vulkano::image::Dimensions::Dim2d { width: WIN_WIDTH as u32, height: WIN_HEIGTH as u32 },
vulkano::format::R8G8B8A8Unorm, Some(queue.family())).unwrap();
let sampler = vulkano::sampler::Sampler::new(&device, vulkano::sampler::Filter::Linear,
vulkano::sampler::Filter::Linear, vulkano::sampler::MipmapMode::Nearest,
vulkano::sampler::SamplerAddressMode::Repeat,
vulkano::sampler::SamplerAddressMode::Repeat,
vulkano::sampler::SamplerAddressMode::Repeat,
0.0, 1.0, 0.0, 0.0).unwrap();
let descriptor_pool = vulkano::descriptor::descriptor_set::DescriptorPool::new(&device);
mod pipeline_layout {
pipeline_layout!{
set0: {
tex: CombinedImageSampler
}
}
}
let pipeline_layout = pipeline_layout::CustomPipeline::new(&device).unwrap();
let set = pipeline_layout::set0::Set::new(&descriptor_pool, &pipeline_layout, &pipeline_layout::set0::Descriptors {
tex: (&sampler, &texture)
});
let pipeline = vulkano::pipeline::GraphicsPipeline::new(&device, vulkano::pipeline::GraphicsPipelineParams {
vertex_input: vulkano::pipeline::vertex::SingleBufferDefinition::new(),
vertex_shader: vs.main_entry_point(),
input_assembly: vulkano::pipeline::input_assembly::InputAssembly {
topology: vulkano::pipeline::input_assembly::PrimitiveTopology::TriangleStrip,
primitive_restart_enable: false,
},
tessellation: None,
geometry_shader: None,
viewport: vulkano::pipeline::viewport::ViewportsState::Fixed {
data: vec![(
vulkano::pipeline::viewport::Viewport {
origin: [0.0, 0.0],
                depth_range: 0.0..1.0,
dimensions: [images[0].dimensions()[0] as f32, images[0].dimensions()[1] as f32],
},
vulkano::pipeline::viewport::Scissor::irrelevant()
)],
},
raster: Default::default(),
multisample: vulkano::pipeline::multisample::Multisample::disabled(),
fragment_shader: fs.main_entry_point(),
depth_stencil: vulkano::pipeline::depth_stencil::DepthStencil::disabled(),
blend: vulkano::pipeline::blend::Blend::pass_through(),
layout: &pipeline_layout,
render_pass: vulkano::framebuffer::Subpass::from(&renderpass, 0).unwrap(),
}).unwrap();
let framebuffers = images.iter().map(|image| {
let attachments = renderpass::AList {
color: &image,
};
vulkano::framebuffer::Framebuffer::new(&renderpass, [images[0].dimensions()[0], images[0].dimensions()[1], 1], attachments).unwrap()
}).collect::<Vec<_>>();
'main: loop {
// Draw in the FBO and read the pixel data from it
gl_draw(&handles);
// Create renderbuffer for color data
let color_render_buffer = gl::gen_renderbuffers(1)[0];
gl::bind_renderbuffer(gl::RENDERBUFFER, color_render_buffer);
gl::renderbuffer_storage(gl::RENDERBUFFER, gl::RGBA, WIN_WIDTH, WIN_HEIGTH);
gl::framebuffer_renderbuffer(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::RENDERBUFFER, color_render_buffer);
        if gl::check_frame_buffer_status(gl::FRAMEBUFFER) != gl::FRAMEBUFFER_COMPLETE {
            panic!("Offscreen framebuffer is incomplete.");
}
let pixel_data = gl::read_pixels(0, 0, WIN_WIDTH, WIN_HEIGTH, gl::RGBA, gl::UNSIGNED_BYTE);
let pixel_buffer = {
let image_data_chunks = pixel_data.chunks(4).map(|c| [c[0], c[1], c[2], c[3]]);
vulkano::buffer::cpu_access::CpuAccessibleBuffer::<[[u8;4]]>
::from_iter(&device, &vulkano::buffer::BufferUsage::all(),
Some(queue.family()), image_data_chunks)
.expect("failed to create buffer")
};
let command_buffers = framebuffers.iter().map(|framebuffer| {
vulkano::command_buffer::PrimaryCommandBufferBuilder::new(&device, queue.family())
            .copy_buffer_to_color_image(&pixel_buffer, &texture, 0, 0..1, [0, 0, 0],
[texture.dimensions().width(), texture.dimensions().height(), 1])
//.clear_color_image(&texture, [0.0, 1.0, 0.0, 1.0])
.draw_inline(&renderpass, &framebuffer, renderpass::ClearValues {
color: [0.0, 0.0, 1.0, 1.0]
})
.draw(&pipeline, &vertex_buffer, &vulkano::command_buffer::DynamicState::none(),
&set, &())
.draw_end()
.build()
}).collect::<Vec<_>>();
let image_num = swapchain.acquire_next_image(Duration::new(10, 0)).unwrap();
vulkano::command_buffer::submit(&command_buffers[image_num], &queue).unwrap();
swapchain.present(&queue, image_num).unwrap();
for ev in window.window().poll_events() {
match ev {
                winit::Event::Closed => break 'main,
_ => ()
}
}
}
// Free gl resources
gl::use_program(0);
gl::disable_vertex_attrib_array(0);
gl::disable_vertex_attrib_array(1);
gl::disable_vertex_attrib_array(2);
// gl::detach_shader(handles.program.get(), vertexshader);
// gl::detach_shader(handles.program.get(), fragmentshader);
// gl::delete_shader(vertexshader);
// gl::delete_shader(fragmentshader);
gl::delete_program(handles.program.get());
gl::delete_buffers(&handles.vbos.get());
gl::delete_vertex_arrays(&[handles.vao.get()]);
}
| {
if !log.is_empty() {
println!("Warnings detected on shader:\n{}", log);
}
gl::attach_shader(program, id);
} | conditional_block |
types.rs | extern crate "rustc-serialize" as rustc_serialize;
extern crate uuid;
use uuid::Uuid;
use rustc_serialize::{json, Encodable, Decodable};
use rustc_serialize::base64::{ToBase64, FromBase64, Config, CharacterSet, Newline};
use types::NodeState::{Leader, Follower, Candidate};
use types::TransactionState::{Polling, Accepted, Rejected};
use std::fs::{File, OpenOptions};
use std::fs;
use std::str;
use std::str::StrExt;
use std::io;
use std::io::{Write, ReadExt, Seek};
use std::old_io::IoError;
use std::marker;
/// Persistent state
/// **Must be updated to stable storage before RPC response.**
pub struct | <T: Encodable + Decodable + Send + Clone> {
current_term: u64,
voted_for: Option<u64>, // request_vote cares if this is `None`
log: File,
    last_index: u64, // The last index of the file.
    last_term: u64, // The last term of the file.
marker: marker::PhantomData<T>, // A marker... Because of
// https://github.com/rust-lang/rfcs/blob/master/text/0738-variance.md#the-corner-case-unused-parameters-and-parameters-that-are-only-used-unsafely
}
impl<T: Encodable + Decodable + Send + Clone> PersistentState<T> {
pub fn new(current_term: u64, log_path: Path) -> PersistentState<T> {
let mut open_opts = OpenOptions::new();
open_opts.read(true);
open_opts.write(true);
open_opts.create(true);
let mut file = open_opts.open(&log_path).unwrap();
write!(&mut file, "{:20} {:20}\n", current_term, 0).unwrap();
PersistentState {
current_term: current_term,
voted_for: None,
log: file,
last_index: 0,
last_term: 0,
marker: marker::PhantomData,
}
}
/// Gets the `last_index` which you can use to make append requests with.
pub fn get_last_index(&self) -> u64 { self.last_index }
pub fn get_last_term(&self) -> u64 { self.last_term }
/// Gets the `current_term` which is used for request vote.
pub fn get_current_term(&self) -> u64 { self.current_term }
/// Sets the current_term. **This should reflect on stable storage.**
pub fn set_current_term(&mut self, new: u64) -> io::Result<()> {
// The first line is the header with `current_term`, `voted_for`.
self.log.seek(io::SeekFrom::Start(0)); // Take the start.
self.current_term = new;
// TODO: What do we do about the none case?
write!(&mut self.log, "{:20} {:20}\n", self.current_term, self.voted_for.unwrap_or(0))
}
/// Increments the current_term. **This should reflect on stable storage.**
pub fn inc_current_term(&mut self) { self.current_term += 1 }
/// Gets the `voted_for`.
pub fn get_voted_for(&mut self) -> Option<u64> { self.voted_for }
/// Sets the `voted_for`. **This should reflect on stable storage.**
pub fn set_voted_for(&mut self, new: Option<u64>) -> io::Result<()> {
// The first line is the header with `current_term`, `voted_for`.
self.log.seek(io::SeekFrom::Start(0)); // Take the start.
self.voted_for = new;
// TODO: What do we do about the none case?
write!(&mut self.log, "{:20} {:20}\n", self.current_term, self.voted_for.unwrap_or(0))
}
pub fn append_entries(&mut self, prev_log_index: u64, prev_log_term: u64,
entries: Vec<(u64, T)>) -> io::Result<()> {
// TODO: No checking of `prev_log_index` & `prev_log_term` yet... Do we need to?
let position = try!(self.move_to(prev_log_index + 1));
let number = entries.len();
let last_term = entries[entries.len() - 1].0;
try!(self.purge_from_bytes(position)); // Update `last_log_index` later.
// TODO: Possibly purge.
for (term, entry) in entries {
// TODO: I don't like the "doubling" here. How can we do this better?
write!(&mut self.log, "{} {}\n", term, PersistentState::encode(entry));
}
self.last_index = if prev_log_index == 0 {
number as u64 - 1
} else { prev_log_index + number as u64 };
self.last_term = last_term;
Ok(())
}
fn encode(entry: T) -> String {
let json_encoded = json::encode(&entry)
.unwrap(); // TODO: Don't unwrap.
json_encoded.as_bytes().to_base64(Config {
char_set: CharacterSet::UrlSafe,
newline: Newline::LF,
pad: false,
line_length: None,
})
}
fn decode(bytes: String) -> Result<T, rustc_serialize::json::DecoderError> {
let based = bytes.from_base64()
.ok().expect("Decoding error. log likely corrupt.");
let string = str::from_utf8(based.as_slice())
.unwrap();
json::decode::<T>(string)
}
/// Returns the number of bytes containing `line` lines.
/// TODO: Cache?
fn move_to(&mut self, line: u64) -> io::Result<u64> {
// Gotcha: The first line is NOT a log entry.
let mut lines_read = 0u64;
self.log.seek(io::SeekFrom::Start(0)); // Take the start.
// Go until we've reached `from` new lines.
let _ = self.log.by_ref().chars().skip_while(|opt| {
match *opt {
Ok(val) => {
if val == '\n' {
lines_read += 1;
if lines_read > line { // Greater than here because the first line is a bust.
false // At right location.
} else {
true // Not done yet, more lines to go.
}
} else {
true // Not a new line.
}
},
_ => false // At EOF. Nothing to purge.
}
}).next(); // Side effects.
self.log.seek(io::SeekFrom::Current(0)) // Where are we?
}
/// Do *not* invoke this unless you update the `last_index`!
fn purge_from_bytes(&mut self, from_bytes: u64) -> io::Result<()> {
self.log.set_len(from_bytes) // Chop off the file at the given position.
}
/// Removes all entries from `from` to the last entry, inclusively.
pub fn purge_from_index(&mut self, from_line: u64) -> io::Result<()> {
let position = try!(self.move_to(from_line));
self.last_index = from_line - 1;
self.purge_from_bytes(position)
}
pub fn retrieve_entries(&mut self, start: u64, end: u64) -> io::Result<Vec<(u64, T)>> {
let position = self.move_to(start);
let mut index = start;
let mut out = vec![];
let mut read_in = self.log.by_ref()
.chars()
.take_while(|val| val.is_ok())
.filter_map(|val| val.ok()); // We don't really care about issues here.
for index in range(start, end + 1) {
let mut chars = read_in.by_ref()
.take_while(|&val| val != '\n')
.collect::<String>();
if chars.len() == 0 { continue; }
let entry = try!(parse_entry::<T>(chars));
out.push(entry);
}
Ok(out)
}
pub fn retrieve_entry(&mut self, index: u64) -> io::Result<(u64, T)> {
let position = self.move_to(index);
let mut chars = self.log.by_ref()
.chars()
.take_while(|val| val.is_ok())
.filter_map(|val| val.ok()) // We don't really care about issues here.
.take_while(|&val| val != '\n').collect::<String>();
parse_entry::<T>(chars)
}
}
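// Sketch in modern Rust (the file above is pre-1.0): the header that
// `set_current_term`/`set_voted_for` rewrite in place is two space-padded,
// width-20 integers plus a newline — a constant 42 bytes, which is what makes
// the `seek(Start(0))` overwrite safe. Illustrative helpers, not the original:
fn format_header(current_term: u64, voted_for: Option<u64>) -> String {
    format!("{:20} {:20}\n", current_term, voted_for.unwrap_or(0))
}

fn parse_header(line: &str) -> Option<(u64, u64)> {
    let mut fields = line.split_whitespace();
    let term = fields.next()?.parse().ok()?;
    let voted = fields.next()?.parse().ok()?;
    Some((term, voted))
}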
fn parse_entry<T: Encodable + Decodable + Send + Clone>(val: String) -> io::Result<(u64, T)> {
let mut splits = val.split(' ');
let term = {
let chunk = splits.next()
.and_then(|v| v.parse::<u64>().ok());
match chunk {
Some(v) => v,
None => return Err(io::Error::new(io::ErrorKind::InvalidInput, "Could not parse term.", None)),
}
};
let encoded = {
let chunk = splits.next();
match chunk {
Some(v) => v,
None => return Err(io::Error::new(io::ErrorKind::InvalidInput, "Could not parse encoded data.", None)),
}
};
let decoded: T = PersistentState::decode(encoded.to_string())
.ok().expect("Could not unwrap log entry.");
Ok((term, decoded))
}
/// Volatile state
#[derive(Copy)]
pub struct VolatileState {
pub commit_index: u64,
pub last_applied: u64
}
/// Leader Only
/// **Reinitialized after election.**
#[derive(PartialEq, Eq, Clone)]
pub struct LeaderState {
pub next_index: Vec<u64>,
pub match_index: Vec<u64>
}
/// Nodes can either be:
///
/// * A `Follower`, which replicates AppendEntries requests and votes for its leader.
/// * A `Leader`, which leads the cluster by serving incoming requests, ensuring data is
/// replicated, and issuing heartbeats.
/// * A `Candidate`, which campaigns in an election and may become a `Leader` (if it gets enough
/// votes) or a `Follower`, if it hears from a `Leader`.
#[derive(PartialEq, Eq, Clone)]
pub enum NodeState {
Follower,
Leader(LeaderState),
Candidate(Vec<Transaction>),
}
#[derive(PartialEq, Eq, Clone)]
pub struct Transaction {
pub uuid: Uuid,
pub state: TransactionState,
}
/// Used to signify the state of a Request/Response pair. This is only needed
/// on the original sender... not on the receiver.
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum TransactionState {
Polling,
Accepted,
Rejected,
}
#[test]
fn test_persistent_state() {
let path = Path::new("/tmp/test_path");
fs::remove_file(&path.clone());
let mut state = PersistentState::new(0, path.clone());
// Add 0, 1
assert_eq!(state.append_entries(0, 0,
vec![(0, "Zero".to_string()),
(1, "One".to_string())]),
Ok(()));
// Check index.
assert_eq!(state.get_last_index(), 1);
// Check 0
assert_eq!(state.retrieve_entry(0),
Ok((0, "Zero".to_string())));
// Check 0, 1
assert_eq!(state.retrieve_entries(0, 1),
Ok(vec![(0, "Zero".to_string()),
(1, "One".to_string())
]));
// Check 1
assert_eq!(state.retrieve_entry(1),
Ok((1, "One".to_string())));
// Add 2, 3
assert_eq!(state.append_entries(1, 2,
vec![(2, "Two".to_string()),
(3, "Three".to_string())]),
Ok(()));
assert_eq!(state.get_last_index(), 3);
// Check 2, 3
assert_eq!(state.retrieve_entries(2, 3),
Ok(vec![(2, "Two".to_string()),
(3, "Three".to_string())
]));
// Remove 2, 3
assert_eq!(state.purge_from_index(2),
Ok(()));
assert_eq!(state.get_last_index(), 1);
// Check 3,4 are removed, and that code handles lack of entry gracefully.
assert_eq!(state.retrieve_entries(0, 4),
Ok(vec![(0, "Zero".to_string()),
(1, "One".to_string())
]));
// Add 2,3,4.
assert_eq!(state.append_entries(1, 2,
vec![(2, "Two".to_string()),
(3, "Three".to_string()),
(4, "Four".to_string())]),
Ok(()));
assert_eq!(state.get_last_index(), 4);
// Add 2,3 again. (4 should be purged)
assert_eq!(state.append_entries(1, 2,
vec![(2, "Two".to_string()),
(3, "Three".to_string())]),
Ok(()));
assert_eq!(state.get_last_index(), 3);
fs::remove_file(&path.clone());
}
| PersistentState | identifier_name |
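// Sketch of the same entry wire format — URL-safe, unpadded base64 over a
// JSON body — using modern crates in place of rustc-serialize. Assumes
// `serde_json` and a pre-0.21 `base64` dependency; a substitute illustration,
// not the original implementation.
use base64::{decode_config, encode_config, URL_SAFE_NO_PAD};
use serde::{de::DeserializeOwned, Serialize};

fn encode_entry<T: Serialize>(entry: &T) -> String {
    let json = serde_json::to_vec(entry).expect("entry must serialize");
    encode_config(&json, URL_SAFE_NO_PAD)
}

fn decode_entry<T: DeserializeOwned>(text: &str) -> Option<T> {
    let bytes = decode_config(text, URL_SAFE_NO_PAD).ok()?;
    serde_json::from_slice(&bytes).ok()
}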
types.rs | extern crate "rustc-serialize" as rustc_serialize;
extern crate uuid;
use uuid::Uuid;
use rustc_serialize::{json, Encodable, Decodable};
use rustc_serialize::base64::{ToBase64, FromBase64, Config, CharacterSet, Newline};
use types::NodeState::{Leader, Follower, Candidate};
use types::TransactionState::{Polling, Accepted, Rejected};
use std::fs::{File, OpenOptions};
use std::fs;
use std::str;
use std::str::StrExt;
use std::io;
use std::io::{Write, ReadExt, Seek};
use std::old_io::IoError;
use std::marker;
/// Persistent state
/// **Must be updated to stable storage before RPC response.**
pub struct PersistentState<T: Encodable + Decodable + Send + Clone> {
current_term: u64,
voted_for: Option<u64>, // request_vote cares if this is `None`
log: File,
last_index: u64, // The last index of the file.
last_term: u64, // The last term of the file.
marker: marker::PhantomData<T>, // A marker... Because of
// https://github.com/rust-lang/rfcs/blob/master/text/0738-variance.md#the-corner-case-unused-parameters-and-parameters-that-are-only-used-unsafely
}
impl<T: Encodable + Decodable + Send + Clone> PersistentState<T> {
pub fn new(current_term: u64, log_path: Path) -> PersistentState<T> {
let mut open_opts = OpenOptions::new();
open_opts.read(true);
open_opts.write(true);
open_opts.create(true);
let mut file = open_opts.open(&log_path).unwrap();
write!(&mut file, "{:20} {:20}\n", current_term, 0).unwrap();
PersistentState {
current_term: current_term,
voted_for: None,
log: file,
last_index: 0,
last_term: 0,
marker: marker::PhantomData,
}
}
/// Gets the `last_index` which you can use to make append requests with.
pub fn get_last_index(&self) -> u64 { self.last_index }
pub fn get_last_term(&self) -> u64 { self.last_term }
/// Gets the `current_term` which is used for request vote.
pub fn get_current_term(&self) -> u64 { self.current_term }
/// Sets the current_term. **This should reflect on stable storage.**
pub fn set_current_term(&mut self, new: u64) -> io::Result<()> {
// The first line is the header with `current_term`, `voted_for`.
self.log.seek(io::SeekFrom::Start(0)); // Take the start.
self.current_term = new;
// TODO: What do we do about the none case?
write!(&mut self.log, "{:20} {:20}\n", self.current_term, self.voted_for.unwrap_or(0))
}
/// Increments the current_term. **This should reflect on stable storage.**
pub fn inc_current_term(&mut self) { self.current_term += 1 }
/// Gets the `voted_for`.
pub fn get_voted_for(&mut self) -> Option<u64> { self.voted_for }
/// Sets the `voted_for`. **This should reflect on stable storage.**
pub fn set_voted_for(&mut self, new: Option<u64>) -> io::Result<()> {
// The first line is the header with `current_term`, `voted_for`.
self.log.seek(io::SeekFrom::Start(0)); // Take the start.
self.voted_for = new;
// TODO: What do we do about the none case?
write!(&mut self.log, "{:20} {:20}\n", self.current_term, self.voted_for.unwrap_or(0))
}
pub fn append_entries(&mut self, prev_log_index: u64, prev_log_term: u64,
entries: Vec<(u64, T)>) -> io::Result<()> {
// TODO: No checking of `prev_log_index` & `prev_log_term` yet... Do we need to?
let position = try!(self.move_to(prev_log_index + 1));
let number = entries.len();
let last_term = entries[entries.len() - 1].0;
try!(self.purge_from_bytes(position)); // Update `last_log_index` later.
// TODO: Possibly purge.
for (term, entry) in entries {
// TODO: I don't like the "doubling" here. How can we do this better?
write!(&mut self.log, "{} {}\n", term, PersistentState::encode(entry));
}
self.last_index = if prev_log_index == 0 {
number as u64 - 1
} else { prev_log_index + number as u64 };
self.last_term = last_term;
Ok(())
}
fn encode(entry: T) -> String {
let json_encoded = json::encode(&entry)
.unwrap(); // TODO: Don't unwrap.
json_encoded.as_bytes().to_base64(Config {
char_set: CharacterSet::UrlSafe,
newline: Newline::LF,
pad: false,
line_length: None,
})
}
fn decode(bytes: String) -> Result<T, rustc_serialize::json::DecoderError> {
let based = bytes.from_base64()
.ok().expect("Decoding error. log likely corrupt.");
let string = str::from_utf8(based.as_slice())
.unwrap();
json::decode::<T>(string)
}
/// Returns the number of bytes containing `line` lines.
/// TODO: Cache?
fn move_to(&mut self, line: u64) -> io::Result<u64> {
// Gotcha: The first line is NOT a log entry.
let mut lines_read = 0u64;
self.log.seek(io::SeekFrom::Start(0)); // Take the start.
// Go until we've reached `from` new lines.
let _ = self.log.by_ref().chars().skip_while(|opt| {
match *opt {
Ok(val) => {
if val == '\n' {
lines_read += 1;
if lines_read > line { // Greater than here because the first line is a bust.
false // At right location.
} else {
true // Not done yet, more lines to go.
}
} else {
true // Not a new line.
}
},
_ => false // At EOF. Nothing to purge.
}
}).next(); // Side effects.
self.log.seek(io::SeekFrom::Current(0)) // Where are we?
}
/// Do *not* invoke this unless you update the `last_index`!
fn purge_from_bytes(&mut self, from_bytes: u64) -> io::Result<()> {
self.log.set_len(from_bytes) // Chop off the file at the given position.
}
/// Removes all entries from `from` to the last entry, inclusively.
pub fn purge_from_index(&mut self, from_line: u64) -> io::Result<()> {
let position = try!(self.move_to(from_line));
self.last_index = from_line - 1;
self.purge_from_bytes(position)
}
pub fn retrieve_entries(&mut self, start: u64, end: u64) -> io::Result<Vec<(u64, T)>> {
let position = self.move_to(start);
let mut index = start;
let mut out = vec![];
let mut read_in = self.log.by_ref()
.chars()
.take_while(|val| val.is_ok())
.filter_map(|val| val.ok()); // We don't really care about issues here.
for index in range(start, end + 1) {
let mut chars = read_in.by_ref()
.take_while(|&val| val != '\n')
.collect::<String>();
if chars.len() == 0 { continue; }
let entry = try!(parse_entry::<T>(chars));
out.push(entry);
}
Ok(out)
}
pub fn retrieve_entry(&mut self, index: u64) -> io::Result<(u64, T)> {
let position = self.move_to(index);
let mut chars = self.log.by_ref()
.chars()
.take_while(|val| val.is_ok())
.filter_map(|val| val.ok()) // We don't really care about issues here.
.take_while(|&val| val != '\n').collect::<String>();
parse_entry::<T>(chars)
}
}
fn parse_entry<T: Encodable + Decodable + Send + Clone>(val: String) -> io::Result<(u64, T)> {
let mut splits = val.split(' ');
let term = {
let chunk = splits.next()
.and_then(|v| v.parse::<u64>().ok());
match chunk {
Some(v) => v,
None => return Err(io::Error::new(io::ErrorKind::InvalidInput, "Could not parse term.", None)),
}
};
let encoded = {
let chunk = splits.next();
match chunk {
Some(v) => v,
None => return Err(io::Error::new(io::ErrorKind::InvalidInput, "Could not parse encoded data.", None)),
}
};
let decoded: T = PersistentState::decode(encoded.to_string())
.ok().expect("Could not unwrap log entry.");
Ok((term, decoded))
}
/// Volatile state
#[derive(Copy)]
pub struct VolatileState {
pub commit_index: u64,
pub last_applied: u64
}
/// Leader Only
/// **Reinitialized after election.**
#[derive(PartialEq, Eq, Clone)]
pub struct LeaderState {
pub next_index: Vec<u64>,
pub match_index: Vec<u64>
}
/// Nodes can either be:
///
/// * A `Follower`, which replicates AppendEntries requests and votes for its leader.
/// * A `Leader`, which leads the cluster by serving incoming requests, ensuring data is
/// replicated, and issuing heartbeats.
/// * A `Candidate`, which campaigns in an election and may become a `Leader` (if it gets enough
/// votes) or a `Follower`, if it hears from a `Leader`.
#[derive(PartialEq, Eq, Clone)]
pub enum NodeState {
Follower,
Leader(LeaderState),
Candidate(Vec<Transaction>),
}
#[derive(PartialEq, Eq, Clone)]
pub struct Transaction {
pub uuid: Uuid,
pub state: TransactionState,
}
/// Used to signify the state of a Request/Response pair. This is only needed
/// on the original sender... not on the receiver.
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum TransactionState {
Polling,
Accepted,
Rejected,
}
#[test]
fn test_persistent_state() {
let path = Path::new("/tmp/test_path");
fs::remove_file(&path.clone());
let mut state = PersistentState::new(0, path.clone());
// Add 0, 1
assert_eq!(state.append_entries(0, 0,
vec![(0, "Zero".to_string()),
(1, "One".to_string())]),
Ok(()));
// Check index.
assert_eq!(state.get_last_index(), 1);
// Check 0
assert_eq!(state.retrieve_entry(0),
Ok((0, "Zero".to_string())));
// Check 0, 1
assert_eq!(state.retrieve_entries(0, 1),
Ok(vec![(0, "Zero".to_string()),
(1, "One".to_string())
]));
// Check 1
assert_eq!(state.retrieve_entry(1),
Ok((1, "One".to_string())));
// Add 2, 3
assert_eq!(state.append_entries(1, 2,
vec![(2, "Two".to_string()),
(3, "Three".to_string())]),
Ok(()));
assert_eq!(state.get_last_index(), 3);
// Check 2, 3
assert_eq!(state.retrieve_entries(2, 3),
Ok(vec![(2, "Two".to_string()),
(3, "Three".to_string())
]));
// Remove 2, 3
assert_eq!(state.purge_from_index(2),
Ok(()));
assert_eq!(state.get_last_index(), 1);
// Check 3,4 are removed, and that code handles lack of entry gracefully.
assert_eq!(state.retrieve_entries(0, 4),
Ok(vec![(0, "Zero".to_string()),
(1, "One".to_string())
]));
// Add 2,3,4.
assert_eq!(state.append_entries(1, 2,
vec![(2, "Two".to_string()), | // Add 2,3 again. (4 should be purged)
assert_eq!(state.append_entries(1, 2,
vec![(2, "Two".to_string()),
(3, "Three".to_string())]),
Ok(()));
assert_eq!(state.get_last_index(), 3);
fs::remove_file(&path.clone());
} | (3, "Three".to_string()),
(4, "Four".to_string())]),
Ok(()));
assert_eq!(state.get_last_index(), 4); | random_line_split |
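// Sketch, modern Rust: the byte-offset computation `move_to` performs above,
// written against std::io::BufRead. Keeps the same convention that line 0 is
// the current_term/voted_for header, so entry `i` lives on file line `i + 1`.
// Illustrative only; error handling simplified.
use std::io::{self, BufRead, BufReader, Read, Seek, SeekFrom};

fn byte_offset_of_entry<R: Read + Seek>(file: &mut R, entry: u64) -> io::Result<u64> {
    file.seek(SeekFrom::Start(0))?;
    let mut reader = BufReader::new(file);
    let mut offset = 0u64;
    let mut line = String::new();
    // Skip the header plus `entry` log lines.
    for _ in 0..=entry {
        line.clear();
        let n = reader.read_line(&mut line)?;
        if n == 0 {
            break; // EOF: the offset is simply the end of the file.
        }
        offset += n as u64;
    }
    Ok(offset)
}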
types.rs | extern crate "rustc-serialize" as rustc_serialize;
extern crate uuid;
use uuid::Uuid;
use rustc_serialize::{json, Encodable, Decodable};
use rustc_serialize::base64::{ToBase64, FromBase64, Config, CharacterSet, Newline};
use types::NodeState::{Leader, Follower, Candidate};
use types::TransactionState::{Polling, Accepted, Rejected};
use std::fs::{File, OpenOptions};
use std::fs;
use std::str;
use std::str::StrExt;
use std::io;
use std::io::{Write, ReadExt, Seek};
use std::old_io::IoError;
use std::marker;
/// Persistent state
/// **Must be updated to stable storage before RPC response.**
pub struct PersistentState<T: Encodable + Decodable + Send + Clone> {
current_term: u64,
voted_for: Option<u64>, // request_vote cares if this is `None`
log: File,
last_index: u64, // The last index of the file.
last_term: u64, // The last term of the file.
marker: marker::PhantomData<T>, // A marker... Because of
// https://github.com/rust-lang/rfcs/blob/master/text/0738-variance.md#the-corner-case-unused-parameters-and-parameters-that-are-only-used-unsafely
}
impl<T: Encodable + Decodable + Send + Clone> PersistentState<T> {
pub fn new(current_term: u64, log_path: Path) -> PersistentState<T> {
let mut open_opts = OpenOptions::new();
open_opts.read(true);
open_opts.write(true);
open_opts.create(true);
let mut file = open_opts.open(&log_path).unwrap();
write!(&mut file, "{:20} {:20}\n", current_term, 0).unwrap();
PersistentState {
current_term: current_term,
voted_for: None,
log: file,
last_index: 0,
last_term: 0,
marker: marker::PhantomData,
}
}
/// Gets the `last_index` which you can use to make append requests with.
pub fn get_last_index(&self) -> u64 { self.last_index }
pub fn get_last_term(&self) -> u64 { self.last_term }
/// Gets the `current_term` which is used for request vote.
pub fn get_current_term(&self) -> u64 { self.current_term }
/// Sets the current_term. **This should reflect on stable storage.**
pub fn set_current_term(&mut self, new: u64) -> io::Result<()> {
// The first line is the header with `current_term`, `voted_for`.
self.log.seek(io::SeekFrom::Start(0)); // Take the start.
self.current_term = new;
// TODO: What do we do about the none case?
write!(&mut self.log, "{:20} {:20}\n", self.current_term, self.voted_for.unwrap_or(0))
}
/// Increments the current_term. **This should reflect on stable storage.**
pub fn inc_current_term(&mut self) { self.current_term += 1 }
/// Gets the `voted_for`.
pub fn get_voted_for(&mut self) -> Option<u64> { self.voted_for }
/// Sets the `voted_for`. **This should reflect on stable storage.**
pub fn set_voted_for(&mut self, new: Option<u64>) -> io::Result<()> {
// The first line is the header with `current_term`, `voted_for`.
self.log.seek(io::SeekFrom::Start(0)); // Take the start.
self.voted_for = new;
// TODO: What do we do about the none case?
write!(&mut self.log, "{:20} {:20}\n", self.current_term, self.voted_for.unwrap_or(0))
}
pub fn append_entries(&mut self, prev_log_index: u64, prev_log_term: u64,
entries: Vec<(u64, T)>) -> io::Result<()> {
// TODO: No checking of `prev_log_index` & `prev_log_term` yet... Do we need to?
let position = try!(self.move_to(prev_log_index + 1));
let number = entries.len();
let last_term = entries[entries.len() - 1].0;
try!(self.purge_from_bytes(position)); // Update `last_log_index` later.
// TODO: Possibly purge.
for (term, entry) in entries {
// TODO: I don't like the "doubling" here. How can we do this better?
write!(&mut self.log, "{} {}\n", term, PersistentState::encode(entry));
}
self.last_index = if prev_log_index == 0 {
number as u64 - 1
} else | ;
self.last_term = last_term;
Ok(())
}
fn encode(entry: T) -> String {
let json_encoded = json::encode(&entry)
.unwrap(); // TODO: Don't unwrap.
json_encoded.as_bytes().to_base64(Config {
char_set: CharacterSet::UrlSafe,
newline: Newline::LF,
pad: false,
line_length: None,
})
}
fn decode(bytes: String) -> Result<T, rustc_serialize::json::DecoderError> {
let based = bytes.from_base64()
.ok().expect("Decoding error. log likely corrupt.");
let string = str::from_utf8(based.as_slice())
.unwrap();
json::decode::<T>(string)
}
/// Returns the number of bytes containing `line` lines.
/// TODO: Cache?
fn move_to(&mut self, line: u64) -> io::Result<u64> {
// Gotcha: The first line is NOT a log entry.
let mut lines_read = 0u64;
self.log.seek(io::SeekFrom::Start(0)); // Take the start.
// Go until we've reached `from` new lines.
let _ = self.log.by_ref().chars().skip_while(|opt| {
match *opt {
Ok(val) => {
if val == '\n' {
lines_read += 1;
if lines_read > line { // Greater than here because the first line is a bust.
false // At right location.
} else {
true // Not done yet, more lines to go.
}
} else {
true // Not a new line.
}
},
_ => false // At EOF. Nothing to purge.
}
}).next(); // Side effects.
self.log.seek(io::SeekFrom::Current(0)) // Where are we?
}
/// Do *not* invoke this unless you update the `last_index`!
fn purge_from_bytes(&mut self, from_bytes: u64) -> io::Result<()> {
self.log.set_len(from_bytes) // Chop off the file at the given position.
}
/// Removes all entries from `from` to the last entry, inclusively.
pub fn purge_from_index(&mut self, from_line: u64) -> io::Result<()> {
let position = try!(self.move_to(from_line));
self.last_index = from_line - 1;
self.purge_from_bytes(position)
}
pub fn retrieve_entries(&mut self, start: u64, end: u64) -> io::Result<Vec<(u64, T)>> {
let position = self.move_to(start);
let mut index = start;
let mut out = vec![];
let mut read_in = self.log.by_ref()
.chars()
.take_while(|val| val.is_ok())
.filter_map(|val| val.ok()); // We don't really care about issues here.
for index in range(start, end + 1) {
let mut chars = read_in.by_ref()
.take_while(|&val| val != '\n')
.collect::<String>();
if chars.len() == 0 { continue; }
let entry = try!(parse_entry::<T>(chars));
out.push(entry);
}
Ok(out)
}
pub fn retrieve_entry(&mut self, index: u64) -> io::Result<(u64, T)> {
let position = self.move_to(index);
let mut chars = self.log.by_ref()
.chars()
.take_while(|val| val.is_ok())
.filter_map(|val| val.ok()) // We don't really care about issues here.
.take_while(|&val| val != '\n').collect::<String>();
parse_entry::<T>(chars)
}
}
fn parse_entry<T: Encodable + Decodable + Send + Clone>(val: String) -> io::Result<(u64, T)> {
let mut splits = val.split(' ');
let term = {
let chunk = splits.next()
.and_then(|v| v.parse::<u64>().ok());
match chunk {
Some(v) => v,
None => return Err(io::Error::new(io::ErrorKind::InvalidInput, "Could not parse term.", None)),
}
};
let encoded = {
let chunk = splits.next();
match chunk {
Some(v) => v,
None => return Err(io::Error::new(io::ErrorKind::InvalidInput, "Could not parse encoded data.", None)),
}
};
let decoded: T = PersistentState::decode(encoded.to_string())
.ok().expect("Could not unwrap log entry.");
Ok((term, decoded))
}
/// Volatile state
#[derive(Copy)]
pub struct VolatileState {
pub commit_index: u64,
pub last_applied: u64
}
/// Leader Only
/// **Reinitialized after election.**
#[derive(PartialEq, Eq, Clone)]
pub struct LeaderState {
pub next_index: Vec<u64>,
pub match_index: Vec<u64>
}
/// Nodes can either be:
///
/// * A `Follower`, which replicates AppendEntries requests and votes for its leader.
/// * A `Leader`, which leads the cluster by serving incoming requests, ensuring data is
/// replicated, and issuing heartbeats.
/// * A `Candidate`, which campaigns in an election and may become a `Leader` (if it gets enough
/// votes) or a `Follower`, if it hears from a `Leader`.
#[derive(PartialEq, Eq, Clone)]
pub enum NodeState {
Follower,
Leader(LeaderState),
Candidate(Vec<Transaction>),
}
#[derive(PartialEq, Eq, Clone)]
pub struct Transaction {
pub uuid: Uuid,
pub state: TransactionState,
}
/// Used to signify the state of a Request/Response pair. This is only needed
/// on the original sender... not on the receiver.
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum TransactionState {
Polling,
Accepted,
Rejected,
}
#[test]
fn test_persistent_state() {
let path = Path::new("/tmp/test_path");
fs::remove_file(&path.clone());
let mut state = PersistentState::new(0, path.clone());
// Add 0, 1
assert_eq!(state.append_entries(0, 0,
vec![(0, "Zero".to_string()),
(1, "One".to_string())]),
Ok(()));
// Check index.
assert_eq!(state.get_last_index(), 1);
// Check 0
assert_eq!(state.retrieve_entry(0),
Ok((0, "Zero".to_string())));
// Check 0, 1
assert_eq!(state.retrieve_entries(0, 1),
Ok(vec![(0, "Zero".to_string()),
(1, "One".to_string())
]));
// Check 1
assert_eq!(state.retrieve_entry(1),
Ok((1, "One".to_string())));
// Add 2, 3
assert_eq!(state.append_entries(1, 2,
vec![(2, "Two".to_string()),
(3, "Three".to_string())]),
Ok(()));
assert_eq!(state.get_last_index(), 3);
// Check 2, 3
assert_eq!(state.retrieve_entries(2, 3),
Ok(vec![(2, "Two".to_string()),
(3, "Three".to_string())
]));
// Remove 2, 3
assert_eq!(state.purge_from_index(2),
Ok(()));
assert_eq!(state.get_last_index(), 1);
// Check 3,4 are removed, and that code handles lack of entry gracefully.
assert_eq!(state.retrieve_entries(0, 4),
Ok(vec![(0, "Zero".to_string()),
(1, "One".to_string())
]));
// Add 2,3,4.
assert_eq!(state.append_entries(1, 2,
vec![(2, "Two".to_string()),
(3, "Three".to_string()),
(4, "Four".to_string())]),
Ok(()));
assert_eq!(state.get_last_index(), 4);
// Add 2,3 again. (4 should be purged)
assert_eq!(state.append_entries(1, 2,
vec![(2, "Two".to_string()),
(3, "Three".to_string())]),
Ok(()));
assert_eq!(state.get_last_index(), 3);
fs::remove_file(&path.clone());
}
| { prev_log_index + number as u64 } | conditional_block |
feature_table.rs | //! # Feature Table
//!
//! Data model and parsers for the DDBJ/ENA/GenBank Feature Table.
//!
//! See: http://www.insdc.org/files/feature_table.html
use nom::{
IResult,
branch::{
alt,
},
bytes::complete::{
tag,
take_while_m_n,
take_while,
take_while1,
},
character::{
is_alphanumeric,
},
combinator::{
cut,
map,
opt,
verify,
},
error::{
ParseError,
},
multi::{
// many1,
separated_list,
},
sequence::{
tuple,
},
};
use super::parser::Nommed;
#[derive(Debug, PartialEq, Eq)]
pub struct FeatureTable {
features: Vec<FeatureRecord>
}
#[derive(Debug, PartialEq, Eq)]
pub struct FeatureRecord {
key: String,
location: LocOp,
qualifiers: Vec<Qualifier>
}
// impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for FeatureRecord {
// fn nom(input: &'a str) -> IResult<&'a str, FeatureRecord, E> {
// }
// }
/// An ID that's valid within the feature table.
///
/// This is:
/// * At least one letter
/// * Upper case, lower case letters
/// * Numbers 0..9
/// * Underscore (_)
/// * Hyphen (-)
/// * Single quote (')
/// * Asterisk (*)
/// The maximum length is 20 characters.
#[derive(Debug, PartialEq, Eq)]
pub struct FtString(String);
// little utility for ranges.
//
// Note: couldn't use 'a'..='b' because this is an iterator, so doesn't
// implement `Copy`.
#[derive(Clone, Copy)]
struct Interval<T>(T, T);
impl <T : PartialOrd> Interval<T> {
fn contains(&self, e: &T) -> bool {
self.0 <= *e &&
*e <= self.1
}
}
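// Sketch: in current Rust the `Interval` workaround can be replaced by
// rebuilding `RangeInclusive` values inside the closure, since
// `RangeInclusive::contains` takes `&self`. Same character classes as the
// `FtString` parser below; illustrative only.
fn is_ft_char(c: char) -> bool {
    ('A'..='Z').contains(&c)
        || ('a'..='z').contains(&c)
        || ('0'..='9').contains(&c)
        || "_-'*".contains(c)
}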
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for FtString {
fn nom(input: &'a str) -> IResult<&'a str, FtString, E> {
let uc = Interval('A', 'Z');
let lc = Interval('a', 'z');
let di = Interval('0', '9');
let misc = "_-'*";
let ft_char = {
move |c: char|
uc.contains(&c) ||
lc.contains(&c) ||
di.contains(&c) ||
misc.contains(c)
};
let alpha = {
move |c: char|
uc.contains(&c) ||
lc.contains(&c)
};
map(
verify(
take_while_m_n(1, 20, ft_char),
move |s: &str| s.chars().any(alpha)
),
|s: &str| FtString(s.to_string())
)(input)
}
}
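// Sketch: expected behaviour of the grammar above on sample inputs, derived
// from the doc-comment rules (1..=20 chars, at least one letter). An
// illustrative test mirroring the VerboseError harness at the file bottom.
#[cfg(test)]
mod ft_string_behaviour_sketch {
    use super::*;
    use nom::error::VerboseError;

    fn parse(input: &str) -> Result<FtString, ()> {
        match <FtString as Nommed<&str, VerboseError<&str>>>::nom(input) {
            Ok(("", v)) => Ok(v),
            _ => Err(()),
        }
    }

    #[test]
    fn examples() {
        assert!(parse("rpt_type").is_ok()); // letters plus underscore
        assert!(parse("5'UTR").is_ok()); // digits and quotes are fine with a letter present
        assert!(parse("12345").is_err()); // no letter: rejected by `verify`
    }
}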
#[derive(Debug, PartialEq, Eq)]
pub struct Qualifier {
name: FtString,
value: Option<QualifierValue>
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Qualifier {
fn nom(input: &'a str) -> IResult<&'a str, Qualifier, E> {
let parse_name = map(tuple((tag("/"), FtString::nom)), |(_, n)| n);
let parse_value = map(tuple((tag("="), QualifierValue::nom)), |(_, v)| v);
map(
tuple((parse_name, opt(parse_value))),
|(name, value)| Qualifier{ name, value }
)(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum QualifierValue {
QuotedText(String),
VocabularyTerm(FtString),
ReferenceNumber(u32),
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for QualifierValue{
fn nom(input: &'a str) -> IResult<&'a str, QualifierValue, E> {
let parse_quoted_text =
map(
tuple((tag("\""), take_while(|c| c != '"'), tag("\""))),
|(_, v, _): (&str, &str, &str)| QualifierValue::QuotedText(v.to_string()));
let parse_vocabulary_term =
map(
FtString::nom,
QualifierValue::VocabularyTerm);
let parse_reference_number =
map(
tuple((tag("["), u32::nom, tag("]"))),
|(_, d, _)| QualifierValue::ReferenceNumber(d));
alt((
parse_quoted_text,
parse_vocabulary_term,
parse_reference_number
))(input)
}
}
//
//
// Location data model starts here
//
// Should really be in a sub-module I guess
//
//
/// A point within a sequence, representing a specific nucleotide. Counts from 1.
#[derive(Debug, PartialEq, Eq)]
pub struct Point(u32);
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Point {
fn nom(input: &'a str) -> IResult<&'a str, Point, E> {
map(u32::nom, Point)(input)
}
}
/// A position between two bases in a sequence.
///
/// For example, 122^123. The locations must be consecutive.
///
/// For example, 100^1 for a circular sequence of length 100.
#[derive(Debug, PartialEq, Eq)]
pub struct Between(u32, u32);
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Between {
fn nom(input: &'a str) -> IResult<&'a str, Between, E> {
map(
tuple((
u32::nom,
tag("^"),
u32::nom
)),
|(from, _, to)| Between(from, to)
)(input)
}
}
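// Sketch: the consecutiveness rule from the doc comment, as a predicate.
// Assumes 1-based positions (matching `Point`) and that `seq_len` covers the
// circular case, e.g. 100^1 on a length-100 sequence. Illustrative only.
fn between_is_consecutive(b: &Between, seq_len: u32) -> bool {
    b.1 == b.0 + 1 || (b.0 == seq_len && b.1 == 1)
}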
#[derive(Debug, PartialEq, Eq)]
pub enum Position {
Point(Point),
Between(Between)
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Position {
fn nom(input: &'a str) -> IResult<&'a str, Position, E> {
alt((
map(Between::nom, Position::Between),
map(Point::nom, Position::Point)
))(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum Local {
Point(Point),
Between(Between),
Within { from: Point, to: Point },
Span { from: Position, to: Position, before_from: bool, after_to: bool },
}
impl Local {
pub fn span(from: u32, to: u32) -> Local {
Local::Span {
from: Position::Point(Point(from)),
to: Position::Point(Point(to)),
before_from: false,
after_to: false }
}
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Local {
fn nom(input: &'a str) -> IResult<&'a str, Local, E> {
let parse_within = map(
tuple((Point::nom, tag("."), Point::nom)),
|(from, _, to)| Local::Within { from, to });
let parse_span = map(
tuple((
opt(tag("<")), Position::nom, tag(".."), opt(tag(">")), Position::nom)),
|(before_from, from, _, after_to, to)| Local::Span {
from,
to,
before_from: before_from.is_some(),
after_to: after_to.is_some() }
);
alt((
map(Between::nom, Local::Between),
parse_within,
parse_span,
map(Point::nom, Local::Point), // must do this last as it's a prefix of the others
))(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum Loc {
Remote { within: String, at: Local },
Local(Local)
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Loc {
fn nom(input: &'a str) -> IResult<&'a str, Loc, E> {
let parse_accession = take_while1(|c| {
let b = c as u8;
is_alphanumeric(b) || b == b'.'
});
alt((
map(
tuple((parse_accession, tag(":"), Local::nom)),
|(within, _, at)| Loc::Remote { within: within.to_string(), at }
),
map(Local::nom, Loc::Local)
))(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum LocOp {
Loc(Loc),
Complement(Box<LocOp>),
Join(Vec<LocOp>),
Order(Vec<LocOp>)
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for LocOp {
fn nom(input: &'a str) -> IResult<&'a str, LocOp, E> {
let parse_complement =
map(
tuple((
tag("complement("),
cut(LocOp::nom),
tag(")")
)),
|(_, loc, _)| loc
);
let parse_join =
map(
tuple((
tag("join("),
cut(separated_list(tag(","), LocOp::nom)),
tag(")")
)),
|(_, locs, _)| locs
);
let parse_order =
map(
tuple((
tag("order("),
cut(separated_list(tag(","), LocOp::nom)),
tag(")")
)),
|(_, locs, _)| locs
);
alt((
map(Loc::nom, LocOp::Loc),
map(parse_complement, |loc| LocOp::Complement(Box::new(loc))),
map(parse_join, LocOp::Join),
map(parse_order, LocOp::Order)
))(input)
}
}
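// Sketch: the role of `cut` above. Once "complement(" (or "join("/"order(")
// has matched, a malformed inner location should become a hard failure rather
// than letting the outer `alt` backtrack into another branch and mask the
// real error. Minimal illustration with the same nom-5-era combinators:
fn parse_strict_parens<'a, E: ParseError<&'a str>>(
    input: &'a str,
) -> IResult<&'a str, u32, E> {
    map(tuple((tag("("), cut(u32::nom), tag(")"))), |(_, n, _)| n)(input)
}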
#[cfg(test)]
mod tests {
use super::*;
use nom::error::{
convert_error,
VerboseError,
};
fn assert_nom_to_expected<'a, T>() -> impl Fn(&'a str, T) -> ()
where
T: Nommed<&'a str, VerboseError<&'a str>> + std::fmt::Debug + PartialEq
{
move |input: &str, expected: T| {
match T::nom(input) {
Ok((rem, ref res)) if !rem.is_empty() => panic!("Non-empty remaining input {}, parsed out {:?}", rem, res),
Ok((_, res)) => assert_eq!(res, expected, "Got result {:?} but expected {:?}", res, expected),
Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => panic!("Problem: {}", convert_error(input, e)),
e => panic!("Unknown error: {:?}", e)
}
}
}
// #[test]
// fn test_parse_feature_record_from_spec() {
// let expect = assert_nom_to_expected::<FeatureRecord>();
// expect(
// r#"
// source 1..1000
// /culture_collection="ATCC:11775"
// /culture_collection="CECT:515"
// "#,
// FeatureRecord {
// key: "source".to_string(),
// location: LocOp::Loc(Loc::Local(Local::span(1, 1000))),
// qualifiers: vec![]
// }
// )
// }
#[test]
fn test_parse_qualifiers_from_spec() {
let expect = assert_nom_to_expected::<Qualifier>();
expect(
"/pseudo",
Qualifier {
name: FtString("pseudo".to_string()),
value: None });
expect(
"/citation=[1]",
Qualifier {
name: FtString("citation".to_string()),
value: Some(QualifierValue::ReferenceNumber(1)) });
expect(
"/gene=\"arsC\"",
Qualifier {
name: FtString("gene".to_string()),
value: Some(QualifierValue::QuotedText("arsC".to_string()))});
expect(
"/rpt_type=DISPERSED",
Qualifier {
name: FtString("rpt_type".to_string()),
value: Some(QualifierValue::VocabularyTerm(FtString("DISPERSED".to_string())))});
}
#[test]
fn test_parse_locations_from_spec() {
let expect = assert_nom_to_expected::<LocOp>();
expect(
"467",
LocOp::Loc(Loc::Local(Local::Point(Point(467)))));
| expect(
"340..565",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(340)),
to: Position::Point(Point(565)),
before_from: false,
after_to: false
})));
expect(
"<345..500",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(345)),
to: Position::Point(Point(500)),
before_from: true,
after_to: false
})));
expect(
"<1..888",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(1)),
to: Position::Point(Point(888)),
before_from: true,
after_to: false
})));
expect(
"1..>888",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(1)),
to: Position::Point(Point(888)),
before_from: false,
after_to: true
})));
expect(
"102.110",
LocOp::Loc(Loc::Local(Local::Within { from: Point(102), to: Point(110) })));
expect(
"123^124",
LocOp::Loc(Loc::Local(Local::Between(Between(123, 124)))));
expect(
"join(12..78)",
LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(12, 78)))]));
expect(
"join(12..78,134..202)",
LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(12, 78))),
LocOp::Loc(Loc::Local(Local::span(134, 202)))]));
expect(
"complement(34..126)",
LocOp::Complement(Box::new(LocOp::Loc(Loc::Local(Local::span(34, 126))))));
expect(
"complement(join(2691..4571,4918..5163))",
LocOp::Complement(Box::new(LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(2691, 4571))),
LocOp::Loc(Loc::Local(Local::span(4918, 5163)))
]))));
expect(
"join(complement(4918..5163),complement(2691..4571))",
LocOp::Join(vec![
LocOp::Complement(Box::new(LocOp::Loc(Loc::Local(Local::span(4918, 5163))))),
LocOp::Complement(Box::new(LocOp::Loc(Loc::Local(Local::span(2691, 4571)))))
]));
expect(
"J00194.1:100..202",
LocOp::Loc(Loc::Remote{ within: String::from("J00194.1"), at: Local::span(100, 202) }));
expect(
"join(1..100,J00194.1:100..202)",
LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(1, 100))),
LocOp::Loc(Loc::Remote { within: String::from("J00194.1"), at: Local::span(100, 202)})
]));
}
} | random_line_split |
|
feature_table.rs | //! # Feature Table
//!
//! Data model and parsers for the DDBJ/ENA/GenBank Feature Table.
//!
//! See: http://www.insdc.org/files/feature_table.html
use nom::{
IResult,
branch::{
alt,
},
bytes::complete::{
tag,
take_while_m_n,
take_while,
take_while1,
},
character::{
is_alphanumeric,
},
combinator::{
cut,
map,
opt,
verify,
},
error::{
ParseError,
},
multi::{
// many1,
separated_list,
},
sequence::{
tuple,
},
};
use super::parser::Nommed;
#[derive(Debug, PartialEq, Eq)]
pub struct FeatureTable {
features: Vec<FeatureRecord>
}
#[derive(Debug, PartialEq, Eq)]
pub struct FeatureRecord {
key: String,
location: LocOp,
qualifiers: Vec<Qualifier>
}
// impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for FeatureRecord {
// fn nom(input: &'a str) -> IResult<&'a str, FeatureRecord, E> {
// }
// }
/// An ID that's valid within the feature table.
///
/// This is:
/// * At least one letter
/// * Upper case, lower case letters
/// * Numbers 0..9
/// * Underscore (_)
/// * Hyphen (-)
/// * Single quote (')
/// * Asterisk (*)
/// The maximum length is 20 characters.
#[derive(Debug, PartialEq, Eq)]
pub struct FtString(String);
// little utility for ranges.
//
// Note: couldn't use 'a'..='b' because this is an iterator, so doesn't
// implement `Copy`.
#[derive(Clone, Copy)]
struct Interval<T>(T, T);
impl <T : PartialOrd> Interval<T> {
fn contains(&self, e: &T) -> bool {
self.0 <= *e &&
*e <= self.1
}
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for FtString {
fn nom(input: &'a str) -> IResult<&'a str, FtString, E> {
let uc = Interval('A', 'Z');
let lc = Interval('a', 'z');
let di = Interval('0', '9');
let misc = "_-'*";
let ft_char = {
move |c: char|
uc.contains(&c) ||
lc.contains(&c) ||
di.contains(&c) ||
misc.contains(c)
};
let alpha = {
move |c: char|
uc.contains(&c) ||
lc.contains(&c)
};
map(
verify(
take_while_m_n(1, 20, ft_char),
move |s: &str| s.chars().any(alpha)
),
|s: &str| FtString(s.to_string())
)(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct Qualifier {
name: FtString,
value: Option<QualifierValue>
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Qualifier {
fn nom(input: &'a str) -> IResult<&'a str, Qualifier, E> {
let parse_name = map(tuple((tag("/"), FtString::nom)), |(_, n)| n);
let parse_value = map(tuple((tag("="), QualifierValue::nom)), |(_, v)| v);
map(
tuple((parse_name, opt(parse_value))),
|(name, value)| Qualifier{ name, value }
)(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum QualifierValue {
QuotedText(String),
VocabularyTerm(FtString),
ReferenceNumber(u32),
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for QualifierValue{
fn nom(input: &'a str) -> IResult<&'a str, QualifierValue, E> {
let parse_quoted_text =
map(
tuple((tag("\""), take_while(|c| c != '"'), tag("\""))),
|(_, v, _): (&str, &str, &str)| QualifierValue::QuotedText(v.to_string()));
let parse_vocabulary_term =
map(
FtString::nom,
QualifierValue::VocabularyTerm);
let parse_reference_number =
map(
tuple((tag("["), u32::nom, tag("]"))),
|(_, d, _)| QualifierValue::ReferenceNumber(d));
alt((
parse_quoted_text,
parse_vocabulary_term,
parse_reference_number
))(input)
}
}
//
//
// Location data model starts here
//
// Should really be in a sub-module I guess
//
//
/// A point within a sequence, representing a specific nucleotide. Counts from 1.
#[derive(Debug, PartialEq, Eq)]
pub struct Point(u32);
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Point {
fn nom(input: &'a str) -> IResult<&'a str, Point, E> {
map(u32::nom, Point)(input)
}
}
/// A position between two bases in a sequence.
///
/// For example, 122^123. The locations must be consecutive.
///
/// For example, 100^1 for a circular sequence of length 100.
#[derive(Debug, PartialEq, Eq)]
pub struct Between(u32, u32);
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Between {
fn nom(input: &'a str) -> IResult<&'a str, Between, E> {
map(
tuple((
u32::nom,
tag("^"),
u32::nom
)),
|(from, _, to)| Between(from, to)
)(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum Position {
Point(Point),
Between(Between)
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Position {
fn nom(input: &'a str) -> IResult<&'a str, Position, E> {
alt((
map(Between::nom, Position::Between),
map(Point::nom, Position::Point)
))(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum Local {
Point(Point),
Between(Between),
Within { from: Point, to: Point },
Span { from: Position, to: Position, before_from: bool, after_to: bool },
}
impl Local {
pub fn span(from: u32, to: u32) -> Local {
Local::Span {
from: Position::Point(Point(from)),
to: Position::Point(Point(to)),
before_from: false,
after_to: false }
}
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Local {
fn nom(input: &'a str) -> IResult<&'a str, Local, E> | ))(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum Loc {
Remote { within: String, at: Local },
Local(Local)
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Loc {
fn nom(input: &'a str) -> IResult<&'a str, Loc, E> {
let parse_accession = take_while1(|c| {
let b = c as u8;
is_alphanumeric(b) || b == b'.'
});
alt((
map(
tuple((parse_accession, tag(":"), Local::nom)),
|(within, _, at)| Loc::Remote { within: within.to_string(), at }
),
map(Local::nom, Loc::Local)
))(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum LocOp {
Loc(Loc),
Complement(Box<LocOp>),
Join(Vec<LocOp>),
Order(Vec<LocOp>)
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for LocOp {
fn nom(input: &'a str) -> IResult<&'a str, LocOp, E> {
let parse_complement =
map(
tuple((
tag("complement("),
cut(LocOp::nom),
tag(")")
)),
|(_, loc, _)| loc
);
let parse_join =
map(
tuple((
tag("join("),
cut(separated_list(tag(","), LocOp::nom)),
tag(")")
)),
|(_, locs, _)| locs
);
let parse_order =
map(
tuple((
tag("order("),
cut(separated_list(tag(","), LocOp::nom)),
tag(")")
)),
|(_, locs, _)| locs
);
alt((
map(Loc::nom, LocOp::Loc),
map(parse_complement, |loc| LocOp::Complement(Box::new(loc))),
map(parse_join, LocOp::Join),
map(parse_order, LocOp::Order)
))(input)
}
}
#[cfg(test)]
mod tests {
use super::*;
use nom::error::{
convert_error,
VerboseError,
};
fn assert_nom_to_expected<'a, T>() -> impl Fn(&'a str, T) -> ()
where
T: Nommed<&'a str, VerboseError<&'a str>> + std::fmt::Debug + PartialEq
{
move |input: &str, expected: T| {
match T::nom(input) {
Ok((rem, ref res)) if !rem.is_empty() => panic!("Non-empty remaining input {}, parsed out {:?}", rem, res),
Ok((_, res)) => assert_eq!(res, expected, "Got result {:?} but expected {:?}", res, expected),
Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => panic!("Problem: {}", convert_error(input, e)),
e => panic!("Unknown error: {:?}", e)
}
}
}
// #[test]
// fn test_parse_feature_record_from_spec() {
// let expect = assert_nom_to_expected::<FeatureRecord>();
// expect(
// r#"
// source 1..1000
// /culture_collection="ATCC:11775"
// /culture_collection="CECT:515"
// "#,
// FeatureRecord {
// key: "source".to_string(),
// location: LocOp::Loc(Loc::Local(Local::span(1, 1000))),
// qualifiers: vec![]
// }
// )
// }
#[test]
fn test_parse_qualifiers_from_spec() {
let expect = assert_nom_to_expected::<Qualifier>();
expect(
"/pseudo",
Qualifier {
name: FtString("pseudo".to_string()),
value: None });
expect(
"/citation=[1]",
Qualifier {
name: FtString("citation".to_string()),
value: Some(QualifierValue::ReferenceNumber(1)) });
expect(
"/gene=\"arsC\"",
Qualifier {
name: FtString("gene".to_string()),
value: Some(QualifierValue::QuotedText("arsC".to_string()))});
expect(
"/rpt_type=DISPERSED",
Qualifier {
name: FtString("rpt_type".to_string()),
value: Some(QualifierValue::VocabularyTerm(FtString("DISPERSED".to_string())))});
}
#[test]
fn test_parse_locations_from_spec() {
let expect = assert_nom_to_expected::<LocOp>();
expect(
"467",
LocOp::Loc(Loc::Local(Local::Point(Point(467)))));
expect(
"340..565",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(340)),
to: Position::Point(Point(565)),
before_from: false,
after_to: false
})));
expect(
"<345..500",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(345)),
to: Position::Point(Point(500)),
before_from: true,
after_to: false
})));
expect(
"<1..888",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(1)),
to: Position::Point(Point(888)),
before_from: true,
after_to: false
})));
expect(
"1..>888",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(1)),
to: Position::Point(Point(888)),
before_from: false,
after_to: true
})));
expect(
"102.110",
LocOp::Loc(Loc::Local(Local::Within { from: Point(102), to: Point(110) })));
expect(
"123^124",
LocOp::Loc(Loc::Local(Local::Between(Between(123, 124)))));
expect(
"join(12..78)",
LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(12, 78)))]));
expect(
"join(12..78,134..202)",
LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(12, 78))),
LocOp::Loc(Loc::Local(Local::span(134, 202)))]));
expect(
"complement(34..126)",
LocOp::Complement(Box::new(LocOp::Loc(Loc::Local(Local::span(34, 126))))));
expect(
"complement(join(2691..4571,4918..5163))",
LocOp::Complement(Box::new(LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(2691, 4571))),
LocOp::Loc(Loc::Local(Local::span(4918, 5163)))
]))));
expect(
"join(complement(4918..5163),complement(2691..4571))",
LocOp::Join(vec![
LocOp::Complement(Box::new(LocOp::Loc(Loc::Local(Local::span(4918, 5163))))),
LocOp::Complement(Box::new(LocOp::Loc(Loc::Local(Local::span(2691, 4571)))))
]));
expect(
"J00194.1:100..202",
LocOp::Loc(Loc::Remote{ within: String::from("J00194.1"), at: Local::span(100, 202) }));
expect(
"join(1..100,J00194.1:100..202)",
LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(1, 100))),
LocOp::Loc(Loc::Remote { within: String::from("J00194.1"), at: Local::span(100, 202)})
]));
}
} | {
let parse_within = map(
tuple((Point::nom, tag("."), Point::nom)),
|(from, _, to)| Local::Within { from, to });
let parse_span = map(
tuple((
opt(tag("<")), Position::nom, tag(".."), opt(tag(">")), Position::nom)),
|(before_from, from, _, after_to, to)| Local::Span {
from,
to,
before_from: before_from.is_some(),
after_to: after_to.is_some() }
);
alt((
map(Between::nom, Local::Between),
parse_within,
parse_span,
map(Point::nom, Local::Point), // must do this last as it's a prefix of the others | identifier_body |
feature_table.rs | //! # Feature Table
//!
//! Data model and parsers for the DDBJ/ENA/GenBank Feature Table.
//!
//! See: http://www.insdc.org/files/feature_table.html
use nom::{
IResult,
branch::{
alt,
},
bytes::complete::{
tag,
take_while_m_n,
take_while,
take_while1,
},
character::{
is_alphanumeric,
},
combinator::{
cut,
map,
opt,
verify,
},
error::{
ParseError,
},
multi::{
// many1,
separated_list,
},
sequence::{
tuple,
},
};
use super::parser::Nommed;
#[derive(Debug, PartialEq, Eq)]
pub struct FeatureTable {
features: Vec<FeatureRecord>
}
#[derive(Debug, PartialEq, Eq)]
pub struct | {
key: String,
location: LocOp,
qualifiers: Vec<Qualifier>
}
// impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for FeatureRecord {
// fn nom(input: &'a str) -> IResult<&'a str, FeatureRecord, E> {
// }
// }
/// An ID that's valid within the feature table.
///
/// This is:
/// * At least one letter
/// * Upper case, lower case letters
/// * Numbers 0..9
/// * Underscore (_)
/// * Hyphen (-)
/// * Single quote (')
/// * Asterisk (*)
/// The maximum length is 20 characters.
#[derive(Debug, PartialEq, Eq)]
pub struct FtString(String);
// little utility for ranges.
//
// Note: couldn't use 'a'..='b' because this is an iterator, so doesn't
// implement `Copy`.
#[derive(Clone, Copy)]
struct Interval<T>(T, T);
impl <T : PartialOrd> Interval<T> {
fn contains(&self, e: &T) -> bool {
self.0 <= *e &&
*e <= self.1
}
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for FtString {
fn nom(input: &'a str) -> IResult<&'a str, FtString, E> {
let uc = Interval('A', 'Z');
let lc = Interval('a', 'z');
let di = Interval('0', '9');
let misc = "_-'*";
let ft_char = {
move |c: char|
uc.contains(&c) ||
lc.contains(&c) ||
di.contains(&c) ||
misc.contains(c)
};
let alpha = {
move |c: char|
uc.contains(&c) ||
lc.contains(&c)
};
map(
verify(
take_while_m_n(1, 20, ft_char),
move |s: &str| s.chars().any(alpha)
),
|s: &str| FtString(s.to_string())
)(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub struct Qualifier {
name: FtString,
value: Option<QualifierValue>
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Qualifier {
fn nom(input: &'a str) -> IResult<&'a str, Qualifier, E> {
let parse_name = map(tuple((tag("/"), FtString::nom)), |(_, n)| n);
let parse_value = map(tuple((tag("="), QualifierValue::nom)), |(_, v)| v);
map(
tuple((parse_name, opt(parse_value))),
|(name, value)| Qualifier{ name, value }
)(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum QualifierValue {
QuotedText(String),
VocabularyTerm(FtString),
ReferenceNumber(u32),
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for QualifierValue{
fn nom(input: &'a str) -> IResult<&'a str, QualifierValue, E> {
let parse_quoted_text =
map(
tuple((tag("\""), take_while(|c| c != '"'), tag("\""))),
|(_, v, _): (&str, &str, &str)| QualifierValue::QuotedText(v.to_string()));
let parse_vocabulary_term =
map(
FtString::nom,
QualifierValue::VocabularyTerm);
let parse_reference_number =
map(
tuple((tag("["), u32::nom, tag("]"))),
|(_, d, _)| QualifierValue::ReferenceNumber(d));
alt((
parse_quoted_text,
parse_vocabulary_term,
parse_reference_number
))(input)
}
}
//
//
// Location data model starts here
//
// Should really be in a sub-module I guess
//
//
/// A point within a sequence, representing a specific nucleotide. Counts from 1.
#[derive(Debug, PartialEq, Eq)]
pub struct Point(u32);
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Point {
fn nom(input: &'a str) -> IResult<&'a str, Point, E> {
map(u32::nom, Point)(input)
}
}
/// A position between two bases in a sequence.
///
/// For example, 122^123. The locations must be consecutive.
///
/// For example, 100^1 for a circular sequence of length 100.
#[derive(Debug, PartialEq, Eq)]
pub struct Between(u32, u32);
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Between {
fn nom(input: &'a str) -> IResult<&'a str, Between, E> {
map(
tuple((
u32::nom,
tag("^"),
u32::nom
)),
|(from, _, to)| Between(from, to)
)(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum Position {
Point(Point),
Between(Between)
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Position {
fn nom(input: &'a str) -> IResult<&'a str, Position, E> {
alt((
map(Between::nom, Position::Between),
map(Point::nom, Position::Point)
))(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum Local {
Point(Point),
Between(Between),
Within { from: Point, to: Point },
Span { from: Position, to: Position, before_from: bool, after_to: bool },
}
impl Local {
pub fn span(from: u32, to: u32) -> Local {
Local::Span {
from: Position::Point(Point(from)),
to: Position::Point(Point(to)),
before_from: false,
after_to: false }
}
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Local {
fn nom(input: &'a str) -> IResult<&'a str, Local, E> {
let parse_within = map(
tuple((Point::nom, tag("."), Point::nom)),
|(from, _, to)| Local::Within { from, to });
let parse_span = map(
tuple((
opt(tag("<")), Position::nom, tag(".."), opt(tag(">")), Position::nom)),
|(before_from, from, _, after_to, to)| Local::Span {
from,
to,
before_from: before_from.is_some(),
after_to: after_to.is_some() }
);
alt((
map(Between::nom, Local::Between),
parse_within,
parse_span,
map(Point::nom, Local::Point), // must do this last as it's a prefix of the others
))(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum Loc {
Remote { within: String, at: Local },
Local(Local)
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for Loc {
fn nom(input: &'a str) -> IResult<&'a str, Loc, E> {
let parse_accession = take_while1(|c| {
let b = c as u8;
is_alphanumeric(b) || b == b'.'
});
alt((
map(
tuple((parse_accession, tag(":"), Local::nom)),
|(within, _, at)| Loc::Remote { within: within.to_string(), at }
),
map(Local::nom, Loc::Local)
))(input)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum LocOp {
Loc(Loc),
Complement(Box<LocOp>),
Join(Vec<LocOp>),
Order(Vec<LocOp>)
}
impl <'a, E : ParseError<&'a str>> Nommed<&'a str, E> for LocOp {
fn nom(input: &'a str) -> IResult<&'a str, LocOp, E> {
let parse_complement =
map(
tuple((
tag("complement("),
cut(LocOp::nom),
tag(")")
)),
|(_, loc, _)| loc
);
let parse_join =
map(
tuple((
tag("join("),
cut(separated_list(tag(","), LocOp::nom)),
tag(")")
)),
|(_, locs, _)| locs
);
let parse_order =
map(
tuple((
tag("order("),
cut(separated_list(tag(","), LocOp::nom)),
tag(")")
)),
|(_, locs, _)| locs
);
alt((
map(Loc::nom, LocOp::Loc),
map(parse_complement, |loc| LocOp::Complement(Box::new(loc))),
map(parse_join, LocOp::Join),
map(parse_order, LocOp::Order)
))(input)
}
}
#[cfg(test)]
mod tests {
use super::*;
use nom::error::{
convert_error,
VerboseError,
};
fn assert_nom_to_expected<'a, T>() -> impl Fn(&'a str, T) -> ()
where
T: Nommed<&'a str, VerboseError<&'a str>> + std::fmt::Debug + PartialEq
{
move |input: &str, expected: T| {
match T::nom(input) {
Ok((rem, ref res)) if !rem.is_empty() => panic!("Non-empty remaining input {}, parsed out {:?}", rem, res),
Ok((_, res)) => assert_eq!(res, expected, "Got result {:?} but expected {:?}", res, expected),
Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => panic!("Problem: {}", convert_error(input, e)),
e => panic!("Unknown error: {:?}", e)
}
}
}
// #[test]
// fn test_parse_feature_record_from_spec() {
// let expect = assert_nom_to_expected::<FeatureRecord>();
// expect(
// r#"
// source 1..1000
// /culture_collection="ATCC:11775"
// /culture_collection="CECT:515"
// "#,
// FeatureRecord {
// key: "source".to_string(),
// location: LocOp::Loc(Loc::Local(Local::span(1, 1000))),
// qualifiers: vec![]
// }
// )
// }
#[test]
fn test_parse_qualifiers_from_spec() {
let expect = assert_nom_to_expected::<Qualifier>();
expect(
"/pseudo",
Qualifier {
name: FtString("pseudo".to_string()),
value: None });
expect(
"/citation=[1]",
Qualifier {
name: FtString("citation".to_string()),
value: Some(QualifierValue::ReferenceNumber(1)) });
expect(
"/gene=\"arsC\"",
Qualifier {
name: FtString("gene".to_string()),
value: Some(QualifierValue::QuotedText("arsC".to_string()))});
expect(
"/rpt_type=DISPERSED",
Qualifier {
name: FtString("rpt_type".to_string()),
value: Some(QualifierValue::VocabularyTerm(FtString("DISPERSED".to_string())))});
}
#[test]
fn test_parse_locations_from_spec() {
let expect = assert_nom_to_expected::<LocOp>();
expect(
"467",
LocOp::Loc(Loc::Local(Local::Point(Point(467)))));
expect(
"340..565",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(340)),
to: Position::Point(Point(565)),
before_from: false,
after_to: false
})));
expect(
"<345..500",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(345)),
to: Position::Point(Point(500)),
before_from: true,
after_to: false
})));
expect(
"<1..888",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(1)),
to: Position::Point(Point(888)),
before_from: true,
after_to: false
})));
expect(
"1..>888",
LocOp::Loc(Loc::Local(Local::Span {
from: Position::Point(Point(1)),
to: Position::Point(Point(888)),
before_from: false,
after_to: true
})));
expect(
"102.110",
LocOp::Loc(Loc::Local(Local::Within { from: Point(102), to: Point(110) })));
expect(
"123^124",
LocOp::Loc(Loc::Local(Local::Between(Between(123, 124)))));
expect(
"join(12..78)",
LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(12, 78)))]));
expect(
"join(12..78,134..202)",
LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(12, 78))),
LocOp::Loc(Loc::Local(Local::span(134, 202)))]));
expect(
"complement(34..126)",
LocOp::Complement(Box::new(LocOp::Loc(Loc::Local(Local::span(34, 126))))));
expect(
"complement(join(2691..4571,4918..5163))",
LocOp::Complement(Box::new(LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(2691, 4571))),
LocOp::Loc(Loc::Local(Local::span(4918, 5163)))
]))));
expect(
"join(complement(4918..5163),complement(2691..4571))",
LocOp::Join(vec![
LocOp::Complement(Box::new(LocOp::Loc(Loc::Local(Local::span(4918, 5163))))),
LocOp::Complement(Box::new(LocOp::Loc(Loc::Local(Local::span(2691, 4571)))))
]));
expect(
"J00194.1:100..202",
LocOp::Loc(Loc::Remote{ within: String::from("J00194.1"), at: Local::span(100, 202) }));
expect(
"join(1..100,J00194.1:100..202)",
LocOp::Join(vec![
LocOp::Loc(Loc::Local(Local::span(1, 100))),
LocOp::Loc(Loc::Remote { within: String::from("J00194.1"), at: Local::span(100, 202)})
]));
}
} | FeatureRecord | identifier_name |
condition_strategy_generators.rs | use crate::ai_utils::playout_result;
use crate::competing_optimizers::StrategyOptimizer;
use crate::condition_strategy::{
Condition, ConditionKind, ConditionStrategy, EvaluatedPriorities, EvaluationData, Rule,
};
use crate::representative_sampling::NewFractalRepresentativeSeedSearch;
use crate::seed_system::{Seed, SingleSeed, SingleSeedGenerator};
use crate::seeds_concrete::CombatChoiceLineagesKind;
use crate::simulation::{Runner, StandardRunner};
use crate::simulation_state::CombatState;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use rand_distr::StandardNormal;
use serde::{Deserialize, Serialize};
use smallvec::alloc::fmt::Formatter;
use std::fmt;
use std::fmt::Display;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
pub type SeedSearch = NewFractalRepresentativeSeedSearch<
ConditionStrategy,
SingleSeed<CombatChoiceLineagesKind>,
SingleSeedGenerator,
>;
pub struct StrategyGeneratorsWithSharedRepresenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
}
}
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct HillClimbSeedInfo<'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds |
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
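// Baseline: score the current strategy once per verification seed; the hill
// climb below only re-scores candidates.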
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
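// Cheap screen first: a candidate must beat the current score on the single
// shuffled-to-front seed before we pay for playouts on the remaining seeds.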
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
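// Sample up to 50 choice-distinguishing conditions, keeping the first one whose
// rule would actually change the best choice at this state; then optionally
// conjoin up to two state conditions that hold right now to narrow its scope.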
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", strategy.strategy.annotation);
}
result
}
}
impl ConditionStrategy {
pub fn bunch_of_random_changes(
&self,
state: &CombatState,
rng: &mut impl Rng,
promising_strategies: &[&ConditionStrategy],
) -> ConditionStrategy {
fn tweak_rules(
rules: &mut Vec<Rule>,
state: &CombatState,
rng: &mut impl Rng,
promising_conditions: &[Condition],
) {
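// Cap the per-rule removal probability so that, in expectation, at most about
// two rules are dropped per pass.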
let remove_chance = 0.05f64.min(2.0 / rules.len() as f64);
rules.retain(|_| rng.gen::<f64>() > remove_chance);
for rule in rules.iter_mut() {
if rng.gen() {
if rule.flat_reward != 0.0 {
rule.flat_reward += rng.sample::<f64, _>(StandardNormal) * 0.2;
}
if rule.block_per_energy_reward != 0.0 {
rule.block_per_energy_reward += rng.sample::<f64, _>(StandardNormal) * 0.02;
}
for value in &mut rule.unblocked_damage_per_energy_rewards {
if *value != 0.0 {
*value += rng.sample::<f64, _>(StandardNormal) * 0.01;
}
}
}
}
for _ in 0..rng.gen_range(10..30) {
let condition;
if rng.gen() || promising_conditions.is_empty() {
condition = Condition::random_generally_relevant_state_distinguisher(state, rng);
} else {
condition = promising_conditions.choose(rng).unwrap().clone();
}
if rng.gen() || rules.is_empty() {
rules.push(Rule {
conditions: vec![
Condition::random_generally_relevant_choice_distinguisher(state, rng),
condition,
],
flat_reward: rng.sample(StandardNormal),
..Default::default()
})
} else {
rules.choose_mut(rng).unwrap().conditions.push(condition);
}
}
}
let promising_conditions: Vec<_> = promising_strategies
.iter()
.flat_map(|s| {
s.rules
.iter()
.flat_map(|rule| &rule.conditions)
.filter(|c| {
!matches!(
c.kind,
ConditionKind::PlayCardId(_) | ConditionKind::UsePotionId(_)
)
})
.cloned()
})
.collect();
let mut result = self.clone();
tweak_rules(&mut result.rules, state, rng, &promising_conditions);
result
}
}
| {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
} | conditional_block |
condition_strategy_generators.rs | use crate::ai_utils::playout_result;
use crate::competing_optimizers::StrategyOptimizer;
use crate::condition_strategy::{
Condition, ConditionKind, ConditionStrategy, EvaluatedPriorities, EvaluationData, Rule,
};
use crate::representative_sampling::NewFractalRepresentativeSeedSearch;
use crate::seed_system::{Seed, SingleSeed, SingleSeedGenerator};
use crate::seeds_concrete::CombatChoiceLineagesKind;
use crate::simulation::{Runner, StandardRunner};
use crate::simulation_state::CombatState;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use rand_distr::StandardNormal;
use serde::{Deserialize, Serialize};
use smallvec::alloc::fmt::Formatter;
use std::fmt;
use std::fmt::Display;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
pub type SeedSearch = NewFractalRepresentativeSeedSearch<
ConditionStrategy,
SingleSeed<CombatChoiceLineagesKind>,
SingleSeedGenerator,
>;
pub struct StrategyGeneratorsWithSharedRepresenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
}
}
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct HillClimbSeedInfo<'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
}
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
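// Both counters end up in the strategy's annotation below: accepted
// improvements vs. candidates that only passed the single-seed screen.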
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
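// Accept the candidate only if its summed score across all verification seeds
// beats the incumbent's.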
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", strategy.strategy.annotation);
}
result
}
}
impl ConditionStrategy {
pub fn bunch_of_random_changes(
&self,
state: &CombatState,
rng: &mut impl Rng,
promising_strategies: &[&ConditionStrategy],
) -> ConditionStrategy {
fn tweak_rules(
rules: &mut Vec<Rule>,
state: &CombatState,
rng: &mut impl Rng,
promising_conditions: &[Condition],
) {
let remove_chance = 0.05f64.min(2.0 / rules.len() as f64);
rules.retain(|_| rng.gen::<f64>() > remove_chance);
for rule in rules.iter_mut() {
if rng.gen() {
if rule.flat_reward != 0.0 {
rule.flat_reward += rng.sample::<f64, _>(StandardNormal) * 0.2;
}
if rule.block_per_energy_reward != 0.0 {
rule.block_per_energy_reward += rng.sample::<f64, _>(StandardNormal) * 0.02;
}
for value in &mut rule.unblocked_damage_per_energy_rewards {
if *value != 0.0 {
*value += rng.sample::<f64, _>(StandardNormal) * 0.01;
}
}
}
}
for _ in 0..rng.gen_range(10..30) {
let condition;
if rng.gen() || promising_conditions.is_empty() {
condition = Condition::random_generally_relevant_state_distinguisher(state, rng);
} else {
condition = promising_conditions.choose(rng).unwrap().clone();
}
if rng.gen() || rules.is_empty() {
rules.push(Rule {
conditions: vec![
Condition::random_generally_relevant_choice_distinguisher(state, rng),
condition,
],
flat_reward: rng.sample(StandardNormal),
..Default::default()
})
} else {
rules.choose_mut(rng).unwrap().conditions.push(condition);
}
}
}
let promising_conditions: Vec<_> = promising_strategies
.iter()
.flat_map(|s| {
s.rules
.iter()
.flat_map(|rule| &rule.conditions)
.filter(|c| {
!matches!(
c.kind,
ConditionKind::PlayCardId(_) | ConditionKind::UsePotionId(_)
)
})
.cloned()
}) | tweak_rules(&mut result.rules, state, rng, &promising_conditions);
result
}
} | .collect();
let mut result = self.clone(); | random_line_split |
condition_strategy_generators.rs | use crate::ai_utils::playout_result;
use crate::competing_optimizers::StrategyOptimizer;
use crate::condition_strategy::{
Condition, ConditionKind, ConditionStrategy, EvaluatedPriorities, EvaluationData, Rule,
};
use crate::representative_sampling::NewFractalRepresentativeSeedSearch;
use crate::seed_system::{Seed, SingleSeed, SingleSeedGenerator};
use crate::seeds_concrete::CombatChoiceLineagesKind;
use crate::simulation::{Runner, StandardRunner};
use crate::simulation_state::CombatState;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use rand_distr::StandardNormal;
use serde::{Deserialize, Serialize};
use smallvec::alloc::fmt::Formatter;
use std::fmt;
use std::fmt::Display;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
pub type SeedSearch = NewFractalRepresentativeSeedSearch<
ConditionStrategy,
SingleSeed<CombatChoiceLineagesKind>,
SingleSeedGenerator,
>;
pub struct StrategyGeneratorsWithSharedRepresenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds | }
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct HillClimbSeedInfo<'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
}
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", strategy.strategy.annotation);
}
result
}
}
impl ConditionStrategy {
pub fn bunch_of_random_changes(
&self,
state: &CombatState,
rng: &mut impl Rng,
promising_strategies: &[&ConditionStrategy],
) -> ConditionStrategy {
fn tweak_rules(
rules: &mut Vec<Rule>,
state: &CombatState,
rng: &mut impl Rng,
promising_conditions: &[Condition],
) {
let remove_chance = 0.05f64.min(2.0 / rules.len() as f64);
rules.retain(|_| rng.gen::<f64>() > remove_chance);
for rule in rules.iter_mut() {
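// With probability 1/2 per rule, jitter every nonzero reward with small
// Gaussian noise.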
if rng.gen() {
if rule.flat_reward != 0.0 {
rule.flat_reward += rng.sample::<f64, _>(StandardNormal) * 0.2;
}
if rule.block_per_energy_reward != 0.0 {
rule.block_per_energy_reward += rng.sample::<f64, _>(StandardNormal) * 0.02;
}
for value in &mut rule.unblocked_damage_per_energy_rewards {
if *value != 0.0 {
*value += rng.sample::<f64, _>(StandardNormal) * 0.01;
}
}
}
}
for _ in 0..rng.gen_range(10..30) {
let condition;
if rng.gen() || promising_conditions.is_empty() {
condition = Condition::random_generally_relevant_state_distinguisher(state, rng);
} else {
condition = promising_conditions.choose(rng).unwrap().clone();
}
if rng.gen() || rules.is_empty() {
rules.push(Rule {
conditions: vec![
Condition::random_generally_relevant_choice_distinguisher(state, rng),
condition,
],
flat_reward: rng.sample(StandardNormal),
..Default::default()
})
} else {
rules.choose_mut(rng).unwrap().conditions.push(condition);
}
}
}
let promising_conditions: Vec<_> = promising_strategies
.iter()
.flat_map(|s| {
s.rules
.iter()
.flat_map(|rule| &rule.conditions)
.filter(|c| {
!matches!(
c.kind,
ConditionKind::PlayCardId(_) | ConditionKind::UsePotionId(_)
)
})
.cloned()
})
.collect();
let mut result = self.clone();
tweak_rules(&mut result.rules, state, rng, &promising_conditions);
result
}
}
| {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
} | identifier_body |
condition_strategy_generators.rs | use crate::ai_utils::playout_result;
use crate::competing_optimizers::StrategyOptimizer;
use crate::condition_strategy::{
Condition, ConditionKind, ConditionStrategy, EvaluatedPriorities, EvaluationData, Rule,
};
use crate::representative_sampling::NewFractalRepresentativeSeedSearch;
use crate::seed_system::{Seed, SingleSeed, SingleSeedGenerator};
use crate::seeds_concrete::CombatChoiceLineagesKind;
use crate::simulation::{Runner, StandardRunner};
use crate::simulation_state::CombatState;
use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use rand_distr::StandardNormal;
use serde::{Deserialize, Serialize};
use smallvec::alloc::fmt::Formatter;
use std::fmt;
use std::fmt::Display;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
pub type SeedSearch = NewFractalRepresentativeSeedSearch<
ConditionStrategy,
SingleSeed<CombatChoiceLineagesKind>,
SingleSeedGenerator,
>;
pub struct StrategyGeneratorsWithSharedRepresenativeSeeds {
pub seed_search: SeedSearch,
pub generators: Vec<SharingGenerator>,
}
pub struct SharingGenerator {
pub time_used: Duration,
pub generator: GeneratorKind,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum GeneratorKind {
HillClimb {
steps: usize,
num_verification_seeds: usize,
start: HillClimbStart,
kind: HillClimbKind,
},
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbStart {
NewRandom,
FromSeedSearch,
}
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
pub enum HillClimbKind {
BunchOfRandomChanges,
BunchOfRandomChangesInspired,
OneRelevantRule,
}
impl StrategyGeneratorsWithSharedRepresenativeSeeds {
pub fn new(
starting_state: CombatState,
rng: &mut impl Rng,
) -> StrategyGeneratorsWithSharedRepresenativeSeeds {
let mut generators = Vec::new();
for steps in (0..=8).map(|i| 1 << i) {
for num_verification_seeds in (0..=5).map(|i| 1 << i) {
for &start in &[HillClimbStart::NewRandom, HillClimbStart::FromSeedSearch] {
for &kind in &[
HillClimbKind::BunchOfRandomChanges,
HillClimbKind::BunchOfRandomChangesInspired,
HillClimbKind::OneRelevantRule,
] {
generators.push(SharingGenerator {
time_used: Duration::from_secs(0),
generator: GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
},
});
}
}
}
}
StrategyGeneratorsWithSharedRepresenativeSeeds {
seed_search: NewFractalRepresentativeSeedSearch::new(
starting_state,
SingleSeedGenerator::new(ChaCha8Rng::from_rng(rng).unwrap()),
Default::default(),
),
generators,
}
}
pub fn step(&mut self, rng: &mut impl Rng) {
let generator = self
.generators
.iter_mut()
.min_by_key(|g| g.time_used)
.unwrap();
let start = Instant::now();
let strategy = generator.generator.gen_strategy(&self.seed_search, rng);
let duration = start.elapsed();
generator.time_used += duration;
self.seed_search.consider_strategy(
Arc::new(strategy),
generator.generator.min_playouts_before_culling(),
rng,
);
}
}
pub struct | <'a> {
pub seed: &'a SingleSeed<CombatChoiceLineagesKind>,
pub current_score: f64,
}
impl GeneratorKind {
pub fn min_playouts_before_culling(&self) -> usize {
match self {
&GeneratorKind::HillClimb { steps, .. } => steps.min(32),
}
}
pub fn gen_strategy(&self, seed_search: &SeedSearch, rng: &mut impl Rng) -> ConditionStrategy {
match self {
&GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start,
kind,
} => {
let mut current = match start {
HillClimbStart::NewRandom => {
ConditionStrategy::fresh_distinctive_candidate(&seed_search.starting_state, rng)
}
HillClimbStart::FromSeedSearch => seed_search
.strategies
.choose(rng)
.unwrap()
.strategy
.deref()
.clone(),
};
let mut verification_seeds: Vec<_> = seed_search
.seeds
.iter()
.take(num_verification_seeds)
.collect();
// hack - the seed search may not have generated this many (or any) seeds yet
let extra_seeds;
if verification_seeds.len() < num_verification_seeds {
extra_seeds = (verification_seeds.len()..num_verification_seeds)
.map(|_| SingleSeed::new(rng))
.collect::<Vec<_>>();
verification_seeds.extend(extra_seeds.iter());
}
let mut verification_seeds: Vec<_> = verification_seeds
.into_iter()
.map(|s| HillClimbSeedInfo {
seed: s,
current_score: playout_result(&seed_search.starting_state, s.view(), ¤t).score,
})
.collect();
let mut improvements = 0;
let mut improvements_on_first = 0;
for _ in 0..steps {
verification_seeds.shuffle(rng);
let (first, rest) = verification_seeds.split_first().unwrap();
let new = kind.hill_climb_candidate(seed_search, ¤t, &verification_seeds, rng);
let first_score =
playout_result(&seed_search.starting_state, first.seed.view(), &new).score;
if first_score <= verification_seeds[0].current_score {
continue;
}
improvements_on_first += 1;
let new_scores: Vec<_> = std::iter::once(first_score)
.chain(
rest
.iter()
.map(|s| playout_result(&seed_search.starting_state, s.seed.view(), &new).score),
)
.collect();
if new_scores.iter().sum::<f64>()
> verification_seeds
.iter()
.map(|s| s.current_score)
.sum::<f64>()
{
current = new;
for (info, new_score) in verification_seeds.iter_mut().zip(new_scores) {
info.current_score = new_score;
}
improvements += 1;
}
}
current.annotation = format!(
"{} + {}/{}/{}",
current.annotation, improvements, improvements_on_first, self
);
current
}
}
}
}
impl HillClimbKind {
fn hill_climb_candidate(
&self,
seed_search: &SeedSearch,
current: &ConditionStrategy,
verification_seeds: &[HillClimbSeedInfo],
rng: &mut impl Rng,
) -> ConditionStrategy {
let (first, _rest) = verification_seeds.split_first().unwrap();
match self {
HillClimbKind::BunchOfRandomChanges => {
current.bunch_of_random_changes(&seed_search.starting_state, rng, &[])
}
HillClimbKind::BunchOfRandomChangesInspired => current.bunch_of_random_changes(
&seed_search.starting_state,
rng,
&seed_search
.strategies
.iter()
.map(|s| &*s.strategy)
.collect::<Vec<_>>(),
),
HillClimbKind::OneRelevantRule => {
let mut state = seed_search.starting_state.clone();
let mut runner = StandardRunner::new(&mut state, first.seed.view());
let mut candidate_rules = Vec::new();
while !runner.state().combat_over() {
let state = runner.state();
let data = EvaluationData::new(state);
let priorities = EvaluatedPriorities::evaluated(¤t.rules, state, &data);
let best_index = priorities.best_index();
for _ in 0..50 {
let condition = Condition::random_generally_relevant_choice_distinguisher(state, rng);
let mut rule = Rule {
conditions: vec![condition],
flat_reward: rng.sample(StandardNormal),
..Default::default()
};
if priorities.best_index_with_extra_rule(&rule, state, &data) != best_index {
for _ in 0..rng.gen_range(0..=2) {
for _ in 0..50 {
let condition =
Condition::random_generally_relevant_state_distinguisher(state, rng);
if condition.evaluate(state, &data.contexts().next().unwrap()) {
rule.conditions.push(condition);
break;
}
}
}
candidate_rules.push(rule);
break;
}
}
let choice = &data.choices[best_index].choice;
runner.apply_choice(&choice);
}
let mut new = current.clone();
if let Some(new_rule) = candidate_rules.choose(rng) {
new.rules.push(new_rule.clone())
}
new
}
}
}
}
impl Display for GeneratorKind {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GeneratorKind::HillClimb {
steps,
num_verification_seeds,
start: _,
kind,
} => {
write!(f, "{}x{:?}@{}", steps, kind, num_verification_seeds)
}
}
}
}
impl StrategyOptimizer for StrategyGeneratorsWithSharedRepresenativeSeeds {
type Strategy = ConditionStrategy;
fn step(&mut self, _state: &CombatState, rng: &mut ChaCha8Rng) {
self.step(rng);
}
fn report(&self) -> Arc<Self::Strategy> {
let result = self.seed_search.best_strategy();
self.seed_search.report();
println!("StrategyGeneratorsWithSharedRepresenativeSeeds top strategies:");
for strategy in &self.seed_search.strategies {
println!("{}", strategy.strategy.annotation);
}
result
}
}
impl ConditionStrategy {
pub fn bunch_of_random_changes(
&self,
state: &CombatState,
rng: &mut impl Rng,
promising_strategies: &[&ConditionStrategy],
) -> ConditionStrategy {
fn tweak_rules(
rules: &mut Vec<Rule>,
state: &CombatState,
rng: &mut impl Rng,
promising_conditions: &[Condition],
) {
let remove_chance = 0.05f64.min(2.0 / rules.len() as f64);
rules.retain(|_| rng.gen::<f64>() > remove_chance);
for rule in rules.iter_mut() {
if rng.gen() {
if rule.flat_reward != 0.0 {
rule.flat_reward += rng.sample::<f64, _>(StandardNormal) * 0.2;
}
if rule.block_per_energy_reward != 0.0 {
rule.block_per_energy_reward += rng.sample::<f64, _>(StandardNormal) * 0.02;
}
for value in &mut rule.unblocked_damage_per_energy_rewards {
if *value != 0.0 {
*value += rng.sample::<f64, _>(StandardNormal) * 0.01;
}
}
}
}
for _ in 0..rng.gen_range(10..30) {
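// Add 10..30 conditions: each either seeds a brand-new rule (paired with a
// fresh choice distinguisher) or strengthens a randomly chosen existing rule.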
let condition;
if rng.gen() || promising_conditions.is_empty() {
condition = Condition::random_generally_relevant_state_distinguisher(state, rng);
} else {
condition = promising_conditions.choose(rng).unwrap().clone();
}
if rng.gen() || rules.is_empty() {
rules.push(Rule {
conditions: vec![
Condition::random_generally_relevant_choice_distinguisher(state, rng),
condition,
],
flat_reward: rng.sample(StandardNormal),
..Default::default()
})
} else {
rules.choose_mut(rng).unwrap().conditions.push(condition);
}
}
}
let promising_conditions: Vec<_> = promising_strategies
.iter()
.flat_map(|s| {
s.rules
.iter()
.flat_map(|rule| &rule.conditions)
.filter(|c| {
!matches!(
c.kind,
ConditionKind::PlayCardId(_) | ConditionKind::UsePotionId(_)
)
})
.cloned()
})
.collect();
let mut result = self.clone();
tweak_rules(&mut result.rules, state, rng, &promising_conditions);
result
}
}
| HillClimbSeedInfo | identifier_name |
runner.rs | use core::fmt::{Display, Formatter, Result as FmtResult};
use std::error::Error as StdError;
use std::sync::mpsc::{sync_channel, SyncSender, TryRecvError, TrySendError};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use std::{cell::RefCell, sync::mpsc::Receiver};
use skulpin_renderer::{ash, LogicalSize, RendererBuilder};
use ash::vk::Result as VkResult;
use crate::skia::{Color, Matrix, Picture, PictureRecorder, Point, Rect, Size};
use super::input::{EventHandleResult, InputState};
use super::time::TimeState;
use super::Game;
use super::{default_font_set::DefaultFontSet, FontSet};
use sdl2::event::Event as Sdl2Event;
use skulpin_renderer_sdl2::{sdl2, Sdl2Window};
enum Event {
Sdl2Event(Sdl2Event),
Crash(Error),
}
enum FeedbackEvent {
Exit,
}
#[derive(Debug)]
pub enum Error {
RendererError(VkResult),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match self {
Error::RendererError(e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::RendererError(e) => Some(e),
}
}
}
impl From<VkResult> for Error {
fn from(result: VkResult) -> Self {
Error::RendererError(result)
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ID(u64);
impl ID {
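/// Allocates the next unique ID from the game state's counter.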
pub fn next() -> Self {
Self(State::with_mut(|x| {
let id = x.id_keeper;
x.id_keeper += 1;
id
}))
}
}
pub struct State {
pub input_state: InputState,
pub time_state: TimeState,
pub time_state_draw: TimeState,
pub font_set: Box<dyn FontSet>,
id_keeper: u64,
}
impl State {
const PANIC_MESSAGE: &'static str = "Attempt to get game state while game is uninitialised";
thread_local!(pub static STATE: RefCell<Option<State>> = RefCell::new(None));
#[inline]
pub fn with<F, R>(f: F) -> R
where | #[inline]
pub fn with_mut<F, R>(f: F) -> R
where
F: FnOnce(&mut State) -> R,
{
Self::STATE.with(|x| f(x.borrow_mut().as_mut().expect(Self::PANIC_MESSAGE)))
}
pub fn last_update_time() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.last_update_time()
})
}
pub fn elapsed() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.elapsed()
})
}
pub fn last_update_time_draw() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state_draw
.last_update_time()
})
}
pub fn mouse_position() -> Point {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.input_state
.mouse_position
})
}
}
pub struct Runner;
impl Runner {
pub const PIC_QUEUE_LENGTH: usize = 1;
pub const EVENT_QUEUE_SIZE: usize = 8;
pub const FEEDBACK_QUEUE_SIZE: usize = 1;
pub const BACKGROUND: Color = Color::from_argb(255, 10, 10, 10);
pub fn run<F, T>(
game: F,
inner_size: LogicalSize,
window_title: &str,
renderer_builder: RendererBuilder,
) where
F: 'static + Send + FnOnce() -> T,
T: Game,
{
let sdl_context = sdl2::init().expect("Failed to initialize SDL2");
let video_subsystem = sdl_context
.video()
.expect("Failed to create SDL2 video subsystem");
let sdl_window = video_subsystem
.window(window_title, inner_size.width, inner_size.height)
.resizable()
.build()
.expect("Failed to create game window");
let window = Sdl2Window::new(&sdl_window);
sdl_context.mouse().show_cursor(false);
let (pic_tx, pic_rx) = sync_channel(Self::PIC_QUEUE_LENGTH);
let (event_tx, event_rx) = sync_channel(Self::EVENT_QUEUE_SIZE);
let (feedback_tx, feedback_rx) = sync_channel(Self::FEEDBACK_QUEUE_SIZE);
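// Two-thread split: the spawned game thread consumes SDL events and produces
// finished Pictures; this thread pumps SDL, renders the newest picture, and
// listens on the feedback channel for exit requests.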
spawn(move || {
gstreamer::init().expect("Failed to initialize GStreamer");
let input_state = InputState::new(inner_size);
let time_state = TimeState::new();
let time_state_draw = TimeState::new();
State::STATE.with(|x| {
*x.borrow_mut() = Some(State {
input_state,
time_state,
time_state_draw,
font_set: Box::new(DefaultFontSet::new()),
id_keeper: 0,
});
});
let mut game = game();
game.set_size(
State::STATE.with(|x| x.borrow().as_ref().unwrap().input_state.window_size),
);
Self::game_thread(game, event_rx, pic_tx, feedback_tx);
});
let mut renderer = renderer_builder
.build(&window)
.expect("Failed to create renderer");
let mut event_pump = sdl_context
.event_pump()
.expect("Failed to create SDL2 event pump");
'events: loop {
match feedback_rx.try_recv() {
Ok(event) => match event {
FeedbackEvent::Exit => {
break 'events;
}
},
Err(e) => match e {
TryRecvError::Empty => {
for event in event_pump.poll_iter() {
if event_tx.send(Event::Sdl2Event(event)).is_err() {
break 'events;
}
}
match pic_rx.try_recv() {
Ok(pic) => {
if let Err(e) = renderer.draw(&window, |canvas, _| {
canvas.clear(Self::BACKGROUND);
canvas.draw_picture(pic, Some(&Matrix::default()), None);
}) {
let _ = event_tx.send(Event::Crash(e.into()));
break 'events;
}
}
Err(e) => match e {
TryRecvError::Empty => sleep(Duration::MILLISECOND),
TryRecvError::Disconnected => break 'events,
},
}
}
TryRecvError::Disconnected => break 'events,
},
}
}
}
fn game_thread(
mut game: impl Game,
event_rx: Receiver<Event>,
pic_tx: SyncSender<Picture>,
feedback_tx: SyncSender<FeedbackEvent>,
) {
let target_update_time = Duration::MILLISECOND; // 1000 fps
let target_frame_time = Duration::MILLISECOND * 8; // 120 fps
let mut last_frame = Instant::now();
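// Spin updates at up to ~1000 Hz, but only record and ship a new picture once
// the 8 ms frame budget has elapsed. (`Duration::MILLISECOND` is a nightly-only
// constant at the time of writing.)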
loop {
game.update();
let mut is_redraw = false;
// TODO: is this loop the cause of bad VSync?
loop {
match event_rx.try_recv() {
Ok(event) => {
if Self::handle_event(&mut game, event, &feedback_tx) {
return;
}
}
Err(e) => match e {
TryRecvError::Empty => break,
TryRecvError::Disconnected => return,
},
}
}
let frame_time = last_frame.elapsed();
if frame_time > target_frame_time {
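// Carry the overshoot into the next frame so the average cadence stays on target.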
last_frame = Instant::now() - (frame_time - target_frame_time);
is_redraw = true;
let mut rec = PictureRecorder::new();
let bounds = Rect::from_size(State::with(|x| {
let w = x.input_state.window_size;
(w.width, w.height)
}));
let canvas = rec.begin_recording(bounds, None);
game.draw(canvas);
if let Err(why) = pic_tx.try_send(
rec.finish_recording_as_picture(None)
.expect("Failed to finish recording picture while rendering"),
) {
match why {
// Skip any unsent frames, just in case the renderer
// fails to catch up, and to prevent lockups.
TrySendError::Full(_) => {}
TrySendError::Disconnected(_) => {
panic!("Failed to send canvas to draw thread (disconnected channel)")
}
}
}
State::with_mut(|x| x.time_state_draw.update());
}
State::with_mut(|state| {
if !is_redraw {
let update_time = state.time_state.last_update().elapsed();
if target_update_time > update_time {
sleep(target_update_time - update_time);
}
}
state.time_state.update();
});
}
}
fn handle_event(
game: &mut impl Game,
event: Event,
feedback_tx: &SyncSender<FeedbackEvent>,
) -> bool {
match event {
Event::Sdl2Event(event) => {
if let Some(r) = State::with_mut(|x| x.input_state.handle_event(&event)) {
match r {
EventHandleResult::Input(event) => game.input(event),
EventHandleResult::Resized(size) => {
game.set_size(Size::new(size.width, size.height))
}
EventHandleResult::Exit => {
game.close();
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
}
}
Event::Crash(e) => {
game.crash(e);
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
false
}
} | F: FnOnce(&State) -> R,
{
Self::STATE.with(|x| f(x.borrow().as_ref().expect(Self::PANIC_MESSAGE)))
}
| random_line_split |
runner.rs | use core::fmt::{Display, Formatter, Result as FmtResult};
use std::error::Error as StdError;
use std::sync::mpsc::{sync_channel, SyncSender, TryRecvError, TrySendError};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use std::{cell::RefCell, sync::mpsc::Receiver};
use skulpin_renderer::{ash, LogicalSize, RendererBuilder};
use ash::vk::Result as VkResult;
use crate::skia::{Color, Matrix, Picture, PictureRecorder, Point, Rect, Size};
use super::input::{EventHandleResult, InputState};
use super::time::TimeState;
use super::Game;
use super::{default_font_set::DefaultFontSet, FontSet};
use sdl2::event::Event as Sdl2Event;
use skulpin_renderer_sdl2::{sdl2, Sdl2Window};
enum Event {
Sdl2Event(Sdl2Event),
Crash(Error),
}
enum FeedbackEvent {
Exit,
}
#[derive(Debug)]
pub enum Error {
RendererError(VkResult),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match self {
Error::RendererError(e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> |
}
impl From<VkResult> for Error {
fn from(result: VkResult) -> Self {
Error::RendererError(result)
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ID(u64);
impl ID {
pub fn next() -> Self {
Self(State::with_mut(|x| {
let id = x.id_keeper;
x.id_keeper += 1;
id
}))
}
}
pub struct State {
pub input_state: InputState,
pub time_state: TimeState,
pub time_state_draw: TimeState,
pub font_set: Box<dyn FontSet>,
id_keeper: u64,
}
impl State {
const PANIC_MESSAGE: &'static str = "Attempt to get game state while game is uninitialised";
thread_local!(pub static STATE: RefCell<Option<State>> = RefCell::new(None));
#[inline]
pub fn with<F, R>(f: F) -> R
where
F: FnOnce(&State) -> R,
{
Self::STATE.with(|x| f(x.borrow().as_ref().expect(Self::PANIC_MESSAGE)))
}
#[inline]
pub fn with_mut<F, R>(f: F) -> R
where
F: FnOnce(&mut State) -> R,
{
Self::STATE.with(|x| f(x.borrow_mut().as_mut().expect(Self::PANIC_MESSAGE)))
}
pub fn last_update_time() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.last_update_time()
})
}
pub fn elapsed() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.elapsed()
})
}
pub fn last_update_time_draw() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state_draw
.last_update_time()
})
}
pub fn mouse_position() -> Point {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.input_state
.mouse_position
})
}
}
pub struct Runner;
impl Runner {
pub const PIC_QUEUE_LENGTH: usize = 1;
pub const EVENT_QUEUE_SIZE: usize = 8;
pub const FEEDBACK_QUEUE_SIZE: usize = 1;
pub const BACKGROUND: Color = Color::from_argb(255, 10, 10, 10);
pub fn run<F, T>(
game: F,
inner_size: LogicalSize,
window_title: &str,
renderer_builder: RendererBuilder,
) where
F: 'static + Send + FnOnce() -> T,
T: Game,
{
let sdl_context = sdl2::init().expect("Failed to initialize SDL2");
let video_subsystem = sdl_context
.video()
.expect("Failed to create SDL2 video subsystem");
let sdl_window = video_subsystem
.window(window_title, inner_size.width, inner_size.height)
.resizable()
.build()
.expect("Failed to create game window");
let window = Sdl2Window::new(&sdl_window);
sdl_context.mouse().show_cursor(false);
let (pic_tx, pic_rx) = sync_channel(Self::PIC_QUEUE_LENGTH);
let (event_tx, event_rx) = sync_channel(Self::EVENT_QUEUE_SIZE);
let (feedback_tx, feedback_rx) = sync_channel(Self::FEEDBACK_QUEUE_SIZE);
spawn(move || {
gstreamer::init().expect("Failed to initialize GStreamer");
let input_state = InputState::new(inner_size);
let time_state = TimeState::new();
let time_state_draw = TimeState::new();
State::STATE.with(|x| {
*x.borrow_mut() = Some(State {
input_state,
time_state,
time_state_draw,
font_set: Box::new(DefaultFontSet::new()),
id_keeper: 0,
});
});
let mut game = game();
game.set_size(
State::STATE.with(|x| x.borrow().as_ref().unwrap().input_state.window_size),
);
Self::game_thread(game, event_rx, pic_tx, feedback_tx);
});
let mut renderer = renderer_builder
.build(&window)
.expect("Failed to create renderer");
let mut event_pump = sdl_context
.event_pump()
.expect("Failed to create SDL2 event pump");
'events: loop {
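// Render loop: drain feedback, forward SDL events to the game thread, then
// draw the newest picture (or sleep briefly if none is ready).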
match feedback_rx.try_recv() {
Ok(event) => match event {
FeedbackEvent::Exit => {
break 'events;
}
},
Err(e) => match e {
TryRecvError::Empty => {
for event in event_pump.poll_iter() {
if event_tx.send(Event::Sdl2Event(event)).is_err() {
break 'events;
}
}
match pic_rx.try_recv() {
Ok(pic) => {
if let Err(e) = renderer.draw(&window, |canvas, _| {
canvas.clear(Self::BACKGROUND);
canvas.draw_picture(pic, Some(&Matrix::default()), None);
}) {
let _ = event_tx.send(Event::Crash(e.into()));
break 'events;
}
}
Err(e) => match e {
TryRecvError::Empty => sleep(Duration::MILLISECOND),
TryRecvError::Disconnected => break 'events,
},
}
}
TryRecvError::Disconnected => break 'events,
},
}
}
}
fn game_thread(
mut game: impl Game,
event_rx: Receiver<Event>,
pic_tx: SyncSender<Picture>,
feedback_tx: SyncSender<FeedbackEvent>,
) {
let target_update_time = Duration::MILLISECOND; // 1000 fps
let target_frame_time = Duration::MILLISECOND * 8; // 120 fps
let mut last_frame = Instant::now();
loop {
game.update();
let mut is_redraw = false;
// TODO: is this loop the cause of bad VSync?
loop {
match event_rx.try_recv() {
Ok(event) => {
if Self::handle_event(&mut game, event, &feedback_tx) {
return;
}
}
Err(e) => match e {
TryRecvError::Empty => break,
TryRecvError::Disconnected => return,
},
}
}
let frame_time = last_frame.elapsed();
if frame_time > target_frame_time {
last_frame = Instant::now() - (frame_time - target_frame_time);
is_redraw = true;
let mut rec = PictureRecorder::new();
let bounds = Rect::from_size(State::with(|x| {
let w = x.input_state.window_size;
(w.width, w.height)
}));
let canvas = rec.begin_recording(bounds, None);
game.draw(canvas);
if let Err(why) = pic_tx.try_send(
rec.finish_recording_as_picture(None)
.expect("Failed to finish recording picture while rendering"),
) {
match why {
// Skip any unsent frames, just in case the renderer
// fails to catch up, and to prevent lockups.
TrySendError::Full(_) => {}
TrySendError::Disconnected(_) => {
panic!("Failed to send canvas to draw thread (disconnected channel)")
}
}
}
State::with_mut(|x| x.time_state_draw.update());
}
State::with_mut(|state| {
if !is_redraw {
let update_time = state.time_state.last_update().elapsed();
if target_update_time > update_time {
sleep(target_update_time - update_time);
}
}
state.time_state.update();
});
}
}
fn handle_event(
game: &mut impl Game,
event: Event,
feedback_tx: &SyncSender<FeedbackEvent>,
) -> bool {
match event {
Event::Sdl2Event(event) => {
if let Some(r) = State::with_mut(|x| x.input_state.handle_event(&event)) {
match r {
EventHandleResult::Input(event) => game.input(event),
EventHandleResult::Resized(size) => {
game.set_size(Size::new(size.width, size.height))
}
EventHandleResult::Exit => {
game.close();
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
}
}
Event::Crash(e) => {
game.crash(e);
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
false
}
}
| {
match self {
Error::RendererError(e) => Some(e),
}
} | identifier_body |
runner.rs | use core::fmt::{Display, Formatter, Result as FmtResult};
use std::error::Error as StdError;
use std::sync::mpsc::{sync_channel, SyncSender, TryRecvError, TrySendError};
use std::thread::{sleep, spawn};
use std::time::{Duration, Instant};
use std::{cell::RefCell, sync::mpsc::Receiver};
use skulpin_renderer::{ash, LogicalSize, RendererBuilder};
use ash::vk::Result as VkResult;
use crate::skia::{Color, Matrix, Picture, PictureRecorder, Point, Rect, Size};
use super::input::{EventHandleResult, InputState};
use super::time::TimeState;
use super::Game;
use super::{default_font_set::DefaultFontSet, FontSet};
use sdl2::event::Event as Sdl2Event;
use skulpin_renderer_sdl2::{sdl2, Sdl2Window};
enum Event {
Sdl2Event(Sdl2Event),
Crash(Error),
}
enum FeedbackEvent {
Exit,
}
#[derive(Debug)]
pub enum Error {
RendererError(VkResult),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match self {
Error::RendererError(e) => e.fmt(f),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::RendererError(e) => Some(e),
}
}
}
impl From<VkResult> for Error {
fn from(result: VkResult) -> Self {
Error::RendererError(result)
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ID(u64);
impl ID {
pub fn next() -> Self {
Self(State::with_mut(|x| {
let id = x.id_keeper;
x.id_keeper += 1;
id
}))
}
}
pub struct State {
pub input_state: InputState,
pub time_state: TimeState,
pub time_state_draw: TimeState,
pub font_set: Box<dyn FontSet>,
id_keeper: u64,
}
impl State {
const PANIC_MESSAGE: &'static str = "Attempt to get game state while game is uninitialised";
thread_local!(pub static STATE: RefCell<Option<State>> = RefCell::new(None));
#[inline]
pub fn with<F, R>(f: F) -> R
where
F: FnOnce(&State) -> R,
{
Self::STATE.with(|x| f(x.borrow().as_ref().expect(Self::PANIC_MESSAGE)))
}
#[inline]
pub fn with_mut<F, R>(f: F) -> R
where
F: FnOnce(&mut State) -> R,
{
Self::STATE.with(|x| f(x.borrow_mut().as_mut().expect(Self::PANIC_MESSAGE)))
}
pub fn last_update_time() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.last_update_time()
})
}
pub fn | () -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state
.elapsed()
})
}
pub fn last_update_time_draw() -> Duration {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.time_state_draw
.last_update_time()
})
}
pub fn mouse_position() -> Point {
Self::STATE.with(|x| {
x.borrow()
.as_ref()
.expect(Self::PANIC_MESSAGE)
.input_state
.mouse_position
})
}
}
pub struct Runner;
impl Runner {
pub const PIC_QUEUE_LENGTH: usize = 1;
pub const EVENT_QUEUE_SIZE: usize = 8;
pub const FEEDBACK_QUEUE_SIZE: usize = 1;
pub const BACKGROUND: Color = Color::from_argb(255, 10, 10, 10);
pub fn run<F, T>(
game: F,
inner_size: LogicalSize,
window_title: &str,
renderer_builder: RendererBuilder,
) where
F: 'static + Send + FnOnce() -> T,
T: Game,
{
let sdl_context = sdl2::init().expect("Failed to initialize SDL2");
let video_subsystem = sdl_context
.video()
.expect("Failed to create SDL2 video subsystem");
let sdl_window = video_subsystem
.window(window_title, inner_size.width, inner_size.height)
.resizable()
.build()
.expect("Failed to create game window");
let window = Sdl2Window::new(&sdl_window);
sdl_context.mouse().show_cursor(false);
let (pic_tx, pic_rx) = sync_channel(Self::PIC_QUEUE_LENGTH);
let (event_tx, event_rx) = sync_channel(Self::EVENT_QUEUE_SIZE);
let (feedback_tx, feedback_rx) = sync_channel(Self::FEEDBACK_QUEUE_SIZE);
spawn(move || {
gstreamer::init().expect("Failed to initialize GStreamer");
let input_state = InputState::new(inner_size);
let time_state = TimeState::new();
let time_state_draw = TimeState::new();
State::STATE.with(|x| {
*x.borrow_mut() = Some(State {
input_state,
time_state,
time_state_draw,
font_set: Box::new(DefaultFontSet::new()),
id_keeper: 0,
});
});
let mut game = game();
game.set_size(
State::STATE.with(|x| x.borrow().as_ref().unwrap().input_state.window_size),
);
Self::game_thread(game, event_rx, pic_tx, feedback_tx);
});
let mut renderer = renderer_builder
.build(&window)
.expect("Failed to create renderer");
let mut event_pump = sdl_context
.event_pump()
.expect("Failed to create SDL2 event pump");
'events: loop {
match feedback_rx.try_recv() {
Ok(event) => match event {
FeedbackEvent::Exit => {
break 'events;
}
},
Err(e) => match e {
TryRecvError::Empty => {
for event in event_pump.poll_iter() {
if event_tx.send(Event::Sdl2Event(event)).is_err() {
break 'events;
}
}
match pic_rx.try_recv() {
Ok(pic) => {
if let Err(e) = renderer.draw(&window, |canvas, _| {
canvas.clear(Self::BACKGROUND);
canvas.draw_picture(pic, Some(&Matrix::default()), None);
}) {
let _ = event_tx.send(Event::Crash(e.into()));
break 'events;
}
}
Err(e) => match e {
TryRecvError::Empty => sleep(Duration::MILLISECOND),
TryRecvError::Disconnected => break 'events,
},
}
}
TryRecvError::Disconnected => break 'events,
},
}
}
}
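// Architecture note (added for clarity): `run` keeps SDL event pumping and
// skulpin rendering on the main thread while `game_thread` owns the game
// logic and the thread-local State. Three sync_channels tie them together:
// events flow main -> game, finished Pictures flow game -> main, and
// FeedbackEvent::Exit tells the main thread to leave its 'events loop.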
fn game_thread(
mut game: impl Game,
event_rx: Receiver<Event>,
pic_tx: SyncSender<Picture>,
feedback_tx: SyncSender<FeedbackEvent>,
) {
let target_update_time = Duration::MILLISECOND; // 1000 fps
let target_frame_time = Duration::MILLISECOND * 8; // 120 fps
let mut last_frame = Instant::now();
loop {
game.update();
let mut is_redraw = false;
// TODO: is this loop the cause of bad VSync?
loop {
match event_rx.try_recv() {
Ok(event) => {
if Self::handle_event(&mut game, event, &feedback_tx) {
return;
}
}
Err(e) => match e {
TryRecvError::Empty => break,
TryRecvError::Disconnected => return,
},
}
}
let frame_time = last_frame.elapsed();
if frame_time > target_frame_time {
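// Carrying the overshoot: subtracting (frame_time - target_frame_time)
// from `now` credits the next frame with the time this one ran late,
// keeping the average cadence near target_frame_time despite jitter.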
last_frame = Instant::now() - (frame_time - target_frame_time);
is_redraw = true;
let mut rec = PictureRecorder::new();
let bounds = Rect::from_size(State::with(|x| {
let w = x.input_state.window_size;
(w.width, w.height)
}));
let canvas = rec.begin_recording(bounds, None);
game.draw(canvas);
if let Err(why) = pic_tx.try_send(
rec.finish_recording_as_picture(None)
.expect("Failed to finish recording picture while rendering"),
) {
match why {
// Skip any unsent frames when the renderer fails to
// keep up, to prevent lockups.
TrySendError::Full(_) => {}
TrySendError::Disconnected(_) => {
panic!("Failed to send canvas to draw thread (disconnected channel)")
}
}
}
State::with_mut(|x| x.time_state_draw.update());
}
State::with_mut(|state| {
if !is_redraw {
let update_time = state.time_state.last_update().elapsed();
if target_update_time > update_time {
sleep(target_update_time - update_time);
}
}
state.time_state.update();
});
}
}
fn handle_event(
game: &mut impl Game,
event: Event,
feedback_tx: &SyncSender<FeedbackEvent>,
) -> bool {
match event {
Event::Sdl2Event(event) => {
if let Some(r) = State::with_mut(|x| x.input_state.handle_event(&event)) {
match r {
EventHandleResult::Input(event) => game.input(event),
EventHandleResult::Resized(size) => {
game.set_size(Size::new(size.width, size.height))
}
EventHandleResult::Exit => {
game.close();
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
}
}
Event::Crash(e) => {
game.crash(e);
feedback_tx
.send(FeedbackEvent::Exit)
.expect("Failed to send feedback event to draw thread");
return true;
}
}
false
}
}
| elapsed | identifier_name |
humantoken.rs | $name
,)*
];
};
}
define_prefixes! {
quetta Q 30 1000000000000000000000000000000,
ronna R 27 1000000000000000000000000000,
yotta Y 24 1000000000000000000000000,
zetta Z 21 1000000000000000000000,
exa E 18 1000000000000000000,
peta P 15 1000000000000000,
tera T 12 1000000000000,
giga G 9 1000000000,
mega M 6 1000000,
kilo k 3 1000,
// Leave this out because
// - it simplifies our printing logic
// - these are not commonly used
// - it's more consistent with lotus
//
// hecto h 2 100,
// deca da 1 10,
// deci d -1 0.1,
// centi c -2 0.01,
milli m -3 0.001,
micro μ or u -6 0.000001,
nano n -9 0.000000001,
pico p -12 0.000000000001,
femto f -15 0.000000000000001,
atto a -18 0.000000000000000001,
zepto z -21 0.000000000000000000001,
yocto y -24 0.000000000000000000000001,
ronto r -27 0.000000000000000000000000001,
quecto q -30 0.000000000000000000000000000001,
}
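// For example (illustrative): `si::kilo.multiplier()` turns the "1000"
// literal above into a BigDecimal, and `si::atto.multiplier().inverse()`
// is the 10^18 factor `parse` below uses to convert whole FIL to attoFIL.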
#[test]
fn sorted() {
let is_sorted_biggest_first = SUPPORTED_PREFIXES
.windows(2)
.all(|pair| pair[0].multiplier() > pair[1].multiplier());
assert!(is_sorted_biggest_first)
}
}
mod parse {
// ENHANCE(aatifsyed): could accept pairs like "1 nano 1 atto"
use crate::shim::econ::TokenAmount;
use anyhow::{anyhow, bail};
use bigdecimal::{BigDecimal, ParseBigDecimalError};
use nom::{
bytes::complete::tag,
character::complete::multispace0,
combinator::{map_res, opt},
error::{FromExternalError, ParseError},
number::complete::recognize_float,
sequence::terminated,
IResult,
};
use super::si;
/// Parse token amounts as floats with SI prefixed-units.
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmount, parse};
/// fn assert_attos(input: &str, attos: u64) {
/// let expected = TokenAmount::from_atto(attos);
/// let actual = parse(input).unwrap();
/// assert_eq!(expected, actual);
/// }
/// assert_attos("1a", 1);
/// assert_attos("1aFIL", 1);
/// assert_attos("1 femtoFIL", 1000);
/// assert_attos("1.1 f", 1100);
/// assert_attos("1.0e3 attofil", 1000);
/// ```
///
/// # Known bugs
/// - `1efil` will not parse as an exa (`10^18`), because we'll try to
/// parse it as an exponent in the float. Instead use `1 efil`.
pub fn parse(input: &str) -> anyhow::Result<TokenAmount> {
let (mut big_decimal, scale) = parse_big_decimal_and_scale(input)?;
if let Some(scale) = scale {
big_decimal *= scale.multiplier();
}
let fil = big_decimal;
let attos = fil * si::atto.multiplier().inverse();
if !attos.is_integer() {
bail!("sub-atto amounts are not allowed");
}
let (attos, scale) = attos.with_scale(0).into_bigint_and_exponent();
assert_eq!(scale, 0, "we've just set the scale!");
Ok(TokenAmount::from_atto(attos))
}
fn nom2anyhow(e: nom::Err<nom::error::VerboseError<&str>>) -> anyhow::Error {
anyhow!("parse error: {e}")
}
fn parse_big_decimal_and_scale(
input: &str,
) -> anyhow::Result<(BigDecimal, Option<si::Prefix>)> {
// Strip `fil` or `FIL` at most once from the end
let input = match (input.strip_suffix("FIL"), input.strip_suffix("fil")) {
// remove whitespace before the units if there was any
(Some(stripped), _) => stripped.trim_end(),
(_, Some(stripped)) => stripped.trim_end(),
_ => input,
};
let (input, big_decimal) = permit_trailing_ws(bigdecimal)(input).map_err(nom2anyhow)?;
let (input, scale) = opt(permit_trailing_ws(si_scale))(input).map_err(nom2anyhow)?;
if !input.is_empty() {
bail!("Unexpected trailing input: {input}")
}
Ok((big_decimal, scale))
}
fn permit_trailing_ws<'a, F, O, E: ParseError<&'a str>>(
inner: F,
) -> impl FnMut(&'a str) -> IResult<&'a str, O, E>
where
F: FnMut(&'a str) -> IResult<&'a str, O, E>,
{
terminated(inner, multispace0)
}
/// Take an [si::Prefix] from the front of `input`
fn si_scale<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, si::Prefix, E> {
// Try the longest matches first, so we don't e.g. match `a` instead of `atto`,
// leaving `tto`.
let mut scales = si::SUPPORTED_PREFIXES
.iter()
.flat_map(|scale| {
std::iter::once(&scale.name)
.chain(scale.units)
.map(move |prefix| (*prefix, scale))
})
.collect::<Vec<_>>();
scales.sort_by_key(|(prefix, _)| std::cmp::Reverse(*prefix));
for (prefix, scale) in scales {
if let Ok((rem, _prefix)) = tag::<_, _, E>(prefix)(input) {
return Ok((rem, *scale));
}
}
Err(nom::Err::Error(E::from_error_kind(
input,
nom::error::ErrorKind::Alt,
)))
}
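// Worked example (illustrative): for the input "atto" the descending sort
// tries the four-letter candidate "atto" before the one-letter unit "a",
// so the whole prefix is consumed rather than matching "a" and leaving "tto".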
/// Take a float from the front of `input`
fn bigdecimal<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, BigDecimal, E>
where
E: FromExternalError<&'a str, ParseBigDecimalError>,
{
map_res(recognize_float, str::parse)(input)
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::{BigInt, One as _};
use super::*;
#[test]
fn cover_scales() {
for scale in si::SUPPORTED_PREFIXES {
let _did_not_panic = scale.multiplier();
}
}
#[test]
fn parse_bigdecimal() {
fn do_test(input: &str, expected: &str) {
let expected = BigDecimal::from_str(expected).unwrap();
let (rem, actual) = bigdecimal::<nom::error::VerboseError<_>>(input).unwrap();
assert_eq!(expected, actual);
assert!(rem.is_empty());
}
do_test("1", "1");
do_test("0.1", "0.1");
do_test(".1", ".1");
do_test("1e1", "10");
do_test("1.", "1");
}
fn test_dec_scale(
input: &str,
expected_amount: &str,
expected_scale: impl Into<Option<si::Prefix>>,
) {
let expected_amount = BigDecimal::from_str(expected_amount).unwrap();
let expected_scale = expected_scale.into();
let (actual_amount, actual_scale) = parse_big_decimal_and_scale(input).unwrap();
assert_eq!(expected_amount, actual_amount, "{input}");
assert_eq!(expected_scale, actual_scale, "{input}");
}
#[test]
fn basic_bigdecimal_and_scale() {
// plain
test_dec_scale("1", "1", None);
// include unit
test_dec_scale("1 FIL", "1", None);
test_dec_scale("1FIL", "1", None);
test_dec_scale("1 fil", "1", None);
test_dec_scale("1fil", "1", None);
let possible_units = ["", "fil", "FIL", " fil", " FIL"];
let possible_prefixes = ["atto", "a", " atto", " a"];
for unit in possible_units {
for prefix in possible_prefixes {
let input = format!("1{prefix}{unit}");
test_dec_scale(&input, "1", si::atto)
}
}
}
#[test]
fn parse_exa_and_exponent() {
test_dec_scale("1 E", "1", si::exa);
test_dec_scale("1e0E", "1", si::exa);
// ENHANCE(aatifsyed): this should be parsed as 1 exa, but that
// would probably require an entirely custom float parser with
// lookahead - users will have to include a space for now
// do_test("1E", "1", exa);
}
#[test]
fn more_than_96_bits() {
use std::iter::{once, repeat};
// The previous rust_decimal implementation had at most 96 bits of
// precision; we should be able to exceed that.
let test_str = once('1')
.chain(repeat('0').take(98))
.chain(['1'])
.collect::<String>();
test_dec_scale(&test_str, &test_str, None);
}
#[test]
fn disallow_too_small() {
parse("1 atto").unwrap();
assert_eq!(
parse("0.1 atto").unwrap_err().to_string(),
"sub-atto amounts are not allowed"
)
}
#[test]
fn some_values() {
let one_atto = TokenAmount::from_atto(BigInt::one());
let one_nano = TokenAmount::from_nano(BigInt::one());
assert_eq!(one_atto, parse("1 atto").unwrap());
assert_eq!(one_atto, parse("1000 zepto").unwrap());
assert_eq!(one_nano, parse("1 nano").unwrap());
}
#[test]
fn all_possible_prefixes() {
for scale in si::SUPPORTED_PREFIXES {
for prefix in scale.units.iter().chain([&scale.name]) {
// Need a space here because of the exa ambiguity
test_dec_scale(&format!("1 {prefix}"), "1", *scale);
}
}
}
}
}
mod print {
use std::fmt;
use crate::shim::econ::TokenAmount;
use bigdecimal::BigDecimal;
use num::{BigInt, Zero as _};
use super::si;
fn scale(n: BigDecimal) -> (BigDecimal, Option<si::Prefix>) {
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent > 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
if n.is_integer() {
return (n, None);
}
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent < 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
let smallest_prefix = si::SUPPORTED_PREFIXES.last().unwrap();
(n / smallest_prefix.multiplier(), Some(*smallest_prefix))
}
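// Illustrative behaviour, mirroring the tests below:
//   scale(1000000) -> (1, mega)      largest positive prefix giving an integer
//   scale(10001)   -> (10001, None)  already integral, so no prefix
//   scale(0.001)   -> (1, milli)     fractions fall through to negative prefixes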
pub struct Pr |
attos: BigInt,
}
impl From<&TokenAmount> for Pretty {
fn from(value: &TokenAmount) -> Self {
Self {
attos: value.atto().clone(),
}
}
}
pub trait TokenAmountPretty {
fn pretty(&self) -> Pretty;
}
impl TokenAmountPretty for TokenAmount {
/// Note the following format specifiers:
/// - `{:#}`: print number of FIL, not e.g `milliFIL`
/// - `{:.4}`: round to 4 significant figures
/// - `{:#.4}`: both
///
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmountPretty as _, TokenAmount};
///
/// let amount = TokenAmount::from_nano(1500);
///
/// // Defaults to precise, with SI prefix
/// assert_eq!("1500 nanoFIL", format!("{}", amount.pretty()));
///
/// // Rounded to 1 s.f
/// assert_eq!("~2 microFIL", format!("{:.1}", amount.pretty()));
///
/// // Show absolute FIL
/// assert_eq!("0.0000015 FIL", format!("{:#}", amount.pretty()));
///
/// // Rounded absolute FIL
/// assert_eq!("~0.000002 FIL", format!("{:#.1}", amount.pretty()));
///
/// // We only indicate lost precision when relevant
/// assert_eq!("1500 nanoFIL", format!("{:.2}", amount.pretty()));
/// ```
///
/// # Formatting
/// - We select the most diminutive SI prefix (or not!) that allows us
/// to display an integer amount.
// RUST(aatifsyed): this should be -> impl fmt::Display
//
// Users shouldn't be able to name `Pretty` anyway
fn pretty(&self) -> Pretty {
Pretty::from(self)
}
}
impl fmt::Display for Pretty {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let actual_fil = &self.attos * si::atto.multiplier();
// rounding
let fil_for_printing = match f.precision() {
None => actual_fil.normalized(),
Some(prec) => actual_fil
.with_prec(u64::try_from(prec).expect("requested precision is absurd"))
.normalized(),
};
let precision_was_lost = fil_for_printing != actual_fil;
if precision_was_lost {
f.write_str("~")?;
}
// units or whole
let (print_me, prefix) = match f.alternate() {
true => (fil_for_printing, None),
false => scale(fil_for_printing),
};
// write the string
match print_me.is_zero() {
true => f.write_str("0 FIL"),
false => match prefix {
Some(prefix) => f.write_fmt(format_args!("{print_me} {}FIL", prefix.name)),
None => f.write_fmt(format_args!("{print_me} FIL")),
},
}
}
}
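// Worked example (illustrative): 1234 attoFIL printed with "{:.2}" becomes
// ~0.0000000000000012 FIL after with_prec(2); precision_was_lost is true,
// and scale() settles on the atto prefix, so the output is "~1200 attoFIL"
// (cf. the display tests elsewhere in this file).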
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::One as _;
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn prefixes_represent_themselves() {
for prefix in si::SUPPORTED_PREFIXES {
let input = BigDecimal::from_str(prefix.multiplier).unwrap();
assert_eq!((BigDecimal::one(), Some(*prefix)), scale(input));
}
}
#[test]
fn very_large() {
let mut one_thousand_quettas = String::from(si::quetta.multiplier);
one_thousand_quettas.push_str("000");
test_scale(&one_thousand_quettas, "1000", si::quetta);
}
#[test]
fn very_small() {
let mut one_thousanth_of_a_quecto = String::from(si::quecto.multiplier);
one_thousanth_of_a_quecto.pop();
one_thousanth_of_a_quecto.push_str("0001");
test_scale(&one_thousanth_of_a_quecto, "0.001", si::quecto);
}
fn test_scale(
input: &str,
expected_value: &str,
expected_prefix: impl Into<Option<si::Prefix>>,
) {
let input = BigDecimal::from_str(input).unwrap();
let expected_value = BigDecimal::from_str(expected_value).unwrap();
let expected_prefix = expected_prefix.into();
assert_eq!((expected_value, expected_prefix), scale(input))
}
#[test]
fn simple() {
test_scale("1000000", "1", si::mega);
test_scale("100000", "100", si::kilo);
test_scale("10000", "10", si::kilo);
test_scale("1000", "1", si::kilo);
test_scale("100", "100", None);
test_scale("10", "10", None);
test_scale("1", "1", None);
test_scale("0.1", "100", si::milli);
test_scale("0.01", "10", si::milli);
test_scale("0.001", "1", si::milli);
test_scale("0.0001", "100", si::micro);
}
#[test]
fn trailing_one() {
test_scale("10001000", "10001", si::kilo);
test_scale("10001", "10001", None);
test_scale("1000.1", "1000100", si::milli);
}
fn attos(input: &str) -> TokenAmount {
TokenAmount::from_atto(BigInt::from_str(input).unwrap())
}
fn fils(input: &str) -> TokenAmount {
TokenAmount::from_whole(BigInt::from_str(input).unwrap())
} | etty { | identifier_name |
humantoken.rs | ).map_err(nom2anyhow)?;
let (input, scale) = opt(permit_trailing_ws(si_scale))(input).map_err(nom2anyhow)?;
if !input.is_empty() {
bail!("Unexpected trailing input: {input}")
}
Ok((big_decimal, scale))
}
fn permit_trailing_ws<'a, F, O, E: ParseError<&'a str>>(
inner: F,
) -> impl FnMut(&'a str) -> IResult<&'a str, O, E>
where
F: FnMut(&'a str) -> IResult<&'a str, O, E>,
{
terminated(inner, multispace0)
}
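// For example (illustrative): permit_trailing_ws(bigdecimal) applied to
// "1.5 kilo" consumes "1.5" plus the space and leaves "kilo" for the
// optional si_scale parser that runs next.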
/// Take an [si::Prefix] from the front of `input`
fn si_scale<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, si::Prefix, E> {
// Try the longest matches first, so we don't e.g. match `a` instead of `atto`,
// leaving `tto`.
let mut scales = si::SUPPORTED_PREFIXES
.iter()
.flat_map(|scale| {
std::iter::once(&scale.name)
.chain(scale.units)
.map(move |prefix| (*prefix, scale))
})
.collect::<Vec<_>>();
scales.sort_by_key(|(prefix, _)| std::cmp::Reverse(*prefix));
for (prefix, scale) in scales {
if let Ok((rem, _prefix)) = tag::<_, _, E>(prefix)(input) {
return Ok((rem, *scale));
}
}
Err(nom::Err::Error(E::from_error_kind(
input,
nom::error::ErrorKind::Alt,
)))
}
/// Take a float from the front of `input`
fn bigdecimal<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, BigDecimal, E>
where
E: FromExternalError<&'a str, ParseBigDecimalError>,
{
map_res(recognize_float, str::parse)(input)
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::{BigInt, One as _};
use super::*;
#[test]
fn cover_scales() {
for scale in si::SUPPORTED_PREFIXES {
let _did_not_panic = scale.multiplier();
}
}
#[test]
fn parse_bigdecimal() {
fn do_test(input: &str, expected: &str) {
let expected = BigDecimal::from_str(expected).unwrap();
let (rem, actual) = bigdecimal::<nom::error::VerboseError<_>>(input).unwrap();
assert_eq!(expected, actual);
assert!(rem.is_empty());
}
do_test("1", "1");
do_test("0.1", "0.1");
do_test(".1", ".1");
do_test("1e1", "10");
do_test("1.", "1");
}
fn test_dec_scale(
input: &str,
expected_amount: &str,
expected_scale: impl Into<Option<si::Prefix>>,
) {
let expected_amount = BigDecimal::from_str(expected_amount).unwrap();
let expected_scale = expected_scale.into();
let (actual_amount, actual_scale) = parse_big_decimal_and_scale(input).unwrap();
assert_eq!(expected_amount, actual_amount, "{input}");
assert_eq!(expected_scale, actual_scale, "{input}");
}
#[test]
fn basic_bigdecimal_and_scale() {
// plain
test_dec_scale("1", "1", None);
// include unit
test_dec_scale("1 FIL", "1", None);
test_dec_scale("1FIL", "1", None);
test_dec_scale("1 fil", "1", None);
test_dec_scale("1fil", "1", None);
let possible_units = ["", "fil", "FIL", " fil", " FIL"];
let possible_prefixes = ["atto", "a", " atto", " a"];
for unit in possible_units {
for prefix in possible_prefixes {
let input = format!("1{prefix}{unit}");
test_dec_scale(&input, "1", si::atto)
}
}
}
#[test]
fn parse_exa_and_exponent() {
test_dec_scale("1 E", "1", si::exa);
test_dec_scale("1e0E", "1", si::exa);
// ENHANCE(aatifsyed): this should be parsed as 1 exa, but that
// would probably require an entirely custom float parser with
// lookahead - users will have to include a space for now
// do_test("1E", "1", exa);
}
#[test]
fn more_than_96_bits() {
use std::iter::{once, repeat};
// The previous rust_decimal implementation had at most 96 bits of precision
// we should be able to exceed that
let test_str = once('1')
.chain(repeat('0').take(98))
.chain(['1'])
.collect::<String>();
test_dec_scale(&test_str, &test_str, None);
}
#[test]
fn disallow_too_small() {
parse("1 atto").unwrap();
assert_eq!(
parse("0.1 atto").unwrap_err().to_string(),
"sub-atto amounts are not allowed"
)
}
#[test]
fn some_values() {
let one_atto = TokenAmount::from_atto(BigInt::one());
let one_nano = TokenAmount::from_nano(BigInt::one());
assert_eq!(one_atto, parse("1 atto").unwrap());
assert_eq!(one_atto, parse("1000 zepto").unwrap());
assert_eq!(one_nano, parse("1 nano").unwrap());
}
#[test]
fn all_possible_prefixes() {
for scale in si::SUPPORTED_PREFIXES {
for prefix in scale.units.iter().chain([&scale.name]) {
// Need a space here because of the exa ambiguity
test_dec_scale(&format!("1 {prefix}"), "1", *scale);
}
}
}
}
}
mod print {
use std::fmt;
use crate::shim::econ::TokenAmount;
use bigdecimal::BigDecimal;
use num::{BigInt, Zero as _};
use super::si;
fn scale(n: BigDecimal) -> (BigDecimal, Option<si::Prefix>) {
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent > 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
if n.is_integer() {
return (n, None);
}
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent < 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
let smallest_prefix = si::SUPPORTED_PREFIXES.last().unwrap();
(n / smallest_prefix.multiplier(), Some(*smallest_prefix))
}
pub struct Pretty {
attos: BigInt,
}
impl From<&TokenAmount> for Pretty {
fn from(value: &TokenAmount) -> Self {
Self {
attos: value.atto().clone(),
}
}
}
pub trait TokenAmountPretty {
fn pretty(&self) -> Pretty;
}
impl TokenAmountPretty for TokenAmount {
/// Note the following format specifiers:
/// - `{:#}`: print number of FIL, not e.g `milliFIL`
/// - `{:.4}`: round to 4 significant figures
/// - `{:#.4}`: both
///
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmountPretty as _, TokenAmount};
///
/// let amount = TokenAmount::from_nano(1500);
///
/// // Defaults to precise, with SI prefix
/// assert_eq!("1500 nanoFIL", format!("{}", amount.pretty()));
///
/// // Rounded to 1 s.f
/// assert_eq!("~2 microFIL", format!("{:.1}", amount.pretty()));
///
/// // Show absolute FIL
/// assert_eq!("0.0000015 FIL", format!("{:#}", amount.pretty()));
///
/// // Rounded absolute FIL
/// assert_eq!("~0.000002 FIL", format!("{:#.1}", amount.pretty()));
///
/// // We only indicate lost precision when relevant
/// assert_eq!("1500 nanoFIL", format!("{:.2}", amount.pretty()));
/// ```
///
/// # Formatting
/// - We select the most diminutive SI prefix (or not!) that allows us
/// to display an integer amount.
// RUST(aatifsyed): this should be -> impl fmt::Display
//
// Users shouldn't be able to name `Pretty` anyway
fn pretty(&self) -> Pretty {
Pretty::from(self)
}
}
impl fmt::Display for Pretty {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let actual_fil = &self.attos * si::atto.multiplier();
// rounding
let fil_for_printing = match f.precision() {
None => actual_fil.normalized(),
Some(prec) => actual_fil
.with_prec(u64::try_from(prec).expect("requested precision is absurd"))
.normalized(),
};
let precision_was_lost = fil_for_printing != actual_fil;
if precision_was_lost {
f.write_str("~")?;
}
// units or whole
let (print_me, prefix) = match f.alternate() {
true => (fil_for_printing, None),
false => scale(fil_for_printing),
};
// write the string
match print_me.is_zero() {
true => f.write_str("0 FIL"),
false => match prefix {
Some(prefix) => f.write_fmt(format_args!("{print_me} {}FIL", prefix.name)),
None => f.write_fmt(format_args!("{print_me} FIL")),
},
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::One as _;
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn prefixes_represent_themselves() {
for prefix in si::SUPPORTED_PREFIXES {
let input = BigDecimal::from_str(prefix.multiplier).unwrap();
assert_eq!((BigDecimal::one(), Some(*prefix)), scale(input));
}
}
#[test]
fn very_large() {
let mut one_thousand_quettas = String::from(si::quetta.multiplier);
one_thousand_quettas.push_str("000");
test_scale(&one_thousand_quettas, "1000", si::quetta);
}
#[test]
fn very_small() {
let mut one_thousanth_of_a_quecto = String::from(si::quecto.multiplier);
one_thousanth_of_a_quecto.pop();
one_thousanth_of_a_quecto.push_str("0001");
test_scale(&one_thousanth_of_a_quecto, "0.001", si::quecto);
}
fn test_scale(
input: &str,
expected_value: &str,
expected_prefix: impl Into<Option<si::Prefix>>,
) {
let input = BigDecimal::from_str(input).unwrap();
let expected_value = BigDecimal::from_str(expected_value).unwrap();
let expected_prefix = expected_prefix.into();
assert_eq!((expected_value, expected_prefix), scale(input))
}
#[test]
fn simple() {
test_scale("1000000", "1", si::mega);
test_scale("100000", "100", si::kilo);
test_scale("10000", "10", si::kilo);
test_scale("1000", "1", si::kilo);
test_scale("100", "100", None);
test_scale("10", "10", None);
test_scale("1", "1", None);
test_scale("0.1", "100", si::milli);
test_scale("0.01", "10", si::milli);
test_scale("0.001", "1", si::milli);
test_scale("0.0001", "100", si::micro);
}
#[test]
fn trailing_one() {
test_scale("10001000", "10001", si::kilo);
test_scale("10001", "10001", None);
test_scale("1000.1", "1000100", si::milli);
}
fn attos(input: &str) -> TokenAmount {
TokenAmount::from_atto(BigInt::from_str(input).unwrap())
}
fn fils(input: &str) -> TokenAmount {
TokenAmount::from_whole(BigInt::from_str(input).unwrap())
}
#[test]
fn test_display() {
assert_eq!("0 FIL", format!("{}", attos("0").pretty()));
// Absolute works
assert_eq!("1 attoFIL", format!("{}", attos("1").pretty()));
assert_eq!(
"0.000000000000000001 FIL",
format!("{:#}", attos("1").pretty())
);
// We select the right suffix
assert_eq!("1 femtoFIL", format!("{}", attos("1000").pretty()));
assert_eq!("1001 attoFIL", format!("{}", attos("1001").pretty()));
// If you ask for 0 precision, you get it
assert_eq!("~0 FIL", format!("{:.0}", attos("1001").pretty()));
// Rounding without a prefix
assert_eq!("~10 FIL", format!("{:.1}", fils("11").pretty()));
// Rounding with absolute
assert_eq!(
"~0.000000000000002 FIL",
format!("{:#.1}", attos("1940").pretty())
);
assert_eq!(
"~0.0000000000000019 FIL",
format!("{:#.2}", attos("1940").pretty())
);
assert_eq!(
"0.00000000000000194 FIL",
format!("{:#.3}", attos("1940").pretty())
);
// Small numbers with a gap then a trailing one are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.2}", attos("1001").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.3}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.4}", attos("1001").pretty()));
assert_eq!("1001 attoFIL", format!("{:.5}", attos("1001").pretty()));
// Small numbers with trailing numbers are rounded down
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1234").pretty()));
assert_eq!("~1200 attoFIL", format!("{:.2}", attos("1234").pretty()));
assert_eq!("~1230 attoFIL", format!("{:.3}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.4}", attos("1234").pretty()));
assert_eq!("1234 attoFIL", format!("{:.5}", attos("1234").pretty()));
// Small numbers are rounded appropriately
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1900").pretty()));
assert_eq!("~2 femtoFIL", format!("{:.1}", attos("1500").pretty()));
assert_eq!("~1 femtoFIL", format!("{:.1}", attos("1400").pretty()));
// Big numbers with a gap then a trailing one are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.2}", fils("1001").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.3}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.4}", fils("1001").pretty()));
assert_eq!("1001 FIL", format!("{:.5}", fils("1001").pretty()));
// Big numbers with trailing numbers are rounded down
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1234").pretty()));
assert_eq!("~1200 FIL", format!("{:.2}", fils("1234").pretty()));
assert_eq!("~1230 FIL", format!("{:.3}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.4}", fils("1234").pretty()));
assert_eq!("1234 FIL", format!("{:.5}", fils("1234").pretty()));
// Big numbers are rounded appropriately
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1900").pretty()));
assert_eq!("~2 kiloFIL", format!("{:.1}", fils("1500").pretty()));
assert_eq!("~1 kiloFIL", format!("{:.1}", fils("1400").pretty()));
}
}
}
#[cfg(test)]
mod fuzz {
use quickcheck::quickcheck;
use super::*;
quickcheck! {
fn roundtrip(expected: crate::shim::econ::TokenAmount) -> () {
// Default formatting
let actual = parse(&format!("{}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Absolute formatting
let actual = parse(&format!("{:#}", expected.pretty())).unwrap();
assert_eq!(expected, actual);
// Don't test rounded formatting...
}
}
| quickcheck! {
fn parser_no_panic(s: String) -> () {
let _ = parse(&s);
}
} | random_line_split |
|
humantoken.rs | $name
,)*
];
};
}
define_prefixes! {
quetta Q 30 1000000000000000000000000000000,
ronna R 27 1000000000000000000000000000,
yotta Y 24 1000000000000000000000000,
zetta Z 21 1000000000000000000000,
exa E 18 1000000000000000000,
peta P 15 1000000000000000,
tera T 12 1000000000000,
giga G 9 1000000000,
mega M 6 1000000,
kilo k 3 1000,
// Leave this out because
// - it simplifies our printing logic
// - these are not commonly used
// - it's more consistent with lotus
//
// hecto h 2 100,
// deca da 1 10,
// deci d -1 0.1,
// centi c -2 0.01,
milli m -3 0.001,
micro μ or u -6 0.000001,
nano n -9 0.000000001,
pico p -12 0.000000000001,
femto f -15 0.000000000000001,
atto a -18 0.000000000000000001,
zepto z -21 0.000000000000000000001,
yocto y -24 0.000000000000000000000001,
ronto r -27 0.000000000000000000000000001,
quecto q -30 0.000000000000000000000000000001,
}
#[test]
fn sorted() {
let is_sorted_biggest_first = SUPPORTED_PREFIXES
.windows(2)
.all(|pair| pair[0].multiplier() > pair[1].multiplier());
assert!(is_sorted_biggest_first)
}
}
mod parse {
// ENHANCE(aatifsyed): could accept pairs like "1 nano 1 atto"
use crate::shim::econ::TokenAmount;
use anyhow::{anyhow, bail};
use bigdecimal::{BigDecimal, ParseBigDecimalError};
use nom::{
bytes::complete::tag,
character::complete::multispace0,
combinator::{map_res, opt},
error::{FromExternalError, ParseError},
number::complete::recognize_float,
sequence::terminated,
IResult,
};
use super::si;
/// Parse token amounts as floats with SI prefixed-units.
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmount, parse};
/// fn assert_attos(input: &str, attos: u64) {
/// let expected = TokenAmount::from_atto(attos);
/// let actual = parse(input).unwrap();
/// assert_eq!(expected, actual);
/// }
/// assert_attos("1a", 1);
/// assert_attos("1aFIL", 1);
/// assert_attos("1 femtoFIL", 1000);
/// assert_attos("1.1 f", 1100);
/// assert_attos("1.0e3 attofil", 1000);
/// ```
///
/// # Known bugs
/// - `1efil` will not parse as an exa (`10^18`), because we'll try to
/// parse it as an exponent in the float. Instead use `1 efil`.
pub fn parse(input: &str) -> anyhow::Result<TokenAmount> {
let (mut big_decimal, scale) = parse_big_decimal_and_scale(input)?;
if let Some(scale) = scale {
big_decimal *= scale.multiplier();
}
let fil = big_decimal;
let attos = fil * si::atto.multiplier().inverse();
if !attos.is_integer() {
bail!("sub-atto amounts are not allowed");
}
let (attos, scale) = attos.with_scale(0).into_bigint_and_exponent();
assert_eq!(scale, 0, "we've just set the scale!");
Ok(TokenAmount::from_atto(attos))
}
fn nom2anyhow(e: nom::Err<nom::error::VerboseError<&str>>) -> anyhow::Error {
anyhow!("parse error: {e}")
}
fn parse_big_decimal_and_scale(
input: &str,
) -> anyhow::Result<(BigDecimal, Option<si::Prefix>)> {
// Strip `fil` or `FIL` at most once from the end
let input = match (input.strip_suffix("FIL"), input.strip_suffix("fil")) {
// remove whitespace before the units if there was any
(Some(stripped), _) => stripped.trim_end(),
(_, Some(stripped)) => stripped.trim_end(),
_ => input,
};
let (input, big_decimal) = permit_trailing_ws(bigdecimal)(input).map_err(nom2anyhow)?;
let (input, scale) = opt(permit_trailing_ws(si_scale))(input).map_err(nom2anyhow)?;
if !input.is_empty() {
bail!("Unexpected trailing input: {input}")
}
Ok((big_decimal, scale))
}
fn permit_trailing_ws<'a, F, O, E: ParseError<&'a str>>(
inner: F,
) -> impl FnMut(&'a str) -> IResult<&'a str, O, E>
where
F: FnMut(&'a str) -> IResult<&'a str, O, E>,
{
terminated(inner, multispace0)
}
/// Take an [si::Prefix] from the front of `input`
fn si_scale<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, si::Prefix, E> {
| input,
nom::error::ErrorKind::Alt,
)))
}
/// Take a float from the front of `input`
fn bigdecimal<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&str, BigDecimal, E>
where
E: FromExternalError<&'a str, ParseBigDecimalError>,
{
map_res(recognize_float, str::parse)(input)
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::{BigInt, One as _};
use super::*;
#[test]
fn cover_scales() {
for scale in si::SUPPORTED_PREFIXES {
let _did_not_panic = scale.multiplier();
}
}
#[test]
fn parse_bigdecimal() {
fn do_test(input: &str, expected: &str) {
let expected = BigDecimal::from_str(expected).unwrap();
let (rem, actual) = bigdecimal::<nom::error::VerboseError<_>>(input).unwrap();
assert_eq!(expected, actual);
assert!(rem.is_empty());
}
do_test("1", "1");
do_test("0.1", "0.1");
do_test(".1", ".1");
do_test("1e1", "10");
do_test("1.", "1");
}
fn test_dec_scale(
input: &str,
expected_amount: &str,
expected_scale: impl Into<Option<si::Prefix>>,
) {
let expected_amount = BigDecimal::from_str(expected_amount).unwrap();
let expected_scale = expected_scale.into();
let (actual_amount, actual_scale) = parse_big_decimal_and_scale(input).unwrap();
assert_eq!(expected_amount, actual_amount, "{input}");
assert_eq!(expected_scale, actual_scale, "{input}");
}
#[test]
fn basic_bigdecimal_and_scale() {
// plain
test_dec_scale("1", "1", None);
// include unit
test_dec_scale("1 FIL", "1", None);
test_dec_scale("1FIL", "1", None);
test_dec_scale("1 fil", "1", None);
test_dec_scale("1fil", "1", None);
let possible_units = ["", "fil", "FIL", " fil", " FIL"];
let possible_prefixes = ["atto", "a", " atto", " a"];
for unit in possible_units {
for prefix in possible_prefixes {
let input = format!("1{prefix}{unit}");
test_dec_scale(&input, "1", si::atto)
}
}
}
#[test]
fn parse_exa_and_exponent() {
test_dec_scale("1 E", "1", si::exa);
test_dec_scale("1e0E", "1", si::exa);
// ENHANCE(aatifsyed): this should be parsed as 1 exa, but that
// would probably require an entirely custom float parser with
// lookahead - users will have to include a space for now
// do_test("1E", "1", exa);
}
#[test]
fn more_than_96_bits() {
use std::iter::{once, repeat};
// The previous rust_decimal implementation had at most 96 bits of
// precision; we should be able to exceed that.
let test_str = once('1')
.chain(repeat('0').take(98))
.chain(['1'])
.collect::<String>();
test_dec_scale(&test_str, &test_str, None);
}
#[test]
fn disallow_too_small() {
parse("1 atto").unwrap();
assert_eq!(
parse("0.1 atto").unwrap_err().to_string(),
"sub-atto amounts are not allowed"
)
}
#[test]
fn some_values() {
let one_atto = TokenAmount::from_atto(BigInt::one());
let one_nano = TokenAmount::from_nano(BigInt::one());
assert_eq!(one_atto, parse("1 atto").unwrap());
assert_eq!(one_atto, parse("1000 zepto").unwrap());
assert_eq!(one_nano, parse("1 nano").unwrap());
}
#[test]
fn all_possible_prefixes() {
for scale in si::SUPPORTED_PREFIXES {
for prefix in scale.units.iter().chain([&scale.name]) {
// Need a space here because of the exa ambiguity
test_dec_scale(&format!("1 {prefix}"), "1", *scale);
}
}
}
}
}
mod print {
use std::fmt;
use crate::shim::econ::TokenAmount;
use bigdecimal::BigDecimal;
use num::{BigInt, Zero as _};
use super::si;
fn scale(n: BigDecimal) -> (BigDecimal, Option<si::Prefix>) {
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent > 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
if n.is_integer() {
return (n, None);
}
for prefix in si::SUPPORTED_PREFIXES
.iter()
.filter(|prefix| prefix.exponent < 0)
{
let scaled = n.clone() / prefix.multiplier();
if scaled.is_integer() {
return (scaled, Some(*prefix));
}
}
let smallest_prefix = si::SUPPORTED_PREFIXES.last().unwrap();
(n / smallest_prefix.multiplier(), Some(*smallest_prefix))
}
pub struct Pretty {
attos: BigInt,
}
impl From<&TokenAmount> for Pretty {
fn from(value: &TokenAmount) -> Self {
Self {
attos: value.atto().clone(),
}
}
}
pub trait TokenAmountPretty {
fn pretty(&self) -> Pretty;
}
impl TokenAmountPretty for TokenAmount {
/// Note the following format specifiers:
/// - `{:#}`: print number of FIL, not e.g `milliFIL`
/// - `{:.4}`: round to 4 significant figures
/// - `{:#.4}`: both
///
/// ```
/// # use forest_filecoin::doctest_private::{TokenAmountPretty as _, TokenAmount};
///
/// let amount = TokenAmount::from_nano(1500);
///
/// // Defaults to precise, with SI prefix
/// assert_eq!("1500 nanoFIL", format!("{}", amount.pretty()));
///
/// // Rounded to 1 s.f
/// assert_eq!("~2 microFIL", format!("{:.1}", amount.pretty()));
///
/// // Show absolute FIL
/// assert_eq!("0.0000015 FIL", format!("{:#}", amount.pretty()));
///
/// // Rounded absolute FIL
/// assert_eq!("~0.000002 FIL", format!("{:#.1}", amount.pretty()));
///
/// // We only indicate lost precision when relevant
/// assert_eq!("1500 nanoFIL", format!("{:.2}", amount.pretty()));
/// ```
///
/// # Formatting
/// - We select the most diminutive SI prefix (or not!) that allows us
/// to display an integer amount.
// RUST(aatifsyed): this should be -> impl fmt::Display
//
// Users shouldn't be able to name `Pretty` anyway
fn pretty(&self) -> Pretty {
Pretty::from(self)
}
}
impl fmt::Display for Pretty {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let actual_fil = &self.attos * si::atto.multiplier();
// rounding
let fil_for_printing = match f.precision() {
None => actual_fil.normalized(),
Some(prec) => actual_fil
.with_prec(u64::try_from(prec).expect("requested precision is absurd"))
.normalized(),
};
let precision_was_lost = fil_for_printing != actual_fil;
if precision_was_lost {
f.write_str("~")?;
}
// units or whole
let (print_me, prefix) = match f.alternate() {
true => (fil_for_printing, None),
false => scale(fil_for_printing),
};
// write the string
match print_me.is_zero() {
true => f.write_str("0 FIL"),
false => match prefix {
Some(prefix) => f.write_fmt(format_args!("{print_me} {}FIL", prefix.name)),
None => f.write_fmt(format_args!("{print_me} FIL")),
},
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use num::One as _;
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn prefixes_represent_themselves() {
for prefix in si::SUPPORTED_PREFIXES {
let input = BigDecimal::from_str(prefix.multiplier).unwrap();
assert_eq!((BigDecimal::one(), Some(*prefix)), scale(input));
}
}
#[test]
fn very_large() {
let mut one_thousand_quettas = String::from(si::quetta.multiplier);
one_thousand_quettas.push_str("000");
test_scale(&one_thousand_quettas, "1000", si::quetta);
}
#[test]
fn very_small() {
let mut one_thousanth_of_a_quecto = String::from(si::quecto.multiplier);
one_thousanth_of_a_quecto.pop();
one_thousanth_of_a_quecto.push_str("0001");
test_scale(&one_thousanth_of_a_quecto, "0.001", si::quecto);
}
fn test_scale(
input: &str,
expected_value: &str,
expected_prefix: impl Into<Option<si::Prefix>>,
) {
let input = BigDecimal::from_str(input).unwrap();
let expected_value = BigDecimal::from_str(expected_value).unwrap();
let expected_prefix = expected_prefix.into();
assert_eq!((expected_value, expected_prefix), scale(input))
}
#[test]
fn simple() {
test_scale("1000000", "1", si::mega);
test_scale("100000", "100", si::kilo);
test_scale("10000", "10", si::kilo);
test_scale("1000", "1", si::kilo);
test_scale("100", "100", None);
test_scale("10", "10", None);
test_scale("1", "1", None);
test_scale("0.1", "100", si::milli);
test_scale("0.01", "10", si::milli);
test_scale("0.001", "1", si::milli);
test_scale("0.0001", "100", si::micro);
}
#[test]
fn trailing_one() {
test_scale("10001000", "10001", si::kilo);
test_scale("10001", "10001", None);
test_scale("1000.1", "1000100", si::milli);
}
fn attos(input: &str) -> TokenAmount {
TokenAmount::from_atto(BigInt::from_str(input).unwrap())
}
fn fils(input: &str) -> TokenAmount {
TokenAmount::from_whole(BigInt::from_str(input).unwrap())
}
| // Try the longest matches first, so we don't e.g. match `a` instead of `atto`,
// leaving `tto`.
let mut scales = si::SUPPORTED_PREFIXES
.iter()
.flat_map(|scale| {
std::iter::once(&scale.name)
.chain(scale.units)
.map(move |prefix| (*prefix, scale))
})
.collect::<Vec<_>>();
scales.sort_by_key(|(prefix, _)| std::cmp::Reverse(*prefix));
for (prefix, scale) in scales {
if let Ok((rem, _prefix)) = tag::<_, _, E>(prefix)(input) {
return Ok((rem, *scale));
}
}
Err(nom::Err::Error(E::from_error_kind( | identifier_body |
lib.rs | // LNP/BP Libraries implementing LNPBP specifications & standards
// Written in 2021-2022 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
// Coding conventions
#![recursion_limit = "256"]
#![deny(dead_code, missing_docs, warnings)]
//! Library implementing LNPBP-14 standard: Bech32 encoding for
//! client-side-validated data.
//!
//! Types that need to have `data1...` and `z1...` Bech32 implementations
//! according to LNPBP-14 must implement [`ToBech32Payload`] and
//! [`FromBech32Payload`] traits.
//!
//! Bech32 `id1...` representation is provided automatically only for hash types
//! implementing the [`bitcoin_hashes::Hash`] trait.
#[macro_use]
extern crate amplify;
#[macro_use]
extern crate strict_encoding;
#[cfg(feature = "serde")]
#[macro_use]
extern crate serde_crate as serde;
use std::convert::{Infallible, TryFrom};
use std::fmt::{self, Debug, Formatter};
use std::str::FromStr;
use amplify::hex::ToHex;
use bech32::{FromBase32, ToBase32, Variant};
use bitcoin_hashes::{sha256t, Hash};
#[cfg(feature = "zip")]
use deflate::{write::DeflateEncoder, Compression};
#[cfg(feature = "serde")]
use serde::{
de::{Error as SerdeError, Unexpected, Visitor},
Deserializer, Serializer,
};
#[cfg(feature = "serde")]
use serde_with::{hex::Hex, As};
/// Bech32 HRP used in generic identifiers
pub const HRP_ID: &str = "id";
/// Bech32 HRP used for representation of arbitrary data blobs in their raw
/// (uncompressed) form
pub const HRP_DATA: &str = "data";
#[cfg(feature = "zip")]
/// Bech32 HRP used for representation of zip-compressed blobs
pub const HRP_ZIP: &str = "z";
/// Constant specifying default compression algorithm ("deflate")
#[cfg(feature = "zip")]
pub const RAW_DATA_ENCODING_DEFLATE: u8 = 1u8;
/// Errors generated by Bech32 conversion functions (both parsing and
/// type-specific conversion errors)
#[derive(Clone, PartialEq, Eq, Display, Debug, From, Error)]
#[display(doc_comments)]
pub enum Error {
/// bech32 string parse error - {0}
#[from]
Bech32Error(::bech32::Error),
/// payload data are not strictly encoded - {0}
#[from]
NotStrictEncoded(strict_encoding::Error),
/// payload data are not a bitcoin hash - {0}
#[from]
NotBitcoinHash(bitcoin_hashes::Error),
/// Requested object type does not match used Bech32 HRP
WrongPrefix,
/// bech32m encoding must be used instead of legacy bech32
WrongVariant,
/// payload must start with encoding prefix
NoEncodingPrefix,
/// provided raw data use unknown encoding version {0}
UnknownRawDataEncoding(u8),
/// can not encode raw data with DEFLATE algorithm
DeflateEncoding,
/// error inflating compressed data from payload: {0}
InflateError(String),
}
impl From<Infallible> for Error {
fn from(_: Infallible) -> Self {
unreachable!("infallible error in lnpbp_bech32 blob")
} | /// Type for wrapping Vec<u8> data in cases you need to do a convenient
/// enum variant display derives with `#[display(inner)]`
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate", transparent)
)]
#[derive(
Wrapper, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default, Display,
From
)]
#[derive(StrictEncode, StrictDecode)]
#[wrap(
Index,
IndexMut,
IndexRange,
IndexFull,
IndexFrom,
IndexTo,
IndexInclusive
)]
#[display(Vec::bech32_data_string)]
// We get `(To)Bech32DataString` and `FromBech32DataString` for free b/c
// the wrapper creates `From<Vec<u8>>` impl for us, which with rust stdlib
// implies `TryFrom<Vec<u8>>`, for which we have auto trait derivation
// `FromBech32Payload`, for which the traits above are automatically derived
pub struct Blob(
#[cfg_attr(feature = "serde", serde(with = "As::<Hex>"))] Vec<u8>,
);
impl AsRef<[u8]> for Blob {
fn as_ref(&self) -> &[u8] { &self.0 }
}
impl Debug for Blob {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "Blob({})", self.0.to_hex())
}
}
impl FromStr for Blob {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Blob::from_bech32_data_str(s)
}
}
/// Convertor trait for extracting data from a given type which will be part of
/// Bech32 payload
pub trait ToBech32Payload {
/// Must return a vector with Bech32 payload data
fn to_bech32_payload(&self) -> Vec<u8>;
}
/// Extracts data representation from a given type which will be part of Bech32
/// payload
pub trait AsBech32Payload {
/// Must return a reference to a slice representing Bech32 payload data
fn as_bech32_payload(&self) -> &[u8];
}
impl<T> AsBech32Payload for T
where
T: AsRef<[u8]>,
{
fn as_bech32_payload(&self) -> &[u8] { self.as_ref() }
}
/// Convertor which constructs a given type from Bech32 payload data
pub trait FromBech32Payload
where
Self: Sized,
{
/// Construct type from Bech32 payload data
fn from_bech32_payload(payload: Vec<u8>) -> Result<Self, Error>;
}
impl<T> FromBech32Payload for T
where
T: TryFrom<Vec<u8>>,
Error: From<T::Error>,
{
fn from_bech32_payload(payload: Vec<u8>) -> Result<T, Error> {
Ok(T::try_from(payload)?)
}
}
// -- Common (non-LNPBP-39) traits
/// Creates Bech32 string with appropriate type data representation.
/// Depending on the specific type, this may be `id`-string, `data`-string,
/// `z`-string or other type of HRP.
pub trait ToBech32String {
/// Creates Bech32 string with appropriate type data representation
fn to_bech32_string(&self) -> String;
}
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
pub trait FromBech32Str {
/// Specifies which HRP is used by Bech32 string representing this data type
const HRP: &'static str;
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
fn from_bech32_str(s: &str) -> Result<Self, Error>
where
Self: Sized;
}
/// Strategies for automatic implementation of the Bech32 traits
pub mod strategies {
use amplify::{Holder, Wrapper};
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
/// Strategy for Bech32 representation as uncompressed data (starting from
/// `data1...` HRP). The data are taken using the [`StrictEncode`]
/// implementation defined for the type.
pub struct UsingStrictEncoding;
/// Strategy for Bech32 representation of the newtypes wrapping other types.
/// The strategy simply inherits Bech32 representation from the inner type.
pub struct Wrapped;
#[cfg(feature = "zip")]
/// Strategy for Bech32 representation as compressed data (starting from
/// `z1...` HRP). The data are taken using the [`StrictEncode`]
/// implementation defined for the type.
pub struct CompressedStrictEncoding;
/// Helper trait for implementing specific strategy for Bech32 construction
pub trait Strategy {
/// Bech32 HRP prefix used by a type
const HRP: &'static str;
/// Specific strategy used for automatic implementation of all
/// Bech32-related traits.
type Strategy;
}
impl<T> ToBech32String for T
where
T: Strategy + Clone,
Holder<T, <T as Strategy>::Strategy>: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
Holder::new(self.clone()).to_bech32_string()
}
}
impl<T> FromBech32Str for T
where
T: Strategy,
Holder<T, <T as Strategy>::Strategy>: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Holder::from_bech32_str(s)?.into_inner())
}
}
impl<T> ToBech32String for Holder<T, Wrapped>
where
T: Wrapper,
T::Inner: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
self.as_inner().as_inner().to_bech32_string()
}
}
impl<T> FromBech32Str for Holder<T, Wrapped>
where
T: Wrapper + Strategy,
T::Inner: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::from_inner(T::Inner::from_bech32_str(s)?)))
}
}
impl<T> ToBech32String for Holder<T, UsingStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
::bech32::encode(T::HRP, data.to_base32(), Variant::Bech32m)
.unwrap_or_else(|_| s!("Error: wrong bech32 prefix"))
}
}
impl<T> FromBech32Str for Holder<T, UsingStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = ::bech32::decode(s)?;
if hrp.as_str() != Self::HRP {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Ok(Self::new(T::strict_deserialize(Vec::<u8>::from_base32(
&data,
)?)?))
}
}
}
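// Illustrative sketch (not part of the original file): a hypothetical type
// opting into Bech32 through the strategy machinery above. The type `Demo`,
// its HRP "demo" and the test value are invented for illustration.
#[cfg(test)]
mod strategy_sketch {
    use super::*;
    #[derive(Clone, StrictEncode, StrictDecode)]
    struct Demo(u32);
    impl strategies::Strategy for Demo {
        const HRP: &'static str = "demo";
        type Strategy = strategies::UsingStrictEncoding;
    }
    #[test]
    fn demo_roundtrip() {
        let s = Demo(42).to_bech32_string();
        assert!(s.starts_with("demo1"));
        assert_eq!(Demo::from_bech32_str(&s).unwrap().0, 42);
    }
}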
pub use strategies::Strategy;
// -- Sealed traits & their implementation
/// Special trait for preventing implementation of [`FromBech32DataStr`] and
/// others from outside of this crate. For details see
/// <https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed>
mod sealed {
use amplify::Wrapper;
use super::*;
pub trait HashType<Tag>: Wrapper<Inner = sha256t::Hash<Tag>>
where
Tag: sha256t::Tag,
{
}
pub trait ToPayload: ToBech32Payload {}
pub trait AsPayload: AsBech32Payload {}
pub trait FromPayload: FromBech32Payload {}
impl<T, Tag> HashType<Tag> for T
where
T: Wrapper<Inner = sha256t::Hash<Tag>>,
Tag: sha256t::Tag,
{
}
impl<T> ToPayload for T where T: ToBech32Payload {}
impl<T> AsPayload for T where T: AsBech32Payload {}
impl<T> FromPayload for T where T: FromBech32Payload {}
}
/// Trait for creating `data1...` Bech32 representation of a given type
pub trait ToBech32DataString: sealed::ToPayload {
/// Returns `data1...` Bech32 representation of a given type
fn to_bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.to_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> ToBech32DataString for T where T: sealed::ToPayload {}
/// Trait for creating `data1...` Bech32 representation of a given type
pub trait Bech32DataString: sealed::AsPayload {
/// Returns `data1...` Bech32 representation of a given type
fn bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.as_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> Bech32DataString for T where T: sealed::AsPayload {}
/// Trait for reconstruction type data from `data1...` Bech32 string
pub trait FromBech32DataStr
where
Self: Sized + sealed::FromPayload,
{
/// Reconstructs type data from `data1...` Bech32 string
fn from_bech32_data_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = bech32::decode(s)?;
if hrp != HRP_DATA {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Self::from_bech32_payload(Vec::<u8>::from_base32(&data)?)
}
}
impl<T> FromBech32DataStr for T where T: sealed::FromPayload {}
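// Illustrative sketch (not part of the original file): round-tripping the
// Blob type above through its `data1...` form, relying only on the blanket
// impls defined in this module.
#[cfg(test)]
mod blob_data_sketch {
    use super::*;
    #[test]
    fn blob_roundtrip() {
        let blob = Blob::from(vec![0xde, 0xad, 0xbe, 0xef]);
        let s = blob.bech32_data_string();
        assert!(s.starts_with("data1"));
        assert_eq!(Blob::from_bech32_data_str(&s).unwrap(), blob);
    }
}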
#[doc(hidden)]
#[cfg(feature = "zip")]
pub mod zip {
use amplify::Holder;
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
fn payload_to_bech32_zip_string(hrp: &str, payload: &[u8]) -> String {
use std::io::Write;
// We initialize writer with a version byte, indicating deflation
// algorithm used
let writer = vec![RAW_DATA_ENCODING_DEFLATE];
let mut encoder = DeflateEncoder::new(writer, Compression::Best);
encoder
.write_all(payload)
.expect("in-memory strict encoder failure");
let data = encoder.finish().expect("zip algorithm failure");
::bech32::encode(hrp, data.to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
fn bech32_zip_str_to_payload(hrp: &str, s: &str) -> Result<Vec<u8>, Error> {
let (prefix, data, version) = bech32::decode(s)?;
if prefix != hrp {
return Err(Error::WrongPrefix);
}
if version != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let data = Vec::<u8>::from_base32(&data)?;
match *data[..].first().ok_or(Error::NoEncodingPrefix)? {
RAW_DATA_ENCODING_DEFLATE => {
let decoded = inflate::inflate_bytes(&data[1..])
.map_err(Error::InflateError)?;
Ok(decoded)
}
unknown_ver => Err(Error::UnknownRawDataEncoding(unknown_ver)),
}
}
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait ToBech32ZipString: sealed::ToPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn to_bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, &self.to_bech32_payload())
}
}
impl<T> ToBech32ZipString for T where T: sealed::ToPayload {}
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait Bech32ZipString: sealed::AsPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, self.as_bech32_payload())
}
}
impl<T> Bech32ZipString for T where T: sealed::AsPayload {}
/// Trait for reconstruction type data from `z1...` (compressed binary data
/// blob) Bech32 string
pub trait FromBech32ZipStr: sealed::FromPayload {
/// Reconstructs type data from `z1...` (compressed binary data blob)
/// Bech32 string
fn from_bech32_zip_str(s: &str) -> Result<Self, Error> {
Self::from_bech32_payload(bech32_zip_str_to_payload(HRP_ZIP, s)?)
}
}
impl<T> FromBech32ZipStr for T where T: sealed::FromPayload {}
impl<T> ToBech32String for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
payload_to_bech32_zip_string(T::HRP, &data)
}
}
impl<T> FromBech32Str for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::strict_deserialize(
bech32_zip_str_to_payload(Self::HRP, s)?,
)?))
}
}
}
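// Illustrative sketch (not part of the original file): compressing a raw
// payload into a `z1...` string and inflating it back via the blanket impls
// from the zip module above.
#[cfg(all(test, feature = "zip"))]
mod zip_sketch {
    use super::*;
    #[test]
    fn zip_roundtrip() {
        let payload = vec![0u8; 64]; // deliberately compressible
        let z = payload.bech32_zip_string();
        assert!(z.starts_with("z1"));
        assert_eq!(Vec::<u8>::from_bech32_zip_str(&z).unwrap(), payload);
    }
}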
#[cfg(feature = "zip")]
pub use zip::*;
/// Trait representing given bitcoin hash type as a Bech32 `id1...` value
pub trait ToBech32IdString<Tag>
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
/// Returns Bech32-encoded string in form of `id1...` representing the type
fn to_bech32_id_string(&self) -> String;
}
/// Trait that can generate the type from a given Bech32 `id1...` value
pub trait FromBech32IdStr<Tag>
where
Self: sealed::HashType<Tag> + Sized,
Tag: sha256t::Tag,
{
/// Reconstructs the identifier type from the provided Bech32 `id1...`
/// string
fn from_bech32_id_str(s: &str) -> Result<Self, Error>;
}
impl<T, Tag> ToBech32IdString<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn to_bech32_id_string(&self) -> String {
::bech32::encode(HRP_ID, self.to_inner().to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T, Tag> FromBech32IdStr<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn from_bech32_id_str(s: &str) -> Result<T, Error> {
let (hrp, id, variant) = ::bech32::decode(s)?;
if hrp != HRP_ID {
return Err(Error::WrongPrefix);
}
if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let vec = Vec::<u8>::from_base32(&id)?;
Ok(Self::from_inner(Self::Inner::from_slice(&vec)?))
}
}
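// Illustrative note: for a newtype `MyId` wrapping `sha256t::Hash<MyTag>`,
// the blanket impls above give `my_id.to_bech32_id_string()` producing an
// `id1...` string and `MyId::from_bech32_id_str` reversing it; `MyId` and
// `MyTag` are hypothetical names, not part of this crate.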
/// Helper method for serde serialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn serialize<T, S>(data: &T, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: ToBech32String,
{
serializer.serialize_str(&data.to_bech32_string())
}
/// Helper method for serde deserialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: FromBech32Str,
{
deserializer.deserialize_str(Bech32Visitor::<T>(std::marker::PhantomData))
}
#[cfg(feature = "serde")]
struct Bech32Visitor<Value>(std::marker::PhantomData<Value>);
#[cfg(feature = "serde")]
impl<'de, ValueT> Visitor<'de> for Bech32Visitor<ValueT>
where
ValueT: FromBech32Str,
{
type Value = ValueT;
fn expecting(
&self,
formatter: &mut std::fmt::Formatter,
) -> std::fmt::Result {
formatter.write_str("a bech32m-encoded string")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: SerdeError,
{
Self::Value::from_bech32_str(v).map_err(|_| {
E::invalid_value(Unexpected::Str(v), &"valid bech32 string")
})
}
} | }
| random_line_split |
lib.rs
// LNP/BP libraries implementing LNPBP specifications & standards
// Written in 2021-2022 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
// Coding conventions
#![recursion_limit = "256"]
#![deny(dead_code, missing_docs, warnings)]
//! Library implementing LNPBP-14 standard: Bech32 encoding for
//! client-side-validated data.
//!
//! Types that need `data1...` and `z1...` Bech32 representations
//! according to LNPBP-14 must implement [`ToBech32Payload`] and
//! [`FromBech32Payload`] traits.
//!
//! Bech32 `id1...` representation is provided automatically only for hash types
//! implementing the [`bitcoin_hashes::Hash`] trait
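//!
//! A minimal round-trip sketch for the `data1...` form (uses the `Blob` type
//! defined below; marked `ignore` since it is an illustration, not a doctest):
//!
//! ```ignore
//! use lnpbp_bech32::{Bech32DataString, Blob, FromBech32DataStr};
//!
//! let blob = Blob::from(vec![0xde, 0xad, 0xbe, 0xef]);
//! let encoded = blob.bech32_data_string(); // "data1..."
//! let decoded = Blob::from_bech32_data_str(&encoded).unwrap();
//! assert_eq!(blob, decoded);
//! ```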
#[macro_use]
extern crate amplify;
#[macro_use]
extern crate strict_encoding;
#[cfg(feature = "serde")]
#[macro_use]
extern crate serde_crate as serde;
use std::convert::{Infallible, TryFrom};
use std::fmt::{self, Debug, Formatter};
use std::str::FromStr;
use amplify::hex::ToHex;
use bech32::{FromBase32, ToBase32, Variant};
use bitcoin_hashes::{sha256t, Hash};
#[cfg(feature = "zip")]
use deflate::{write::DeflateEncoder, Compression};
#[cfg(feature = "serde")]
use serde::{
de::{Error as SerdeError, Unexpected, Visitor},
Deserializer, Serializer,
};
#[cfg(feature = "serde")]
use serde_with::{hex::Hex, As};
/// Bech32 HRP used in generic identifiers
pub const HRP_ID: &str = "id";
/// Bech32 HRP used for representation of arbitrary data blobs in their raw
/// (uncompressed) form
pub const HRP_DATA: &str = "data";
#[cfg(feature = "zip")]
/// Bech32 HRP used for representation of zip-compressed blobs
pub const HRP_ZIP: &str = "z";
/// Constant specifying default compression algorithm ("deflate")
#[cfg(feature = "zip")]
pub const RAW_DATA_ENCODING_DEFLATE: u8 = 1u8;
/// Errors generated by Bech32 conversion functions (both parsing and
/// type-specific conversion errors)
#[derive(Clone, PartialEq, Eq, Display, Debug, From, Error)]
#[display(doc_comments)]
pub enum Error {
/// bech32 string parse error - {0}
#[from]
Bech32Error(::bech32::Error),
/// payload data are not strictly encoded - {0}
#[from]
NotStrictEncoded(strict_encoding::Error),
/// payload data are not a bitcoin hash - {0}
#[from]
NotBitcoinHash(bitcoin_hashes::Error),
/// Requested object type does not match used Bech32 HRP
WrongPrefix,
/// bech32m encoding must be used instead of legacy bech32
WrongVariant,
/// payload must start with encoding prefix
NoEncodingPrefix,
/// provided raw data use unknown encoding version {0}
UnknownRawDataEncoding(u8),
/// can not encode raw data with DEFLATE algorithm
DeflateEncoding,
/// error inflating compressed data from payload: {0}
InflateError(String),
}
impl From<Infallible> for Error {
fn from(_: Infallible) -> Self {
        unreachable!("infallible error in lnpbp_bech32 blob")
}
}
/// Type for wrapping `Vec<u8>` data in cases where you need convenient
/// enum variant display derives with `#[display(inner)]`
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate", transparent)
)]
#[derive(
Wrapper, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default, Display,
From
)]
#[derive(StrictEncode, StrictDecode)]
#[wrap(
Index,
IndexMut,
IndexRange,
IndexFull,
IndexFrom,
IndexTo,
IndexInclusive
)]
#[display(Vec::bech32_data_string)]
// We get `(To)Bech32DataString` and `FromBech32DataStr` for free b/c
// the wrapper creates a `From<Vec<u8>>` impl for us, which with the Rust
// stdlib implies `TryFrom<Vec<u8>>`, for which we have the auto trait
// derivation `FromBech32Payload`, from which the traits above are
// automatically derived
pub struct Blob(
#[cfg_attr(feature = "serde", serde(with = "As::<Hex>"))] Vec<u8>,
);
impl AsRef<[u8]> for Blob {
fn as_ref(&self) -> &[u8] { &self.0 }
}
impl Debug for Blob {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "Blob({})", self.0.to_hex())
}
}
impl FromStr for Blob {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Blob::from_bech32_data_str(s)
}
}
/// Convertor trait for extracting data from a given type which will be part of
/// Bech32 payload
pub trait ToBech32Payload {
/// Must return a vector with Bech32 payload data
fn to_bech32_payload(&self) -> Vec<u8>;
}
/// Extracts data representation from a given type which will be part of Bech32
/// payload
pub trait AsBech32Payload {
/// Must return a reference to a slice representing Bech32 payload data
fn as_bech32_payload(&self) -> &[u8];
}
impl<T> AsBech32Payload for T
where
T: AsRef<[u8]>,
{
fn as_bech32_payload(&self) -> &[u8] { self.as_ref() }
}
/// Convertor which constructs a given type from Bech32 payload data
pub trait FromBech32Payload
where
Self: Sized,
{
/// Construct type from Bech32 payload data
fn from_bech32_payload(payload: Vec<u8>) -> Result<Self, Error>;
}
impl<T> FromBech32Payload for T
where
T: TryFrom<Vec<u8>>,
Error: From<T::Error>,
{
fn from_bech32_payload(payload: Vec<u8>) -> Result<T, Error> {
Ok(T::try_from(payload)?)
}
}
// -- Common (non-LNPBP-39) traits
/// Creates Bech32 string with appropriate type data representation.
/// Depending on the specific type, this may be `id`-string, `data`-string,
/// `z`-string or other type of HRP.
pub trait ToBech32String {
/// Creates Bech32 string with appropriate type data representation
fn to_bech32_string(&self) -> String;
}
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
pub trait FromBech32Str {
/// Specifies which HRP is used by Bech32 string representing this data type
const HRP: &'static str;
/// Constructs type from the provided Bech32 string, or fails with
/// [`enum@Error`]
fn from_bech32_str(s: &str) -> Result<Self, Error>
where
Self: Sized;
}
/// Strategies for automatic implementation of the Bech32 traits
pub mod strategies {
use amplify::{Holder, Wrapper};
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
    /// Strategy for Bech32 representation as uncompressed data (using the
    /// `data1...` HRP). The data are taken from the [`StrictEncode`]
    /// implementation defined for the type.
pub struct UsingStrictEncoding;
/// Strategy for Bech32 representation of the newtypes wrapping other types.
/// The strategy simply inherits Bech32 representation from the inner type.
pub struct Wrapped;
#[cfg(feature = "zip")]
    /// Strategy for Bech32 representation as compressed data (using the
    /// `z1...` HRP). The data are taken from the [`StrictEncode`]
    /// implementation defined for the type.
pub struct CompressedStrictEncoding;
/// Helper trait for implementing specific strategy for Bech32 construction
pub trait Strategy {
/// Bech32 HRP prefix used by a type
const HRP: &'static str;
/// Specific strategy used for automatic implementation of all
/// Bech32-related traits.
type Strategy;
}
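    // A sketch of opting a concrete type into the blanket impls below (the
    // type name and HRP are illustrative assumptions, not part of this
    // crate):
    //
    //     #[derive(Clone, StrictEncode, StrictDecode)]
    //     struct MyType(u64);
    //
    //     impl Strategy for MyType {
    //         const HRP: &'static str = "data";
    //         type Strategy = UsingStrictEncoding;
    //     }
    //
    // `MyType` now gets `ToBech32String` and `FromBech32Str` for free via the
    // `Holder`-based implementations that follow.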
impl<T> ToBech32String for T
where
T: Strategy + Clone,
Holder<T, <T as Strategy>::Strategy>: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
Holder::new(self.clone()).to_bech32_string()
}
}
impl<T> FromBech32Str for T
where
T: Strategy,
Holder<T, <T as Strategy>::Strategy>: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Holder::from_bech32_str(s)?.into_inner())
}
}
impl<T> ToBech32String for Holder<T, Wrapped>
where
T: Wrapper,
T::Inner: ToBech32String,
{
#[inline]
fn to_bech32_string(&self) -> String {
self.as_inner().as_inner().to_bech32_string()
}
}
impl<T> FromBech32Str for Holder<T, Wrapped>
where
T: Wrapper + Strategy,
T::Inner: FromBech32Str,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::from_inner(T::Inner::from_bech32_str(s)?)))
}
}
impl<T> ToBech32String for Holder<T, UsingStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
::bech32::encode(T::HRP, data.to_base32(), Variant::Bech32m)
.unwrap_or_else(|_| s!("Error: wrong bech32 prefix"))
}
}
impl<T> FromBech32Str for Holder<T, UsingStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = ::bech32::decode(s)?;
            if hrp.as_str() != Self::HRP {
return Err(Error::WrongPrefix);
}
            if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Ok(Self::new(T::strict_deserialize(Vec::<u8>::from_base32(
&data,
)?)?))
}
}
}
pub use strategies::Strategy;
// -- Sealed traits & their implementation
/// Special trait for preventing implementation of [`FromBech32DataStr`] and
/// others from outside of this crate. For details see
/// <https://rust-lang.github.io/api-guidelines/future-proofing.html#sealed-traits-protect-against-downstream-implementations-c-sealed>
mod sealed {
use amplify::Wrapper;
use super::*;
pub trait HashType<Tag>: Wrapper<Inner = sha256t::Hash<Tag>>
where
Tag: sha256t::Tag,
{
}
pub trait ToPayload: ToBech32Payload {}
pub trait AsPayload: AsBech32Payload {}
pub trait FromPayload: FromBech32Payload {}
impl<T, Tag> HashType<Tag> for T
where
T: Wrapper<Inner = sha256t::Hash<Tag>>,
Tag: sha256t::Tag,
{
}
impl<T> ToPayload for T where T: ToBech32Payload {}
impl<T> AsPayload for T where T: AsBech32Payload {}
impl<T> FromPayload for T where T: FromBech32Payload {}
}
/// Trait for creating `data1...` Bech32 representation of a given type
pub trait ToBech32DataString: sealed::ToPayload {
/// Returns `data1...` Bech32 representation of a given type
fn to_bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.to_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> ToBech32DataString for T where T: sealed::ToPayload {}
/// Trait for creating `data1...` Bech32 representation of a given type
pub trait Bech32DataString: sealed::AsPayload {
/// Returns `data1...` Bech32 representation of a given type
fn bech32_data_string(&self) -> String {
::bech32::encode(
HRP_DATA,
self.as_bech32_payload().to_base32(),
Variant::Bech32m,
)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T> Bech32DataString for T where T: sealed::AsPayload {}
/// Trait for reconstructing type data from a `data1...` Bech32 string
pub trait FromBech32DataStr
where
Self: Sized + sealed::FromPayload,
{
/// Reconstructs type data from `data1...` Bech32 string
fn from_bech32_data_str(s: &str) -> Result<Self, Error> {
let (hrp, data, variant) = bech32::decode(s)?;
        if hrp != HRP_DATA {
return Err(Error::WrongPrefix);
}
        if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
Self::from_bech32_payload(Vec::<u8>::from_base32(&data)?)
}
}
impl<T> FromBech32DataStr for T where T: sealed::FromPayload {}
#[doc(hidden)]
#[cfg(feature = "zip")]
pub mod zip {
use amplify::Holder;
use strict_encoding::{StrictDecode, StrictEncode};
use super::*;
fn payload_to_bech32_zip_string(hrp: &str, payload: &[u8]) -> String {
use std::io::Write;
        // We initialize the writer with a version byte indicating the
        // compression algorithm used
let writer = vec![RAW_DATA_ENCODING_DEFLATE];
let mut encoder = DeflateEncoder::new(writer, Compression::Best);
encoder
.write_all(payload)
.expect("in-memory strict encoder failure");
let data = encoder.finish().expect("zip algorithm failure");
::bech32::encode(hrp, data.to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
fn bech32_zip_str_to_payload(hrp: &str, s: &str) -> Result<Vec<u8>, Error> {
let (prefix, data, version) = bech32::decode(s)?;
        if prefix != hrp {
return Err(Error::WrongPrefix);
}
        if version != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let data = Vec::<u8>::from_base32(&data)?;
match *data[..].first().ok_or(Error::NoEncodingPrefix)? {
RAW_DATA_ENCODING_DEFLATE => {
let decoded = inflate::inflate_bytes(&data[1..])
.map_err(Error::InflateError)?;
Ok(decoded)
}
unknown_ver => Err(Error::UnknownRawDataEncoding(unknown_ver)),
}
}
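    // Wire layout of the payload that is Bech32m-encoded above:
    //   byte 0    - encoding version (currently RAW_DATA_ENCODING_DEFLATE == 1)
    //   bytes 1.. - DEFLATE-compressed body
    //
    // Round-trip sketch using the two private helpers above (illustrative
    // only):
    //
    //     let s = payload_to_bech32_zip_string(HRP_ZIP, b"example payload");
    //     let restored = bech32_zip_str_to_payload(HRP_ZIP, &s).unwrap();
    //     assert_eq!(restored, b"example payload".to_vec());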
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait ToBech32ZipString: sealed::ToPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn to_bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, &self.to_bech32_payload())
}
}
impl<T> ToBech32ZipString for T where T: sealed::ToPayload {}
/// Trait for creating `z1...` (compressed binary data blob) Bech32
/// representation of a given type
pub trait Bech32ZipString: sealed::AsPayload {
/// Returns `z1...` (compressed binary data blob) Bech32 representation
/// of a given type
fn bech32_zip_string(&self) -> String {
payload_to_bech32_zip_string(HRP_ZIP, self.as_bech32_payload())
}
}
impl<T> Bech32ZipString for T where T: sealed::AsPayload {}
    /// Trait for reconstructing type data from a `z1...` (compressed binary
    /// data blob) Bech32 string
pub trait FromBech32ZipStr: sealed::FromPayload {
/// Reconstructs type data from `z1...` (compressed binary data blob)
/// Bech32 string
fn from_bech32_zip_str(s: &str) -> Result<Self, Error> {
Self::from_bech32_payload(bech32_zip_str_to_payload(HRP_ZIP, s)?)
}
}
impl<T> FromBech32ZipStr for T where T: sealed::FromPayload {}
impl<T> ToBech32String for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictEncode + Strategy,
{
#[inline]
fn to_bech32_string(&self) -> String {
let data = self
.as_inner()
.strict_serialize()
.expect("in-memory strict encoding failure");
payload_to_bech32_zip_string(T::HRP, &data)
}
}
impl<T> FromBech32Str for Holder<T, strategies::CompressedStrictEncoding>
where
T: StrictDecode + Strategy,
{
const HRP: &'static str = T::HRP;
#[inline]
fn from_bech32_str(s: &str) -> Result<Self, Error> {
Ok(Self::new(T::strict_deserialize(
bech32_zip_str_to_payload(Self::HRP, s)?,
)?))
}
}
}
#[cfg(feature = "zip")]
pub use zip::*;
/// Trait representing given bitcoin hash type as a Bech32 `id1...` value
pub trait ToBech32IdString<Tag>
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
/// Returns Bech32-encoded string in form of `id1...` representing the type
fn to_bech32_id_string(&self) -> String;
}
/// Trait that can generate the type from a given Bech32 `id1...` value
pub trait FromBech32IdStr<Tag>
where
Self: sealed::HashType<Tag> + Sized,
Tag: sha256t::Tag,
{
/// Reconstructs the identifier type from the provided Bech32 `id1...`
/// string
fn from_bech32_id_str(s: &str) -> Result<Self, Error>;
}
impl<T, Tag> ToBech32IdString<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn to_bech32_id_string(&self) -> String {
::bech32::encode(HRP_ID, self.to_inner().to_base32(), Variant::Bech32m)
.expect("HRP is hardcoded and can't fail")
}
}
impl<T, Tag> FromBech32IdStr<Tag> for T
where
Self: sealed::HashType<Tag>,
Tag: sha256t::Tag,
{
fn from_bech32_id_str(s: &str) -> Result<T, Error> {
let (hrp, id, variant) = ::bech32::decode(s)?;
        if hrp != HRP_ID {
return Err(Error::WrongPrefix);
}
        if variant != Variant::Bech32m {
return Err(Error::WrongVariant);
}
let vec = Vec::<u8>::from_base32(&id)?;
Ok(Self::from_inner(Self::Inner::from_slice(&vec)?))
}
}
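// Round-trip sketch for the `id1...` form; the newtype and tag below are
// illustrative assumptions, any `Wrapper` around a `sha256t::Hash<Tag>` works:
//
//     #[derive(Wrapper, Clone, From)]
//     struct MyId(sha256t::Hash<MyTag>);
//
//     let s = my_id.to_bech32_id_string();          // "id1..."
//     let restored = MyId::from_bech32_id_str(&s)?; // round trip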
/// Helper method for serde serialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn serialize<T, S>(data: &T, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: ToBech32String,
{
serializer.serialize_str(&data.to_bech32_string())
}
/// Helper method for serde deserialization of types supporting Bech32
/// representation
#[cfg(feature = "serde")]
pub fn deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: FromBech32Str,
{
deserializer.deserialize_str(Bech32Visitor::<T>(std::marker::PhantomData))
}
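// Sketch: the two functions above follow serde's `with`-module convention, so
// a field can opt into Bech32 string (de)serialization via an attribute (the
// crate path is assumed):
//
//     #[derive(Serialize, Deserialize)]
//     #[serde(crate = "serde_crate")]
//     struct Container {
//         #[serde(with = "lnpbp_bech32")]
//         blob: Blob,
//     }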
#[cfg(feature = "serde")]
struct Bech32Visitor<Value>(std::marker::PhantomData<Value>);
#[cfg(feature = "serde")]
impl<'de, ValueT> Visitor<'de> for Bech32Visitor<ValueT>
where
ValueT: FromBech32Str,
{
type Value = ValueT;
fn expecting(
&self,
formatter: &mut std::fmt::Formatter,
) -> std::fmt::Result {
formatter.write_str("a bech32m-encoded string")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: SerdeError,
{
Self::Value::from_bech32_str(v).map_err(|_| {
E::invalid_value(Unexpected::Str(v), &"valid bech32 string")
})
}
}
lib.rs
//! JSON-RPC client implementation.
#![deny(missing_docs)]
use failure::{format_err, Fail};
use futures::sync::{mpsc, oneshot};
use futures::{future, prelude::*};
use jsonrpc_core::{Error, Params};
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde_json::Value;
use std::marker::PhantomData;
pub mod transports;
#[cfg(test)]
mod logger;
/// The errors returned by the client.
#[derive(Debug, Fail)]
pub enum RpcError {
/// An error returned by the server.
#[fail(display = "Server returned rpc error {}", _0)]
JsonRpcError(Error),
/// Failure to parse server response.
#[fail(display = "Failed to parse server response as {}: {}", _0, _1)]
ParseError(String, failure::Error),
/// Request timed out.
#[fail(display = "Request timed out")]
Timeout,
    /// Non-RPC-specific errors.
#[fail(display = "{}", _0)]
Other(failure::Error),
}
impl From<Error> for RpcError {
fn from(error: Error) -> Self {
RpcError::JsonRpcError(error)
}
}
/// An RPC call message.
struct CallMessage {
/// The RPC method name.
method: String,
/// The RPC method parameters.
params: Params,
/// The oneshot channel to send the result of the rpc
/// call to.
sender: oneshot::Sender<Result<Value, RpcError>>,
}
/// An RPC notification.
struct NotifyMessage {
/// The RPC method name.
method: String,
    /// The RPC method parameters.
params: Params,
}
/// An RPC subscription.
struct Subscription {
/// The subscribe method name.
subscribe: String,
/// The subscribe method parameters.
subscribe_params: Params,
/// The name of the notification.
notification: String,
/// The unsubscribe method name.
unsubscribe: String,
}
/// An RPC subscribe message.
struct SubscribeMessage {
/// The subscription to subscribe to.
subscription: Subscription,
/// The channel to send notifications to.
sender: mpsc::Sender<Result<Value, RpcError>>,
}
/// A message sent to the `RpcClient`.
enum RpcMessage {
/// Make an RPC call.
Call(CallMessage),
/// Send a notification.
Notify(NotifyMessage),
/// Subscribe to a notification.
Subscribe(SubscribeMessage),
}
impl From<CallMessage> for RpcMessage {
fn from(msg: CallMessage) -> Self {
RpcMessage::Call(msg)
}
}
impl From<NotifyMessage> for RpcMessage {
fn from(msg: NotifyMessage) -> Self {
RpcMessage::Notify(msg)
}
}
impl From<SubscribeMessage> for RpcMessage {
fn from(msg: SubscribeMessage) -> Self {
RpcMessage::Subscribe(msg)
}
}
/// A channel to a `RpcClient`.
#[derive(Clone)]
pub struct RpcChannel(mpsc::Sender<RpcMessage>);
impl RpcChannel {
fn send(
&self,
msg: RpcMessage,
) -> impl Future<Item = mpsc::Sender<RpcMessage>, Error = mpsc::SendError<RpcMessage>> {
self.0.to_owned().send(msg)
}
}
impl From<mpsc::Sender<RpcMessage>> for RpcChannel {
fn from(sender: mpsc::Sender<RpcMessage>) -> Self {
RpcChannel(sender)
}
}
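// Typical pattern (sketch): concrete client wrappers are constructed from an
// `RpcChannel`, which is what the transport connectors hand back; compare
// `AddClient` in the tests below.
//
//     #[derive(Clone)]
//     struct MyClient(TypedClient);
//
//     impl From<RpcChannel> for MyClient {
//         fn from(channel: RpcChannel) -> Self {
//             MyClient(channel.into())
//         }
//     }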
/// The future returned by the rpc call.
pub struct RpcFuture {
recv: oneshot::Receiver<Result<Value, RpcError>>,
}
impl RpcFuture {
/// Creates a new `RpcFuture`.
pub fn new(recv: oneshot::Receiver<Result<Value, RpcError>>) -> Self {
RpcFuture { recv }
}
}
impl Future for RpcFuture {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> {
// TODO should timeout (#410)
match self.recv.poll() {
Ok(Async::Ready(Ok(value))) => Ok(Async::Ready(value)),
Ok(Async::Ready(Err(error))) => Err(error),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(error) => Err(RpcError::Other(error.into())),
}
}
}
/// The stream returned by a subscribe.
pub struct SubscriptionStream {
recv: mpsc::Receiver<Result<Value, RpcError>>,
}
impl SubscriptionStream {
    /// Creates a new `SubscriptionStream`.
pub fn new(recv: mpsc::Receiver<Result<Value, RpcError>>) -> Self {
SubscriptionStream { recv }
}
}
impl Stream for SubscriptionStream {
type Item = Value;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
match self.recv.poll() {
Ok(Async::Ready(Some(Ok(value)))) => Ok(Async::Ready(Some(value))),
Ok(Async::Ready(Some(Err(error)))) => Err(error),
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(()) => Err(RpcError::Other(format_err!("mpsc channel returned an error."))),
}
}
}
/// A typed subscription stream.
pub struct TypedSubscriptionStream<T> {
_marker: PhantomData<T>,
returns: &'static str,
stream: SubscriptionStream,
}
impl<T> TypedSubscriptionStream<T> {
/// Creates a new `TypedSubscriptionStream`.
pub fn new(stream: SubscriptionStream, returns: &'static str) -> Self {
TypedSubscriptionStream {
_marker: PhantomData,
returns,
stream,
}
}
}
impl<T: DeserializeOwned + 'static> Stream for TypedSubscriptionStream<T> {
type Item = T;
type Error = RpcError;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
let result = match self.stream.poll()? {
Async::Ready(Some(value)) => serde_json::from_value::<T>(value)
.map(|result| Async::Ready(Some(result)))
.map_err(|error| RpcError::ParseError(self.returns.into(), error.into()))?,
Async::Ready(None) => Async::Ready(None),
Async::NotReady => Async::NotReady,
};
Ok(result)
}
}
/// Client for raw JSON RPC requests
#[derive(Clone)]
pub struct RawClient(RpcChannel);
impl From<RpcChannel> for RawClient {
fn from(channel: RpcChannel) -> Self {
RawClient(channel)
}
}
impl RawClient {
/// Call RPC method with raw JSON.
pub fn call_method(&self, method: &str, params: Params) -> impl Future<Item = Value, Error = RpcError> {
let (sender, receiver) = oneshot::channel();
let msg = CallMessage {
method: method.into(),
params,
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.and_then(|_| RpcFuture::new(receiver))
}
/// Send RPC notification with raw JSON.
pub fn notify(&self, method: &str, params: Params) -> impl Future<Item = (), Error = RpcError> {
let msg = NotifyMessage {
method: method.into(),
params,
};
self.0
.send(msg.into())
.map(|_| ())
.map_err(|error| RpcError::Other(error.into()))
}
/// Subscribe to topic with raw JSON.
pub fn subscribe(
&self,
subscribe: &str,
subscribe_params: Params,
notification: &str,
unsubscribe: &str,
) -> impl Future<Item = SubscriptionStream, Error = RpcError> {
let (sender, receiver) = mpsc::channel(0);
let msg = SubscribeMessage {
subscription: Subscription {
subscribe: subscribe.into(),
subscribe_params,
notification: notification.into(),
unsubscribe: unsubscribe.into(),
},
sender,
};
self.0
.send(msg.into())
.map_err(|error| RpcError::Other(error.into()))
.map(|_| SubscriptionStream::new(receiver))
}
}
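// Raw usage sketch (the method name is a placeholder; the returned future
// resolves to the raw `serde_json::Value` reply):
//
//     let pong = raw_client.call_method("ping", Params::None);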
/// Client for typed JSON RPC requests
#[derive(Clone)]
pub struct TypedClient(RawClient);
impl From<RpcChannel> for TypedClient {
fn from(channel: RpcChannel) -> Self {
TypedClient(channel.into())
}
}
impl TypedClient {
/// Create a new `TypedClient`.
pub fn new(raw_cli: RawClient) -> Self {
TypedClient(raw_cli)
}
/// Call RPC with serialization of request and deserialization of response.
    pub fn call_method<T: Serialize, R: DeserializeOwned + 'static>(
&self,
method: &str,
returns: &'static str,
args: T,
) -> impl Future<Item = R, Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
Value::Object(map) => Params::Map(map),
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, JSON object or null"
))))
}
};
future::Either::B(self.0.call_method(method, params).and_then(move |value: Value| {
log::debug!("response: {:?}", value);
let result =
serde_json::from_value::<R>(value).map_err(|error| RpcError::ParseError(returns.into(), error.into()));
future::done(result)
}))
}
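    // Sketch of the argument mapping above: a tuple serializes to a JSON
    // array and becomes positional params, a map becomes named params, and
    // `()` serializes to null, i.e. `Params::None`. For example (method name
    // assumed):
    //
    //     let fut = client.call_method::<_, u64>("add", "u64", (1u64, 2u64));
    //     // -> {"method": "add", "params": [1, 2], ...}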
/// Call RPC with serialization of request only.
pub fn notify<T: Serialize>(&self, method: &str, args: T) -> impl Future<Item = (), Error = RpcError> {
let args =
serde_json::to_value(args).expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
future::Either::B(self.0.notify(method, params))
}
/// Subscribe with serialization of request and deserialization of response.
    pub fn subscribe<T: Serialize, R: DeserializeOwned + 'static>(
&self,
subscribe: &str,
subscribe_params: T,
topic: &str,
unsubscribe: &str,
returns: &'static str,
) -> impl Future<Item = TypedSubscriptionStream<R>, Error = RpcError> {
let args = serde_json::to_value(subscribe_params)
.expect("Only types with infallible serialisation can be used for JSON-RPC");
let params = match args {
Value::Array(vec) => Params::Array(vec),
Value::Null => Params::None,
_ => {
return future::Either::A(future::err(RpcError::Other(format_err!(
"RPC params should serialize to a JSON array, or null"
))))
}
};
let typed_stream = self
.0
.subscribe(subscribe, params, topic, unsubscribe)
.map(move |stream| TypedSubscriptionStream::new(stream, returns));
future::Either::B(typed_stream)
}
}
#[cfg(test)]
mod tests {
use super::*;
    use crate::transports::local;
    use crate::{RpcChannel, RpcError, TypedClient};
    use jsonrpc_core::{self as core, IoHandler};
    use jsonrpc_pubsub::{PubSubHandler, Subscriber, SubscriptionId};
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
#[derive(Clone)]
struct AddClient(TypedClient);
impl From<RpcChannel> for AddClient {
fn from(channel: RpcChannel) -> Self {
AddClient(channel.into())
}
}
impl AddClient {
fn add(&self, a: u64, b: u64) -> impl Future<Item = u64, Error = RpcError> {
self.0.call_method("add", "u64", (a, b))
}
fn completed(&self, success: bool) -> impl Future<Item = (), Error = RpcError> {
self.0.notify("completed", (success,))
}
}
#[test]
fn test_client_terminates() {
crate::logger::init_log();
let mut handler = IoHandler::new();
handler.add_method("add", |params: Params| {
let (a, b) = params.parse::<(u64, u64)>()?;
let res = a + b;
Ok(jsonrpc_core::to_value(res).unwrap())
});
let (client, rpc_client) = local::connect::<AddClient, _, _>(handler);
let fut = client
.clone()
.add(3, 4)
.and_then(move |res| client.add(res, 5))
.join(rpc_client)
.map(|(res, ())| {
assert_eq!(res, 12);
})
.map_err(|err| {
eprintln!("{:?}", err);
assert!(false);
});
tokio::run(fut);
}
#[test]
fn should_send_notification() {
crate::logger::init_log();
let mut handler = IoHandler::new();
handler.add_notification("completed", |params: Params| {
let (success,) = params.parse::<(bool,)>().expect("expected to receive one boolean");
assert_eq!(success, true);
});
let (client, rpc_client) = local::connect::<AddClient, _, _>(handler);
let fut = client
.clone()
.completed(true)
.map(move |()| drop(client))
.join(rpc_client)
.map(|_| ())
.map_err(|err| {
eprintln!("{:?}", err);
assert!(false);
});
tokio::run(fut);
}
#[test]
fn should_handle_subscription() {
crate::logger::init_log();
// given
let mut handler = PubSubHandler::<local::LocalMeta, _>::default();
let called = Arc::new(AtomicBool::new(false));
let called2 = called.clone();
handler.add_subscription(
"hello",
("subscribe_hello", |params, _meta, subscriber: Subscriber| {
assert_eq!(params, core::Params::None);
let sink = subscriber
.assign_id(SubscriptionId::Number(5))
.expect("assigned subscription id");
std::thread::spawn(move || {
for i in 0..3 {
std::thread::sleep(std::time::Duration::from_millis(100));
let value = serde_json::json!({
"subscription": 5,
"result": vec![i],
});
sink.notify(serde_json::from_value(value).unwrap())
.wait()
.expect("sent notification");
}
});
}),
("unsubscribe_hello", move |id, _meta| {
// Should be called because session is dropped.
called2.store(true, Ordering::SeqCst);
assert_eq!(id, SubscriptionId::Number(5));
future::ok(core::Value::Bool(true))
}),
);
// when
let (client, rpc_client) = local::connect_with_pubsub::<TypedClient, _>(handler);
let received = Arc::new(std::sync::Mutex::new(vec![]));
let r2 = received.clone();
let fut = client
.subscribe::<_, (u32,)>("subscribe_hello", (), "hello", "unsubscribe_hello", "u32")
.and_then(|stream| {
stream
.into_future()
.map(move |(result, _)| {
drop(client);
r2.lock().unwrap().push(result.unwrap());
})
.map_err(|_| {
panic!("Expected message not received.");
})
})
.join(rpc_client)
.map(|(res, _)| {
log::info!("ok {:?}", res);
})
.map_err(|err| {
log::error!("err {:?}", err);
});
tokio::run(fut);
assert_eq!(called.load(Ordering::SeqCst), true);
assert!(
!received.lock().unwrap().is_empty(),
"Expected at least one received item."
);
}
}
mod.rs
},
}
}
}
impl GLContextTrait for GLContext {
fn get_attributes(&self) -> GLContextAttributes {
todo!()
}
// This does not correctly handle unsetting a window.
fn set_window(
&mut self,
window: Option<&impl raw_window_handle::HasRawWindowHandle>,
) -> Result<(), SetWindowError> {
use raw_window_handle::*;
unsafe {
let window_handle = window
.map(|w| match w.raw_window_handle() {
RawWindowHandle::Windows(handle) => handle.hwnd as HWND,
_ => unreachable!(),
})
.unwrap();
let window_device_context = if let Some(_window) = window {
if let Some(current_device_context) = self.device_context {
ReleaseDC(window_handle, current_device_context);
}
let device_context = GetDC(window_handle);
self.device_context = Some(device_context);
device_context
} else {
std::ptr::null_mut() as HDC
};
let pixel_format_descriptor: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
// This will error if the window was previously set with an incompatible
// pixel format.
if SetPixelFormat(
window_device_context,
self.pixel_format_id,
&pixel_format_descriptor,
) == 0
{
return Err(SetWindowError::MismatchedPixelFormat);
}
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr)).unwrap();
            // self.set_vsync(self.vsync).unwrap(); // Every time a device context is requested, vsync must be updated.
self.current_window = if let Some(_window) = window {
Some(window_handle)
} else {
None
};
self.set_vsync(self.vsync).unwrap();
}
Ok(())
}
// Is this behavior correct? Does it really work if called from another thread?
fn make_current(&mut self) -> Result<(), std::io::Error> {
    unsafe {
        let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
        error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
    }
}
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
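// Usage sketch for the loader above (hypothetical caller; any loader with a
// `load_with(|name| ...)` entry point, such as the bindings generated by the
// `gl` crate, works the same way):
//
//     let gl = gl::Gl::load_with(|name| context.get_proc_address(name));
//
// The two-step lookup is deliberate: wglGetProcAddress only resolves
// extension entry points, while core OpenGL 1.1 functions must be fetched
// from opengl32.dll itself with GetProcAddress.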
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
panic!("Failed to release device context");
}
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class...
// * Which is used to setup a dummy OpenGL context...
// * Which is used to load OpenGL extensions...
// * Which are used to set more specific pixel formats and specify an OpenGL version...
// * Which is used to create another dummy window...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed.
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Or does it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
major_version_minimum,
WGL_CONTEXT_MINOR_VERSION_ARB,
minor_version_minimum,
WGL_CONTEXT_PROFILE_MASK_ARB,
WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0,
];
let opengl_context = wglCreateContextAttribsARB(
dummy_window_dc2,
0 as HGLRC, // An existing OpenGL context to share resources with. 0 means none.
context_attributes.as_ptr(),
);
error_if_null(opengl_context)?;
// Clean up all of our resources
// It's bad that these calls only occur if all the prior steps were successful.
// If a program were to recover from a failure to set up an OpenGL context these resources would be leaked.
wglMakeCurrent(dummy_window_dc, null_mut());
wglDeleteContext(dummy_opengl_context);
ReleaseDC(dummy_window, dummy_window_dc);
DestroyWindow(dummy_window);
error_if_false(wglMakeCurrent(dummy_window_dc2, opengl_context))?;
let opengl_module = LoadLibraryA("opengl32.dll\0".as_ptr() as *const i8);
// Load swap interval for Vsync
let function_pointer = wglGetProcAddress("wglSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
let function_pointer = wglGetProcAddress("wglGetSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglGetSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglGetSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
// Default to Vsync enabled
if !wglSwapIntervalEXT(1) {
return Err(Error::last_os_error());
}
// Will the dummy window be rendered to if no other window is made current?
ReleaseDC(dummy_window2, dummy_window_dc2);
DestroyWindow(dummy_window2);
// Disconnects from current window
// Uncommenting this line can cause intermittent crashes.
// It's unclear why, as this should just disconnect the dummy window context.
// Leaving it commented out mostly avoids the problem, though it doesn't
// eliminate it entirely.
//wglMakeCurrent(dummy_window_dc2, null_mut());
Ok(GLContext {
context_ptr: opengl_context,
pixel_format_id,
_pixel_format_descriptor: pfd,
opengl_module,
current_window: None,
vsync: VSync::On,
device_context: None,
})
}
}
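// Hedged usage sketch of the raw entry point (GLContextBuilder::build above
// is the intended path; the argument values are only illustrative):
//
//     // 24-bit color + 8-bit alpha (sums to 32), 24-bit depth, 8-bit
//     // stencil, 4x MSAA, OpenGL 3.3 core, sRGB-capable framebuffer.
//     let context = new_opengl_context(24, 8, 24, 8, 4, 3, 3, true)?;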
fn create_dummy_window(h_instance: HINSTANCE, class_name: &Vec<u16>) -> HWND {
let title = win32_string("kapp Placeholder");
unsafe {
// https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw
CreateWindowExW(
0, // extended style. Is this ok?
class_name.as_ptr(), // A class created by RegisterClass
title.as_ptr(), // window title
WS_CLIPSIBLINGS | WS_CLIPCHILDREN, // style
0, // x position
0, // y position
1, // width
1, // height
null_mut(), // parent window
null_mut(), // menu
h_instance, // Module handle
null_mut(), // Data sent to window
)
}
}
pub unsafe extern "system" fn kapp_gl_window_callback(
hwnd: HWND,
u_msg: UINT,
w_param: WPARAM,
l_param: LPARAM,
) -> LRESULT {
// DefWindowProcW is the default Window event handler.
DefWindowProcW(hwnd, u_msg, w_param, l_param)
}
fn wgl_get_proc_address(name: &str) -> Result<*const c_void, Error> {
let name = std::ffi::CString::new(name).unwrap();
let result = unsafe { wglGetProcAddress(name.as_ptr() as *const i8) as *const c_void };
error_if_null(result)?;
Ok(result)
}
// These definitions are based on the wglext.h header available here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglChoosePixelFormatARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglChoosePixelFormatARB(
hdc: HDC,
piAttribIList: *const c_int,
pfAttribFList: *const c_float,
nMaxFormats: c_uint,
piFormats: *mut c_int,
nNumFormats: *mut c_uint,
) -> c_int {
unsafe {
std::mem::transmute::<
_,
extern "system" fn(
HDC,
*const c_int,
*const c_float,
c_uint,
*mut c_int,
*mut c_uint,
) -> c_int,
>(wglChoosePixelFormatARB_ptr)(
hdc,
piAttribIList,
pfAttribFList,
nMaxFormats,
piFormats,
nNumFormats,
)
}
}
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglCreateContextAttribsARB_ptr: *const c_void = std::ptr::null();
// mod.rs
},
}
}
}
impl GLContextTrait for GLContext {
fn get_attributes(&self) -> GLContextAttributes {
todo!()
}
// This does not correctly handle unsetting a window.
fn set_window(
&mut self,
window: Option<&impl raw_window_handle::HasRawWindowHandle>,
) -> Result<(), SetWindowError> {
use raw_window_handle::*;
unsafe {
let window_handle = window
.map(|w| match w.raw_window_handle() {
RawWindowHandle::Windows(handle) => handle.hwnd as HWND,
_ => unreachable!(),
})
.unwrap();
let window_device_context = if let Some(_window) = window {
if let Some(current_device_context) = self.device_context {
ReleaseDC(window_handle, current_device_context);
}
let device_context = GetDC(window_handle);
self.device_context = Some(device_context);
device_context
} else {
std::ptr::null_mut() as HDC
};
let pixel_format_descriptor: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
// This will error if the window was previously set with an incompatible
// pixel format.
if SetPixelFormat(
window_device_context,
self.pixel_format_id,
&pixel_format_descriptor,
) == 0
{
return Err(SetWindowError::MismatchedPixelFormat);
}
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr)).unwrap();
// self.set_vsync(self.vsync).unwrap(); // Every time a device context is requested, vsync must be updated.
self.current_window = if let Some(_window) = window {
Some(window_handle)
} else {
None
};
self.set_vsync(self.vsync).unwrap();
}
Ok(())
}
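// Device-context bookkeeping: every GetDC must eventually be paired with a
// ReleaseDC for the same window, which is why the branch above releases the
// previously held DC before acquiring a new one. The invariant, in miniature:
//
//     let hdc = GetDC(hwnd);   // borrow the window's DC
//     /* ... use hdc ... */
//     ReleaseDC(hwnd, hdc);    // hand it back exactly once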
// Is this behavior correct? Does it really work if called from another thread?
fn make_current(&mut self) -> Result<(), std::io::Error> {
unsafe {
let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
}
}
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
    panic!("Failed to release device context");
}
}
}
}
}
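// Design note: panicking in Drop is a deliberate fail-loud choice, but it
// aborts the process if the drop happens to run during a panic unwind. A
// hedged, quieter alternative (not what this module does) would log and
// carry on:
//
//     if wglDeleteContext(self.context_ptr) == 0 {
//         eprintln!("warning: failed to delete OpenGL context");
//     }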
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class...
// * Which is used to setup a dummy OpenGL context...
// * Which is used to load OpenGL extensions...
// * Which are used to set more specific pixel formats and specify an OpenGL version...
// * Which is used to create another dummy window...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed.
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Or does it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
major_version_minimum,
WGL_CONTEXT_MINOR_VERSION_ARB,
minor_version_minimum,
WGL_CONTEXT_PROFILE_MASK_ARB,
WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0,
];
let opengl_context = wglCreateContextAttribsARB(
dummy_window_dc2,
0 as HGLRC, // An existing OpenGL context to share resources with. 0 means none.
context_attributes.as_ptr(),
);
error_if_null(opengl_context)?;
// Clean up all of our resources
// It's bad that these calls only occur if all the prior steps were successful.
// If a program were to recover from a failure to set up an OpenGL context these resources would be leaked.
wglMakeCurrent(dummy_window_dc, null_mut());
wglDeleteContext(dummy_opengl_context);
ReleaseDC(dummy_window, dummy_window_dc);
DestroyWindow(dummy_window);
error_if_false(wglMakeCurrent(dummy_window_dc2, opengl_context))?;
let opengl_module = LoadLibraryA("opengl32.dll\0".as_ptr() as *const i8);
// Load swap interval for Vsync
let function_pointer = wglGetProcAddress("wglSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
let function_pointer = wglGetProcAddress("wglGetSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglGetSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglGetSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
// Default to Vsync enabled
if !wglSwapIntervalEXT(1) {
return Err(Error::last_os_error());
}
// Will the dummy window be rendered to if no other window is made current?
ReleaseDC(dummy_window2, dummy_window_dc2);
DestroyWindow(dummy_window2);
// Disconnects from current window
// Uncommenting this line can cause intermittent crashes.
// It's unclear why, as this should just disconnect the dummy window context.
// Leaving it commented out mostly avoids the problem, though it doesn't
// eliminate it entirely.
//wglMakeCurrent(dummy_window_dc2, null_mut());
Ok(GLContext {
context_ptr: opengl_context,
pixel_format_id,
_pixel_format_descriptor: pfd,
opengl_module,
current_window: None,
vsync: VSync::On,
device_context: None,
})
}
}
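// Hedged sketch (not used by this module): the cleanup-on-failure gap noted
// inside new_opengl_context could be closed with a small RAII guard so a
// dummy window's DC is released on every early-return path.
#[allow(dead_code)]
struct ScopedDc {
    hwnd: HWND,
    hdc: HDC,
}
impl Drop for ScopedDc {
    fn drop(&mut self) {
        // Pairs the GetDC done by whoever created this guard.
        unsafe {
            ReleaseDC(self.hwnd, self.hdc);
        }
    }
}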
fn create_dummy_window(h_instance: HINSTANCE, class_name: &Vec<u16>) -> HWND {
let title = win32_string("kapp Placeholder");
unsafe {
// https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw
CreateWindowExW(
0, // extended style. Is this ok?
class_name.as_ptr(), // A class created by RegisterClass
title.as_ptr(), // window title
WS_CLIPSIBLINGS | WS_CLIPCHILDREN, // style
0, // x position
0, // y position
1, // width
1, // height
null_mut(), // parent window
null_mut(), // menu
h_instance, // Module handle
null_mut(), // Data sent to window
)
}
}
pub unsafe extern "system" fn kapp_gl_window_callback(
hwnd: HWND,
u_msg: UINT,
w_param: WPARAM,
l_param: LPARAM,
) -> LRESULT {
// DefWindowProcW is the default Window event handler.
DefWindowProcW(hwnd, u_msg, w_param, l_param)
}
fn wgl_get_proc_address(name: &str) -> Result<*const c_void, Error> {
let name = std::ffi::CString::new(name).unwrap();
let result = unsafe { wglGetProcAddress(name.as_ptr() as *const i8) as *const c_void };
error_if_null(result)?;
Ok(result)
}
// These definitions are based on the wglext.h header available here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglChoosePixelFormatARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglChoosePixelFormatARB(
hdc: HDC,
piAttribIList: *const c_int,
pfAttribFList: *const c_float,
nMaxFormats: c_uint,
piFormats: *mut c_int,
nNumFormats: *mut c_uint,
) -> c_int {
unsafe {
std::mem::transmute::<
_,
extern "system" fn(
HDC,
*const c_int,
*const c_float,
c_uint,
*mut c_int,
*mut c_uint,
) -> c_int,
>(wglChoosePixelFormatARB_ptr)(
hdc,
piAttribIList,
pfAttribFList,
nMaxFormats,
piFormats,
nNumFormats,
)
}
}
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglCreateContextAttribsARB_ptr: *const c_void = std::ptr::null();
// mod.rs
},
}
}
}
impl GLContextTrait for GLContext {
fn get_attributes(&self) -> GLContextAttributes {
todo!()
}
// This does not correctly handle unsetting a window.
fn set_window(
&mut self,
window: Option<&impl raw_window_handle::HasRawWindowHandle>,
) -> Result<(), SetWindowError> {
use raw_window_handle::*;
unsafe {
let window_handle = window
.map(|w| match w.raw_window_handle() {
RawWindowHandle::Windows(handle) => handle.hwnd as HWND,
_ => unreachable!(),
})
.unwrap();
let window_device_context = if let Some(_window) = window {
if let Some(current_device_context) = self.device_context {
ReleaseDC(window_handle, current_device_context);
}
let device_context = GetDC(window_handle);
self.device_context = Some(device_context);
device_context
} else {
std::ptr::null_mut() as HDC
};
let pixel_format_descriptor: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
// This will error if the window was previously set with an incompatible
// pixel format.
if SetPixelFormat(
window_device_context,
self.pixel_format_id,
&pixel_format_descriptor,
) == 0
{
return Err(SetWindowError::MismatchedPixelFormat);
}
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr)).unwrap();
// self.set_vsync(self.vsync).unwrap(); // Every time a device context is requested, vsync must be updated.
self.current_window = if let Some(_window) = window {
Some(window_handle)
} else {
None
};
self.set_vsync(self.vsync).unwrap();
}
Ok(())
}
// Is this behavior correct? Does it really work if called from another thread?
fn make_current(&mut self) -> Result<(), std::io::Error> {
unsafe {
let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
}
}
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
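// For reference, the intervals passed to wglSwapIntervalEXT above:
//    0 -> present immediately (vsync off)
//    1 -> wait for one vertical blank per swap (vsync on)
//   -1 -> adaptive vsync via WGL_EXT_swap_control_tear (tears only when a
//         frame misses the blank)
// Larger positive values wait that many blanks, e.g. 2 caps a 60 Hz display
// at an effective 30 FPS.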
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
panic!("Failed to release device context");
}
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class...
// * Which is used to setup a dummy OpenGL context...
// * Which is used to load OpenGL extensions...
// * Which are used to set more specific pixel formats and specify an OpenGL version...
// * Which is used to create another dummy window...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed.
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Or does it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
major_version_minimum,
WGL_CONTEXT_MINOR_VERSION_ARB,
minor_version_minimum,
WGL_CONTEXT_PROFILE_MASK_ARB,
WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0,
];
let opengl_context = wglCreateContextAttribsARB(
dummy_window_dc2,
0 as HGLRC, // An existing OpenGL context to share resources with. 0 means none.
context_attributes.as_ptr(),
);
error_if_null(opengl_context)?;
// Clean up all of our resources
// It's bad that these calls only occur if all the prior steps were successful.
// If a program were to recover from a failure to set up an OpenGL context these resources would be leaked.
wglMakeCurrent(dummy_window_dc, null_mut());
wglDeleteContext(dummy_opengl_context);
ReleaseDC(dummy_window, dummy_window_dc);
DestroyWindow(dummy_window);
error_if_false(wglMakeCurrent(dummy_window_dc2, opengl_context))?;
let opengl_module = LoadLibraryA("opengl32.dll\0".as_ptr() as *const i8);
// Load swap interval for Vsync
let function_pointer = wglGetProcAddress("wglSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
let function_pointer = wglGetProcAddress("wglGetSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglGetSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglGetSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
// Default to Vsync enabled
if !wglSwapIntervalEXT(1) {
return Err(Error::last_os_error());
}
// Will the dummy window be rendered to if no other window is made current?
ReleaseDC(dummy_window2, dummy_window_dc2);
DestroyWindow(dummy_window2);
// Disconnects from current window
// Uncommenting this line can cause intermittent crashes.
// It's unclear why, as this should just disconnect the dummy window context.
// Leaving it commented out mostly avoids the problem, though it doesn't
// eliminate it entirely.
//wglMakeCurrent(dummy_window_dc2, null_mut());
Ok(GLContext {
context_ptr: opengl_context,
pixel_format_id,
_pixel_format_descriptor: pfd,
opengl_module,
current_window: None,
vsync: VSync::On,
device_context: None,
})
}
}
fn create_dummy_window(h_instance: HINSTANCE, class_name: &Vec<u16>) -> HWND {
let title = win32_string("kapp Placeholder");
unsafe {
// https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw
CreateWindowExW(
0, // extended style. Is this ok?
class_name.as_ptr(), // A class created by RegisterClass
title.as_ptr(), // window title
WS_CLIPSIBLINGS | WS_CLIPCHILDREN, // style
0, // x position
0, // y position
1, // width
1, // height
null_mut(), // parent window
null_mut(), // menu
h_instance, // Module handle
null_mut(), // Data sent to window
)
}
}
pub unsafe extern "system" fn kapp_gl_window_callback(
hwnd: HWND,
u_msg: UINT,
w_param: WPARAM,
l_param: LPARAM,
) -> LRESULT {
// DefWindowProcW is the default Window event handler.
DefWindowProcW(hwnd, u_msg, w_param, l_param)
}
fn wgl_get_proc_address(name: &str) -> Result<*const c_void, Error> {
let name = std::ffi::CString::new(name).unwrap();
let result = unsafe { wglGetProcAddress(name.as_ptr() as *const i8) as *const c_void };
error_if_null(result)?;
Ok(result)
}
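// Note: wglGetProcAddress only returns usable pointers while a GL context is
// current on the calling thread, which is why new_opengl_context calls this
// helper only after wglMakeCurrent has succeeded. Sketch of the failure mode:
//
//     // With no context current this returns null and the helper errors:
//     // let p = wgl_get_proc_address("wglChoosePixelFormatARB");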
// These definitions are based on the wglext.h header available here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglChoosePixelFormatARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglChoosePixelFormatARB(
hdc: HDC,
piAttribIList: *const c_int,
pfAttribFList: *const c_float,
nMaxFormats: c_uint,
piFormats: *mut c_int,
nNumFormats: *mut c_uint,
) -> c_int {
unsafe {
std::mem::transmute::<
_,
extern "system" fn(
HDC,
*const c_int,
*const c_float,
c_uint,
*mut c_int,
*mut c_uint,
) -> c_int,
>(wglChoosePixelFormatARB_ptr)(
hdc,
piAttribIList,
pfAttribFList,
nMaxFormats,
piFormats,
nNumFormats,
)
}
}
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglCreateContextAttribsARB_ptr: *const c_void = std::ptr::null();
// mod.rs
},
}
}
}
impl GLContextTrait for GLContext {
fn get_attributes(&self) -> GLContextAttributes {
todo!()
}
// This does not correctly handle unsetting a window.
fn set_window(
&mut self,
window: Option<&impl raw_window_handle::HasRawWindowHandle>,
) -> Result<(), SetWindowError> {
use raw_window_handle::*;
unsafe {
let window_handle = window
.map(|w| match w.raw_window_handle() {
    RawWindowHandle::Windows(handle) => handle.hwnd as HWND,
    _ => unreachable!(),
})
.unwrap();
let window_device_context = if let Some(_window) = window {
if let Some(current_device_context) = self.device_context {
ReleaseDC(window_handle, current_device_context);
}
let device_context = GetDC(window_handle);
self.device_context = Some(device_context);
device_context
} else {
std::ptr::null_mut() as HDC
};
let pixel_format_descriptor: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
// This will error if the window was previously set with an incompatible
// pixel format.
if SetPixelFormat(
window_device_context,
self.pixel_format_id,
&pixel_format_descriptor,
) == 0
{
return Err(SetWindowError::MismatchedPixelFormat);
}
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr)).unwrap();
// self.set_vsync(self.vsync).unwrap(); // Every time a device context is requested, vsync must be updated.
self.current_window = if let Some(_window) = window {
Some(window_handle)
} else {
None
};
self.set_vsync(self.vsync).unwrap();
}
Ok(())
}
// Is this behavior correct? Does it really work if called from another thread?
fn make_current(&mut self) -> Result<(), std::io::Error> {
unsafe {
let window_device_context = self.device_context.unwrap_or(std::ptr::null_mut());
error_if_false(wglMakeCurrent(window_device_context, self.context_ptr))
}
}
fn swap_buffers(&mut self) {
if let Some(device_context) = self.device_context {
unsafe {
SwapBuffers(device_context);
}
}
}
fn resize(&mut self) {}
// wglSwapIntervalEXT sets VSync for the window bound to the current context.
// However here we treat Vsync as a setting on the GLContext,
// so whenever a window is bound we update the GL Context.
fn set_vsync(&mut self, vsync: VSync) -> Result<(), Error> {
if self.current_window.is_some() {
// This call to swap_buffers seems to prevent an issue on Macbooks
// where the setting wouldn't take effect.
// I suspect wglSwapIntervalEXT doesn't get set if a lock of some
// sort is held on back/front buffers, so rendering here ensures that's unlikely
// to happen.
self.swap_buffers();
if match vsync {
VSync::Off => wglSwapIntervalEXT(0),
VSync::On => wglSwapIntervalEXT(1),
VSync::Adaptive => wglSwapIntervalEXT(-1),
VSync::Other(i) => wglSwapIntervalEXT(i),
} == false
{
Err(Error::last_os_error())
} else {
self.vsync = vsync;
Ok(())
}
} else {
Ok(()) // Nothing happens, should an error be returned?
}
}
fn get_vsync(&self) -> VSync {
match wglGetSwapIntervalEXT() {
0 => VSync::Off,
1 => VSync::On,
-1 => VSync::Adaptive,
i => VSync::Other(i),
}
}
fn get_proc_address(&self, address: &str) -> *const core::ffi::c_void {
get_proc_address_inner(self.opengl_module, address)
}
}
fn get_proc_address_inner(opengl_module: HMODULE, address: &str) -> *const core::ffi::c_void {
unsafe {
let name = std::ffi::CString::new(address).unwrap();
let mut result = wglGetProcAddress(name.as_ptr() as *const i8) as *const std::ffi::c_void;
if result.is_null() {
// Functions that were part of OpenGL1 need to be loaded differently.
result = GetProcAddress(opengl_module, name.as_ptr() as *const i8)
as *const std::ffi::c_void;
}
/*
if result.is_null() {
println!("FAILED TO LOAD: {}", address);
} else {
println!("Loaded: {} {:?}", address, result);
}
*/
result
}
}
impl Drop for GLContext {
fn drop(&mut self) {
unsafe {
if wglDeleteContext(self.context_ptr) == 0 {
panic!("Failed to delete OpenGL Context");
}
if let Some(hdc) = self.device_context {
if ReleaseDC(self.current_window.unwrap(), hdc) == 0 {
panic!("Failed to release device context");
}
}
}
}
}
impl GLContextBuilder {
pub fn build(&self) -> Result<GLContext, ()> {
Ok(new_opengl_context(
self.gl_attributes.color_bits,
self.gl_attributes.alpha_bits,
self.gl_attributes.depth_bits,
self.gl_attributes.stencil_bits,
self.gl_attributes.msaa_samples,
self.gl_attributes.major_version,
self.gl_attributes.minor_version,
self.gl_attributes.srgb,
)
.unwrap())
}
}
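// Hedged usage sketch (assumes a `GLContext::new()`-style constructor
// returning a GLContextBuilder elsewhere in the crate):
//
//     let mut context = GLContext::new().build().expect("context creation");
//     context.set_window(Some(&window)).unwrap();
//     loop {
//         // ... issue GL calls ...
//         context.swap_buffers();
//     }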
/// Creates an OpenGL context.
/// h_instance is the parent module's h_instance
/// class_name is the parent class's name
/// panic_if_fail will crash the program with a useful callstack if something goes wrong
/// color bits and alpha bits should add up to 32
pub fn new_opengl_context(
color_bits: u8,
alpha_bits: u8,
depth_bits: u8,
stencil_bits: u8,
msaa_samples: u8,
major_version: u8,
minor_version: u8,
srgb: bool,
) -> Result<GLContext, Error> {
// This function performs the following steps:
// * First register the window class.
// * Then create a dummy_window with that class...
// * Which is used to setup a dummy OpenGL context...
// * Which is used to load OpenGL extensions...
// * Which are used to set more specific pixel formats and specify an OpenGL version...
// * Which is used to create another dummy window...
// * Which is used to create the final OpenGL context!
unsafe {
// Register the window class.
let window_class_name = win32_string("kapp_gl_window");
let h_instance = GetModuleHandleW(null_mut());
let window_class = WNDCLASSW {
style: 0,
lpfnWndProc: Some(kapp_gl_window_callback),
cbClsExtra: 0,
cbWndExtra: 0,
hInstance: h_instance,
hIcon: null_mut(),
hCursor: null_mut(), // This may not be what is desired. Potentially this makes it annoying to change the cursor later.
hbrBackground: null_mut(),
lpszMenuName: null_mut(),
lpszClassName: window_class_name.as_ptr(),
};
RegisterClassW(&window_class);
// Then create a dummy window
let h_instance = GetModuleHandleW(null_mut());
let dummy_window = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window)?;
// DC stands for 'device context'
// Definition of a device context:
// https://docs.microsoft.com/en-us/windows/win32/gdi/device-contexts
let dummy_window_dc = GetDC(dummy_window);
error_if_null(dummy_window_dc)?;
// Create a dummy PIXELFORMATDESCRIPTOR (PFD).
// This PFD is based on the recommendations from here:
// https://www.khronos.org/opengl/wiki/Creating_an_OpenGL_Context_(WGL)#Create_a_False_Context
let mut dummy_pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
dummy_pfd.nSize = size_of::<PIXELFORMATDESCRIPTOR>() as u16;
dummy_pfd.nVersion = 1;
dummy_pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
dummy_pfd.iPixelType = PFD_TYPE_RGBA as u8;
dummy_pfd.cColorBits = 32;
dummy_pfd.cAlphaBits = 8;
dummy_pfd.cDepthBits = 24;
let dummy_pixel_format_id = ChoosePixelFormat(dummy_window_dc, &dummy_pfd);
error_if_false(dummy_pixel_format_id)?;
error_if_false(SetPixelFormat(
dummy_window_dc,
dummy_pixel_format_id,
&dummy_pfd,
))?;
// Create the dummy OpenGL context.
let dummy_opengl_context = wglCreateContext(dummy_window_dc);
error_if_null(dummy_opengl_context)?;
error_if_false(wglMakeCurrent(dummy_window_dc, dummy_opengl_context))?;
// Load the function to choose a pixel format.
wglChoosePixelFormatARB_ptr = wgl_get_proc_address("wglChoosePixelFormatARB")?;
// Load the function to create an OpenGL context with extra attributes.
wglCreateContextAttribsARB_ptr = wgl_get_proc_address("wglCreateContextAttribsARB")?;
// Create the second dummy window.
let dummy_window2 = create_dummy_window(h_instance, &window_class_name);
error_if_null(dummy_window2)?;
// DC is 'device context'
let dummy_window_dc2 = GetDC(dummy_window2);
error_if_null(dummy_window_dc2)?;
// Setup the actual pixel format we'll use.
// Later this is where we'll specify pixel format parameters.
// Documentation about these flags here:
// https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_pixel_format.txt
let pixel_attributes = vec![
WGL_DRAW_TO_WINDOW_ARB,
TRUE as i32,
WGL_SUPPORT_OPENGL_ARB,
TRUE as i32,
WGL_DOUBLE_BUFFER_ARB,
TRUE as i32,
WGL_PIXEL_TYPE_ARB,
WGL_TYPE_RGBA_ARB,
WGL_ACCELERATION_ARB,
WGL_FULL_ACCELERATION_ARB,
WGL_COLOR_BITS_ARB,
color_bits as i32,
WGL_ALPHA_BITS_ARB,
alpha_bits as i32,
WGL_DEPTH_BITS_ARB,
depth_bits as i32,
WGL_STENCIL_BITS_ARB,
stencil_bits as i32,
WGL_SAMPLE_BUFFERS_ARB,
1,
WGL_SAMPLES_ARB,
msaa_samples as i32,
WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB,
if srgb { TRUE as i32 } else { FALSE as i32 },
0,
];
let mut pixel_format_id = 0;
let mut number_of_formats = 0;
error_if_false(wglChoosePixelFormatARB(
dummy_window_dc2,
pixel_attributes.as_ptr(),
null_mut(),
1,
&mut pixel_format_id,
&mut number_of_formats,
))?;
error_if_false(number_of_formats as i32)?; // error_if_false just errors if the argument is 0, which is what we need here
// PFD stands for 'pixel format descriptor'
// It's unclear why this call to DescribePixelFormat is needed.
// DescribePixelFormat fills the pfd with a description of the pixel format.
// But why does this window need the same pixel format as the previous one?
// Or does it just need a valid pixel format?
let mut pfd: PIXELFORMATDESCRIPTOR = std::mem::zeroed();
DescribePixelFormat(
dummy_window_dc2,
pixel_format_id,
size_of::<PIXELFORMATDESCRIPTOR>() as u32,
&mut pfd,
);
SetPixelFormat(dummy_window_dc2, pixel_format_id, &pfd);
// Finally we can create the OpenGL context!
// Need to allow for choosing major and minor version.
let major_version_minimum = major_version as i32;
let minor_version_minimum = minor_version as i32;
let context_attributes = [
WGL_CONTEXT_MAJOR_VERSION_ARB,
major_version_minimum,
WGL_CONTEXT_MINOR_VERSION_ARB,
minor_version_minimum,
WGL_CONTEXT_PROFILE_MASK_ARB,
WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0,
];
let opengl_context = wglCreateContextAttribsARB(
dummy_window_dc2,
0 as HGLRC, // An existing OpenGL context to share resources with. 0 means none.
context_attributes.as_ptr(),
);
error_if_null(opengl_context)?;
// Clean up all of our resources
// It's bad that these calls only occur if all the prior steps were successful.
// If a program were to recover from a failure to set up an OpenGL context these resources would be leaked.
wglMakeCurrent(dummy_window_dc, null_mut());
wglDeleteContext(dummy_opengl_context);
ReleaseDC(dummy_window, dummy_window_dc);
DestroyWindow(dummy_window);
error_if_false(wglMakeCurrent(dummy_window_dc2, opengl_context))?;
let opengl_module = LoadLibraryA("opengl32.dll\0".as_ptr() as *const i8);
// Load swap interval for Vsync
let function_pointer = wglGetProcAddress("wglSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
let function_pointer = wglGetProcAddress("wglGetSwapIntervalEXT\0".as_ptr() as *const i8);
if function_pointer.is_null() {
println!("Could not find wglGetSwapIntervalEXT");
return Err(Error::last_os_error());
} else {
wglGetSwapIntervalEXT_ptr = function_pointer as *const std::ffi::c_void;
}
// Default to Vsync enabled
if !wglSwapIntervalEXT(1) {
return Err(Error::last_os_error());
}
// Will the dummy window be rendered to if no other window is made current?
ReleaseDC(dummy_window2, dummy_window_dc2);
DestroyWindow(dummy_window2);
// Disconnects from current window
// Uncommenting this line can cause intermittent crashes.
// It's unclear why, as this should just disconnect the dummy window context.
// Leaving it commented out mostly avoids the problem, though it doesn't
// eliminate it entirely.
//wglMakeCurrent(dummy_window_dc2, null_mut());
Ok(GLContext {
context_ptr: opengl_context,
pixel_format_id,
_pixel_format_descriptor: pfd,
opengl_module,
current_window: None,
vsync: VSync::On,
device_context: None,
})
}
}
fn create_dummy_window(h_instance: HINSTANCE, class_name: &Vec<u16>) -> HWND {
let title = win32_string("kapp Placeholder");
unsafe {
// https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw
CreateWindowExW(
0, // extended style. Is this ok?
class_name.as_ptr(), // A class created by RegisterClass
title.as_ptr(), // window title
WS_CLIPSIBLINGS | WS_CLIPCHILDREN, // style
0, // x position
0, // y position
1, // width
1, // height
null_mut(), // parent window
null_mut(), // menu
h_instance, // Module handle
null_mut(), // Data sent to window
)
}
}
pub unsafe extern "system" fn kapp_gl_window_callback(
hwnd: HWND,
u_msg: UINT,
w_param: WPARAM,
l_param: LPARAM,
) -> LRESULT {
// DefWindowProcW is the default Window event handler.
DefWindowProcW(hwnd, u_msg, w_param, l_param)
}
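// The callback above can stay minimal because the dummy windows are never
// shown and never pump user input; DefWindowProcW supplies the default
// handling that window creation still requires. A real application window
// would intercept messages before deferring, e.g.:
//
//     // match u_msg {
//     //     WM_CLOSE => { /* shut down */ 0 }
//     //     _ => DefWindowProcW(hwnd, u_msg, w_param, l_param),
//     // }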
fn wgl_get_proc_address(name: &str) -> Result<*const c_void, Error> {
let name = std::ffi::CString::new(name).unwrap();
let result = unsafe { wglGetProcAddress(name.as_ptr() as *const i8) as *const c_void };
error_if_null(result)?;
Ok(result)
}
// These definitions are based on the wglext.h header available here:
// https://www.khronos.org/registry/OpenGL/api/GL/wglext.h
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglChoosePixelFormatARB_ptr: *const c_void = std::ptr::null();
#[allow(non_snake_case, non_upper_case_globals)]
fn wglChoosePixelFormatARB(
hdc: HDC,
piAttribIList: *const c_int,
pfAttribFList: *const c_float,
nMaxFormats: c_uint,
piFormats: *mut c_int,
nNumFormats: *mut c_uint,
) -> c_int {
unsafe {
std::mem::transmute::<
_,
extern "system" fn(
HDC,
*const c_int,
*const c_float,
c_uint,
*mut c_int,
*mut c_uint,
) -> c_int,
>(wglChoosePixelFormatARB_ptr)(
hdc,
piAttribIList,
pfAttribFList,
nMaxFormats,
piFormats,
nNumFormats,
)
}
}
#[allow(non_snake_case, non_upper_case_globals)]
static mut wglCreateContextAttribsARB_ptr: *const c_void = std::ptr::null();
// facade.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
}
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) }
}
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
    return Ok(to_value(repos)?);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
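// Sketch of the JSON this returns for one configured repository (shape
// mirrors the serde round-trip test below; values are illustrative):
//
//     [{
//         "repo_url": "fuchsia-pkg://example.com",
//         "root_keys": [{ "type": "ed25519", "value": "00" }],
//         "mirrors": [{ "mirror_url": "http://example.org/", "subscribe": true }],
//         "update_package_url": "fuchsia-pkg://update.example.com/update",
//         "root_version": 1,
//         "root_threshold": 1
//     }]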
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
}
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
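// fetch_repos above follows the standard FIDL iterator pattern: call Next()
// until the server answers with an empty batch. Wire-level sketch (the batch
// size of 5 matches the mock below; a real server may chunk differently):
//
//     Next() -> [repo1, ..., repo5]
//     Next() -> [repo6]
//     Next() -> []            // empty chunk terminates the loop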
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn serde_repo_configuration() {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to_value(repo_test.clone()).unwrap()).await.unwrap();
assert_matches!(from_value(status).unwrap(), RepositoryOutput::Success)
};
join!(repository_manager, test);
}
}
| {
let return_value = to_value(&repos)?;
return Ok(return_value);
} | conditional_block |
facade.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
} | }
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
let return_value = to_value(&repos)?;
return Ok(return_value);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
}
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
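// Each raw FIDL repository record is validated into the typed `RepositoryConfig`
// via `try_from` above; a single malformed entry fails the whole listing.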
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn serde_repo_configuration() {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to_value(repo_test.clone()).unwrap()).await.unwrap();
assert_matches!(from_value(status).unwrap(), RepositoryOutput::Success)
};
join!(repository_manager, test);
}
} |
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) } | random_line_split |
facade.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
}
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) }
}
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
let return_value = to_value(&repos)?;
return Ok(return_value);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
}
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn | () {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to_value(repo_test.clone()).unwrap()).await.unwrap();
assert_matches!(from_value(status).unwrap(), RepositoryOutput::Success)
};
join!(repository_manager, test);
}
}
| serde_repo_configuration | identifier_name |
facade.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common_utils::common::get_proxy_or_connect;
use crate::repository_manager::types::RepositoryOutput;
use anyhow::{format_err, Error};
use fidl_fuchsia_pkg::{RepositoryManagerMarker, RepositoryManagerProxy};
use fidl_fuchsia_pkg_ext::RepositoryConfig;
use fuchsia_syslog::macros::fx_log_info;
use fuchsia_zircon as zx;
use parking_lot::RwLock;
use serde_json::{from_value, to_value, Value};
use std::convert::TryFrom;
/// Facade providing access to RepositoryManager interfaces.
#[derive(Debug)]
pub struct RepositoryManagerFacade {
proxy: RwLock<Option<RepositoryManagerProxy>>,
}
impl RepositoryManagerFacade {
pub fn new() -> Self {
Self { proxy: RwLock::new(None) }
}
#[cfg(test)]
fn new_with_proxy(proxy: RepositoryManagerProxy) -> Self {
Self { proxy: RwLock::new(Some(proxy)) }
}
fn proxy(&self) -> Result<RepositoryManagerProxy, Error> {
get_proxy_or_connect::<RepositoryManagerMarker>(&self.proxy)
}
/// Lists repositories using the repository_manager fidl service.
///
/// Returns a list containing repository info in the format of
/// RepositoryConfig.
pub async fn list_repo(&self) -> Result<Value, Error> {
match self.fetch_repos().await {
Ok(repos) => {
let return_value = to_value(&repos)?;
return Ok(return_value);
}
Err(err) => {
return Err(format_err!("Listing Repositories failed with error {:?}", err))
}
};
}
/// Add a new source to an existing repository.
///
/// params format uses RepositoryConfig, example:
/// {
/// "repo_url": "fuchsia-pkg://example.com",
/// "root_keys":[
/// {
/// "type":"ed25519",
/// "value":"00"
/// }],
/// "mirrors": [
/// {
/// "mirror_url": "http://example.org/",
/// "subscribe": true
/// }],
/// "update_package_url": "fuchsia-pkg://update.example.com/update",
/// "root_version": 1,
/// "root_threshold": 1,
/// }
pub async fn add(&self, args: Value) -> Result<Value, Error> |
/// Fetches repositories using repository_manager.list FIDL service.
async fn fetch_repos(&self) -> Result<Vec<RepositoryConfig>, anyhow::Error> {
let (iter, server_end) = fidl::endpoints::create_proxy()?;
self.proxy()?.list(server_end)?;
let mut repos = vec![];
loop {
let chunk = iter.next().await?;
if chunk.is_empty() {
break;
}
repos.extend(chunk);
}
repos
.into_iter()
.map(|repo| RepositoryConfig::try_from(repo).map_err(|e| anyhow::Error::from(e)))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common_utils::test::assert_value_round_trips_as;
use fidl_fuchsia_pkg::{RepositoryIteratorRequest, RepositoryManagerRequest};
use fidl_fuchsia_pkg_ext::{
MirrorConfigBuilder, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_syslog::macros::{fx_log_err, fx_log_info};
use fuchsia_url::pkg_url::{PkgUrl, RepoUrl};
use futures::{future::Future, join, StreamExt, TryFutureExt, TryStreamExt};
use http::Uri;
use matches::assert_matches;
use parking_lot::Mutex;
use serde_json::json;
use std::iter::FusedIterator;
fn make_test_repo_config() -> RepositoryConfig {
RepositoryConfigBuilder::new(RepoUrl::new("example.com".to_string()).expect("valid url"))
.add_root_key(RepositoryKey::Ed25519(vec![0u8]))
.add_mirror(
MirrorConfigBuilder::new("http://example.org".parse::<Uri>().unwrap())
.unwrap()
.subscribe(true)
.build(),
)
.update_package_url(
PkgUrl::parse("fuchsia-pkg://update.example.com/update").expect("valid PkgUrl"),
)
.build()
}
struct MockRepositoryManagerBuilder {
expected: Vec<Box<dyn FnOnce(RepositoryManagerRequest) + Send + 'static>>,
repos: Mutex<Vec<RepositoryConfig>>,
}
impl MockRepositoryManagerBuilder {
fn new() -> Self {
Self { expected: vec![], repos: Mutex::new(vec![]) }
}
fn push(mut self, request: impl FnOnce(RepositoryManagerRequest) + Send + 'static) -> Self {
self.expected.push(Box::new(request));
self
}
fn add_repository(self, repo_config: RepositoryConfig) -> Self {
self.repos.lock().push(repo_config);
self
}
fn expect_list_repository(self) -> Self {
let mut repos = self.repos.lock().clone().into_iter().map(|r| r.into());
self.push(move |req| match req {
RepositoryManagerRequest::List { iterator, .. } => {
let mut stream = iterator.into_stream().expect("list iterator into_stream");
// repos must be fused b/c the Next() fidl method should return an empty vector
// forever after iteration is complete
let _: &dyn FusedIterator<Item = _> = &repos;
fuchsia_async::Task::spawn(
async move {
while let Some(RepositoryIteratorRequest::Next { responder }) =
stream.try_next().await?
{
responder.send(&mut repos.by_ref().take(5)).expect("next send")
}
Ok(())
}
.unwrap_or_else(|e: anyhow::Error| {
fx_log_err!("error running list protocol: {:#}", e)
}),
)
.detach();
}
req => panic!("unexpected request: {:?}", req),
})
}
fn expect_add_repository(self, repo_add: RepositoryConfig) -> Self {
self.push(move |req| match req {
RepositoryManagerRequest::Add { repo, responder } => {
let new_repo = RepositoryConfig::try_from(repo).expect("valid repo config");
assert_eq!(new_repo, repo_add);
responder.send(&mut Ok(())).expect("send ok");
}
req => panic!("unexpected request: {:?}", req),
})
}
fn build(self) -> (RepositoryManagerFacade, impl Future<Output = ()>) {
let (proxy, mut stream) =
fidl::endpoints::create_proxy_and_stream::<RepositoryManagerMarker>().unwrap();
let fut = async move {
for expected in self.expected {
expected(stream.next().await.unwrap().unwrap());
}
assert_matches!(stream.next().await, None);
};
(RepositoryManagerFacade::new_with_proxy(proxy), fut)
}
}
#[test]
fn serde_repo_configuration() {
let repo_config = make_test_repo_config();
assert_value_round_trips_as(
repo_config,
json!(
{
"repo_url": "fuchsia-pkg://example.com",
"root_keys":[
{
"type":"ed25519",
"value":"00"
}],
"mirrors": [
{
"mirror_url": "http://example.org/",
"subscribe": true
}],
"update_package_url": "fuchsia-pkg://update.example.com/update",
"root_version": 1,
"root_threshold": 1,
}),
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn list_repository_ok() {
let (facade, repository_manager) = MockRepositoryManagerBuilder::new()
.add_repository(make_test_repo_config())
.expect_list_repository()
.build();
let test = async move {
let config = facade.list_repo().await.unwrap();
fx_log_info!("Repo listed: {:?}", config);
let mut repo_config: Vec<RepositoryConfig> = from_value(config).unwrap();
assert_eq!(repo_config.len(), 1);
let received_repo = repo_config.pop().unwrap();
let expected_pkg_url =
PkgUrl::parse("fuchsia-pkg://update.example.com/update").unwrap();
match received_repo.update_package_url() {
Some(u) => assert_eq!(u.to_string(), expected_pkg_url.to_string()),
None => fx_log_err!("update_package_url is empty."),
}
};
join!(repository_manager, test);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn add_repository_ok() {
let repo_test = make_test_repo_config();
let (facade, repository_manager) =
MockRepositoryManagerBuilder::new().expect_add_repository(repo_test.clone()).build();
let test = async move {
let status = facade.add(to_value(repo_test.clone()).unwrap()).await.unwrap();
assert_matches!(from_value(status).unwrap(), RepositoryOutput::Success)
};
join!(repository_manager, test);
}
}
| {
let add_request: RepositoryConfig = from_value(args)?;
fx_log_info!("Add Repo request received {:?}", add_request);
let res = self.proxy()?.add(add_request.into()).await?;
match res.map_err(zx::Status::from_raw) {
Ok(()) => Ok(to_value(RepositoryOutput::Success)?),
_ => Err(format_err!("Add repo errored with code {:?}", res)),
}
} | identifier_body |
transaction.rs | //! The `transaction` module provides functionality for creating log transactions.
use bincode::serialize;
use hash::{Hash, Hasher};
use serde::Serialize;
use sha2::Sha512;
use signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::pubkey::Pubkey;
use std::mem::size_of;
pub const SIGNED_DATA_OFFSET: usize = size_of::<Signature>();
pub const SIG_OFFSET: usize = 0;
pub const PUB_KEY_OFFSET: usize = size_of::<Signature>() + size_of::<u64>();
/// An instruction to execute a program under the `program_id` of `program_ids_index` with the
/// specified accounts and userdata
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Instruction {
/// The program code that executes this transaction is identified by the program_id.
/// this is an offset into the Transaction::program_ids field
pub program_ids_index: u8,
/// Indices into the keys array of which accounts to load
pub accounts: Vec<u8>,
/// Userdata to be stored in the account
pub userdata: Vec<u8>,
}
impl Instruction {
pub fn new<T: Serialize>(program_ids_index: u8, userdata: &T, accounts: Vec<u8>) -> Self {
let userdata = serialize(userdata).unwrap();
Instruction {
program_ids_index,
userdata,
accounts,
}
}
}
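// Example (sketch, mirroring the tests below): an instruction that runs the program
// at program_ids[0] against the accounts at account_keys[0] and account_keys[1]:
//     let ix = Instruction::new(0, &(), vec![0, 1]);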
/// An atomic transaction
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
/// A digital signature of `account_keys`, `program_ids`, `last_id`, `fee` and `instructions`, signed by `Pubkey`.
pub signature: Signature,
/// The `Pubkeys` that are executing this transaction's userdata. The meaning of each key is
/// program-specific.
/// * account_keys[0] - Typically this is the `caller` public key. `signature` is verified with account_keys[0].
/// In the future, which key pays the fee and which keys have signatures may become configurable.
/// * account_keys[1] - Typically this is the program context or the recipient of the tokens
pub account_keys: Vec<Pubkey>,
/// The ID of a recent ledger entry.
pub last_id: Hash,
/// The number of tokens paid for processing and storage of this transaction.
pub fee: u64,
/// Keys identifying programs in the instructions vector.
pub program_ids: Vec<Pubkey>,
/// Programs that will be executed in sequence and committed in one atomic transaction if all
/// succeed.
pub instructions: Vec<Instruction>,
}
impl Transaction {
pub fn | <T: Serialize>(
from_keypair: &Keypair,
transaction_keys: &[Pubkey],
program_id: Pubkey,
userdata: &T,
last_id: Hash,
fee: u64,
) -> Self {
let program_ids = vec![program_id];
let accounts = (0..=transaction_keys.len() as u8).collect();
let instructions = vec![Instruction::new(0, userdata, accounts)];
Self::new_with_instructions(
from_keypair,
transaction_keys,
last_id,
fee,
program_ids,
instructions,
)
}
/// Create a signed transaction
/// * `from_keypair` - The key used to sign the transaction. This key is stored as keys[0]
/// * `account_keys` - The keys for the transaction. These are the program state
/// instances or token recipient keys.
/// * `last_id` - The PoH hash.
/// * `fee` - The transaction fee.
/// * `program_ids` - The keys that identify programs used in the `instruction` vector.
/// * `instructions` - The programs and their arguments that the transaction will execute atomically
pub fn new_with_instructions(
from_keypair: &Keypair,
keys: &[Pubkey],
last_id: Hash,
fee: u64,
program_ids: Vec<Pubkey>,
instructions: Vec<Instruction>,
) -> Self {
let from = from_keypair.pubkey();
let mut account_keys = vec![from];
account_keys.extend_from_slice(keys);
let mut tx = Transaction {
signature: Signature::default(),
account_keys,
last_id: Hash::default(),
fee,
program_ids,
instructions,
};
tx.sign(from_keypair, last_id);
tx
}
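// Example (sketch, as exercised by `test_refs` below):
//     let tx = Transaction::new_with_instructions(
//         &keypair, &[key1, key2], Hash::default(), 0, vec![prog1, prog2], instructions);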
pub fn userdata(&self, instruction_index: usize) -> &[u8] {
&self.instructions[instruction_index].userdata
}
fn key_index(&self, instruction_index: usize, accounts_index: usize) -> Option<usize> {
self.instructions
.get(instruction_index)
.and_then(|instruction| instruction.accounts.get(accounts_index))
.map(|&account_keys_index| account_keys_index as usize)
}
pub fn key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> {
self.key_index(instruction_index, accounts_index)
.and_then(|account_keys_index| self.account_keys.get(account_keys_index))
}
pub fn signed_key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> {
match self.key_index(instruction_index, accounts_index) {
None => None,
Some(0) => self.account_keys.get(0),
Some(_) => None,
}
}
pub fn program_id(&self, instruction_index: usize) -> &Pubkey {
let program_ids_index = self.instructions[instruction_index].program_ids_index;
&self.program_ids[program_ids_index as usize]
}
/// Get the transaction data to sign.
pub fn get_sign_data(&self) -> Vec<u8> {
let mut data = serialize(&self.account_keys).expect("serialize account_keys");
let last_id_data = serialize(&self.last_id).expect("serialize last_id");
data.extend_from_slice(&last_id_data);
let fee_data = serialize(&self.fee).expect("serialize fee");
data.extend_from_slice(&fee_data);
let program_ids = serialize(&self.program_ids).expect("serialize program_ids");
data.extend_from_slice(&program_ids);
let instructions = serialize(&self.instructions).expect("serialize instructions");
data.extend_from_slice(&instructions);
data
}
/// Sign this transaction.
pub fn sign(&mut self, keypair: &Keypair, last_id: Hash) {
self.last_id = last_id;
let sign_data = self.get_sign_data();
self.signature = Signature::new(&keypair.sign::<Sha512>(&sign_data).to_bytes());
}
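// Note that `sign` stores `last_id` before computing the sign data, so the
// signature also commits to the ledger entry the transaction is anchored to.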
/// Verify only the transaction signature.
pub fn verify_signature(&self) -> bool {
warn!("transaction signature verification called");
self.signature
.verify(&self.from().as_ref(), &self.get_sign_data())
}
/// Verify that references in the instructions are valid
pub fn verify_refs(&self) -> bool {
for instruction in &self.instructions {
if (instruction.program_ids_index as usize) >= self.program_ids.len() {
return false;
}
for account_index in &instruction.accounts {
if (*account_index as usize) >= self.account_keys.len() {
return false;
}
}
}
true
}
pub fn from(&self) -> &Pubkey {
&self.account_keys[0]
}
// a hash of a slice of transactions only needs to hash the signatures
pub fn hash(transactions: &[Transaction]) -> Hash {
let mut hasher = Hasher::default();
transactions
.iter()
.for_each(|tx| hasher.hash(&tx.signature.as_ref()));
hasher.result()
}
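// Hashing only the signatures is sufficient here because each signature already
// commits to the full signed payload (account keys, last_id, fee, program ids,
// and instructions) via `get_sign_data`.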
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use signature::GenKeys;
#[test]
fn test_refs() {
let key = Keypair::new();
let key1 = Keypair::new().pubkey();
let key2 = Keypair::new().pubkey();
let prog1 = Keypair::new().pubkey();
let prog2 = Keypair::new().pubkey();
let instructions = vec![
Instruction::new(0, &(), vec![0, 1]),
Instruction::new(1, &(), vec![0, 2]),
];
let tx = Transaction::new_with_instructions(
&key,
&[key1, key2],
Default::default(),
0,
vec![prog1, prog2],
instructions,
);
assert!(tx.verify_refs());
assert_eq!(tx.key(0, 0), Some(&key.pubkey()));
assert_eq!(tx.signed_key(0, 0), Some(&key.pubkey()));
assert_eq!(tx.key(1, 0), Some(&key.pubkey()));
assert_eq!(tx.signed_key(1, 0), Some(&key.pubkey()));
assert_eq!(tx.key(0, 1), Some(&key1));
assert_eq!(tx.signed_key(0, 1), None);
assert_eq!(tx.key(1, 1), Some(&key2));
assert_eq!(tx.signed_key(1, 1), None);
assert_eq!(tx.key(2, 0), None);
assert_eq!(tx.signed_key(2, 0), None);
assert_eq!(tx.key(0, 2), None);
assert_eq!(tx.signed_key(0, 2), None);
assert_eq!(*tx.program_id(0), prog1);
assert_eq!(*tx.program_id(1), prog2);
}
#[test]
fn test_refs_invalid_program_id() {
let key = Keypair::new();
let instructions = vec![Instruction::new(1, &(), vec![])];
let tx = Transaction::new_with_instructions(
&key,
&[],
Default::default(),
0,
vec![],
instructions,
);
assert!(!tx.verify_refs());
}
#[test]
fn test_refs_invalid_account() {
let key = Keypair::new();
let instructions = vec![Instruction::new(0, &(), vec![1])];
let tx = Transaction::new_with_instructions(
&key,
&[],
Default::default(),
0,
vec![Default::default()],
instructions,
);
assert_eq!(*tx.program_id(0), Default::default());
assert!(!tx.verify_refs());
}
/// Detect binary changes in the serialized contract userdata, which could have a downstream
/// effect on SDKs and DApps
#[test]
fn test_sdk_serialize() {
let keypair = &GenKeys::new([0u8; 32]).gen_n_keypairs(1)[0];
let to = Pubkey::new(&[
1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4,
1, 1, 1,
]);
let program_id = Pubkey::new(&[
2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 8, 7, 6, 5, 4,
2, 2, 2,
]);
let tx = Transaction::new(
keypair,
&[keypair.pubkey(), to],
program_id,
&(1u8, 2u8, 3u8),
Hash::default(),
99,
);
assert_eq!(
serialize(&tx).unwrap(),
vec![
238, 228, 120, 18, 14, 44, 44, 74, 186, 124, 104, 174, 137, 227, 237, 157, 147, 37,
230, 74, 20, 48, 234, 36, 170, 60, 68, 184, 171, 240, 203, 18, 255, 110, 164, 67,
212, 206, 115, 182, 13, 90, 38, 215, 191, 51, 79, 183, 57, 102, 248, 221, 114, 72,
120, 66, 113, 146, 251, 102, 69, 187, 25, 8, 3, 0, 0, 0, 0, 0, 0, 0, 218, 65, 89,
124, 81, 87, 72, 141, 119, 36, 224, 63, 184, 216, 74, 55, 106, 67, 184, 244, 21,
24, 161, 28, 195, 135, 182, 105, 178, 238, 101, 134, 218, 65, 89, 124, 81, 87, 72,
141, 119, 36, 224, 63, 184, 216, 74, 55, 106, 67, 184, 244, 21, 24, 161, 28, 195,
135, 182, 105, 178, 238, 101, 134, 1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 9, 8, 7, 6, 5, 4, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3
],
);
}
}
| new | identifier_name |
transaction.rs | //! The `transaction` module provides functionality for creating log transactions.
use bincode::serialize;
use hash::{Hash, Hasher};
use serde::Serialize;
use sha2::Sha512;
use signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::pubkey::Pubkey;
use std::mem::size_of;
pub const SIGNED_DATA_OFFSET: usize = size_of::<Signature>();
pub const SIG_OFFSET: usize = 0;
pub const PUB_KEY_OFFSET: usize = size_of::<Signature>() + size_of::<u64>();
/// An instruction to execute a program under the `program_id` of `program_ids_index` with the
/// specified accounts and userdata
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Instruction {
/// The program code that executes this transaction is identified by the program_id.
/// this is an offset into the Transaction::program_ids field
pub program_ids_index: u8,
/// Indices into the keys array of which accounts to load
pub accounts: Vec<u8>,
/// Userdata to be stored in the account
pub userdata: Vec<u8>,
}
impl Instruction {
pub fn new<T: Serialize>(program_ids_index: u8, userdata: &T, accounts: Vec<u8>) -> Self {
let userdata = serialize(userdata).unwrap();
Instruction {
program_ids_index,
userdata,
accounts,
}
}
}
/// An atomic transaction
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
/// A digital signature of `account_keys`, `program_ids`, `last_id`, `fee` and `instructions`, signed by `Pubkey`.
pub signature: Signature,
/// The `Pubkeys` that are executing this transaction's userdata. The meaning of each key is
/// program-specific.
/// * account_keys[0] - Typically this is the `caller` public key. `signature` is verified with account_keys[0].
/// In the future, which key pays the fee and which keys have signatures may become configurable.
/// * account_keys[1] - Typically this is the program context or the recipient of the tokens
pub account_keys: Vec<Pubkey>,
/// The ID of a recent ledger entry.
pub last_id: Hash,
/// The number of tokens paid for processing and storage of this transaction.
pub fee: u64,
/// Keys identifying programs in the instructions vector.
pub program_ids: Vec<Pubkey>,
/// Programs that will be executed in sequence and committed in one atomic transaction if all
/// succeed.
pub instructions: Vec<Instruction>,
}
impl Transaction {
pub fn new<T: Serialize>(
from_keypair: &Keypair,
transaction_keys: &[Pubkey],
program_id: Pubkey,
userdata: &T,
last_id: Hash,
fee: u64,
) -> Self {
let program_ids = vec![program_id];
let accounts = (0..=transaction_keys.len() as u8).collect();
let instructions = vec![Instruction::new(0, userdata, accounts)];
Self::new_with_instructions(
from_keypair,
transaction_keys,
last_id,
fee,
program_ids,
instructions,
)
}
/// Create a signed transaction
/// * `from_keypair` - The key used to sign the transaction. This key is stored as keys[0]
/// * `account_keys` - The keys for the transaction. These are the program state
/// instances or token recipient keys.
/// * `last_id` - The PoH hash.
/// * `fee` - The transaction fee.
/// * `program_ids` - The keys that identify programs used in the `instruction` vector.
/// * `instructions` - The programs and their arguments that the transaction will execute atomically
pub fn new_with_instructions(
from_keypair: &Keypair,
keys: &[Pubkey],
last_id: Hash,
fee: u64,
program_ids: Vec<Pubkey>,
instructions: Vec<Instruction>,
) -> Self {
let from = from_keypair.pubkey();
let mut account_keys = vec![from];
account_keys.extend_from_slice(keys);
let mut tx = Transaction {
signature: Signature::default(),
account_keys,
last_id: Hash::default(),
fee,
program_ids,
instructions,
};
tx.sign(from_keypair, last_id);
tx
}
pub fn userdata(&self, instruction_index: usize) -> &[u8] {
&self.instructions[instruction_index].userdata
}
fn key_index(&self, instruction_index: usize, accounts_index: usize) -> Option<usize> {
self.instructions
.get(instruction_index)
.and_then(|instruction| instruction.accounts.get(accounts_index))
.map(|&account_keys_index| account_keys_index as usize)
}
pub fn key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> {
self.key_index(instruction_index, accounts_index)
.and_then(|account_keys_index| self.account_keys.get(account_keys_index))
}
pub fn signed_key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> {
match self.key_index(instruction_index, accounts_index) {
None => None,
Some(0) => self.account_keys.get(0),
Some(_) => None,
}
}
pub fn program_id(&self, instruction_index: usize) -> &Pubkey {
let program_ids_index = self.instructions[instruction_index].program_ids_index;
&self.program_ids[program_ids_index as usize]
}
/// Get the transaction data to sign.
pub fn get_sign_data(&self) -> Vec<u8> {
let mut data = serialize(&self.account_keys).expect("serialize account_keys");
let last_id_data = serialize(&self.last_id).expect("serialize last_id"); | data.extend_from_slice(&fee_data);
let program_ids = serialize(&self.program_ids).expect("serialize program_ids");
data.extend_from_slice(&program_ids);
let instructions = serialize(&self.instructions).expect("serialize instructions");
data.extend_from_slice(&instructions);
data
}
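// The serialized fields are concatenated in a fixed order (account_keys, last_id,
// fee, program_ids, instructions); both signing and verification rebuild this
// byte string, so the order must stay stable.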
/// Sign this transaction.
pub fn sign(&mut self, keypair: &Keypair, last_id: Hash) {
self.last_id = last_id;
let sign_data = self.get_sign_data();
self.signature = Signature::new(&keypair.sign::<Sha512>(&sign_data).to_bytes());
}
/// Verify only the transaction signature.
pub fn verify_signature(&self) -> bool {
warn!("transaction signature verification called");
self.signature
.verify(&self.from().as_ref(), &self.get_sign_data())
}
/// Verify that references in the instructions are valid
pub fn verify_refs(&self) -> bool {
for instruction in &self.instructions {
if (instruction.program_ids_index as usize) >= self.program_ids.len() {
return false;
}
for account_index in &instruction.accounts {
if (*account_index as usize) >= self.account_keys.len() {
return false;
}
}
}
true
}
pub fn from(&self) -> &Pubkey {
&self.account_keys[0]
}
// a hash of a slice of transactions only needs to hash the signatures
pub fn hash(transactions: &[Transaction]) -> Hash {
let mut hasher = Hasher::default();
transactions
.iter()
.for_each(|tx| hasher.hash(&tx.signature.as_ref()));
hasher.result()
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use signature::GenKeys;
#[test]
fn test_refs() {
let key = Keypair::new();
let key1 = Keypair::new().pubkey();
let key2 = Keypair::new().pubkey();
let prog1 = Keypair::new().pubkey();
let prog2 = Keypair::new().pubkey();
let instructions = vec![
Instruction::new(0, &(), vec![0, 1]),
Instruction::new(1, &(), vec![0, 2]),
];
let tx = Transaction::new_with_instructions(
&key,
&[key1, key2],
Default::default(),
0,
vec![prog1, prog2],
instructions,
);
assert!(tx.verify_refs());
assert_eq!(tx.key(0, 0), Some(&key.pubkey()));
assert_eq!(tx.signed_key(0, 0), Some(&key.pubkey()));
assert_eq!(tx.key(1, 0), Some(&key.pubkey()));
assert_eq!(tx.signed_key(1, 0), Some(&key.pubkey()));
assert_eq!(tx.key(0, 1), Some(&key1));
assert_eq!(tx.signed_key(0, 1), None);
assert_eq!(tx.key(1, 1), Some(&key2));
assert_eq!(tx.signed_key(1, 1), None);
assert_eq!(tx.key(2, 0), None);
assert_eq!(tx.signed_key(2, 0), None);
assert_eq!(tx.key(0, 2), None);
assert_eq!(tx.signed_key(0, 2), None);
assert_eq!(*tx.program_id(0), prog1);
assert_eq!(*tx.program_id(1), prog2);
}
#[test]
fn test_refs_invalid_program_id() {
let key = Keypair::new();
let instructions = vec![Instruction::new(1, &(), vec![])];
let tx = Transaction::new_with_instructions(
&key,
&[],
Default::default(),
0,
vec![],
instructions,
);
assert!(!tx.verify_refs());
}
#[test]
fn test_refs_invalid_account() {
let key = Keypair::new();
let instructions = vec![Instruction::new(0, &(), vec![1])];
let tx = Transaction::new_with_instructions(
&key,
&[],
Default::default(),
0,
vec![Default::default()],
instructions,
);
assert_eq!(*tx.program_id(0), Default::default());
assert!(!tx.verify_refs());
}
/// Detect binary changes in the serialized contract userdata, which could have a downstream
/// effect on SDKs and DApps
#[test]
fn test_sdk_serialize() {
let keypair = &GenKeys::new([0u8; 32]).gen_n_keypairs(1)[0];
let to = Pubkey::new(&[
1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4,
1, 1, 1,
]);
let program_id = Pubkey::new(&[
2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 8, 7, 6, 5, 4,
2, 2, 2,
]);
let tx = Transaction::new(
keypair,
&[keypair.pubkey(), to],
program_id,
&(1u8, 2u8, 3u8),
Hash::default(),
99,
);
assert_eq!(
serialize(&tx).unwrap(),
vec![
238, 228, 120, 18, 14, 44, 44, 74, 186, 124, 104, 174, 137, 227, 237, 157, 147, 37,
230, 74, 20, 48, 234, 36, 170, 60, 68, 184, 171, 240, 203, 18, 255, 110, 164, 67,
212, 206, 115, 182, 13, 90, 38, 215, 191, 51, 79, 183, 57, 102, 248, 221, 114, 72,
120, 66, 113, 146, 251, 102, 69, 187, 25, 8, 3, 0, 0, 0, 0, 0, 0, 0, 218, 65, 89,
124, 81, 87, 72, 141, 119, 36, 224, 63, 184, 216, 74, 55, 106, 67, 184, 244, 21,
24, 161, 28, 195, 135, 182, 105, 178, 238, 101, 134, 218, 65, 89, 124, 81, 87, 72,
141, 119, 36, 224, 63, 184, 216, 74, 55, 106, 67, 184, 244, 21, 24, 161, 28, 195,
135, 182, 105, 178, 238, 101, 134, 1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 9, 8, 7, 6, 5, 4, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3
],
);
}
} | data.extend_from_slice(&last_id_data);
let fee_data = serialize(&self.fee).expect("serialize fee"); | random_line_split |
transaction.rs | //! The `transaction` module provides functionality for creating log transactions.
use bincode::serialize;
use hash::{Hash, Hasher};
use serde::Serialize;
use sha2::Sha512;
use signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::pubkey::Pubkey;
use std::mem::size_of;
pub const SIGNED_DATA_OFFSET: usize = size_of::<Signature>();
pub const SIG_OFFSET: usize = 0;
pub const PUB_KEY_OFFSET: usize = size_of::<Signature>() + size_of::<u64>();
/// An instruction to execute a program under the `program_id` of `program_ids_index` with the
/// specified accounts and userdata
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Instruction {
/// The program code that executes this transaction is identified by the program_id.
/// this is an offset into the Transaction::program_ids field
pub program_ids_index: u8,
/// Indices into the keys array of which accounts to load
pub accounts: Vec<u8>,
/// Userdata to be stored in the account
pub userdata: Vec<u8>,
}
impl Instruction {
pub fn new<T: Serialize>(program_ids_index: u8, userdata: &T, accounts: Vec<u8>) -> Self |
}
/// An atomic transaction
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
/// A digital signature of `account_keys`, `program_ids`, `last_id`, `fee` and `instructions`, signed by `Pubkey`.
pub signature: Signature,
/// The `Pubkeys` that are executing this transaction's userdata. The meaning of each key is
/// program-specific.
/// * account_keys[0] - Typically this is the `caller` public key. `signature` is verified with account_keys[0].
/// In the future, which key pays the fee and which keys have signatures may become configurable.
/// * account_keys[1] - Typically this is the program context or the recipient of the tokens
pub account_keys: Vec<Pubkey>,
/// The ID of a recent ledger entry.
pub last_id: Hash,
/// The number of tokens paid for processing and storage of this transaction.
pub fee: u64,
/// Keys identifying programs in the instructions vector.
pub program_ids: Vec<Pubkey>,
/// Programs that will be executed in sequence and committed in one atomic transaction if all
/// succeed.
pub instructions: Vec<Instruction>,
}
impl Transaction {
pub fn new<T: Serialize>(
from_keypair: &Keypair,
transaction_keys: &[Pubkey],
program_id: Pubkey,
userdata: &T,
last_id: Hash,
fee: u64,
) -> Self {
let program_ids = vec![program_id];
let accounts = (0..=transaction_keys.len() as u8).collect();
let instructions = vec![Instruction::new(0, userdata, accounts)];
Self::new_with_instructions(
from_keypair,
transaction_keys,
last_id,
fee,
program_ids,
instructions,
)
}
/// Create a signed transaction
/// * `from_keypair` - The key used to sign the transaction. This key is stored as keys[0]
/// * `account_keys` - The keys for the transaction. These are the program state
/// instances or token recipient keys.
/// * `last_id` - The PoH hash.
/// * `fee` - The transaction fee.
/// * `program_ids` - The keys that identify programs used in the `instruction` vector.
/// * `instructions` - The programs and their arguments that the transaction will execute atomically
pub fn new_with_instructions(
from_keypair: &Keypair,
keys: &[Pubkey],
last_id: Hash,
fee: u64,
program_ids: Vec<Pubkey>,
instructions: Vec<Instruction>,
) -> Self {
let from = from_keypair.pubkey();
let mut account_keys = vec![from];
account_keys.extend_from_slice(keys);
let mut tx = Transaction {
signature: Signature::default(),
account_keys,
last_id: Hash::default(),
fee,
program_ids,
instructions,
};
tx.sign(from_keypair, last_id);
tx
}
pub fn userdata(&self, instruction_index: usize) -> &[u8] {
&self.instructions[instruction_index].userdata
}
fn key_index(&self, instruction_index: usize, accounts_index: usize) -> Option<usize> {
self.instructions
.get(instruction_index)
.and_then(|instruction| instruction.accounts.get(accounts_index))
.map(|&account_keys_index| account_keys_index as usize)
}
pub fn key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> {
self.key_index(instruction_index, accounts_index)
.and_then(|account_keys_index| self.account_keys.get(account_keys_index))
}
pub fn signed_key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> {
match self.key_index(instruction_index, accounts_index) {
None => None,
Some(0) => self.account_keys.get(0),
Some(_) => None,
}
}
pub fn program_id(&self, instruction_index: usize) -> &Pubkey {
let program_ids_index = self.instructions[instruction_index].program_ids_index;
&self.program_ids[program_ids_index as usize]
}
/// Get the transaction data to sign.
pub fn get_sign_data(&self) -> Vec<u8> {
let mut data = serialize(&self.account_keys).expect("serialize account_keys");
let last_id_data = serialize(&self.last_id).expect("serialize last_id");
data.extend_from_slice(&last_id_data);
let fee_data = serialize(&self.fee).expect("serialize fee");
data.extend_from_slice(&fee_data);
let program_ids = serialize(&self.program_ids).expect("serialize program_ids");
data.extend_from_slice(&program_ids);
let instructions = serialize(&self.instructions).expect("serialize instructions");
data.extend_from_slice(&instructions);
data
}
/// Sign this transaction.
pub fn sign(&mut self, keypair: &Keypair, last_id: Hash) {
self.last_id = last_id;
let sign_data = self.get_sign_data();
self.signature = Signature::new(&keypair.sign::<Sha512>(&sign_data).to_bytes());
}
/// Verify only the transaction signature.
pub fn verify_signature(&self) -> bool {
warn!("transaction signature verification called");
self.signature
.verify(&self.from().as_ref(), &self.get_sign_data())
}
/// Verify that references in the instructions are valid
pub fn verify_refs(&self) -> bool {
for instruction in &self.instructions {
if (instruction.program_ids_index as usize) >= self.program_ids.len() {
return false;
}
for account_index in &instruction.accounts {
if (*account_index as usize) >= self.account_keys.len() {
return false;
}
}
}
true
}
pub fn from(&self) -> &Pubkey {
&self.account_keys[0]
}
// a hash of a slice of transactions only needs to hash the signatures
pub fn hash(transactions: &[Transaction]) -> Hash {
let mut hasher = Hasher::default();
transactions
.iter()
.for_each(|tx| hasher.hash(&tx.signature.as_ref()));
hasher.result()
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use signature::GenKeys;
#[test]
fn test_refs() {
let key = Keypair::new();
let key1 = Keypair::new().pubkey();
let key2 = Keypair::new().pubkey();
let prog1 = Keypair::new().pubkey();
let prog2 = Keypair::new().pubkey();
let instructions = vec![
Instruction::new(0, &(), vec![0, 1]),
Instruction::new(1, &(), vec![0, 2]),
];
let tx = Transaction::new_with_instructions(
&key,
&[key1, key2],
Default::default(),
0,
vec![prog1, prog2],
instructions,
);
assert!(tx.verify_refs());
assert_eq!(tx.key(0, 0), Some(&key.pubkey()));
assert_eq!(tx.signed_key(0, 0), Some(&key.pubkey()));
assert_eq!(tx.key(1, 0), Some(&key.pubkey()));
assert_eq!(tx.signed_key(1, 0), Some(&key.pubkey()));
assert_eq!(tx.key(0, 1), Some(&key1));
assert_eq!(tx.signed_key(0, 1), None);
assert_eq!(tx.key(1, 1), Some(&key2));
assert_eq!(tx.signed_key(1, 1), None);
assert_eq!(tx.key(2, 0), None);
assert_eq!(tx.signed_key(2, 0), None);
assert_eq!(tx.key(0, 2), None);
assert_eq!(tx.signed_key(0, 2), None);
assert_eq!(*tx.program_id(0), prog1);
assert_eq!(*tx.program_id(1), prog2);
}
#[test]
fn test_refs_invalid_program_id() {
let key = Keypair::new();
let instructions = vec![Instruction::new(1, &(), vec![])];
let tx = Transaction::new_with_instructions(
&key,
&[],
Default::default(),
0,
vec![],
instructions,
);
assert!(!tx.verify_refs());
}
#[test]
fn test_refs_invalid_account() {
let key = Keypair::new();
let instructions = vec![Instruction::new(0, &(), vec![1])];
let tx = Transaction::new_with_instructions(
&key,
&[],
Default::default(),
0,
vec![Default::default()],
instructions,
);
assert_eq!(*tx.program_id(0), Default::default());
assert!(!tx.verify_refs());
}
/// Detect binary changes in the serialized contract userdata, which could have a downstream
/// effect on SDKs and DApps
#[test]
fn test_sdk_serialize() {
let keypair = &GenKeys::new([0u8; 32]).gen_n_keypairs(1)[0];
let to = Pubkey::new(&[
1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4,
1, 1, 1,
]);
let program_id = Pubkey::new(&[
2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 8, 7, 6, 5, 4,
2, 2, 2,
]);
let tx = Transaction::new(
keypair,
&[keypair.pubkey(), to],
program_id,
&(1u8, 2u8, 3u8),
Hash::default(),
99,
);
assert_eq!(
serialize(&tx).unwrap(),
vec![
238, 228, 120, 18, 14, 44, 44, 74, 186, 124, 104, 174, 137, 227, 237, 157, 147, 37,
230, 74, 20, 48, 234, 36, 170, 60, 68, 184, 171, 240, 203, 18, 255, 110, 164, 67,
212, 206, 115, 182, 13, 90, 38, 215, 191, 51, 79, 183, 57, 102, 248, 221, 114, 72,
120, 66, 113, 146, 251, 102, 69, 187, 25, 8, 3, 0, 0, 0, 0, 0, 0, 0, 218, 65, 89,
124, 81, 87, 72, 141, 119, 36, 224, 63, 184, 216, 74, 55, 106, 67, 184, 244, 21,
24, 161, 28, 195, 135, 182, 105, 178, 238, 101, 134, 218, 65, 89, 124, 81, 87, 72,
141, 119, 36, 224, 63, 184, 216, 74, 55, 106, 67, 184, 244, 21, 24, 161, 28, 195,
135, 182, 105, 178, 238, 101, 134, 1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 9, 8, 7, 6, 5, 4, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3
],
);
}
}
| {
let userdata = serialize(userdata).unwrap();
Instruction {
program_ids_index,
userdata,
accounts,
}
} | identifier_body |
main.rs | extern crate getopts;
extern crate hyper;
extern crate futures;
extern crate tokio_core;
extern crate hyper_tls;
extern crate pretty_env_logger;
extern crate ftp;
use std::io::Read;
use getopts::Options;
use std::str;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::io::stdin;
use std::env;
use std::io::{self, Write};
use futures::Future;
use futures::stream::Stream;
use hyper::Client;
use ftp::FtpStream;
fn main() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
pretty_env_logger::init().unwrap();
// Using args() instead of args_os(). Note: args() panics if an argument is not
// valid Unicode; args_os() is the variant that never panics.
let commandline_args: Vec<_> = env::args().collect();
let program = commandline_args[0].clone();
// Use the getopts package Options structure
let mut opts = Options::new();
// Create the file argument
opts.optopt("d", "", "Specify destination file", "NAME");
// Create help flag (-h or --help)
opts.optflag("h", "help", "Print this help menu");
// Create version flag (-v or --version)
opts.optflag("v", "version", "Check the version you're running");
// Use the innate parse() method
// https://doc.rust-lang.org/1.2.0/book/match.html
// https://doc.rust-lang.org/std/macro.panic.html
let matches = match opts.parse(&commandline_args[1..]){
Ok(m) => { m }
Err(f) => panic!("{}", f)
};
// Handle help flags
if matches.opt_present("h"){
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
return;
} else if matches.opt_present("v"){
println!("Version: v{}", VERSION.unwrap_or("unknown"));
return;
}
// Check if the input file has been specified
let input = if !matches.free.is_empty() {
matches.free[0].clone()
} else {
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
return;
};
// Check if the destination is empty - if so, we extract the name from given source path
let dest = match matches.opt_str("d") {
Some(x) => x,
None => extract_file_name_if_empty_string(input.clone()),
};
// Get URL to see what type of protocol we're dealing with
let url = input.clone();
let url = url.parse::<hyper::Uri>().unwrap();
// Depending on the protocol - call appropriate functions
match url.scheme(){
Some("http") => http_download_single_file(url, &dest[..]),
Some("https") => https_download_single_file(url, &dest[..]),
Some("ftp") => ftp_download_single_file(input, &dest[..]),
// Some("ftps") => ftps_download_single_file(input, &dest[..]),
Some(&_) => panic!("Sorry, unknown protocol!"),
None => panic!("Sorry, no protocol given!"),
}
}
// Download a single file from an FTP server
// fn ftps_download_single_file(input: std::string::String, destination: &str){
// }
// Download a single file from an FTP server
fn ftp_download_single_file(input: std::string::String, destination: &str){
let (host, directory, file) = parse_data_from_ftp_fullpath(input.clone());
// Create a connection to an FTP server and authenticate to it.
let mut ftp_stream = FtpStream::connect(host).unwrap_or_else(|err|
panic!("{}", err)
);
// Set transfer_type to binary so we can properly transport images
let _ = ftp_stream.transfer_type(ftp::types::FileType::Binary);
let (user, password) = parse_userdata_from_ftp_fullpath(input);
let _ = ftp_stream.login(&user[..], &password[..]).unwrap();
// Change into a new directory, relative to the one we are currently in.
let _ = ftp_stream.cwd(&directory[..]).unwrap();
let path = Path::new(destination);
let display = path.display();
let reader = ftp_stream.get(&file).unwrap();
let iterator = reader.bytes();
//Open a file in write-only mode, returns `io::Result<File>`
let mut local_file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}",
display,
why.description()),
Ok(file) => file,
};
for byte in iterator {
// println!("{}", byte.unwrap());
match local_file.write(&[byte.unwrap()]) {
Err(why) => {
panic!("couldn't write to {}: {}", display,
why.description())
},
Ok(_) => (),
};
}
let _ = local_file.flush();
// -- BufReader, iterating byte by byte --
// let mut reader = ftp_stream.get(file).unwrap();
// //Open a file in write-only mode, returns `io::Result<File>`
// let mut local_file = match File::create(&path) {
// Err(why) => panic!("couldn't create {}: {}",
// display,
// why.description()),
// Ok(file) => file,
// };
// loop{
// let chunk = read_n(&mut reader, 5);
// match chunk {
// Ok(v) => match io::stdout().write_all(&v) {
// Err(why) => {
// panic!("couldn't write to {}: {}", display,
// why.description())
// },
// Ok(_) => (),
// },
// Err(0) => return,
// Err(_) => panic!("OMG!"),
// };
// }
// -- simple_retr --
// let remote_file = ftp_stream.simple_retr("file").unwrap();
// println!("Read file with contents\n{}\n", str::from_utf8(&remote_file.into_inner()).unwrap());
// Terminate the connection to the server.
let _ = ftp_stream.quit();
}
#[allow(dead_code)]
fn read_n<R>(reader: R, bytes_to_read: u64) -> Result<Vec<u8>, i32>
where R: Read,
{
let mut buf = vec![];
let mut chunk = reader.take(bytes_to_read);
let status = chunk.read_to_end(&mut buf);
// Do appropriate error handling
match status {
Ok(0) => Err(0),
Ok(_) => Ok(buf),
_ => panic!("Didn't read enough"),
}
}
// Function that uses futures
#[allow(dead_code)]
#[allow(unused_variables, unused_mut)]
fn http_download_single_file_work(url: hyper::Uri, destination: &str){
let mut core = tokio_core::reactor::Core::new().unwrap();
let handle = core.handle();
let client = Client::new(&handle);
let work = client.get(url).and_then(|res| {
println!("Response: {}", res.status());
println!("Headers: \n{}", res.headers());
res.body().for_each(|chunk| {
io::stdout().write_all(&chunk).map_err(From::from)
})
}).map(|_| {
println!("\n\nDone.");
});
core.run(work).unwrap();
}
// Function that downloads a single file
// It doesn't use futures - blocking and not very efficient
fn http_download_single_file(url: hyper::Uri, destination: &str){
let mut core = tokio_core::reactor::Core::new().unwrap();
let handle = core.handle();
let client = Client::new(&handle);
let work = client.get(url);
let response = core.run(work).unwrap();
let buf2 = response.body().collect();
let finally = match core.run(buf2){
Ok(res) => res,
Err(_) => panic!("OMG"),
};
let path = Path::new(destination);
let display = path.display();
// Open a file in write-only mode, returns `io::Result<File>`
let mut file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}",
display,
why.description()),
Ok(file) => file,
};
for x in &finally {
match file.write_all(&x) {
Err(why) => {
panic!("couldn't write to {}: {}", display,
why.description())
},
Ok(_) => (),
}
}
println!("successfully wrote to {}", display);
}
// Function that downloads a single file
// It doesn't use futures - blocking and not very efficient
fn https_download_single_file(url: hyper::Uri, destination: &str){
let mut core = tokio_core::reactor::Core::new().unwrap();
let client = Client::configure().connector(::hyper_tls::HttpsConnector::new(4, &core.handle()).unwrap()).build(&core.handle());
let work = client.get(url);
let response = core.run(work).unwrap();
let buf2 = response.body().collect();
let finally = match core.run(buf2){
Ok(res) => res,
Err(_) => panic!("OMG"),
};
let path = Path::new(destination);
let display = path.display();
// Open a file in write-only mode, returns `io::Result<File>`
let mut file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}",
display,
why.description()),
Ok(file) => file,
}; | match file.write_all(&x) {
Err(why) => {
panic!("couldn't write to {}: {}", display,
why.description())
},
Ok(_) => (),
}
}
println!("successfully wrote to {}", display);
}
fn extract_file_name_if_empty_string(fullpath: std::string::String) -> std::string::String {
let split: Vec<&str> = fullpath.split("/").collect();
std::string::String::from(*split.last().unwrap())
}
fn parse_data_from_ftp_fullpath(input: std::string::String) -> (std::string::String, std::string::String, std::string::String){
let replace = input.replace("ftp://", "");
let split: Vec<&str> = replace.split("/").collect();
let split2 = split.clone();
let split3: Vec<&str> = split2.first().unwrap().split("@").collect();
let host = split3.last().unwrap();
let proper_host = format!("{}:21", host);
let file = split.last().unwrap();
let directory = split[1..split.len()-1].join("/");
(proper_host, directory, std::string::String::from(*file))
}
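// A hedged test sketch of the decomposition above; the URL and the
// expected values are illustrative assumptions, not taken from real data.
#[cfg(test)]
mod ftp_path_tests {
use super::parse_data_from_ftp_fullpath;
#[test]
fn splits_host_directory_and_file() {
// "ftp://user:pass@host/dir/.../file" -> ("host:21", "dir/...", "file")
let (host, dir, file) = parse_data_from_ftp_fullpath(
"ftp://bob:secret@example.com/pub/files/a.png".to_string(),
);
assert_eq!(host, "example.com:21"); // port 21 is appended by the parser
assert_eq!(dir, "pub/files");
assert_eq!(file, "a.png");
}
}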
fn parse_userdata_from_ftp_fullpath(input: std::string::String) -> (std::string::String, std::string::String){
let replace = input.replace("ftp://", "");
let mut username = std::string::String::new();
let mut password = std::string::String::new();
if replace.contains("@") {
let split: Vec<&str> = replace.split("@").collect();
let split2: Vec<&str> = split.first().unwrap().split(":").collect();
username = std::string::String::from(*split2.first().unwrap()).clone();
password = std::string::String::from(*split2.last().unwrap()).clone();
} else {
println!("User: ");
stdin().read_line(&mut username).expect("Did not enter a correct string");
if let Some('\n') = username.chars().next_back() {
username.pop();
}
if let Some('\r') = username.chars().next_back() {
username.pop();
}
println!("Password: ");
stdin().read_line(&mut password).expect("Did not enter a correct string");
if let Some('\n') = password.chars().next_back() {
password.pop();
}
if let Some('\r') = password.chars().next_back() {
password.pop();
}
}
(username, password)
} |
for x in &finally { | random_line_split |
main.rs | extern crate getopts;
extern crate hyper;
extern crate futures;
extern crate tokio_core;
extern crate hyper_tls;
extern crate pretty_env_logger;
extern crate ftp;
use std::io::Read;
use getopts::Options;
use std::str;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::io::stdin;
use std::env;
use std::io::{self, Write};
use futures::Future;
use futures::stream::Stream;
use hyper::Client;
use ftp::FtpStream;
fn main() | // https://doc.rust-lang.org/std/macro.panic.html
let matches = match opts.parse(&commandline_args[1..]){
Ok(m) => { m }
Err(f) => panic!("{}", f)
};
// Handle help flags
if matches.opt_present("h"){
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
return;
} else if matches.opt_present("v"){
println!("Version: v{}", VERSION.unwrap_or("unknown"));
return;
}
// Check if the input file has been specified
let input = if !matches.free.is_empty() {
matches.free[0].clone()
} else {
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
return;
};
// Check if the destination is empty - if so, we extract the name from given source path
let dest = match matches.opt_str("d") {
Some(x) => x,
None => extract_file_name_if_empty_string(input.clone()),
};
// Get URL to see what type of protocol we're dealing with
let url = input.clone();
let url = url.parse::<hyper::Uri>().unwrap();
// Depending on the protocol - call appropriate functions
match url.scheme(){
Some("http") => http_download_single_file(url, &dest[..]),
Some("https") => https_download_single_file(url, &dest[..]),
Some("ftp") => ftp_download_single_file(input, &dest[..]),
// Some("ftps") => ftps_download_single_file(input, &dest[..]),
Some(&_) => panic!("Sorry, unknown protocol!"),
None => panic!("Sorry, no protocol given!"),
}
}
// Download a single file from an FTP server
// fn ftps_download_single_file(input: std::string::String, destination: &str){
// }
// Download a single file from an FTP server
fn ftp_download_single_file(input: std::string::String, destination: &str){
let (host, directory, file) = parse_data_from_ftp_fullpath(input.clone());
// Create a connection to an FTP server and authenticate to it.
let mut ftp_stream = FtpStream::connect(host).unwrap_or_else(|err|
panic!("{}", err)
);
// Set transfer_type to binary so we can properly transport images
let _ = ftp_stream.transfer_type(ftp::types::FileType::Binary);
let (user, password) = parse_userdata_from_ftp_fullpath(input);
let _ = ftp_stream.login(&user[..], &password[..]).unwrap();
// Change into a new directory, relative to the one we are currently in.
let _ = ftp_stream.cwd(&directory[..]).unwrap();
let path = Path::new(destination);
let display = path.display();
let reader = ftp_stream.get(&file).unwrap();
let iterator = reader.bytes();
//Open a file in write-only mode, returns `io::Result<File>`
let mut local_file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}",
display,
why.description()),
Ok(file) => file,
};
for byte in iterator {
// println!("{}", byte.unwrap());
match local_file.write(&[byte.unwrap()]) {
Err(why) => {
panic!("couldn't write to {}: {}", display,
why.description())
},
Ok(_) => (),
};
}
let _ = local_file.flush();
// -- BufReader, iterating byte by byte --
// let mut reader = ftp_stream.get(file).unwrap();
// //Open a file in write-only mode, returns `io::Result<File>`
// let mut local_file = match File::create(&path) {
// Err(why) => panic!("couldn't create {}: {}",
// display,
// why.description()),
// Ok(file) => file,
// };
// loop{
// let chunk = read_n(&mut reader, 5);
// match chunk {
// Ok(v) => match io::stdout().write_all(&v) {
// Err(why) => {
// panic!("couldn't write to {}: {}", display,
// why.description())
// },
// Ok(_) => (),
// },
// Err(0) => return,
// Err(_) => panic!("OMG!"),
// };
// }
// -- simple_retr --
// let remote_file = ftp_stream.simple_retr("file").unwrap();
// println!("Read file with contents\n{}\n", str::from_utf8(&remote_file.into_inner()).unwrap());
// Terminate the connection to the server.
let _ = ftp_stream.quit();
}
#[allow(dead_code)]
fn read_n<R>(reader: R, bytes_to_read: u64) -> Result<Vec<u8>, i32>
where R: Read,
{
let mut buf = vec![];
let mut chunk = reader.take(bytes_to_read);
let status = chunk.read_to_end(&mut buf);
// Do appropriate error handling
match status {
Ok(0) => Err(0),
Ok(_) => Ok(buf),
_ => panic!("Didn't read enough"),
}
}
// Function that uses futures
#[allow(dead_code)]
#[allow(unused_variables, unused_mut)]
fn http_download_single_file_work(url: hyper::Uri, destination: &str){
let mut core = tokio_core::reactor::Core::new().unwrap();
let handle = core.handle();
let client = Client::new(&handle);
let work = client.get(url).and_then(|res| {
println!("Response: {}", res.status());
println!("Headers: \n{}", res.headers());
res.body().for_each(|chunk| {
io::stdout().write_all(&chunk).map_err(From::from)
})
}).map(|_| {
println!("\n\nDone.");
});
core.run(work).unwrap();
}
// Function that downloads a single file
// It doesn't use futures - blocking and not very efficient
fn http_download_single_file(url: hyper::Uri, destination: &str){
let mut core = tokio_core::reactor::Core::new().unwrap();
let handle = core.handle();
let client = Client::new(&handle);
let work = client.get(url);
let response = core.run(work).unwrap();
let buf2 = response.body().collect();
let finally = match core.run(buf2){
Ok(res) => res,
Err(_) => panic!("OMG"),
};
let path = Path::new(destination);
let display = path.display();
// Open a file in write-only mode, returns `io::Result<File>`
let mut file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}",
display,
why.description()),
Ok(file) => file,
};
for x in &finally {
match file.write_all(&x) {
Err(why) => {
panic!("couldn't write to {}: {}", display,
why.description())
},
Ok(_) => (),
}
}
println!("successfully wrote to {}", display);
}
// Function that downloads a single file
// It doesn't use futures - blocking and not very efficient
fn https_download_single_file(url: hyper::Uri, destination: &str){
let mut core = tokio_core::reactor::Core::new().unwrap();
let client = Client::configure().connector(::hyper_tls::HttpsConnector::new(4, &core.handle()).unwrap()).build(&core.handle());
let work = client.get(url);
let response = core.run(work).unwrap();
let buf2 = response.body().collect();
let finally = match core.run(buf2){
Ok(res) => res,
Err(_) => panic!("OMG"),
};
let path = Path::new(destination);
let display = path.display();
// Open a file in write-only mode, returns `io::Result<File>`
let mut file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}",
display,
why.description()),
Ok(file) => file,
};
for x in &finally {
match file.write_all(&x) {
Err(why) => {
panic!("couldn't write to {}: {}", display,
why.description())
},
Ok(_) => (),
}
}
println!("successfully wrote to {}", display);
}
fn extract_file_name_if_empty_string(fullpath: std::string::String) -> std::string::String {
let split: Vec<&str> = fullpath.split("/").collect();
std::string::String::from(*split.last().unwrap())
}
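// A hedged example of the fallback naming rule above, as a test sketch;
// the input URL is an illustrative assumption.
#[cfg(test)]
mod file_name_tests {
use super::extract_file_name_if_empty_string;
#[test]
fn takes_last_path_segment() {
let name = extract_file_name_if_empty_string(
"https://example.com/downloads/archive.tar.gz".to_string(),
);
assert_eq!(name, "archive.tar.gz");
}
}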
fn parse_data_from_ftp_fullpath(input: std::string::String) -> (std::string::String, std::string::String, std::string::String){
let replace = input.replace("ftp://", "");
let split: Vec<&str> = replace.split("/").collect();
let split2 = split.clone();
let split3: Vec<&str> = split2.first().unwrap().split("@").collect();
let host = split3.last().unwrap();
let proper_host = format!("{}:21", host);
let file = split.last().unwrap();
let directory = split[1..split.len()-1].join("/");
(proper_host, directory, std::string::String::from(*file))
}
fn parse_userdata_from_ftp_fullpath(input: std::string::String) -> (std::string::String, std::string::String){
let replace = input.replace("ftp://", "");
let mut username = std::string::String::new();
let mut password = std::string::String::new();
if replace.contains("@") {
let split: Vec<&str> = replace.split("@").collect();
let split2: Vec<&str> = split.first().unwrap().split(":").collect();
username = std::string::String::from(*split2.first().unwrap()).clone();
password = std::string::String::from(*split2.last().unwrap()).clone();
} else {
println!("User: ");
stdin().read_line(&mut username).expect("Did not enter a correct string");
if let Some('\n') = username.chars().next_back() {
username.pop();
}
if let Some('\r') = username.chars().next_back() {
username.pop();
}
println!("Password: ");
stdin().read_line(&mut password).expect("Did not enter a correct string");
if let Some('\n') = password.chars().next_back() {
password.pop();
}
if let Some('\r') = password.chars().next_back() {
password.pop();
}
}
(username, password)
} | {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
pretty_env_logger::init().unwrap();
// Using args() instead of args_os(); note that args() panics if any argument is not valid Unicode, while args_os() never panics
let commandline_args: Vec<_> = env::args().collect();
let program = commandline_args[0].clone();
// Use the getopts package Options structure
let mut opts = Options::new();
// Create the file argument
opts.optopt("d", "", "Specify destination file", "NAME");
// Create help flag (-h or --help)
opts.optflag("h", "help", "Print this help menu");
// Create version l
opts.optflag("v", "version", "Check the version you're running");
// Use the innate parse() method
// https://doc.rust-lang.org/1.2.0/book/match.html | identifier_body |
main.rs | extern crate getopts;
extern crate hyper;
extern crate futures;
extern crate tokio_core;
extern crate hyper_tls;
extern crate pretty_env_logger;
extern crate ftp;
use std::io::Read;
use getopts::Options;
use std::str;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::io::stdin;
use std::env;
use std::io::{self, Write};
use futures::Future;
use futures::stream::Stream;
use hyper::Client;
use ftp::FtpStream;
fn main() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
pretty_env_logger::init().unwrap();
// Using args() instead of args_os(); note that args() panics if any argument is not valid Unicode, while args_os() never panics
let commandline_args: Vec<_> = env::args().collect();
let program = commandline_args[0].clone();
// Use the getopts package Options structure
let mut opts = Options::new();
// Create the file argument
opts.optopt("d", "", "Specify destination file", "NAME");
// Create help flag (-h or --help)
opts.optflag("h", "help", "Print this help menu");
// Create version l
opts.optflag("v", "version", "Check the version you're running");
// Use the innate parse() method
// https://doc.rust-lang.org/1.2.0/book/match.html
// https://doc.rust-lang.org/std/macro.panic.html
let matches = match opts.parse(&commandline_args[1..]){
Ok(m) => { m }
Err(f) => panic!("{}", f)
};
// Handle help flags
if matches.opt_present("h"){
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
return;
} else if matches.opt_present("v"){
println!("Version: v{}", VERSION.unwrap_or("unknown"));
return;
}
// Check if the input file has been specified
let input = if !matches.free.is_empty() {
matches.free[0].clone()
} else {
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
return;
};
// Check if the destination is empty - if so, we extract the name from given source path
let dest = match matches.opt_str("d") {
Some(x) => x,
None => extract_file_name_if_empty_string(input.clone()),
};
// Get URL to see what type of protocol we're dealing with
let url = input.clone();
let url = url.parse::<hyper::Uri>().unwrap();
// Depending on the protocol - call appropriate functions
match url.scheme(){
Some("http") => http_download_single_file(url, &dest[..]),
Some("https") => https_download_single_file(url, &dest[..]),
Some("ftp") => ftp_download_single_file(input, &dest[..]),
// Some("ftps") => ftps_download_single_file(input, &dest[..]),
Some(&_) => panic!("Sorry, unknown protocol!"),
None => panic!("Sorry, no protocol given!"),
}
}
// Download a single file from an FTP server
// fn ftps_download_single_file(input: std::string::String, destination: &str){
// }
// Download a single file from an FTP server
fn ftp_download_single_file(input: std::string::String, destination: &str){
let (host, directory, file) = parse_data_from_ftp_fullpath(input.clone());
// Create a connection to an FTP server and authenticate to it.
let mut ftp_stream = FtpStream::connect(host).unwrap_or_else(|err|
panic!("{}", err)
);
// Set transfer_type to binary so we can properly transport images
let _ = ftp_stream.transfer_type(ftp::types::FileType::Binary);
let (user, password) = parse_userdata_from_ftp_fullpath(input);
let _ = ftp_stream.login(&user[..], &password[..]).unwrap();
// Change into a new directory, relative to the one we are currently in.
let _ = ftp_stream.cwd(&directory[..]).unwrap();
let path = Path::new(destination);
let display = path.display();
let reader = ftp_stream.get(&file).unwrap();
let iterator = reader.bytes();
//Open a file in write-only mode, returns `io::Result<File>`
let mut local_file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}",
display,
why.description()),
Ok(file) => file,
};
for byte in iterator {
// println!("{}", byte.unwrap());
match local_file.write(&[byte.unwrap()]) {
Err(why) => {
panic!("couldn't write to {}: {}", display,
why.description())
},
Ok(_) => (),
};
}
let _ = local_file.flush();
// -- BufReader, iterating byte by byte --
// let mut reader = ftp_stream.get(file).unwrap();
// //Open a file in write-only mode, returns `io::Result<File>`
// let mut local_file = match File::create(&path) {
// Err(why) => panic!("couldn't create {}: {}",
// display,
// why.description()),
// Ok(file) => file,
// };
// loop{
// let chunk = read_n(&mut reader, 5);
// match chunk {
// Ok(v) => match io::stdout().write_all(&v) {
// Err(why) => {
// panic!("couldn't write to {}: {}", display,
// why.description())
// },
// Ok(_) => (),
// },
// Err(0) => return,
// Err(_) => panic!("OMG!"),
// };
// }
// -- simple_retr --
// let remote_file = ftp_stream.simple_retr("file").unwrap();
// println!("Read file with contents\n{}\n", str::from_utf8(&remote_file.into_inner()).unwrap());
// Terminate the connection to the server.
let _ = ftp_stream.quit();
}
#[allow(dead_code)]
fn read_n<R>(reader: R, bytes_to_read: u64) -> Result<Vec<u8>, i32>
where R: Read,
{
let mut buf = vec![];
let mut chunk = reader.take(bytes_to_read);
let status = chunk.read_to_end(&mut buf);
// Do appropriate error handling
match status {
Ok(0) => Err(0),
Ok(_) => Ok(buf),
_ => panic!("Didn't read enough"),
}
}
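// A hedged usage sketch for read_n: any Read source works since &[u8]
// implements Read; the data and chunk size are illustrative assumptions.
#[cfg(test)]
mod read_n_tests {
use super::read_n;
#[test]
fn reads_requested_chunk() {
let data: &[u8] = b"hello world";
let chunk = read_n(data, 5).expect("expected a 5-byte chunk");
assert_eq!(chunk, b"hello");
}
}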
// Function that uses futures
#[allow(dead_code)]
#[allow(unused_variables, unused_mut)]
fn http_download_single_file_work(url: hyper::Uri, destination: &str){
let mut core = tokio_core::reactor::Core::new().unwrap();
let handle = core.handle();
let client = Client::new(&handle);
let work = client.get(url).and_then(|res| {
println!("Response: {}", res.status());
println!("Headers: \n{}", res.headers());
res.body().for_each(|chunk| {
io::stdout().write_all(&chunk).map_err(From::from)
})
}).map(|_| {
println!("\n\nDone.");
});
core.run(work).unwrap();
}
// Function that downloads a single file
// It doesn't use futures - blocking and not very efficient
fn http_download_single_file(url: hyper::Uri, destination: &str){
let mut core = tokio_core::reactor::Core::new().unwrap();
let handle = core.handle();
let client = Client::new(&handle);
let work = client.get(url);
let response = core.run(work).unwrap();
let buf2 = response.body().collect();
let finally = match core.run(buf2){
Ok(res) => res,
Err(_) => panic!("OMG"),
};
let path = Path::new(destination);
let display = path.display();
// Open a file in write-only mode, returns `io::Result<File>`
let mut file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}",
display,
why.description()),
Ok(file) => file,
};
for x in &finally {
match file.write_all(&x) {
Err(why) => {
panic!("couldn't write to {}: {}", display,
why.description())
},
Ok(_) => (),
}
}
println!("successfully wrote to {}", display);
}
// Function that downloads a single file
// It doesn't use futures - blocking and not very efficient
fn | (url: hyper::Uri, destination: &str){
let mut core = tokio_core::reactor::Core::new().unwrap();
let client = Client::configure().connector(::hyper_tls::HttpsConnector::new(4, &core.handle()).unwrap()).build(&core.handle());
let work = client.get(url);
let response = core.run(work).unwrap();
let buf2 = response.body().collect();
let finally = match core.run(buf2){
Ok(res) => res,
Err(_) => panic!("OMG"),
};
let path = Path::new(destination);
let display = path.display();
// Open a file in write-only mode, returns `io::Result<File>`
let mut file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}",
display,
why.description()),
Ok(file) => file,
};
for x in &finally {
match file.write_all(&x) {
Err(why) => {
panic!("couldn't write to {}: {}", display,
why.description())
},
Ok(_) => (),
}
}
println!("successfully wrote to {}", display);
}
fn extract_file_name_if_empty_string(fullpath: std::string::String) -> std::string::String {
let split: Vec<&str> = fullpath.split("/").collect();
std::string::String::from(*split.last().unwrap())
}
fn parse_data_from_ftp_fullpath(input: std::string::String) -> (std::string::String, std::string::String, std::string::String){
let replace = input.replace("ftp://", "");
let split: Vec<&str> = replace.split("/").collect();
let split2 = split.clone();
let split3: Vec<&str> = split2.first().unwrap().split("@").collect();
let host = split3.last().unwrap();
let proper_host = format!("{}:21", host);
let file = split.last().unwrap();
let directory = split[1..split.len()-1].join("/");
(proper_host, directory, std::string::String::from(*file))
}
fn parse_userdata_from_ftp_fullpath(input: std::string::String) -> (std::string::String, std::string::String){
let replace = input.replace("ftp://", "");
let mut username = std::string::String::new();
let mut password = std::string::String::new();
if replace.contains("@") {
let split: Vec<&str> = replace.split("@").collect();
let split2: Vec<&str> = split.first().unwrap().split(":").collect();
username = std::string::String::from(*split2.first().unwrap()).clone();
password = std::string::String::from(*split2.last().unwrap()).clone();
} else {
println!("User: ");
stdin().read_line(&mut username).expect("Did not enter a correct string");
if let Some('\n') = username.chars().next_back() {
username.pop();
}
if let Some('\r') = username.chars().next_back() {
username.pop();
}
println!("Password: ");
stdin().read_line(&mut password).expect("Did not enter a correct string");
if let Some('\n') = password.chars().next_back() {
password.pop();
}
if let Some('\r') = password.chars().next_back() {
password.pop();
}
}
(username, password)
} | https_download_single_file | identifier_name |
update_webhook_message.rs | //! Update a message created by a webhook via execution.
use crate::{
client::Client,
error::Error as HttpError,
request::{
self,
validate_inner::{self, ComponentValidationError, ComponentValidationErrorType},
AuditLogReason, AuditLogReasonError, Form, NullableField, Request,
},
response::{marker::EmptyBody, ResponseFuture},
routing::Route,
};
use serde::Serialize;
use std::{
error::Error,
fmt::{Display, Formatter, Result as FmtResult},
};
use twilight_model::{
application::component::Component,
channel::{embed::Embed, message::AllowedMentions, Attachment},
id::{MessageId, WebhookId},
};
/// A webhook's message can not be updated as configured.
#[derive(Debug)]
pub struct UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType,
source: Option<Box<dyn Error + Send + Sync>>,
}
impl UpdateWebhookMessageError {
/// Immutable reference to the type of error that occurred.
#[must_use = "retrieving the type has no effect if left unused"]
pub const fn kind(&self) -> &UpdateWebhookMessageErrorType {
&self.kind
}
/// Consume the error, returning the source error if there is any.
#[must_use = "consuming the error and retrieving the source has no effect if left unused"]
pub fn into_source(self) -> Option<Box<dyn Error + Send + Sync>> {
self.source
}
/// Consume the error, returning the owned error type and the source error.
#[must_use = "consuming the error into its parts has no effect if left unused"]
pub fn | (
self,
) -> (
UpdateWebhookMessageErrorType,
Option<Box<dyn Error + Send + Sync>>,
) {
(self.kind, self.source)
}
}
impl Display for UpdateWebhookMessageError {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
match &self.kind {
UpdateWebhookMessageErrorType::ComponentCount { count } => {
Display::fmt(count, f)?;
f.write_str(" components were provided, but only ")?;
Display::fmt(&ComponentValidationError::COMPONENT_COUNT, f)?;
f.write_str(" root components are allowed")
}
UpdateWebhookMessageErrorType::ComponentInvalid { .. } => {
f.write_str("a provided component is invalid")
}
UpdateWebhookMessageErrorType::ContentInvalid => {
f.write_str("message content is invalid")
}
UpdateWebhookMessageErrorType::EmbedTooLarge { .. } => {
f.write_str("length of one of the embeds is too large")
}
UpdateWebhookMessageErrorType::TooManyEmbeds => {
f.write_str("only 10 embeds may be provided")
}
}
}
}
impl Error for UpdateWebhookMessageError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
self.source
.as_ref()
.map(|source| &**source as &(dyn Error + 'static))
}
}
/// Type of [`UpdateWebhookMessageError`] that occurred.
#[derive(Debug)]
#[non_exhaustive]
pub enum UpdateWebhookMessageErrorType {
/// Content is over 2000 UTF-16 characters.
ContentInvalid,
/// Length of one of the embeds is over 6000 characters.
EmbedTooLarge {
/// Index of the embed that was too large.
///
/// This can be used to index into the provided embeds to retrieve the
/// invalid embed.
index: usize,
},
/// An invalid message component was provided.
ComponentInvalid {
/// Additional details about the validation failure type.
kind: ComponentValidationErrorType,
},
/// Too many message components were provided.
ComponentCount {
/// Number of components that were provided.
count: usize,
},
/// Too many embeds were provided.
///
/// A webhook can have up to 10 embeds.
TooManyEmbeds,
}
#[derive(Serialize)]
struct UpdateWebhookMessageFields<'a> {
#[serde(skip_serializing_if = "Option::is_none")]
allowed_mentions: Option<AllowedMentions>,
#[serde(skip_serializing_if = "request::slice_is_empty")]
attachments: &'a [Attachment],
#[serde(skip_serializing_if = "Option::is_none")]
components: Option<NullableField<&'a [Component]>>,
#[serde(skip_serializing_if = "Option::is_none")]
content: Option<NullableField<&'a str>>,
#[serde(skip_serializing_if = "Option::is_none")]
embeds: Option<NullableField<&'a [Embed]>>,
#[serde(skip_serializing_if = "Option::is_none")]
payload_json: Option<&'a [u8]>,
}
/// Update a message created by a webhook.
///
/// A webhook's message must always have at least one embed or some amount of
/// content. If you wish to delete a webhook's message refer to
/// [`DeleteWebhookMessage`].
///
/// # Examples
///
/// Update a webhook's message by setting the content to `test <@3>` -
/// attempting to mention user ID 3 - and specifying that the user may not be
/// mentioned.
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_model::{
/// channel::message::AllowedMentions,
/// id::{MessageId, WebhookId}
/// };
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// client.update_webhook_message(WebhookId(1), "token here", MessageId(2))
/// // By creating a default set of allowed mentions, no entity can be
/// // mentioned.
/// .allowed_mentions(AllowedMentions::default())
/// .content(Some("test <@3>"))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// [`DeleteWebhookMessage`]: super::DeleteWebhookMessage
#[must_use = "requests must be configured and executed"]
pub struct UpdateWebhookMessage<'a> {
fields: UpdateWebhookMessageFields<'a>,
files: &'a [(&'a str, &'a [u8])],
http: &'a Client,
message_id: MessageId,
reason: Option<&'a str>,
token: &'a str,
webhook_id: WebhookId,
}
impl<'a> UpdateWebhookMessage<'a> {
/// Maximum number of embeds that a webhook's message may have.
pub const EMBED_COUNT_LIMIT: usize = 10;
pub(crate) const fn new(
http: &'a Client,
webhook_id: WebhookId,
token: &'a str,
message_id: MessageId,
) -> Self {
Self {
fields: UpdateWebhookMessageFields {
allowed_mentions: None,
attachments: &[],
components: None,
content: None,
embeds: None,
payload_json: None,
},
files: &[],
http,
message_id,
reason: None,
token,
webhook_id,
}
}
/// Set the allowed mentions in the message.
pub fn allowed_mentions(mut self, allowed: AllowedMentions) -> Self {
self.fields.allowed_mentions.replace(allowed);
self
}
/// Specify multiple attachments already present in the target message to keep.
///
/// If called, all unspecified attachments will be removed from the message.
/// If not called, all attachments will be kept.
pub const fn attachments(mut self, attachments: &'a [Attachment]) -> Self {
self.fields.attachments = attachments;
self
}
/// Add multiple [`Component`]s to a message.
///
/// Calling this method multiple times will clear previous calls.
///
/// Pass `None` to clear existing components.
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::ComponentCount`] error
/// type if too many components are provided.
///
/// Returns an [`UpdateWebhookMessageErrorType::ComponentInvalid`] error
/// type if one of the provided components is invalid.
pub fn components(
mut self,
components: Option<&'a [Component]>,
) -> Result<Self, UpdateWebhookMessageError> {
if let Some(components) = components.as_ref() {
validate_inner::components(components).map_err(|source| {
let (kind, inner_source) = source.into_parts();
match kind {
ComponentValidationErrorType::ComponentCount { count } => {
UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ComponentCount { count },
source: inner_source,
}
}
other => UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ComponentInvalid { kind: other },
source: inner_source,
},
}
})?;
}
self.fields.components = Some(NullableField(components));
Ok(self)
}
/// Set the content of the message.
///
/// Pass `None` if you want to remove the message content.
///
/// Note that if there are no embeds then you will not be able to remove
/// the content of the message.
///
/// The maximum length is 2000 UTF-16 characters.
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::ContentInvalid`] error type if
/// the content length is too long.
pub fn content(mut self, content: Option<&'a str>) -> Result<Self, UpdateWebhookMessageError> {
if let Some(content_ref) = content {
if !validate_inner::content_limit(content_ref) {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ContentInvalid,
source: None,
});
}
}
self.fields.content = Some(NullableField(content));
Ok(self)
}
/// Set the list of embeds of the webhook's message.
///
/// Pass `None` to remove all of the embeds.
///
/// The maximum number of allowed embeds is defined by
/// [`EMBED_COUNT_LIMIT`].
///
/// The total character length of each embed must not exceed 6000
/// characters. Additionally, the internal fields also have character
/// limits. Refer to [the discord docs] for more information.
///
/// # Examples
///
/// Create an embed and update the message with the new embed. The content
/// of the original message is unaffected and only the embed(s) are
/// modified.
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_embed_builder::EmbedBuilder;
/// use twilight_model::id::{MessageId, WebhookId};
///
/// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// let embed = EmbedBuilder::new()
/// .description("Powerful, flexible, and scalable ecosystem of Rust libraries for the Discord API.")
/// .title("Twilight")
/// .url("https://twilight.rs")
/// .build()?;
///
/// client.update_webhook_message(WebhookId(1), "token", MessageId(2))
/// .embeds(Some(&[embed]))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::EmbedTooLarge`] error type
/// if one of the embeds are too large.
///
/// Returns an [`UpdateWebhookMessageErrorType::TooManyEmbeds`] error type
/// if more than 10 embeds are provided.
///
/// [the discord docs]: https://discord.com/developers/docs/resources/channel#embed-limits
/// [`EMBED_COUNT_LIMIT`]: Self::EMBED_COUNT_LIMIT
pub fn embeds(
mut self,
embeds: Option<&'a [Embed]>,
) -> Result<Self, UpdateWebhookMessageError> {
if let Some(embeds_present) = embeds.as_deref() {
if embeds_present.len() > Self::EMBED_COUNT_LIMIT {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::TooManyEmbeds,
source: None,
});
}
for (idx, embed) in embeds_present.iter().enumerate() {
if let Err(source) = validate_inner::embed(embed) {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::EmbedTooLarge { index: idx },
source: Some(Box::new(source)),
});
}
}
}
self.fields.embeds = Some(NullableField(embeds));
Ok(self)
}
/// Attach multiple files to the webhook.
///
/// Calling this method will clear any previous calls.
pub const fn files(mut self, files: &'a [(&'a str, &'a [u8])]) -> Self {
self.files = files;
self
}
/// JSON encoded body of any additional request fields.
///
/// If this method is called, all other fields are ignored, except for
/// [`files`]. See [Discord Docs/Create Message] and
/// [`ExecuteWebhook::payload_json`].
///
/// [`files`]: Self::files
/// [`ExecuteWebhook::payload_json`]: super::ExecuteWebhook::payload_json
/// [Discord Docs/Create Message]: https://discord.com/developers/docs/resources/channel#create-message-params
pub const fn payload_json(mut self, payload_json: &'a [u8]) -> Self {
self.fields.payload_json = Some(payload_json);
self
}
// `self` needs to be consumed and the client returned due to parameters
// being consumed in request construction.
fn request(&mut self) -> Result<Request, HttpError> {
let mut request = Request::builder(&Route::UpdateWebhookMessage {
message_id: self.message_id.0,
token: self.token,
webhook_id: self.webhook_id.0,
})
.use_authorization_token(false);
if !self.files.is_empty() || self.fields.payload_json.is_some() {
let mut form = Form::new();
for (index, (name, file)) in self.files.iter().enumerate() {
form.file(format!("{}", index).as_bytes(), name.as_bytes(), file);
}
if let Some(payload_json) = &self.fields.payload_json {
form.payload_json(payload_json);
} else {
if self.fields.allowed_mentions.is_none() {
self.fields.allowed_mentions = self.http.default_allowed_mentions();
}
let body = crate::json::to_vec(&self.fields).map_err(HttpError::json)?;
form.payload_json(&body);
}
request = request.form(form);
} else {
if self.fields.allowed_mentions.is_none() {
self.fields.allowed_mentions = self.http.default_allowed_mentions();
}
request = request.json(&self.fields)?;
}
if let Some(reason) = self.reason.as_ref() {
request = request.headers(request::audit_header(reason)?);
}
Ok(request.build())
}
/// Execute the request, returning a future resolving to a [`Response`].
///
/// [`Response`]: crate::response::Response
pub fn exec(mut self) -> ResponseFuture<EmptyBody> {
match self.request() {
Ok(request) => self.http.request(request),
Err(source) => ResponseFuture::error(source),
}
}
}
impl<'a> AuditLogReason<'a> for UpdateWebhookMessage<'a> {
fn reason(mut self, reason: &'a str) -> Result<Self, AuditLogReasonError> {
self.reason.replace(AuditLogReasonError::validate(reason)?);
Ok(self)
}
}
#[cfg(test)]
mod tests {
use super::{UpdateWebhookMessage, UpdateWebhookMessageFields};
use crate::{
client::Client,
request::{AuditLogReason, NullableField, Request},
routing::Route,
};
use twilight_model::id::{MessageId, WebhookId};
#[test]
fn test_request() {
let client = Client::new("token".to_owned());
let mut builder = UpdateWebhookMessage::new(&client, WebhookId(1), "token", MessageId(2))
.content(Some("test"))
.expect("'test' content couldn't be set")
.reason("reason")
.expect("'reason' is not a valid reason");
let actual = builder.request().expect("failed to create request");
let body = UpdateWebhookMessageFields {
allowed_mentions: None,
attachments: &[],
components: None,
content: Some(NullableField(Some("test"))),
embeds: None,
payload_json: None,
};
let route = Route::UpdateWebhookMessage {
message_id: 2,
token: "token",
webhook_id: 1,
};
let expected = Request::builder(&route)
.json(&body)
.expect("failed to serialize body")
.build();
assert_eq!(expected.body, actual.body);
assert_eq!(expected.path, actual.path);
}
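// A hedged sketch of a companion check, assuming validate_inner::content_limit
// enforces the documented 2000 UTF-16 character cap on message content.
#[test]
fn test_content_too_long() {
let client = Client::new("token".to_owned());
let too_long = "a".repeat(2001);
let result = UpdateWebhookMessage::new(&client, WebhookId(1), "token", MessageId(2))
.content(Some(too_long.as_str()));
assert!(result.is_err());
}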
}
| into_parts | identifier_name |
update_webhook_message.rs | //! Update a message created by a webhook via execution.
use crate::{
client::Client,
error::Error as HttpError,
request::{
self,
validate_inner::{self, ComponentValidationError, ComponentValidationErrorType},
AuditLogReason, AuditLogReasonError, Form, NullableField, Request,
},
response::{marker::EmptyBody, ResponseFuture},
routing::Route,
};
use serde::Serialize;
use std::{
error::Error,
fmt::{Display, Formatter, Result as FmtResult},
};
use twilight_model::{
application::component::Component,
channel::{embed::Embed, message::AllowedMentions, Attachment},
id::{MessageId, WebhookId},
};
/// A webhook's message can not be updated as configured.
#[derive(Debug)]
pub struct UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType,
source: Option<Box<dyn Error + Send + Sync>>,
}
impl UpdateWebhookMessageError {
/// Immutable reference to the type of error that occurred.
#[must_use = "retrieving the type has no effect if left unused"]
pub const fn kind(&self) -> &UpdateWebhookMessageErrorType {
&self.kind
}
/// Consume the error, returning the source error if there is any.
#[must_use = "consuming the error and retrieving the source has no effect if left unused"]
pub fn into_source(self) -> Option<Box<dyn Error + Send + Sync>> {
self.source
}
/// Consume the error, returning the owned error type and the source error.
#[must_use = "consuming the error into its parts has no effect if left unused"]
pub fn into_parts(
self,
) -> (
UpdateWebhookMessageErrorType,
Option<Box<dyn Error + Send + Sync>>,
) {
(self.kind, self.source)
}
}
impl Display for UpdateWebhookMessageError {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
match &self.kind {
UpdateWebhookMessageErrorType::ComponentCount { count } => {
Display::fmt(count, f)?;
f.write_str(" components were provided, but only ")?;
Display::fmt(&ComponentValidationError::COMPONENT_COUNT, f)?;
f.write_str(" root components are allowed")
}
UpdateWebhookMessageErrorType::ComponentInvalid { .. } => {
f.write_str("a provided component is invalid")
}
UpdateWebhookMessageErrorType::ContentInvalid => {
f.write_str("message content is invalid")
}
UpdateWebhookMessageErrorType::EmbedTooLarge { .. } => {
f.write_str("length of one of the embeds is too large")
}
UpdateWebhookMessageErrorType::TooManyEmbeds => {
f.write_str("only 10 embeds may be provided")
}
}
}
}
impl Error for UpdateWebhookMessageError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
self.source
.as_ref()
.map(|source| &**source as &(dyn Error + 'static))
}
}
/// Type of [`UpdateWebhookMessageError`] that occurred.
#[derive(Debug)]
#[non_exhaustive]
pub enum UpdateWebhookMessageErrorType {
/// Content is over 2000 UTF-16 characters.
ContentInvalid,
/// Length of one of the embeds is over 6000 characters.
EmbedTooLarge {
/// Index of the embed that was too large.
///
/// This can be used to index into the provided embeds to retrieve the
/// invalid embed.
index: usize,
},
/// An invalid message component was provided.
ComponentInvalid {
/// Additional details about the validation failure type.
kind: ComponentValidationErrorType,
},
/// Too many message components were provided.
ComponentCount {
/// Number of components that were provided.
count: usize,
},
/// Too many embeds were provided.
///
/// A webhook can have up to 10 embeds.
TooManyEmbeds,
}
#[derive(Serialize)]
struct UpdateWebhookMessageFields<'a> {
#[serde(skip_serializing_if = "Option::is_none")]
allowed_mentions: Option<AllowedMentions>,
#[serde(skip_serializing_if = "request::slice_is_empty")]
attachments: &'a [Attachment],
#[serde(skip_serializing_if = "Option::is_none")]
components: Option<NullableField<&'a [Component]>>,
#[serde(skip_serializing_if = "Option::is_none")]
content: Option<NullableField<&'a str>>,
#[serde(skip_serializing_if = "Option::is_none")]
embeds: Option<NullableField<&'a [Embed]>>,
#[serde(skip_serializing_if = "Option::is_none")]
payload_json: Option<&'a [u8]>,
}
/// Update a message created by a webhook.
///
/// A webhook's message must always have at least one embed or some amount of
/// content. If you wish to delete a webhook's message refer to
/// [`DeleteWebhookMessage`].
///
/// # Examples
///
/// Update a webhook's message by setting the content to `test <@3>` -
/// attempting to mention user ID 3 - and specifying that the user may not be
/// mentioned.
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_model::{
/// channel::message::AllowedMentions,
/// id::{MessageId, WebhookId}
/// };
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// client.update_webhook_message(WebhookId(1), "token here", MessageId(2))
/// // By creating a default set of allowed mentions, no entity can be
/// // mentioned.
/// .allowed_mentions(AllowedMentions::default())
/// .content(Some("test <@3>"))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// [`DeleteWebhookMessage`]: super::DeleteWebhookMessage
#[must_use = "requests must be configured and executed"]
pub struct UpdateWebhookMessage<'a> {
fields: UpdateWebhookMessageFields<'a>,
files: &'a [(&'a str, &'a [u8])],
http: &'a Client,
message_id: MessageId,
reason: Option<&'a str>,
token: &'a str,
webhook_id: WebhookId,
}
impl<'a> UpdateWebhookMessage<'a> {
/// Maximum number of embeds that a webhook's message may have.
pub const EMBED_COUNT_LIMIT: usize = 10;
pub(crate) const fn new(
http: &'a Client,
webhook_id: WebhookId,
token: &'a str,
message_id: MessageId,
) -> Self {
Self {
fields: UpdateWebhookMessageFields {
allowed_mentions: None,
attachments: &[],
components: None,
content: None,
embeds: None,
payload_json: None,
},
files: &[],
http,
message_id,
reason: None,
token,
webhook_id,
}
}
/// Set the allowed mentions in the message.
pub fn allowed_mentions(mut self, allowed: AllowedMentions) -> Self {
self.fields.allowed_mentions.replace(allowed);
self
}
/// Specify multiple attachments already present in the target message to keep.
///
/// If called, all unspecified attachments will be removed from the message.
/// If not called, all attachments will be kept.
pub const fn attachments(mut self, attachments: &'a [Attachment]) -> Self {
self.fields.attachments = attachments;
self
}
/// Add multiple [`Component`]s to a message.
///
/// Calling this method multiple times will clear previous calls.
///
/// Pass `None` to clear existing components.
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::ComponentCount`] error
/// type if too many components are provided.
///
/// Returns an [`UpdateWebhookMessageErrorType::ComponentInvalid`] error
/// type if one of the provided components is invalid.
pub fn components(
mut self,
components: Option<&'a [Component]>,
) -> Result<Self, UpdateWebhookMessageError> {
if let Some(components) = components.as_ref() {
validate_inner::components(components).map_err(|source| {
let (kind, inner_source) = source.into_parts();
match kind {
ComponentValidationErrorType::ComponentCount { count } => {
UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ComponentCount { count },
source: inner_source,
}
}
other => UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ComponentInvalid { kind: other },
source: inner_source,
},
}
})?;
}
self.fields.components = Some(NullableField(components));
Ok(self)
}
/// Set the content of the message.
///
/// Pass `None` if you want to remove the message content.
///
/// Note that if there are no embeds then you will not be able to remove
/// the content of the message.
///
/// The maximum length is 2000 UTF-16 characters.
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::ContentInvalid`] error type if
/// the content length is too long.
pub fn content(mut self, content: Option<&'a str>) -> Result<Self, UpdateWebhookMessageError> {
if let Some(content_ref) = content {
if !validate_inner::content_limit(content_ref) {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ContentInvalid,
source: None,
});
}
}
self.fields.content = Some(NullableField(content));
Ok(self)
}
/// Set the list of embeds of the webhook's message.
///
/// Pass `None` to remove all of the embeds.
///
/// The maximum number of allowed embeds is defined by
/// [`EMBED_COUNT_LIMIT`].
///
/// The total character length of each embed must not exceed 6000
/// characters. Additionally, the internal fields also have character
/// limits. Refer to [the discord docs] for more information.
///
/// # Examples
///
/// Create an embed and update the message with the new embed. The content
/// of the original message is unaffected and only the embed(s) are
/// modified.
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_embed_builder::EmbedBuilder;
/// use twilight_model::id::{MessageId, WebhookId};
///
/// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// let embed = EmbedBuilder::new()
/// .description("Powerful, flexible, and scalable ecosystem of Rust libraries for the Discord API.")
/// .title("Twilight")
/// .url("https://twilight.rs")
/// .build()?;
///
/// client.update_webhook_message(WebhookId(1), "token", MessageId(2))
/// .embeds(Some(&[embed]))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::EmbedTooLarge`] error type
/// if one of the embeds are too large.
///
/// Returns an [`UpdateWebhookMessageErrorType::TooManyEmbeds`] error type
/// if more than 10 embeds are provided.
///
/// [the discord docs]: https://discord.com/developers/docs/resources/channel#embed-limits
/// [`EMBED_COUNT_LIMIT`]: Self::EMBED_COUNT_LIMIT
pub fn embeds(
mut self,
embeds: Option<&'a [Embed]>,
) -> Result<Self, UpdateWebhookMessageError> {
if let Some(embeds_present) = embeds.as_deref() {
if embeds_present.len() > Self::EMBED_COUNT_LIMIT {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::TooManyEmbeds,
source: None,
});
}
for (idx, embed) in embeds_present.iter().enumerate() {
if let Err(source) = validate_inner::embed(embed) {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::EmbedTooLarge { index: idx },
source: Some(Box::new(source)),
});
}
}
}
self.fields.embeds = Some(NullableField(embeds));
Ok(self)
}
/// Attach multiple files to the webhook.
///
/// Calling this method will clear any previous calls.
pub const fn files(mut self, files: &'a [(&'a str, &'a [u8])]) -> Self {
self.files = files;
self
}
/// JSON encoded body of any additional request fields.
///
/// If this method is called, all other fields are ignored, except for
/// [`files`]. See [Discord Docs/Create Message] and
/// [`ExecuteWebhook::payload_json`].
///
/// [`files`]: Self::files
/// [`ExecuteWebhook::payload_json`]: super::ExecuteWebhook::payload_json
/// [Discord Docs/Create Message]: https://discord.com/developers/docs/resources/channel#create-message-params
pub const fn payload_json(mut self, payload_json: &'a [u8]) -> Self |
// `self` needs to be consumed and the client returned due to parameters
// being consumed in request construction.
fn request(&mut self) -> Result<Request, HttpError> {
let mut request = Request::builder(&Route::UpdateWebhookMessage {
message_id: self.message_id.0,
token: self.token,
webhook_id: self.webhook_id.0,
})
.use_authorization_token(false);
if !self.files.is_empty() || self.fields.payload_json.is_some() {
let mut form = Form::new();
for (index, (name, file)) in self.files.iter().enumerate() {
form.file(format!("{}", index).as_bytes(), name.as_bytes(), file);
}
if let Some(payload_json) = &self.fields.payload_json {
form.payload_json(payload_json);
} else {
if self.fields.allowed_mentions.is_none() {
self.fields.allowed_mentions = self.http.default_allowed_mentions();
}
let body = crate::json::to_vec(&self.fields).map_err(HttpError::json)?;
form.payload_json(&body);
}
request = request.form(form);
} else {
if self.fields.allowed_mentions.is_none() {
self.fields.allowed_mentions = self.http.default_allowed_mentions();
}
request = request.json(&self.fields)?;
}
if let Some(reason) = self.reason.as_ref() {
request = request.headers(request::audit_header(reason)?);
}
Ok(request.build())
}
/// Execute the request, returning a future resolving to a [`Response`].
///
/// [`Response`]: crate::response::Response
pub fn exec(mut self) -> ResponseFuture<EmptyBody> {
match self.request() {
Ok(request) => self.http.request(request),
Err(source) => ResponseFuture::error(source),
}
}
}
impl<'a> AuditLogReason<'a> for UpdateWebhookMessage<'a> {
fn reason(mut self, reason: &'a str) -> Result<Self, AuditLogReasonError> {
self.reason.replace(AuditLogReasonError::validate(reason)?);
Ok(self)
}
}
#[cfg(test)]
mod tests {
use super::{UpdateWebhookMessage, UpdateWebhookMessageFields};
use crate::{
client::Client,
request::{AuditLogReason, NullableField, Request},
routing::Route,
};
use twilight_model::id::{MessageId, WebhookId};
#[test]
fn test_request() {
let client = Client::new("token".to_owned());
let mut builder = UpdateWebhookMessage::new(&client, WebhookId(1), "token", MessageId(2))
.content(Some("test"))
.expect("'test' content couldn't be set")
.reason("reason")
.expect("'reason' is not a valid reason");
let actual = builder.request().expect("failed to create request");
let body = UpdateWebhookMessageFields {
allowed_mentions: None,
attachments: &[],
components: None,
content: Some(NullableField(Some("test"))),
embeds: None,
payload_json: None,
};
let route = Route::UpdateWebhookMessage {
message_id: 2,
token: "token",
webhook_id: 1,
};
let expected = Request::builder(&route)
.json(&body)
.expect("failed to serialize body")
.build();
assert_eq!(expected.body, actual.body);
assert_eq!(expected.path, actual.path);
}
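// A hedged sketch exercising the error accessors; constructing the error
// directly is possible here because this test module can see the private
// fields of its parent module.
#[test]
fn test_error_into_parts() {
let error = super::UpdateWebhookMessageError {
kind: super::UpdateWebhookMessageErrorType::ContentInvalid,
source: None,
};
let (kind, source) = error.into_parts();
assert!(matches!(
kind,
super::UpdateWebhookMessageErrorType::ContentInvalid
));
assert!(source.is_none());
}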
}
| {
self.fields.payload_json = Some(payload_json);
self
} | identifier_body |
update_webhook_message.rs | //! Update a message created by a webhook via execution.
use crate::{
client::Client,
error::Error as HttpError,
request::{
self,
validate_inner::{self, ComponentValidationError, ComponentValidationErrorType},
AuditLogReason, AuditLogReasonError, Form, NullableField, Request,
},
response::{marker::EmptyBody, ResponseFuture},
routing::Route,
};
use serde::Serialize;
use std::{
error::Error,
fmt::{Display, Formatter, Result as FmtResult},
};
use twilight_model::{
application::component::Component,
channel::{embed::Embed, message::AllowedMentions, Attachment},
id::{MessageId, WebhookId},
};
/// A webhook's message can not be updated as configured.
#[derive(Debug)]
pub struct UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType,
source: Option<Box<dyn Error + Send + Sync>>,
}
impl UpdateWebhookMessageError {
/// Immutable reference to the type of error that occurred.
#[must_use = "retrieving the type has no effect if left unused"]
pub const fn kind(&self) -> &UpdateWebhookMessageErrorType {
&self.kind
}
/// Consume the error, returning the source error if there is any.
#[must_use = "consuming the error and retrieving the source has no effect if left unused"]
pub fn into_source(self) -> Option<Box<dyn Error + Send + Sync>> {
self.source
}
/// Consume the error, returning the owned error type and the source error.
#[must_use = "consuming the error into its parts has no effect if left unused"]
pub fn into_parts(
self,
) -> (
UpdateWebhookMessageErrorType,
Option<Box<dyn Error + Send + Sync>>,
) {
(self.kind, self.source)
}
}
impl Display for UpdateWebhookMessageError {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
match &self.kind {
UpdateWebhookMessageErrorType::ComponentCount { count } => {
Display::fmt(count, f)?;
f.write_str(" components were provided, but only ")?;
Display::fmt(&ComponentValidationError::COMPONENT_COUNT, f)?;
f.write_str(" root components are allowed")
}
UpdateWebhookMessageErrorType::ComponentInvalid {.. } => {
f.write_str("a provided component is invalid")
}
UpdateWebhookMessageErrorType::ContentInvalid => {
f.write_str("message content is invalid")
}
UpdateWebhookMessageErrorType::EmbedTooLarge {.. } => {
f.write_str("length of one of the embeds is too large")
}
UpdateWebhookMessageErrorType::TooManyEmbeds => {
f.write_str("only 10 embeds may be provided")
}
}
}
}
impl Error for UpdateWebhookMessageError {
fn source(&self) -> Option<&(dyn Error +'static)> {
self.source
.as_ref()
.map(|source| &**source as &(dyn Error +'static))
}
}
/// Type of [`UpdateWebhookMessageError`] that occurred.
#[derive(Debug)]
#[non_exhaustive]
pub enum UpdateWebhookMessageErrorType {
/// Content is over 2000 UTF-16 characters.
ContentInvalid,
/// Length of one of the embeds is over 6000 characters.
EmbedTooLarge {
/// Index of the embed that was too large.
///
/// This can be used to index into the provided embeds to retrieve the
/// invalid embed.
index: usize,
},
/// An invalid message component was provided.
ComponentInvalid {
/// Additional details about the validation failure type.
kind: ComponentValidationErrorType,
},
/// Too many message components were provided.
ComponentCount {
/// Number of components that were provided.
count: usize,
},
/// Too many embeds were provided.
///
/// A webhook can have up to 10 embeds.
TooManyEmbeds,
}
#[derive(Serialize)]
struct UpdateWebhookMessageFields<'a> {
#[serde(skip_serializing_if = "Option::is_none")]
allowed_mentions: Option<AllowedMentions>,
#[serde(skip_serializing_if = "request::slice_is_empty")]
attachments: &'a [Attachment],
#[serde(skip_serializing_if = "Option::is_none")]
components: Option<NullableField<&'a [Component]>>,
#[serde(skip_serializing_if = "Option::is_none")]
content: Option<NullableField<&'a str>>,
#[serde(skip_serializing_if = "Option::is_none")]
embeds: Option<NullableField<&'a [Embed]>>,
#[serde(skip_serializing_if = "Option::is_none")]
payload_json: Option<&'a [u8]>,
}
/// Update a message created by a webhook.
///
/// A webhook's message must always have at least one embed or some amount of
/// content. If you wish to delete a webhook's message refer to
/// [`DeleteWebhookMessage`].
///
/// # Examples
///
/// Update a webhook's message by setting the content to `test <@3>` -
/// attempting to mention user ID 3 - and specifying that only that the user may
/// not be mentioned.
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_model::{
/// channel::message::AllowedMentions,
/// id::{MessageId, WebhookId}
/// };
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// client.update_webhook_message(WebhookId(1), "token here", MessageId(2))
/// // By creating a default set of allowed mentions, no entity can be
/// // mentioned.
/// .allowed_mentions(AllowedMentions::default())
/// .content(Some("test <@3>"))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// [`DeleteWebhookMessage`]: super::DeleteWebhookMessage
#[must_use = "requests must be configured and executed"]
pub struct UpdateWebhookMessage<'a> {
fields: UpdateWebhookMessageFields<'a>,
files: &'a [(&'a str, &'a [u8])],
http: &'a Client,
message_id: MessageId,
reason: Option<&'a str>,
token: &'a str,
webhook_id: WebhookId,
}
impl<'a> UpdateWebhookMessage<'a> {
/// Maximum number of embeds that a webhook's message may have.
pub const EMBED_COUNT_LIMIT: usize = 10;
pub(crate) const fn new(
http: &'a Client,
webhook_id: WebhookId,
token: &'a str,
message_id: MessageId,
) -> Self {
Self {
fields: UpdateWebhookMessageFields {
allowed_mentions: None,
attachments: &[],
components: None,
content: None,
embeds: None,
payload_json: None,
},
files: &[],
http,
message_id,
reason: None,
token,
webhook_id,
}
}
/// Set the allowed mentions in the message.
pub fn allowed_mentions(mut self, allowed: AllowedMentions) -> Self {
self.fields.allowed_mentions.replace(allowed);
self
}
/// Specify multiple attachments already present in the target message to keep.
///
/// If called, all unspecified attachments will be removed from the message.
/// If not called, all attachments will be kept.
pub const fn attachments(mut self, attachments: &'a [Attachment]) -> Self {
self.fields.attachments = attachments;
self
}
/// Add multiple [`Component`]s to a message.
///
/// Calling this method multiple times will clear previous calls.
///
/// Pass `None` to clear existing components.
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::ComponentCount`] error
/// type if too many components are provided.
///
/// Returns an [`UpdateWebhookMessageErrorType::ComponentInvalid`] error
/// type if one of the provided components is invalid.
pub fn components(
mut self,
components: Option<&'a [Component]>,
) -> Result<Self, UpdateWebhookMessageError> {
if let Some(components) = components.as_ref() {
validate_inner::components(components).map_err(|source| {
let (kind, inner_source) = source.into_parts();
match kind {
ComponentValidationErrorType::ComponentCount { count } => {
UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ComponentCount { count },
source: inner_source,
}
}
other => UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ComponentInvalid { kind: other },
source: inner_source,
},
}
})?;
}
self.fields.components = Some(NullableField(components));
Ok(self)
}
/// Set the content of the message.
///
/// Pass `None` if you want to remove the message content.
///
/// Note that if there is are no embeds then you will not be able to remove
/// the content of the message.
///
/// The maximum length is 2000 UTF-16 characters.
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::ContentInvalid`] error type if
/// the content length is too long.
pub fn content(mut self, content: Option<&'a str>) -> Result<Self, UpdateWebhookMessageError> {
if let Some(content_ref) = content {
if!validate_inner::content_limit(content_ref) {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ContentInvalid,
source: None,
});
}
}
self.fields.content = Some(NullableField(content));
Ok(self)
}
/// Set the list of embeds of the webhook's message.
///
/// Pass `None` to remove all of the embeds.
///
/// The maximum number of allowed embeds is defined by
/// [`EMBED_COUNT_LIMIT`].
///
/// The total character length of each embed must not exceed 6000
/// characters. Additionally, the internal fields also have character
/// limits. Refer to [the discord docs] for more information.
///
/// # Examples
/// | /// of the original message is unaffected and only the embed(s) are
/// modified.
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_embed_builder::EmbedBuilder;
/// use twilight_model::id::{MessageId, WebhookId};
///
/// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// let embed = EmbedBuilder::new()
/// .description("Powerful, flexible, and scalable ecosystem of Rust libraries for the Discord API.")
/// .title("Twilight")
/// .url("https://twilight.rs")
/// .build()?;
///
/// client.update_webhook_message(WebhookId(1), "token", MessageId(2))
/// .embeds(Some(&[embed]))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::EmbedTooLarge`] error type
/// if one of the embeds are too large.
///
/// Returns an [`UpdateWebhookMessageErrorType::TooManyEmbeds`] error type
/// if more than 10 embeds are provided.
///
/// [the discord docs]: https://discord.com/developers/docs/resources/channel#embed-limits
/// [`EMBED_COUNT_LIMIT`]: Self::EMBED_COUNT_LIMIT
pub fn embeds(
mut self,
embeds: Option<&'a [Embed]>,
) -> Result<Self, UpdateWebhookMessageError> {
if let Some(embeds_present) = embeds.as_deref() {
if embeds_present.len() > Self::EMBED_COUNT_LIMIT {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::TooManyEmbeds,
source: None,
});
}
for (idx, embed) in embeds_present.iter().enumerate() {
if let Err(source) = validate_inner::embed(embed) {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::EmbedTooLarge { index: idx },
source: Some(Box::new(source)),
});
}
}
}
self.fields.embeds = Some(NullableField(embeds));
Ok(self)
}
/// Attach multiple files to the webhook.
///
/// Calling this method will clear any previous calls.
pub const fn files(mut self, files: &'a [(&'a str, &'a [u8])]) -> Self {
self.files = files;
self
}
/// JSON encoded body of any additional request fields.
///
/// If this method is called, all other fields are ignored, except for
/// [`files`]. See [Discord Docs/Create Message] and
/// [`ExecuteWebhook::payload_json`].
///
/// [`files`]: Self::files
/// [`ExecuteWebhook::payload_json`]: super::ExecuteWebhook::payload_json
/// [Discord Docs/Create Message]: https://discord.com/developers/docs/resources/channel#create-message-params
pub const fn payload_json(mut self, payload_json: &'a [u8]) -> Self {
self.fields.payload_json = Some(payload_json);
self
}
// `self` needs to be consumed and the client returned due to parameters
// being consumed in request construction.
fn request(&mut self) -> Result<Request, HttpError> {
let mut request = Request::builder(&Route::UpdateWebhookMessage {
message_id: self.message_id.0,
token: self.token,
webhook_id: self.webhook_id.0,
})
.use_authorization_token(false);
if!self.files.is_empty() || self.fields.payload_json.is_some() {
let mut form = Form::new();
for (index, (name, file)) in self.files.iter().enumerate() {
form.file(format!("{}", index).as_bytes(), name.as_bytes(), file);
}
if let Some(payload_json) = &self.fields.payload_json {
form.payload_json(payload_json);
} else {
if self.fields.allowed_mentions.is_none() {
self.fields.allowed_mentions = self.http.default_allowed_mentions();
}
let body = crate::json::to_vec(&self.fields).map_err(HttpError::json)?;
form.payload_json(&body);
}
request = request.form(form);
} else {
if self.fields.allowed_mentions.is_none() {
self.fields.allowed_mentions = self.http.default_allowed_mentions();
}
request = request.json(&self.fields)?;
}
if let Some(reason) = self.reason.as_ref() {
request = request.headers(request::audit_header(reason)?);
}
Ok(request.build())
}
/// Execute the request, returning a future resolving to a [`Response`].
///
/// [`Response`]: crate::response::Response
pub fn exec(mut self) -> ResponseFuture<EmptyBody> {
match self.request() {
Ok(request) => self.http.request(request),
Err(source) => ResponseFuture::error(source),
}
}
}
impl<'a> AuditLogReason<'a> for UpdateWebhookMessage<'a> {
fn reason(mut self, reason: &'a str) -> Result<Self, AuditLogReasonError> {
self.reason.replace(AuditLogReasonError::validate(reason)?);
Ok(self)
}
}
#[cfg(test)]
mod tests {
use super::{UpdateWebhookMessage, UpdateWebhookMessageFields};
use crate::{
client::Client,
request::{AuditLogReason, NullableField, Request},
routing::Route,
};
use twilight_model::id::{MessageId, WebhookId};
#[test]
fn test_request() {
let client = Client::new("token".to_owned());
let mut builder = UpdateWebhookMessage::new(&client, WebhookId(1), "token", MessageId(2))
.content(Some("test"))
.expect("'test' content couldn't be set")
.reason("reason")
.expect("'reason' is not a valid reason");
let actual = builder.request().expect("failed to create request");
let body = UpdateWebhookMessageFields {
allowed_mentions: None,
attachments: &[],
components: None,
content: Some(NullableField(Some("test"))),
embeds: None,
payload_json: None,
};
let route = Route::UpdateWebhookMessage {
message_id: 2,
token: "token",
webhook_id: 1,
};
let expected = Request::builder(&route)
.json(&body)
.expect("failed to serialize body")
.build();
assert_eq!(expected.body, actual.body);
assert_eq!(expected.path, actual.path);
}
} | /// Create an embed and update the message with the new embed. The content | random_line_split |
update_webhook_message.rs | //! Update a message created by a webhook via execution.
use crate::{
client::Client,
error::Error as HttpError,
request::{
self,
validate_inner::{self, ComponentValidationError, ComponentValidationErrorType},
AuditLogReason, AuditLogReasonError, Form, NullableField, Request,
},
response::{marker::EmptyBody, ResponseFuture},
routing::Route,
};
use serde::Serialize;
use std::{
error::Error,
fmt::{Display, Formatter, Result as FmtResult},
};
use twilight_model::{
application::component::Component,
channel::{embed::Embed, message::AllowedMentions, Attachment},
id::{MessageId, WebhookId},
};
/// A webhook's message can not be updated as configured.
#[derive(Debug)]
pub struct UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType,
source: Option<Box<dyn Error + Send + Sync>>,
}
impl UpdateWebhookMessageError {
/// Immutable reference to the type of error that occurred.
#[must_use = "retrieving the type has no effect if left unused"]
pub const fn kind(&self) -> &UpdateWebhookMessageErrorType {
&self.kind
}
/// Consume the error, returning the source error if there is any.
#[must_use = "consuming the error and retrieving the source has no effect if left unused"]
pub fn into_source(self) -> Option<Box<dyn Error + Send + Sync>> {
self.source
}
/// Consume the error, returning the owned error type and the source error.
#[must_use = "consuming the error into its parts has no effect if left unused"]
pub fn into_parts(
self,
) -> (
UpdateWebhookMessageErrorType,
Option<Box<dyn Error + Send + Sync>>,
) {
(self.kind, self.source)
}
}
impl Display for UpdateWebhookMessageError {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
match &self.kind {
UpdateWebhookMessageErrorType::ComponentCount { count } => {
Display::fmt(count, f)?;
f.write_str(" components were provided, but only ")?;
Display::fmt(&ComponentValidationError::COMPONENT_COUNT, f)?;
f.write_str(" root components are allowed")
}
UpdateWebhookMessageErrorType::ComponentInvalid {.. } => {
f.write_str("a provided component is invalid")
}
UpdateWebhookMessageErrorType::ContentInvalid => {
f.write_str("message content is invalid")
}
UpdateWebhookMessageErrorType::EmbedTooLarge {.. } => {
f.write_str("length of one of the embeds is too large")
}
UpdateWebhookMessageErrorType::TooManyEmbeds => {
f.write_str("only 10 embeds may be provided")
}
}
}
}
impl Error for UpdateWebhookMessageError {
fn source(&self) -> Option<&(dyn Error +'static)> {
self.source
.as_ref()
.map(|source| &**source as &(dyn Error +'static))
}
}
/// Type of [`UpdateWebhookMessageError`] that occurred.
#[derive(Debug)]
#[non_exhaustive]
pub enum UpdateWebhookMessageErrorType {
/// Content is over 2000 UTF-16 characters.
ContentInvalid,
/// Length of one of the embeds is over 6000 characters.
EmbedTooLarge {
/// Index of the embed that was too large.
///
/// This can be used to index into the provided embeds to retrieve the
/// invalid embed.
index: usize,
},
/// An invalid message component was provided.
ComponentInvalid {
/// Additional details about the validation failure type.
kind: ComponentValidationErrorType,
},
/// Too many message components were provided.
ComponentCount {
/// Number of components that were provided.
count: usize,
},
/// Too many embeds were provided.
///
/// A webhook can have up to 10 embeds.
TooManyEmbeds,
}
#[derive(Serialize)]
struct UpdateWebhookMessageFields<'a> {
#[serde(skip_serializing_if = "Option::is_none")]
allowed_mentions: Option<AllowedMentions>,
#[serde(skip_serializing_if = "request::slice_is_empty")]
attachments: &'a [Attachment],
#[serde(skip_serializing_if = "Option::is_none")]
components: Option<NullableField<&'a [Component]>>,
#[serde(skip_serializing_if = "Option::is_none")]
content: Option<NullableField<&'a str>>,
#[serde(skip_serializing_if = "Option::is_none")]
embeds: Option<NullableField<&'a [Embed]>>,
#[serde(skip_serializing_if = "Option::is_none")]
payload_json: Option<&'a [u8]>,
}
/// Update a message created by a webhook.
///
/// A webhook's message must always have at least one embed or some amount of
/// content. If you wish to delete a webhook's message refer to
/// [`DeleteWebhookMessage`].
///
/// # Examples
///
/// Update a webhook's message by setting the content to `test <@3>` -
/// attempting to mention user ID 3 - and specifying that only that the user may
/// not be mentioned.
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_model::{
/// channel::message::AllowedMentions,
/// id::{MessageId, WebhookId}
/// };
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// client.update_webhook_message(WebhookId(1), "token here", MessageId(2))
/// // By creating a default set of allowed mentions, no entity can be
/// // mentioned.
/// .allowed_mentions(AllowedMentions::default())
/// .content(Some("test <@3>"))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// [`DeleteWebhookMessage`]: super::DeleteWebhookMessage
#[must_use = "requests must be configured and executed"]
pub struct UpdateWebhookMessage<'a> {
fields: UpdateWebhookMessageFields<'a>,
files: &'a [(&'a str, &'a [u8])],
http: &'a Client,
message_id: MessageId,
reason: Option<&'a str>,
token: &'a str,
webhook_id: WebhookId,
}
impl<'a> UpdateWebhookMessage<'a> {
/// Maximum number of embeds that a webhook's message may have.
pub const EMBED_COUNT_LIMIT: usize = 10;
pub(crate) const fn new(
http: &'a Client,
webhook_id: WebhookId,
token: &'a str,
message_id: MessageId,
) -> Self {
Self {
fields: UpdateWebhookMessageFields {
allowed_mentions: None,
attachments: &[],
components: None,
content: None,
embeds: None,
payload_json: None,
},
files: &[],
http,
message_id,
reason: None,
token,
webhook_id,
}
}
/// Set the allowed mentions in the message.
pub fn allowed_mentions(mut self, allowed: AllowedMentions) -> Self {
self.fields.allowed_mentions.replace(allowed);
self
}
/// Specify multiple attachments already present in the target message to keep.
///
/// If called, all unspecified attachments will be removed from the message.
/// If not called, all attachments will be kept.
pub const fn attachments(mut self, attachments: &'a [Attachment]) -> Self {
self.fields.attachments = attachments;
self
}
/// Add multiple [`Component`]s to a message.
///
/// Calling this method multiple times will clear previous calls.
///
/// Pass `None` to clear existing components.
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::ComponentCount`] error
/// type if too many components are provided.
///
/// Returns an [`UpdateWebhookMessageErrorType::ComponentInvalid`] error
/// type if one of the provided components is invalid.
pub fn components(
mut self,
components: Option<&'a [Component]>,
) -> Result<Self, UpdateWebhookMessageError> {
if let Some(components) = components.as_ref() |
self.fields.components = Some(NullableField(components));
Ok(self)
}
/// Set the content of the message.
///
/// Pass `None` if you want to remove the message content.
///
/// Note that if there is are no embeds then you will not be able to remove
/// the content of the message.
///
/// The maximum length is 2000 UTF-16 characters.
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::ContentInvalid`] error type if
/// the content length is too long.
pub fn content(mut self, content: Option<&'a str>) -> Result<Self, UpdateWebhookMessageError> {
if let Some(content_ref) = content {
if!validate_inner::content_limit(content_ref) {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ContentInvalid,
source: None,
});
}
}
self.fields.content = Some(NullableField(content));
Ok(self)
}
/// Set the list of embeds of the webhook's message.
///
/// Pass `None` to remove all of the embeds.
///
/// The maximum number of allowed embeds is defined by
/// [`EMBED_COUNT_LIMIT`].
///
/// The total character length of each embed must not exceed 6000
/// characters. Additionally, the internal fields also have character
/// limits. Refer to [the discord docs] for more information.
///
/// # Examples
///
/// Create an embed and update the message with the new embed. The content
/// of the original message is unaffected and only the embed(s) are
/// modified.
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_embed_builder::EmbedBuilder;
/// use twilight_model::id::{MessageId, WebhookId};
///
/// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// let embed = EmbedBuilder::new()
/// .description("Powerful, flexible, and scalable ecosystem of Rust libraries for the Discord API.")
/// .title("Twilight")
/// .url("https://twilight.rs")
/// .build()?;
///
/// client.update_webhook_message(WebhookId(1), "token", MessageId(2))
/// .embeds(Some(&[embed]))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// # Errors
///
/// Returns an [`UpdateWebhookMessageErrorType::EmbedTooLarge`] error type
/// if one of the embeds are too large.
///
/// Returns an [`UpdateWebhookMessageErrorType::TooManyEmbeds`] error type
/// if more than 10 embeds are provided.
///
/// [the discord docs]: https://discord.com/developers/docs/resources/channel#embed-limits
/// [`EMBED_COUNT_LIMIT`]: Self::EMBED_COUNT_LIMIT
pub fn embeds(
mut self,
embeds: Option<&'a [Embed]>,
) -> Result<Self, UpdateWebhookMessageError> {
if let Some(embeds_present) = embeds.as_deref() {
if embeds_present.len() > Self::EMBED_COUNT_LIMIT {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::TooManyEmbeds,
source: None,
});
}
for (idx, embed) in embeds_present.iter().enumerate() {
if let Err(source) = validate_inner::embed(embed) {
return Err(UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::EmbedTooLarge { index: idx },
source: Some(Box::new(source)),
});
}
}
}
self.fields.embeds = Some(NullableField(embeds));
Ok(self)
}
/// Attach multiple files to the webhook.
///
/// Calling this method will clear any previous calls.
pub const fn files(mut self, files: &'a [(&'a str, &'a [u8])]) -> Self {
self.files = files;
self
}
/// JSON encoded body of any additional request fields.
///
/// If this method is called, all other fields are ignored, except for
/// [`files`]. See [Discord Docs/Create Message] and
/// [`ExecuteWebhook::payload_json`].
///
/// [`files`]: Self::files
/// [`ExecuteWebhook::payload_json`]: super::ExecuteWebhook::payload_json
/// [Discord Docs/Create Message]: https://discord.com/developers/docs/resources/channel#create-message-params
pub const fn payload_json(mut self, payload_json: &'a [u8]) -> Self {
self.fields.payload_json = Some(payload_json);
self
}
// `self` needs to be consumed and the client returned due to parameters
// being consumed in request construction.
fn request(&mut self) -> Result<Request, HttpError> {
let mut request = Request::builder(&Route::UpdateWebhookMessage {
message_id: self.message_id.0,
token: self.token,
webhook_id: self.webhook_id.0,
})
.use_authorization_token(false);
if!self.files.is_empty() || self.fields.payload_json.is_some() {
let mut form = Form::new();
for (index, (name, file)) in self.files.iter().enumerate() {
form.file(format!("{}", index).as_bytes(), name.as_bytes(), file);
}
if let Some(payload_json) = &self.fields.payload_json {
form.payload_json(payload_json);
} else {
if self.fields.allowed_mentions.is_none() {
self.fields.allowed_mentions = self.http.default_allowed_mentions();
}
let body = crate::json::to_vec(&self.fields).map_err(HttpError::json)?;
form.payload_json(&body);
}
request = request.form(form);
} else {
if self.fields.allowed_mentions.is_none() {
self.fields.allowed_mentions = self.http.default_allowed_mentions();
}
request = request.json(&self.fields)?;
}
if let Some(reason) = self.reason.as_ref() {
request = request.headers(request::audit_header(reason)?);
}
Ok(request.build())
}
/// Execute the request, returning a future resolving to a [`Response`].
///
/// [`Response`]: crate::response::Response
pub fn exec(mut self) -> ResponseFuture<EmptyBody> {
match self.request() {
Ok(request) => self.http.request(request),
Err(source) => ResponseFuture::error(source),
}
}
}
impl<'a> AuditLogReason<'a> for UpdateWebhookMessage<'a> {
fn reason(mut self, reason: &'a str) -> Result<Self, AuditLogReasonError> {
self.reason.replace(AuditLogReasonError::validate(reason)?);
Ok(self)
}
}
#[cfg(test)]
mod tests {
use super::{UpdateWebhookMessage, UpdateWebhookMessageFields};
use crate::{
client::Client,
request::{AuditLogReason, NullableField, Request},
routing::Route,
};
use twilight_model::id::{MessageId, WebhookId};
#[test]
fn test_request() {
let client = Client::new("token".to_owned());
let mut builder = UpdateWebhookMessage::new(&client, WebhookId(1), "token", MessageId(2))
.content(Some("test"))
.expect("'test' content couldn't be set")
.reason("reason")
.expect("'reason' is not a valid reason");
let actual = builder.request().expect("failed to create request");
let body = UpdateWebhookMessageFields {
allowed_mentions: None,
attachments: &[],
components: None,
content: Some(NullableField(Some("test"))),
embeds: None,
payload_json: None,
};
let route = Route::UpdateWebhookMessage {
message_id: 2,
token: "token",
webhook_id: 1,
};
let expected = Request::builder(&route)
.json(&body)
.expect("failed to serialize body")
.build();
assert_eq!(expected.body, actual.body);
assert_eq!(expected.path, actual.path);
}
}
| {
validate_inner::components(components).map_err(|source| {
let (kind, inner_source) = source.into_parts();
match kind {
ComponentValidationErrorType::ComponentCount { count } => {
UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ComponentCount { count },
source: inner_source,
}
}
other => UpdateWebhookMessageError {
kind: UpdateWebhookMessageErrorType::ComponentInvalid { kind: other },
source: inner_source,
},
}
})?;
} | conditional_block |
mode.rs | use std::{io,process,str,thread,time};
use std::io::Error;
use std::result::Result;
use regex::Regex;
use serde::{Serialize,Deserialize};
use crate::{fileio,util};
#[derive(Debug)]
pub struct InputMode {
width:String,
height:String,
rate:String,
name:String,
display:String,
}
impl InputMode {
pub fn new(width:&str,height:&str,rate:&str,display:&str,name:&str) -> InputMode {
InputMode {
width:width.to_string(),
height:height.to_string(),
rate:rate.to_string(),
display:display.to_string(),
name:name.to_string()
}
}
}
#[derive(Clone,Debug,Serialize,Deserialize)]
pub struct CvtMode {
name: String,
clock: String,
h_disp: String,
h_sync_start: String,
h_sync_end: String,
h_total: String,
v_disp: String,
v_sync_start: String,
v_sync_end: String,
v_total: String,
flags: String,
}
impl CvtMode {
pub fn get_name(&self) -> &str {
&self.name
}
/*
pub fn new_empty() -> CvtMode {
CvtMode {
name: String::new(),
clock: String::new(),
h_disp: String::new(),
h_sync_start: String::new(),
h_sync_end: String::new(),
h_total: String::new(),
v_disp: String::new(),
v_sync_start: String::new(),
v_sync_end: String::new(),
v_total: String::new(),
flags: String::new(),
}
}
*/
}
// Some(d) would be a vec of the displays for which to delete the mode; if d is None, the mode will be removed from all connected displays
// xrandr doesn't seem to think the program has access to user-created modes for deletion;
// could run as root but would rather not.
// TODO: address deletion permission issue
/*
fn delete_mode_xrandr(n: &str, d: Option<Vec<String>>, verbose: bool) -> Result<(),Error> {
for display in d.unwrap() {
delete_mode(&n,&display);
}
let currents_handle = thread::spawn(move || get_current_modes(verbose));
let defaults_handle = thread::spawn(move || get_default_modes(verbose));
let currents = currents_handle.join().unwrap()?;
let defaults = defaults_handle.join().unwrap()?;
let displays = match d {
Some(disps) => disps,
None => {
let mut tmp: Vec<String> = Vec::with_capacity(currents.len());
for mode in ¤ts {
tmp.push(mode.display.clone());
}
tmp
}
};
println!("{:?}",¤ts);
// these loops are because xrandr doesn't let you update modes or delete them while in use
for disp in displays {
for default in &defaults {
if default.display == disp {
if verbose {
println!("Switching to default mode to allow updating of the current mode");
}
switch_mode(&default.name, &disp, verbose)?; // switch the display to its default mode to enable deletion of in-use mode
}
}
if verbose {
println!("Removing mode {} from display {}",&n,&disp);
}
let mut cmd = process::Command::new("xrandr");
cmd.arg("--delmode").arg(disp.clone()).arg(n.clone());
println!("{:?}",cmd.output().unwrap());
}
Ok(())
}
*/
pub fn add_mode(w: Option<&str>, h: Option<&str>, r: Option<&str>, d: Option<&str>, n: Option<&str>, t: Option<&str>, f: Option<&str>, test: bool, save: bool, verbose: bool) -> Result<(),Error> {
let current_modes = get_current_modes(verbose)?;
// Use first current display mode for parameters not supplied
// and as the fallback if test option is used
let width = w.unwrap_or(¤t_modes[0].width).to_string();
let height = h.unwrap_or(¤t_modes[0].height).to_string();
let rate = r.unwrap_or(¤t_modes[0].rate).to_string();
let display = d.unwrap_or(¤t_modes[0].display).to_string();
let tmp = format!("{}x{}_{}",width,height,rate);
// default test timeout is 10 seconds.
let name = match n {
Some(nm) => String::from(nm),
None => {
tmp
}
};
let i_mode = InputMode {
width,
height,
rate,
display: String::from(&display),
name: name.clone()
};
let mut d_vec: Vec<String> = Vec::with_capacity(1);
d_vec.push(display.clone());
// compute CVT timings and delete xrandr mode concurrently; wait for deletion before adding to xrandr
//let del_handle = thread::spawn(move || delete_mode_xrandr(&name, Some(d_vec), verbose));
let cvt_handle = thread::spawn(move || gen_cvt_mode(&i_mode, verbose));
let fallback_cvt_handle = thread::spawn(move || gen_cvt_mode(¤t_modes[0], verbose));
//let _ = del_handle.join().unwrap();
let cvt = cvt_handle.join().unwrap();
let fallback_cvt = fallback_cvt_handle.join().unwrap();
new_mode(&cvt, &display, verbose)?;
if test {
test_mode(&cvt, &fallback_cvt, &display, t, verbose)?;
}
if save {
fileio::save_mode(&cvt,f,verbose)?
}
Ok(())
}
pub fn apply_mode(n: &str, d: &str, t: Option<&str>, test: bool, persist: bool, verbose: bool) -> Result<(), io::Error> {
println!("Applying mode {} to display {}.",n,d);
let mode = fileio::get_mode(n, None, verbose).unwrap();
if test {
let default_modes = get_default_modes(verbose)?;
let default_mode = gen_cvt_mode(&default_modes[0],verbose);
test_mode(&mode, &default_mode, d, t, verbose)?;
println!("Keep the mode you just tested? y/n");
let mut input = String::new();
while!(input.contains("y") || input.contains("n")) {
let _ = io::stdin().read_line(&mut input);
if input.contains("n") {
return Ok(());
}
}
}
switch_mode(n, d, verbose)?;
if persist {
fileio::save_mode_persistent(&mode, verbose)?;
}
Ok(())
}
fn test_mode(mode: &CvtMode, default_mode: &CvtMode, display: &str, t: Option<&str>, verbose: bool) -> Result<(), io::Error> {
let name = &mode.get_name();
let default_name = &default_mode.get_name();
let timeout: u64 = match t {
Some(time) => {
let tmp = match time.parse() {
Ok(kk) => kk,
Err(_) => {
eprintln!("Error: timeout must be an integer greater than zero. Using default timeout of 10 seconds.");
10 // just default to 10 secs if invalid timeout provided rather than returning an error
}
};
if tmp > 0 {
tmp
} else {
10 // default to 10 secs if none given
}
}
None => 10
};
let delay = time::Duration::from_secs(timeout);
if verbose {
println!("Testing mode {} on display {} for {} secs.", name, display, timeout);
thread::sleep(time::Duration::from_secs(1));
}
if verbose {
let _ = thread::spawn(move || util::print_countdown(timeout)); // this should maybe print regardless of verbose option, idk
}
let handle = thread::spawn(move || thread::sleep(delay));
switch_mode(name, display, verbose)?;
handle.join().expect("Timer thread had an error.");
if verbose {
println!("Reverting to mode {} on display {}.", default_name, display);
}
switch_mode(default_name, display, verbose)?;
Ok(())
}
fn gen_cvt_mode(input: &InputMode, verbose: bool) -> CvtMode {
if verbose {
println!("Generating coordinated video timings for mode {}",input.name);
}
let mut cmd = process::Command::new("cvt");
cmd.arg(&input.width).arg(&input.height).arg(&input.rate);
let output = cmd.output().unwrap();
let out = str::from_utf8(&output.stdout).unwrap();
let lines: Vec<_> = out.split('"').collect();
let mut t: Vec<_> = lines[2][2..lines[2].len()-1].split(" ").collect();
let mut i=0;
while i < t.len() {
if t[i] == "" || t[i] == "\t" {
t.remove(i);
} else {
i += 1;
}
}
let tmp = CvtMode {
name: input.name.to_owned(),
clock: String::from(t[0]),
h_disp: String::from(t[1]),
h_sync_start: String::from(t[2]),
h_sync_end: String::from(t[3]),
h_total: String::from(t[4]),
v_disp: String::from(t[5]),
v_sync_start: String::from(t[6]),
v_sync_end: String::from(t[7]),
v_total: String::from(t[8]),
flags: format!("{} {}",t[9],t[10]),
};
if verbose {
println!("{:?}",tmp);
}
tmp
}
// Retrieves modes which are currently in use
fn get_current_modes(verbose: bool) -> Result<Vec<InputMode>, Error> {
if verbose {
println!("Retrieving current display configuration.");
}
let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)\*").unwrap();
util::get_modes_helper(&re, verbose)
}
// Retrieves the default modes for each display
fn get_default_modes(verbose: bool) -> Result<Vec<InputMode>, Error> {
if verbose {
println!("Retrieving current display configuration.");
}
let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)[\*]?\+").unwrap();
util::get_modes_helper(&re, verbose)
}
fn switch_mode(name: &str, display: &str, verbose: bool) -> Result<(), io::Error> {
let mut cmd = process::Command::new("xrandr");
cmd.arg("--output").arg(&display).arg("--mode").arg(name);
if verbose {
println!("Applying mode {} to display {}",name,&display);
}
cmd.output()?;
if verbose {
println!("Successfully applied mode {} to display {}",name, &display);
}
Ok(())
}
// Adds the newly created mode to xrandr
fn new_mode(mode: &CvtMode, display: &str, verbose: bool) -> Result<(), io::Error> {
let mut cmd = process::Command::new("xrandr"); | .arg(&mode.name)
.arg(&mode.clock)
.arg(&mode.h_disp)
.arg(&mode.h_sync_start)
.arg(&mode.h_sync_end)
.arg(&mode.h_total)
.arg(&mode.v_disp)
.arg(&mode.v_sync_start)
.arg(&mode.v_sync_end)
.arg(&mode.v_total)
.arg(&mode.flags);
if verbose {
println!("Creating xrandr mode {}",&mode.name);
}
cmd.output()?;
if verbose {
println!("Adding mode {} for display {}.",&mode.name,display);
}
cmd = process::Command::new("xrandr");
cmd.arg("--addmode").arg(display).arg(&mode.name);
cmd.output()?;
Ok(())
} | cmd.arg("--newmode") | random_line_split |
mode.rs | use std::{io,process,str,thread,time};
use std::io::Error;
use std::result::Result;
use regex::Regex;
use serde::{Serialize,Deserialize};
use crate::{fileio,util};
#[derive(Debug)]
pub struct InputMode {
width:String,
height:String,
rate:String,
name:String,
display:String,
}
impl InputMode {
pub fn new(width:&str,height:&str,rate:&str,display:&str,name:&str) -> InputMode {
InputMode {
width:width.to_string(),
height:height.to_string(),
rate:rate.to_string(),
display:display.to_string(),
name:name.to_string()
}
}
}
#[derive(Clone,Debug,Serialize,Deserialize)]
pub struct CvtMode {
name: String,
clock: String,
h_disp: String,
h_sync_start: String,
h_sync_end: String,
h_total: String,
v_disp: String,
v_sync_start: String,
v_sync_end: String,
v_total: String,
flags: String,
}
impl CvtMode {
pub fn get_name(&self) -> &str {
&self.name
}
/*
pub fn new_empty() -> CvtMode {
CvtMode {
name: String::new(),
clock: String::new(),
h_disp: String::new(),
h_sync_start: String::new(),
h_sync_end: String::new(),
h_total: String::new(),
v_disp: String::new(),
v_sync_start: String::new(),
v_sync_end: String::new(),
v_total: String::new(),
flags: String::new(),
}
}
*/
}
// Some(d) would be a vec of the displays for which to delete the mode; if d is None, the mode will be removed from all connected displays
// xrandr doesn't seem to think the program has access to user-created modes for deletion;
// could run as root but would rather not.
// TODO: address deletion permission issue
/*
fn delete_mode_xrandr(n: &str, d: Option<Vec<String>>, verbose: bool) -> Result<(),Error> {
for display in d.unwrap() {
delete_mode(&n,&display);
}
let currents_handle = thread::spawn(move || get_current_modes(verbose));
let defaults_handle = thread::spawn(move || get_default_modes(verbose));
let currents = currents_handle.join().unwrap()?;
let defaults = defaults_handle.join().unwrap()?;
let displays = match d {
Some(disps) => disps,
None => {
let mut tmp: Vec<String> = Vec::with_capacity(currents.len());
for mode in ¤ts {
tmp.push(mode.display.clone());
}
tmp
}
};
println!("{:?}",¤ts);
// these loops are because xrandr doesn't let you update modes or delete them while in use
for disp in displays {
for default in &defaults {
if default.display == disp {
if verbose {
println!("Switching to default mode to allow updating of the current mode");
}
switch_mode(&default.name, &disp, verbose)?; // switch the display to its default mode to enable deletion of in-use mode
}
}
if verbose {
println!("Removing mode {} from display {}",&n,&disp);
}
let mut cmd = process::Command::new("xrandr");
cmd.arg("--delmode").arg(disp.clone()).arg(n.clone());
println!("{:?}",cmd.output().unwrap());
}
Ok(())
}
*/
pub fn add_mode(w: Option<&str>, h: Option<&str>, r: Option<&str>, d: Option<&str>, n: Option<&str>, t: Option<&str>, f: Option<&str>, test: bool, save: bool, verbose: bool) -> Result<(),Error> {
let current_modes = get_current_modes(verbose)?;
// Use first current display mode for parameters not supplied
// and as the fallback if test option is used
let width = w.unwrap_or(¤t_modes[0].width).to_string();
let height = h.unwrap_or(¤t_modes[0].height).to_string();
let rate = r.unwrap_or(¤t_modes[0].rate).to_string();
let display = d.unwrap_or(¤t_modes[0].display).to_string();
let tmp = format!("{}x{}_{}",width,height,rate);
// default test timeout is 10 seconds.
let name = match n {
Some(nm) => String::from(nm),
None => {
tmp
}
};
let i_mode = InputMode {
width,
height,
rate,
display: String::from(&display),
name: name.clone()
};
let mut d_vec: Vec<String> = Vec::with_capacity(1);
d_vec.push(display.clone());
// compute CVT timings and delete xrandr mode concurrently; wait for deletion before adding to xrandr
//let del_handle = thread::spawn(move || delete_mode_xrandr(&name, Some(d_vec), verbose));
let cvt_handle = thread::spawn(move || gen_cvt_mode(&i_mode, verbose));
let fallback_cvt_handle = thread::spawn(move || gen_cvt_mode(¤t_modes[0], verbose));
//let _ = del_handle.join().unwrap();
let cvt = cvt_handle.join().unwrap();
let fallback_cvt = fallback_cvt_handle.join().unwrap();
new_mode(&cvt, &display, verbose)?;
if test {
test_mode(&cvt, &fallback_cvt, &display, t, verbose)?;
}
if save {
fileio::save_mode(&cvt,f,verbose)?
}
Ok(())
}
pub fn apply_mode(n: &str, d: &str, t: Option<&str>, test: bool, persist: bool, verbose: bool) -> Result<(), io::Error> {
println!("Applying mode {} to display {}.",n,d);
let mode = fileio::get_mode(n, None, verbose).unwrap();
if test {
let default_modes = get_default_modes(verbose)?;
let default_mode = gen_cvt_mode(&default_modes[0],verbose);
test_mode(&mode, &default_mode, d, t, verbose)?;
println!("Keep the mode you just tested? y/n");
let mut input = String::new();
while!(input.contains("y") || input.contains("n")) {
let _ = io::stdin().read_line(&mut input);
if input.contains("n") {
return Ok(());
}
}
}
switch_mode(n, d, verbose)?;
if persist {
fileio::save_mode_persistent(&mode, verbose)?;
}
Ok(())
}
fn test_mode(mode: &CvtMode, default_mode: &CvtMode, display: &str, t: Option<&str>, verbose: bool) -> Result<(), io::Error> {
let name = &mode.get_name();
let default_name = &default_mode.get_name();
let timeout: u64 = match t {
Some(time) => {
let tmp = match time.parse() {
Ok(kk) => kk,
Err(_) => {
eprintln!("Error: timeout must be an integer greater than zero. Using default timeout of 10 seconds.");
10 // just default to 10 secs if invalid timeout provided rather than returning an error
}
};
if tmp > 0 {
tmp
} else {
10 // default to 10 secs if none given
}
}
None => 10
};
let delay = time::Duration::from_secs(timeout);
if verbose {
println!("Testing mode {} on display {} for {} secs.", name, display, timeout);
thread::sleep(time::Duration::from_secs(1));
}
if verbose {
let _ = thread::spawn(move || util::print_countdown(timeout)); // this should maybe print regardless of verbose option, idk
}
let handle = thread::spawn(move || thread::sleep(delay));
switch_mode(name, display, verbose)?;
handle.join().expect("Timer thread had an error.");
if verbose {
println!("Reverting to mode {} on display {}.", default_name, display);
}
switch_mode(default_name, display, verbose)?;
Ok(())
}
fn gen_cvt_mode(input: &InputMode, verbose: bool) -> CvtMode {
if verbose {
println!("Generating coordinated video timings for mode {}",input.name);
}
let mut cmd = process::Command::new("cvt");
cmd.arg(&input.width).arg(&input.height).arg(&input.rate);
let output = cmd.output().unwrap();
let out = str::from_utf8(&output.stdout).unwrap();
let lines: Vec<_> = out.split('"').collect();
let mut t: Vec<_> = lines[2][2..lines[2].len()-1].split(" ").collect();
let mut i=0;
while i < t.len() {
if t[i] == "" || t[i] == "\t" {
t.remove(i);
} else {
i += 1;
}
}
let tmp = CvtMode {
name: input.name.to_owned(),
clock: String::from(t[0]),
h_disp: String::from(t[1]),
h_sync_start: String::from(t[2]),
h_sync_end: String::from(t[3]),
h_total: String::from(t[4]),
v_disp: String::from(t[5]),
v_sync_start: String::from(t[6]),
v_sync_end: String::from(t[7]),
v_total: String::from(t[8]),
flags: format!("{} {}",t[9],t[10]),
};
if verbose {
println!("{:?}",tmp);
}
tmp
}
// Retrieves modes which are currently in use
fn get_current_modes(verbose: bool) -> Result<Vec<InputMode>, Error> {
if verbose {
println!("Retrieving current display configuration.");
}
let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)\*").unwrap();
util::get_modes_helper(&re, verbose)
}
// Retrieves the default modes for each display
fn get_default_modes(verbose: bool) -> Result<Vec<InputMode>, Error> {
if verbose {
println!("Retrieving current display configuration.");
}
let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)[\*]?\+").unwrap();
util::get_modes_helper(&re, verbose)
}
fn switch_mode(name: &str, display: &str, verbose: bool) -> Result<(), io::Error> {
let mut cmd = process::Command::new("xrandr");
cmd.arg("--output").arg(&display).arg("--mode").arg(name);
if verbose {
println!("Applying mode {} to display {}",name,&display);
}
cmd.output()?;
if verbose {
println!("Successfully applied mode {} to display {}",name, &display);
}
Ok(())
}
// Adds the newly created mode to xrandr
fn new_mode(mode: &CvtMode, display: &str, verbose: bool) -> Result<(), io::Error> | }
cmd = process::Command::new("xrandr");
cmd.arg("--addmode").arg(display).arg(&mode.name);
cmd.output()?;
Ok(())
}
| {
let mut cmd = process::Command::new("xrandr");
cmd.arg("--newmode")
.arg(&mode.name)
.arg(&mode.clock)
.arg(&mode.h_disp)
.arg(&mode.h_sync_start)
.arg(&mode.h_sync_end)
.arg(&mode.h_total)
.arg(&mode.v_disp)
.arg(&mode.v_sync_start)
.arg(&mode.v_sync_end)
.arg(&mode.v_total)
.arg(&mode.flags);
if verbose {
println!("Creating xrandr mode {}",&mode.name);
}
cmd.output()?;
if verbose {
println!("Adding mode {} for display {}.",&mode.name,display); | identifier_body |
mode.rs | use std::{io,process,str,thread,time};
use std::io::Error;
use std::result::Result;
use regex::Regex;
use serde::{Serialize,Deserialize};
use crate::{fileio,util};
#[derive(Debug)]
pub struct InputMode {
width:String,
height:String,
rate:String,
name:String,
display:String,
}
impl InputMode {
pub fn new(width:&str,height:&str,rate:&str,display:&str,name:&str) -> InputMode {
InputMode {
width:width.to_string(),
height:height.to_string(),
rate:rate.to_string(),
display:display.to_string(),
name:name.to_string()
}
}
}
#[derive(Clone,Debug,Serialize,Deserialize)]
pub struct CvtMode {
name: String,
clock: String,
h_disp: String,
h_sync_start: String,
h_sync_end: String,
h_total: String,
v_disp: String,
v_sync_start: String,
v_sync_end: String,
v_total: String,
flags: String,
}
impl CvtMode {
pub fn get_name(&self) -> &str {
&self.name
}
/*
pub fn new_empty() -> CvtMode {
CvtMode {
name: String::new(),
clock: String::new(),
h_disp: String::new(),
h_sync_start: String::new(),
h_sync_end: String::new(),
h_total: String::new(),
v_disp: String::new(),
v_sync_start: String::new(),
v_sync_end: String::new(),
v_total: String::new(),
flags: String::new(),
}
}
*/
}
// Some(d) would be a vec of the displays for which to delete the mode; if d is None, the mode will be removed from all connected displays
// xrandr doesn't seem to think the program has access to user-created modes for deletion;
// could run as root but would rather not.
// TODO: address deletion permission issue
/*
fn delete_mode_xrandr(n: &str, d: Option<Vec<String>>, verbose: bool) -> Result<(),Error> {
for display in d.unwrap() {
delete_mode(&n,&display);
}
let currents_handle = thread::spawn(move || get_current_modes(verbose));
let defaults_handle = thread::spawn(move || get_default_modes(verbose));
let currents = currents_handle.join().unwrap()?;
let defaults = defaults_handle.join().unwrap()?;
let displays = match d {
Some(disps) => disps,
None => {
let mut tmp: Vec<String> = Vec::with_capacity(currents.len());
for mode in ¤ts {
tmp.push(mode.display.clone());
}
tmp
}
};
println!("{:?}",¤ts);
// these loops are because xrandr doesn't let you update modes or delete them while in use
for disp in displays {
for default in &defaults {
if default.display == disp {
if verbose {
println!("Switching to default mode to allow updating of the current mode");
}
switch_mode(&default.name, &disp, verbose)?; // switch the display to its default mode to enable deletion of in-use mode
}
}
if verbose {
println!("Removing mode {} from display {}",&n,&disp);
}
let mut cmd = process::Command::new("xrandr");
cmd.arg("--delmode").arg(disp.clone()).arg(n.clone());
println!("{:?}",cmd.output().unwrap());
}
Ok(())
}
*/
pub fn | (w: Option<&str>, h: Option<&str>, r: Option<&str>, d: Option<&str>, n: Option<&str>, t: Option<&str>, f: Option<&str>, test: bool, save: bool, verbose: bool) -> Result<(),Error> {
let current_modes = get_current_modes(verbose)?;
// Use first current display mode for parameters not supplied
// and as the fallback if test option is used
let width = w.unwrap_or(¤t_modes[0].width).to_string();
let height = h.unwrap_or(¤t_modes[0].height).to_string();
let rate = r.unwrap_or(¤t_modes[0].rate).to_string();
let display = d.unwrap_or(¤t_modes[0].display).to_string();
let tmp = format!("{}x{}_{}",width,height,rate);
// default test timeout is 10 seconds.
let name = match n {
Some(nm) => String::from(nm),
None => {
tmp
}
};
let i_mode = InputMode {
width,
height,
rate,
display: String::from(&display),
name: name.clone()
};
let mut d_vec: Vec<String> = Vec::with_capacity(1);
d_vec.push(display.clone());
// compute CVT timings and delete xrandr mode concurrently; wait for deletion before adding to xrandr
//let del_handle = thread::spawn(move || delete_mode_xrandr(&name, Some(d_vec), verbose));
let cvt_handle = thread::spawn(move || gen_cvt_mode(&i_mode, verbose));
let fallback_cvt_handle = thread::spawn(move || gen_cvt_mode(¤t_modes[0], verbose));
//let _ = del_handle.join().unwrap();
let cvt = cvt_handle.join().unwrap();
let fallback_cvt = fallback_cvt_handle.join().unwrap();
new_mode(&cvt, &display, verbose)?;
if test {
test_mode(&cvt, &fallback_cvt, &display, t, verbose)?;
}
if save {
fileio::save_mode(&cvt,f,verbose)?
}
Ok(())
}
pub fn apply_mode(n: &str, d: &str, t: Option<&str>, test: bool, persist: bool, verbose: bool) -> Result<(), io::Error> {
println!("Applying mode {} to display {}.",n,d);
let mode = fileio::get_mode(n, None, verbose).unwrap();
if test {
let default_modes = get_default_modes(verbose)?;
let default_mode = gen_cvt_mode(&default_modes[0],verbose);
test_mode(&mode, &default_mode, d, t, verbose)?;
println!("Keep the mode you just tested? y/n");
let mut input = String::new();
while!(input.contains("y") || input.contains("n")) {
let _ = io::stdin().read_line(&mut input);
if input.contains("n") {
return Ok(());
}
}
}
switch_mode(n, d, verbose)?;
if persist {
fileio::save_mode_persistent(&mode, verbose)?;
}
Ok(())
}
fn test_mode(mode: &CvtMode, default_mode: &CvtMode, display: &str, t: Option<&str>, verbose: bool) -> Result<(), io::Error> {
let name = &mode.get_name();
let default_name = &default_mode.get_name();
let timeout: u64 = match t {
Some(time) => {
let tmp = match time.parse() {
Ok(kk) => kk,
Err(_) => {
eprintln!("Error: timeout must be an integer greater than zero. Using default timeout of 10 seconds.");
10 // just default to 10 secs if invalid timeout provided rather than returning an error
}
};
if tmp > 0 {
tmp
} else {
10 // default to 10 secs if none given
}
}
None => 10
};
let delay = time::Duration::from_secs(timeout);
if verbose {
println!("Testing mode {} on display {} for {} secs.", name, display, timeout);
thread::sleep(time::Duration::from_secs(1));
}
if verbose {
let _ = thread::spawn(move || util::print_countdown(timeout)); // this should maybe print regardless of verbose option, idk
}
let handle = thread::spawn(move || thread::sleep(delay));
switch_mode(name, display, verbose)?;
handle.join().expect("Timer thread had an error.");
if verbose {
println!("Reverting to mode {} on display {}.", default_name, display);
}
switch_mode(default_name, display, verbose)?;
Ok(())
}
fn gen_cvt_mode(input: &InputMode, verbose: bool) -> CvtMode {
if verbose {
println!("Generating coordinated video timings for mode {}",input.name);
}
let mut cmd = process::Command::new("cvt");
cmd.arg(&input.width).arg(&input.height).arg(&input.rate);
let output = cmd.output().unwrap();
let out = str::from_utf8(&output.stdout).unwrap();
let lines: Vec<_> = out.split('"').collect();
let mut t: Vec<_> = lines[2][2..lines[2].len()-1].split(" ").collect();
let mut i=0;
while i < t.len() {
if t[i] == "" || t[i] == "\t" {
t.remove(i);
} else {
i += 1;
}
}
let tmp = CvtMode {
name: input.name.to_owned(),
clock: String::from(t[0]),
h_disp: String::from(t[1]),
h_sync_start: String::from(t[2]),
h_sync_end: String::from(t[3]),
h_total: String::from(t[4]),
v_disp: String::from(t[5]),
v_sync_start: String::from(t[6]),
v_sync_end: String::from(t[7]),
v_total: String::from(t[8]),
flags: format!("{} {}",t[9],t[10]),
};
if verbose {
println!("{:?}",tmp);
}
tmp
}
// Retrieves modes which are currently in use
fn get_current_modes(verbose: bool) -> Result<Vec<InputMode>, Error> {
if verbose {
println!("Retrieving current display configuration.");
}
let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)\*").unwrap();
util::get_modes_helper(&re, verbose)
}
// Retrieves the default modes for each display
fn get_default_modes(verbose: bool) -> Result<Vec<InputMode>, Error> {
if verbose {
println!("Retrieving current display configuration.");
}
let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)[\*]?\+").unwrap();
util::get_modes_helper(&re, verbose)
}
fn switch_mode(name: &str, display: &str, verbose: bool) -> Result<(), io::Error> {
let mut cmd = process::Command::new("xrandr");
cmd.arg("--output").arg(&display).arg("--mode").arg(name);
if verbose {
println!("Applying mode {} to display {}",name,&display);
}
cmd.output()?;
if verbose {
println!("Successfully applied mode {} to display {}",name, &display);
}
Ok(())
}
// Adds the newly created mode to xrandr
fn new_mode(mode: &CvtMode, display: &str, verbose: bool) -> Result<(), io::Error> {
let mut cmd = process::Command::new("xrandr");
cmd.arg("--newmode")
.arg(&mode.name)
.arg(&mode.clock)
.arg(&mode.h_disp)
.arg(&mode.h_sync_start)
.arg(&mode.h_sync_end)
.arg(&mode.h_total)
.arg(&mode.v_disp)
.arg(&mode.v_sync_start)
.arg(&mode.v_sync_end)
.arg(&mode.v_total)
.arg(&mode.flags);
if verbose {
println!("Creating xrandr mode {}",&mode.name);
}
cmd.output()?;
if verbose {
println!("Adding mode {} for display {}.",&mode.name,display);
}
cmd = process::Command::new("xrandr");
cmd.arg("--addmode").arg(display).arg(&mode.name);
cmd.output()?;
Ok(())
}
| add_mode | identifier_name |
ic4164.rs | the column address is put onto the address pins and the active-low
/// column address strobe pin CAS is set low.
///
/// The chip has three basic modes of operation, controlled by the active-low write-enable
/// (WE) pin with some help from CAS. If WE is high, then the chip is in read mode after the
/// address is set. If WE is low, the mode depends on whether WE went low before the address
/// was set by putting CAS low; if CAS went low first, (meaning the chip was initially in
/// read mode), setting WE low will start read-modify-write mode, where the value at that
/// address is still available on the data-out pin (Q) even as the new value is set from the
/// data-in pin (D). If WE goes low before CAS, then read mode is never entered and write
/// mode is enabled instead. The value of D is still written to memory, but Q is
/// disconnected and no data is available there.
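///
/// As a sketch, the strobe and write-enable ordering for the three modes
/// looks like this (simplified; a cycle ends when RAS and CAS return high):
///
/// ```text
/// Read:  RAS low (row latched), WE stays high, CAS low (col) -> bit on Q
/// Write: RAS low (row latched), WE low, CAS low (col) -> D written, Q hi-Z
/// RMW:   RAS low (row latched), CAS low (col) -> bit on Q, then WE low
///        -> D written while Q remains connected
/// ```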
///
/// The Commodore 64 does not use read-modify-write mode. The WE pin is always set to its
/// proper level before the CAS pin goes low.
///
/// While WE and CAS control what is read from and/or written to the chip's memory, RAS is
/// not needed for anything other than setting the row address. Hence RAS can remain low
/// through multiple memory accesses, as long as its address is valid for all of them,
/// allowing reads and writes to happen within a single 256-address page of memory without
/// incurring the cost of resetting the row address. This doesn't happen in the C64; the
/// 6567 VIC cycles the RAS line once every clock cycle.
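///
/// A page-mode sequence would look something like this sketch:
///
/// ```text
/// RAS low (row latched once)
///   CAS low (col 1) -> access, CAS high
///   CAS low (col 2) -> access, CAS high
///   ...up to 256 columns within the same row...
/// RAS high (page closed)
/// ```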
///
/// Unlike most other non-logic chips in the system, there is no dedicated chip-select pin.
/// The combination of RAS and CAS can be regarded as such a pin, and it is used that way in
/// the Commodore 64.
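///
/// Put another way (a sketch): the chip is effectively deselected only when
/// both strobes are high.
///
/// ```text
/// selected = (RAS is low) or (CAS is low)
/// ```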
///
/// The chip comes in a 16-pin dual in-line package with the following pin assignments.
/// ```text
/// +---+--+---+
/// NC |1 +--+ 16| Vss
/// D |2 15| CAS
/// WE |3 14| Q
/// RAS |4 13| A6
/// A0 |5 4164 12| A3
/// A2 |6 11| A4
/// A1 |7 10| A5
/// Vcc |8 9| A7
/// +----------+
/// ```
/// These pin assignments are explained below.
///
/// | Pin | Name | Description |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 1 | NC | No connection. Not emulated. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 2 | D | Data input. This pin's value is written to memory when write mode is |
/// | | | entered. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 3 | WE | Active-low write enable. If this is low, memory is being written to. |
/// | | | If it is high, memory is being read. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 4 | RAS | Active-low row address strobe. When this goes low, the value of the |
/// | | | address pins is stored as the row address for the internal 256x256 |
/// | | | memory array. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 5   | A0    | Address pins. These 8 pins in conjunction with RAS and CAS allow the    |
/// | 6   | A2    | addressing of 65,536 memory locations.                                   |
/// | 7 | A1 | |
/// | 9 | A7 | |
/// | 10 | A5 | |
/// | 11 | A4 | |
/// | 12 | A3 | |
/// | 13 | A6 | |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 8 | Vcc | +5V power supply. Not emulated. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 14 | Q | Data output. The value of the memory at the latched location appears |
/// | | | on this pin when the CAS pin goes low in read mode. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 15 | CAS | Active-low column address strobe. When this goes low, the value of the |
/// | | | address pins is stored as the column address for the internal 256x256 |
/// | | | memory array, and the location is either read from or written to, |
/// | | | depending on the value of WE. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 16 | Vss | 0V power supply (ground). Not emulated. |
///
/// In the Commodore 64, U9, U10, U11, U12, U21, U22, U23, and U24 are 4164s, one for each
/// of the 8 bits on the data bus.
pub struct Ic4164 {
/// The pins of the 4164, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
/// Separate references to the A0-A7 pins in the `pins` vector.
addr_pins: RefVec<Pin>,
/// The place where the data is actually stored. The 4164 is 1-bit memory that is stored
/// in a 256x256 matrix internally, but we don't have either u1 or u256 types (bools
/// don't count; they actually take up much more than 1 bit of memory space). Instead we
/// pack the bits into an array of 2048 u32s, which we then address through a function
/// that resolves the row and column into an array index and an index to the bit inside
/// the u32 value at that array index.
memory: [u32; 2048],
/// The latched row value taken from the pins when RAS transitions low. If no row has
/// been latched (RAS hasn't yet gone low), this will be `None`.
row: Option<u8>,
/// The latched column value taken from the pins when CAS transitions low. If no column
/// has been latched (CAS hasn't yet gone low), this will be `None`.
col: Option<u8>,
/// The latched data bit taken from the D pin. This is latched just before a write takes
/// place and is done so that its value can replace the Q pin's value in RMW mode
/// easily. If no data has been latched (either WE or CAS is not low), this will be
/// `None`.
data: Option<u8>,
}
impl Ic4164 {
/// Creates a new 4164 64k x 1 dynamic RAM emulation and returns a shared, internally
/// mutable reference to it.
pub fn new() -> DeviceRef {
// Address pins 0-7.
let a0 = pin!(A0, "A0", Input);
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
let a7 = pin!(A7, "A7", Input);
// The data input pin. When the chip is in write or read-modify-write mode, the
// value of this pin will be written to the appropriate bit in the memory array.
let d = pin!(D, "D", Input);
// The data output pin. This is active in read and read-modify-write mode, set to
// the value of the bit at the address latched by RAS and CAS. In write mode, it is
// hi-Z.
let q = pin!(Q, "Q", Output);
// The row address strobe. Setting this low latches the values of A0-A7, saving them
// to be part of the address used to access the memory array.
let ras = pin!(RAS, "RAS", Input);
// The column address strobe. Setting this low latches A0-A7 into the second part of
// the memory address. It also initiates read or write mode, depending on the value
// of WE.
let cas = pin!(CAS, "CAS", Input);
// The write-enable pin. If this is high, the chip is in read mode; if it and CAS
// are low, the chip is in either write or read-modify-write mode, depending on
// which pin went low first.
let we = pin!(WE, "WE", Input);
// Power supply and no-contact pins. These are not emulated.
let nc = pin!(NC, "NC", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let vss = pin!(VSS, "VSS", Unconnected);
let pins = pins![a0, a1, a2, a3, a4, a5, a6, a7, d, q, ras, cas, we, nc, vcc, vss];
let addr_pins = RefVec::with_vec(
IntoIterator::into_iter(PA_ADDRESS)
.map(|pa| clone_ref!(pins[pa]))
.collect::<Vec<PinRef>>(),
);
let device: DeviceRef = new_ref!(Ic4164 {
pins,
addr_pins,
memory: [0; 2048],
row: None,
col: None,
data: None,
});
float!(q);
attach_to!(device, ras, cas, we);
device
}
/// Reads the row and col and calculates the specific bit in the memory array to which
/// this row/col combination refers. The first element of the return value is the index
/// of the 32-bit number in the memory array where that bit resides; the second element
/// is the index of the bit within that 32-bit number.
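/// For example, with (hypothetical) latched values row = 1 and col = 0xE5:
/// row_index = 1 << 3 = 8, col_index = (0xE5 & 0b1110_0000) >> 5 = 7, and
/// bit_index = 0xE5 & 0b0001_1111 = 5, so the result is (15, 5): bit 5 of
/// `memory[15]`.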
fn resolve(&self) -> (usize, usize) {
// Unless there's a bug in this program, this method should never be called while
// either `self.row` or `self.col` are `None`. So we actually *want* it to panic if
// `unwrap()` fails.
let row = self.row.unwrap() as usize;
let col = self.col.unwrap() as usize;
let row_index = row << 3;
let col_index = (col & 0b1110_0000) >> 5;
let bit_index = col & 0b0001_1111;
(row_index | col_index, bit_index)
}
/// Retrieves a single bit from the memory array and sets the level of the Q pin to the
/// value of that bit.
fn read(&self) {
let (index, bit) = self.resolve();
let value = (self.memory[index] & (1 << bit)) >> bit;
set_level!(self.pins[Q], Some(value as f64))
}
/// Writes the value of the D pin to a single bit in the memory array. If the Q pin is
/// also connected, the value is also sent to it; this happens only in RMW mode and
/// keeps the input and output data pins synched. (This guaranteed sync means that the
/// C64 can connect these two pins with a PC board trace, but the C64 doesn't use RMW
/// mode.)
fn write(&mut self) {
let (index, bit) = self.resolve();
if self.data.unwrap() == 1 {
self.memory[index] |= 1 << bit;
} else {
self.memory[index] &= !(1 << bit);
}
if !floating!(self.pins[Q]) {
set_level!(self.pins[Q], Some(self.data.unwrap() as f64));
}
}
}
impl Device for Ic4164 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
vec![]
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if number!(pin) == RAS => {
// Invoked when the RAS pin changes level. When it goes low, the current
// states of the A0-A7 pins are latched. The address is released when the
// RAS pin goes high.
//
// Since this is the only thing that RAS is used for, it can be left low for
// multiple memory accesses as long as the row bits of the address remain the same for
// those accesses. This can speed up reads and writes within the same page
// by reducing the amount of setup needed for those reads and writes. (This
// does not happen in the C64.)
if high!(pin) {
self.row = None;
} else {
self.row = Some(pins_to_value(&self.addr_pins) as u8);
}
}
LevelChange(pin) if number!(pin) == CAS => {
// Invoked when the CAS pin changes level.
//
// When CAS goes low, the current states of the A0-A7 pins are latched in a
// similar way to when RAS goes low. What else happens depends on whether
// the WE pin is low. If it is, the chip goes into write mode and the value
// on the D pin is saved to a memory location referred to by the latched row
// and column values. If WE is not low, read mode is entered, and the value
// in that memory location is put onto the Q pin. (Setting the WE pin low
// after CAS goes low sets read-modify-write mode; the read that CAS
// initiated is still valid.)
//
// When CAS goes high, the Q pin is disconnected and the latched column and
// data (if there is one) values are cleared.
if high!(pin) | else {
self.col = Some(pins_to_value(&self.addr_pins) as u8);
if high!(self.pins[WE]) {
self.read();
} else {
self.data = Some(if high!(self.pins[D]) { 1 } else { 0 });
self.write();
}
}
}
LevelChange(pin) if number!(pin) == WE => {
// Invoked when the WE pin changes level.
//
// When WE is high, read mode is enabled (though the actual read will not be
// available until both RAS and CAS are set low, indicating that the address
// of the read is valid). The internal latched input data value is cleared.
//
// When WE goes low, the write mode that is enabled depends on whether CAS
// is already low. If it is, the chip must have been in read mode and now
// moves into read-modify-write mode. The data value on the Q pin remains
// valid, and the value on the D pin is latched and stored at the
// appropriate memory location.
//
// If CAS is still high when WE goes low, the Q pin is disconnected. Nothing
// further happens until CAS goes low; at that point, the chip goes into
// write mode (data is written to memory but nothing is available to be
// read).
if high!(pin) {
self.data = None;
} else if high!(self.pins[CAS]) {
float!(self.pins[Q]);
} else {
self.data = Some(if high!(self.pins[D]) { 1 } else { 0 });
self.write();
}
}
_ => {}
}
}
fn debug_fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}, {:?}, {:?}", self.row, self.col, self.data)
}
}
#[cfg(test)]
mod test {
use crate::{
components::trace::{Trace, TraceRef},
test_utils::{make_traces, value_to_traces},
};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>, RefVec<Trace>) {
let device = Ic4164::new();
let tr = make_traces(&device);
set!(tr[WE]);
set!(tr[RAS]);
set!(tr[CAS]);
let addr_tr = RefVec::with_vec(
IntoIterator::into_iter(PA_ADDRESS)
.map(|p| clone_ref!(tr[p]))
.collect::<Vec<TraceRef>>(),
);
(device, tr, addr_tr)
}
#[test]
fn read_mode_enable_q() {
let (_, tr, _) = before_each();
clear!(tr[RAS]);
clear!(tr[CAS]);
// data at 0x0000, which will be 0 initially
assert!(low!(tr[Q]), "Q should have data during read");
set!(tr[CAS]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after read");
}
#[test]
fn write_mode_disable_q() {
let (_, tr, _) = before_each();
clear!(tr[RAS]);
clear!(tr[WE]);
clear!(tr[CAS]);
assert!(floating!(tr[Q]), "Q should be disabled during write");
set!(tr[CAS]);
set!(tr[WE]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after write");
}
#[test]
fn rmw_mode_enable_q() {
let (_, tr, _) = before_each();
clear!(tr[D]);
clear!(tr[RAS]);
clear!(tr[CAS]);
clear!(tr[WE]);
assert!(low!(tr[Q]), "Q should be enabled during RMW");
set!(tr[WE]);
set!(tr[CAS]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after RMW");
}
#[test]
fn read_write_one_bit() {
let (_, tr, _) = before_each();
// Write is happening at 0x0000, so we don't need to set addresses at all
set!(tr[D]);
clear!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
// 1 is written to address 0x0000 at this point
set!(tr[CAS]);
set!(tr[RAS]);
set!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
let value = high!(tr[Q]);
set!(tr[CAS]);
set!(tr[RAS]);
assert!(value, "Value 1 not written to address 0x0000");
}
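#[test]
fn read_write_one_bit_nonzero_addr() {
// A sketch of the same write-then-read exchange at a non-zero address. It
// assumes `value_to_traces(value, traces)` drives each address trace from
// the corresponding bit of `value`; that signature is inferred from the
// import above, not confirmed here.
let (_, tr, addr_tr) = before_each();
set!(tr[D]);
clear!(tr[WE]);
// Row half of the address, then RAS low to latch it.
value_to_traces(0x12, &addr_tr);
clear!(tr[RAS]);
// Column half, then CAS low to perform the write of the 1.
value_to_traces(0x34, &addr_tr);
clear!(tr[CAS]);
set!(tr[CAS]);
set!(tr[RAS]);
set!(tr[WE]);
// Read it back with the same row/column sequence.
value_to_traces(0x12, &addr_tr);
clear!(tr[RAS]);
value_to_traces(0x34, &addr_tr);
clear!(tr[CAS]);
let value = high!(tr[Q]);
set!(tr[CAS]);
set!(tr[RAS]);
assert!(value, "Value 1 not written to the selected address");
}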
#[test]
fn rmw_one_bit() {
let (_, tr, _) = before_each();
// Write is happening at 0x0000, so we don't need to set addresses at all
set!(tr[D]);
clear!(tr[RAS]);
clear!(tr[CAS]);
// in read mode, Q should be 0 because no data has been written to 0x0000 yet
assert!(
low!(tr[Q]),
"Value 0 not read from address 0x0000 in RMW mode"
);
// Lower WE to go into RMW
clear!(tr[WE]);
// 1 is written to address 0x0000 at this point
set!(tr[CAS]);
set!(tr[RAS]);
set!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
let value = high!(tr[Q]);
set!(tr[CAS]);
set!(tr[RAS]);
| {
float!(self.pins[Q]);
self.col = None;
self.data = None;
} | conditional_block |
ic4164.rs | then the column address is put onto the address pins and the active-low
/// column address strobe pin CAS is set low.
///
/// The chip has three basic modes of operation, controlled by the active-low write-enable
/// (WE) pin with some help from CAS. If WE is high, then the chip is in read mode after the
/// address is set. If WE is low, the mode depends on whether WE went low before the address
/// was set by putting CAS low; if CAS went low first (meaning the chip was initially in
/// read mode), setting WE low will start read-modify-write mode, where the value at that
/// address is still available on the data-out pin (Q) even as the new value is set from the
/// data-in pin (D). If WE goes low before CAS, then read mode is never entered and write
/// mode is enabled instead. The value of D is still written to memory, but Q is
/// disconnected and no data is available there.
///
/// The Commodore 64 does not use read-modify-write mode. The WE pin is always set to its
/// proper level before the CAS pin goes low.
///
/// While WE and CAS control what is read from and/or written to the chip's memory, RAS is
/// not needed for anything other than setting the row address. Hence RAS can remain low
/// through multiple memory accesses, as long as the latched row address is valid for all of them,
/// allowing reads and writes to happen within a single 256-address page of memory without
/// incurring the cost of resetting the row address. This doesn't happen in the C64; the
/// 6567 VIC cycles the RAS line once every clock cycle.
///
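/// For instance, page mode (which, again, the C64 does not use) would look
/// like this:
/// ```text
/// RAS low (row latched), then repeatedly:
///   put a new column on A0-A7, CAS low (access), CAS high (release)
/// ...and only afterwards RAS high.
/// ```
///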
/// Unlike most other non-logic chips in the system, there is no dedicated chip-select pin.
/// The combination of RAS and CAS can be regarded as such a pin, and it is used that way in
/// the Commodore 64.
///
/// The chip comes in a 16-pin dual in-line package with the following pin assignments.
/// ```text
/// +---+--+---+
/// NC |1 +--+ 16| Vss
/// D |2 15| CAS
/// WE |3 14| Q
/// RAS |4 13| A6
/// A0 |5 4164 12| A3
/// A2 |6 11| A4
/// A1 |7 10| A5
/// Vcc |8 9| A7
/// +----------+
/// ```
/// These pin assignments are explained below.
///
/// | Pin | Name | Description |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 1 | NC | No connection. Not emulated. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 2 | D | Data input. This pin's value is written to memory when write mode is |
/// | | | entered. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 3 | WE | Active-low write enable. If this is low, memory is being written to. |
/// | | | If it is high, memory is being read. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 4 | RAS | Active-low row address strobe. When this goes low, the value of the |
/// | | | address pins is stored as the row address for the internal 256x256 |
/// | | | memory array. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 5 | A0 | Address pins. These 8 pins in conjunction with RAS and CAS allow the |
/// | 6 | A2 | addressing of 65,536 memory locations. |
/// | 7 | A1 | |
/// | 9 | A7 | |
/// | 10 | A5 | |
/// | 11 | A4 | |
/// | 12 | A3 | |
/// | 13 | A6 | |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 8 | Vcc | +5V power supply. Not emulated. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 14 | Q | Data output. The value of the memory at the latched location appears |
/// | | | on this pin when the CAS pin goes low in read mode. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 15 | CAS | Active-low column address strobe. When this goes low, the value of the |
/// | | | address pins is stored as the column address for the internal 256x256 |
/// | | | memory array, and the location is either read from or written to, |
/// | | | depending on the value of WE. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 16 | Vss | 0V power supply (ground). Not emulated. |
///
/// In the Commodore 64, U9, U10, U11, U12, U21, U22, U23, and U24 are 4164s, one for each
/// of the 8 bits on the data bus.
pub struct Ic4164 {
/// The pins of the 4164, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
/// Separate references to the A0-A7 pins in the `pins` vector.
addr_pins: RefVec<Pin>,
/// The place where the data is actually stored. The 4164 is 1-bit memory that is stored
/// in a 256x256 matrix internally, but we don't have either u1 or u256 types (bools
/// don't count; they actually take up much more than 1 bit of memory space). Instead we
/// pack the bits into an array of 2048 u32s, which we then address through a function
/// that resolves the row and column into an array index and an index to the bit inside
/// the u32 value at that array index.
memory: [u32; 2048],
/// The latched row value taken from the pins when RAS transitions low. If no row has
/// been latched (RAS hasn't yet gone low), this will be `None`.
row: Option<u8>,
/// The latched column value taken from the pins when CAS transitions low. If no column
/// has been latched (CAS hasn't yet gone low), this will be `None`.
col: Option<u8>,
/// The latched data bit taken from the D pin. This is latched just before a write takes
/// place and is done so that its value can replace the Q pin's value in RMW mode
/// easily. If no data has been latched (either WE or CAS is not low), this will be
/// `None`.
data: Option<u8>,
}
impl Ic4164 {
/// Creates a new 4164 64k x 1 dynamic RAM emulation and returns a shared, internally
/// mutable reference to it.
pub fn new() -> DeviceRef {
// Address pins 0-7.
let a0 = pin!(A0, "A0", Input);
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
let a7 = pin!(A7, "A7", Input);
// The data input pin. When the chip is in write or read-modify-write mode, the
// value of this pin will be written to the appropriate bit in the memory array.
let d = pin!(D, "D", Input);
// The data output pin. This is active in read and read-modify-write mode, set to
// the value of the bit at the address latched by RAS and CAS. In write mode, it is
// hi-Z.
let q = pin!(Q, "Q", Output);
// The row address strobe. Setting this low latches the values of A0-A7, saving them
// to be part of the address used to access the memory array.
let ras = pin!(RAS, "RAS", Input);
// The column address strobe. Setting this low latches A0-A7 into the second part of
// the memory address. It also initiates read or write mode, depending on the value
// of WE.
let cas = pin!(CAS, "CAS", Input);
// The write-enable pin. If this is high, the chip is in read mode; if it and CAS
// are low, the chip is in either write or read-modify-write mode, depending on
// which pin went low first.
let we = pin!(WE, "WE", Input);
// Power supply and no-contact pins. These are not emulated.
let nc = pin!(NC, "NC", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let vss = pin!(VSS, "VSS", Unconnected);
let pins = pins![a0, a1, a2, a3, a4, a5, a6, a7, d, q, ras, cas, we, nc, vcc, vss];
let addr_pins = RefVec::with_vec(
IntoIterator::into_iter(PA_ADDRESS)
.map(|pa| clone_ref!(pins[pa]))
.collect::<Vec<PinRef>>(),
);
let device: DeviceRef = new_ref!(Ic4164 {
pins,
addr_pins,
memory: [0; 2048],
row: None,
col: None,
data: None,
});
float!(q);
attach_to!(device, ras, cas, we);
device
}
/// Reads the row and col and calculates the specific bit in the memory array to which
/// this row/col combination refers. The first element of the return value is the index
/// of the 32-bit number in the memory array where that bit resides; the second element
/// is the index of the bit within that 32-bit number.
fn resolve(&self) -> (usize, usize) {
// Unless there's a bug in this program, this method should never be called while
// either `self.row` or `self.col` are `None`. So we actually *want* it to panic if
// `unwrap()` fails.
let row = self.row.unwrap() as usize;
let col = self.col.unwrap() as usize;
let row_index = row << 3;
let col_index = (col & 0b1110_0000) >> 5;
let bit_index = col & 0b0001_1111;
(row_index | col_index, bit_index)
}
/// Retrieves a single bit from the memory array and sets the level of the Q pin to the
/// value of that bit.
fn read(&self) {
let (index, bit) = self.resolve();
let value = (self.memory[index] & (1 << bit)) >> bit;
set_level!(self.pins[Q], Some(value as f64))
}
/// Writes the value of the D pin to a single bit in the memory array. If the Q pin is
/// also connected, the value is also sent to it; this happens only in RMW mode and
/// keeps the input and output data pins synched. (This guaranteed sync means that the
/// C64 can connect these two pins with a PC board trace, but the C64 doesn't use RMW
/// mode.)
fn write(&mut self) {
let (index, bit) = self.resolve();
if self.data.unwrap() == 1 {
self.memory[index] |= 1 << bit;
} else {
self.memory[index] &= !(1 << bit);
}
if !floating!(self.pins[Q]) {
set_level!(self.pins[Q], Some(self.data.unwrap() as f64));
}
}
}
impl Device for Ic4164 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
vec![]
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if number!(pin) == RAS => {
// Invoked when the RAS pin changes level. When it goes low, the current
// states of the A0-A7 pins are latched. The address is released when the
// RAS pin goes high.
//
// Since this is the only thing that RAS is used for, it can be left low for
// multiple memory accesses as long as the row bits of the address remain the same for
// those accesses. This can speed up reads and writes within the same page
// by reducing the amount of setup needed for those reads and writes. (This
// does not happen in the C64.)
if high!(pin) {
self.row = None;
} else {
self.row = Some(pins_to_value(&self.addr_pins) as u8);
}
}
LevelChange(pin) if number!(pin) == CAS => {
// Invoked when the CAS pin changes level.
//
// When CAS goes low, the current states of the A0-A7 pins are latched in a
// similar way to when RAS goes low. What else happens depends on whether
// the WE pin is low. If it is, the chip goes into write mode and the value
// on the D pin is saved to a memory location referred to by the latched row
// and column values. If WE is not low, read mode is entered, and the value
// in that memory location is put onto the Q pin. (Setting the WE pin low
// after CAS goes low sets read-modify-write mode; the read that CAS
// initiated is still valid.)
//
// When CAS goes high, the Q pin is disconnected and the latched column and
// data (if there is one) values are cleared.
if high!(pin) {
float!(self.pins[Q]);
self.col = None;
self.data = None;
} else {
self.col = Some(pins_to_value(&self.addr_pins) as u8);
if high!(self.pins[WE]) {
self.read();
} else {
self.data = Some(if high!(self.pins[D]) { 1 } else { 0 });
self.write();
}
}
}
LevelChange(pin) if number!(pin) == WE => {
// Invoked when the WE pin changes level.
//
// When WE is high, read mode is enabled (though the actual read will not be
// available until both RAS and CAS are set low, indicating that the address
// of the read is valid). The internal latched input data value is cleared.
//
// When WE goes low, the write mode that is enabled depends on whether CAS
// is already low. If it is, the chip must have been in read mode and now
// moves into read-modify-write mode. The data value on the Q pin remains
// valid, and the value on the D pin is latched and stored at the
// appropriate memory location.
//
// If CAS is still high when WE goes low, the Q pin is disconnected. Nothing
// further happens until CAS goes low; at that point, the chip goes into
// write mode (data is written to memory but nothing is available to be
// read).
if high!(pin) {
self.data = None;
} else if high!(self.pins[CAS]) {
float!(self.pins[Q]);
} else {
self.data = Some(if high!(self.pins[D]) { 1 } else { 0 });
self.write();
}
}
_ => {}
}
}
fn debug_fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}, {:?}, {:?}", self.row, self.col, self.data)
}
}
#[cfg(test)]
mod test {
use crate::{
components::trace::{Trace, TraceRef},
test_utils::{make_traces, value_to_traces},
};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>, RefVec<Trace>) {
let device = Ic4164::new();
let tr = make_traces(&device);
set!(tr[WE]);
set!(tr[RAS]);
set!(tr[CAS]);
let addr_tr = RefVec::with_vec(
IntoIterator::into_iter(PA_ADDRESS)
.map(|p| clone_ref!(tr[p]))
.collect::<Vec<TraceRef>>(),
);
(device, tr, addr_tr)
}
#[test]
fn read_mode_enable_q() {
let (_, tr, _) = before_each();
clear!(tr[RAS]);
clear!(tr[CAS]);
// data at 0x0000, which will be 0 initially
assert!(low!(tr[Q]), "Q should have data during read");
set!(tr[CAS]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after read");
}
#[test]
fn write_mode_disable_q() {
let (_, tr, _) = before_each();
clear!(tr[RAS]);
clear!(tr[WE]);
clear!(tr[CAS]);
assert!(floating!(tr[Q]), "Q should be disabled during write");
set!(tr[CAS]);
set!(tr[WE]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after write");
}
#[test]
fn rmw_mode_enable_q() {
let (_, tr, _) = before_each();
clear!(tr[D]);
clear!(tr[RAS]);
clear!(tr[CAS]);
clear!(tr[WE]);
assert!(low!(tr[Q]), "Q should be enabled during RMW");
set!(tr[WE]);
set!(tr[CAS]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after RMW");
}
#[test]
fn read_write_one_bit() {
let (_, tr, _) = before_each();
// Write is happening at 0x0000, so we don't need to set addresses at all
set!(tr[D]);
clear!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
// 1 is written to address 0x0000 at this point
set!(tr[CAS]);
set!(tr[RAS]);
set!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
let value = high!(tr[Q]);
set!(tr[CAS]);
set!(tr[RAS]);
assert!(value, "Value 1 not written to address 0x0000");
}
#[test]
fn rmw_one_bit() {
let (_, tr, _) = before_each();
// Write is happening at 0x0000, so we don't need to set addresses at all
set!(tr[D]);
clear!(tr[RAS]);
clear!(tr[CAS]);
// in read mode, Q should be 0 because no data has been written to 0x0000 yet
assert!(
low!(tr[Q]),
"Value 0 not read from address 0x0000 in RMW mode"
);
// Lower WE to go into RMW | // 1 is written to address 0x0000 at this point
set!(tr[CAS]);
set!(tr[RAS]);
set!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
let value = high!(tr[Q]);
set!(tr[CAS]);
set!(tr[RAS]);
| clear!(tr[WE]); | random_line_split |
ic4164.rs | the column address is put onto the address pins and the active-low
/// column address strobe pin CAS is set low.
///
/// The chip has three basic modes of operation, controlled by the active-low write-enable
/// (WE) pin with some help from CAS. If WE is high, then the chip is in read mode after the
/// address is set. If WE is low, the mode depends on whether WE went low before the address
/// was set by putting CAS low; if CAS went low first (meaning the chip was initially in
/// read mode), setting WE low will start read-modify-write mode, where the value at that
/// address is still available on the data-out pin (Q) even as the new value is set from the
/// data-in pin (D). If WE goes low before CAS, then read mode is never entered and write
/// mode is enabled instead. The value of D is still written to memory, but Q is
/// disconnected and no data is available there.
///
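/// The two write orderings side by side, with the row already latched in
/// both cases:
/// ```text
/// read-modify-write: CAS low (read; Q valid) -> WE low (D written; Q follows D)
/// early write:       WE low (Q disconnected) -> CAS low (D written; Q stays hi-Z)
/// ```
///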
/// The Commodore 64 does not use read-modify-write mode. The WE pin is always set to its
/// proper level before the CAS pin goes low.
///
/// While WE and CAS control what is read from and/or written to the chip's memory, RAS is
/// not needed for anything other than setting the row address. Hence RAS can remain low
/// through multiple memory accesses, as long as the latched row address is valid for all of them,
/// allowing reads and writes to happen within a single 256-address page of memory without
/// incurring the cost of resetting the row address. This doesn't happen in the C64; the
/// 6567 VIC cycles the RAS line once every clock cycle.
///
/// Unlike most other non-logic chips in the system, there is no dedicated chip-select pin.
/// The combination of RAS and CAS can be regarded as such a pin, and it is used that way in
/// the Commodore 64.
///
/// The chip comes in a 16-pin dual in-line package with the following pin assignments.
/// ```text
/// +---+--+---+
/// NC |1 +--+ 16| Vss
/// D |2 15| CAS
/// WE |3 14| Q
/// RAS |4 13| A6
/// A0 |5 4164 12| A3
/// A2 |6 11| A4
/// A1 |7 10| A5
/// Vcc |8 9| A7
/// +----------+
/// ```
/// These pin assignments are explained below.
///
/// | Pin | Name | Description |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 1 | NC | No connection. Not emulated. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 2 | D | Data input. This pin's value is written to memory when write mode is |
/// | | | entered. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 3 | WE | Active-low write enable. If this is low, memory is being written to. |
/// | | | If it is high, memory is being read. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 4 | RAS | Active-low row address strobe. When this goes low, the value of the |
/// | | | address pins is stored as the row address for the internal 256x256 |
/// | | | memory array. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 5 | A0 | Address pins. These 8 pins in conjunction with RAS and CAS allow the |
/// | 6 | A2 | addressing of 65,536 memory locations. |
/// | 7 | A1 | |
/// | 9 | A7 | |
/// | 10 | A5 | |
/// | 11 | A4 | |
/// | 12 | A3 | |
/// | 13 | A6 | |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 8 | Vcc | +5V power supply. Not emulated. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 14 | Q | Data output. The value of the memory at the latched location appears |
/// | | | on this pin when the CAS pin goes low in read mode. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 15 | CAS | Active-low column address strobe. When this goes low, the value of the |
/// | | | address pins is stored as the column address for the internal 256x256 |
/// | | | memory array, and the location is either read from or written to, |
/// | | | depending on the value of WE. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 16 | Vss | 0V power supply (ground). Not emulated. |
///
/// In the Commodore 64, U9, U10, U11, U12, U21, U22, U23, and U24 are 4164s, one for each
/// of the 8 bits on the data bus.
pub struct Ic4164 {
/// The pins of the 4164, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
/// Separate references to the A0-A7 pins in the `pins` vector.
addr_pins: RefVec<Pin>,
/// The place where the data is actually stored. The 4164 is 1-bit memory that is stored
/// in a 256x256 matrix internally, but we don't have either u1 or u256 types (bools
/// don't count; they actually take up much more than 1 bit of memory space). Instead we
/// pack the bits into an array of 2048 u32s, which we then address through a function
/// that resolves the row and column into an array index and an index to the bit inside
/// the u32 value at that array index.
memory: [u32; 2048],
/// The latched row value taken from the pins when RAS transitions low. If no row has
/// been latched (RAS hasn't yet gone low), this will be `None`.
row: Option<u8>,
/// The latched column value taken from the pins when CAS transitions low. If no column
/// has been latched (CAS hasn't yet gone low), this will be `None`.
col: Option<u8>,
/// The latched data bit taken from the D pin. This is latched just before a write takes
/// place and is done so that its value can replace the Q pin's value in RMW mode
/// easily. If no data has been latched (either WE or CAS is not low), this will be
/// `None`.
data: Option<u8>,
}
impl Ic4164 {
/// Creates a new 4164 64k x 1 dynamic RAM emulation and returns a shared, internally
/// mutable reference to it.
pub fn new() -> DeviceRef {
// Address pins 0-7.
let a0 = pin!(A0, "A0", Input);
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
let a7 = pin!(A7, "A7", Input);
// The data input pin. When the chip is in write or read-modify-write mode, the
// value of this pin will be written to the appropriate bit in the memory array.
let d = pin!(D, "D", Input);
// The data output pin. This is active in read and read-modify-write mode, set to
// the value of the bit at the address latched by RAS and CAS. In write mode, it is
// hi-Z.
let q = pin!(Q, "Q", Output);
// The row address strobe. Setting this low latches the values of A0-A7, saving them
// to be part of the address used to access the memory array.
let ras = pin!(RAS, "RAS", Input);
// The column address strobe. Setting this low latches A0-A7 into the second part of
// the memory address. It also initiates read or write mode, depending on the value
// of WE.
let cas = pin!(CAS, "CAS", Input);
// The write-enable pin. If this is high, the chip is in read mode; if it and CAS
// are low, the chip is in either write or read-modify-write mode, depending on
// which pin went low first.
let we = pin!(WE, "WE", Input);
// Power supply and no-contact pins. These are not emulated.
let nc = pin!(NC, "NC", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let vss = pin!(VSS, "VSS", Unconnected);
let pins = pins![a0, a1, a2, a3, a4, a5, a6, a7, d, q, ras, cas, we, nc, vcc, vss];
let addr_pins = RefVec::with_vec(
IntoIterator::into_iter(PA_ADDRESS)
.map(|pa| clone_ref!(pins[pa]))
.collect::<Vec<PinRef>>(),
);
let device: DeviceRef = new_ref!(Ic4164 {
pins,
addr_pins,
memory: [0; 2048],
row: None,
col: None,
data: None,
});
float!(q);
attach_to!(device, ras, cas, we);
device
}
/// Reads the row and col and calculates the specific bit in the memory array to which
/// this row/col combination refers. The first element of the return value is the index
/// of the 32-bit number in the memory array where that bit resides; the second element
/// is the index of the bit within that 32-bit number.
fn resolve(&self) -> (usize, usize) {
// Unless there's a bug in this program, this method should never be called while
// either `self.row` or `self.col` are `None`. So we actually *want* it to panic if
// `unwrap()` fails.
let row = self.row.unwrap() as usize;
let col = self.col.unwrap() as usize;
let row_index = row << 3;
let col_index = (col & 0b1110_0000) >> 5;
let bit_index = col & 0b0001_1111;
(row_index | col_index, bit_index)
}
/// Retrieves a single bit from the memory array and sets the level of the Q pin to the
/// value of that bit.
fn read(&self) {
let (index, bit) = self.resolve();
let value = (self.memory[index] & (1 << bit)) >> bit;
set_level!(self.pins[Q], Some(value as f64))
}
/// Writes the value of the D pin to a single bit in the memory array. If the Q pin is
/// also connected, the value is also sent to it; this happens only in RMW mode and
/// keeps the input and output data pins synched. (This guaranteed sync means that the
/// C64 can connect these two pins with a PC board trace, but the C64 doesn't use RMW
/// mode.)
fn write(&mut self) {
let (index, bit) = self.resolve();
if self.data.unwrap() == 1 {
self.memory[index] |= 1 << bit;
} else {
self.memory[index] &= !(1 << bit);
}
if !floating!(self.pins[Q]) {
set_level!(self.pins[Q], Some(self.data.unwrap() as f64));
}
}
}
impl Device for Ic4164 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
vec![]
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if number!(pin) == RAS => {
// Invoked when the RAS pin changes level. When it goes low, the current
// states of the A0-A7 pins are latched. The address is released when the
// RAS pin goes high.
//
// Since this is the only thing that RAS is used for, it can be left low for
// multiple memory accesses as long as the row bits of the address remain the same for
// those accesses. This can speed up reads and writes within the same page
// by reducing the amount of setup needed for those reads and writes. (This
// does not happen in the C64.)
if high!(pin) {
self.row = None;
} else {
self.row = Some(pins_to_value(&self.addr_pins) as u8);
}
}
LevelChange(pin) if number!(pin) == CAS => {
// Invoked when the CAS pin changes level.
//
// When CAS goes low, the current states of the A0-A7 pins are latched in a
// similar way to when RAS goes low. What else happens depends on whether
// the WE pin is low. If it is, the chip goes into write mode and the value
// on the D pin is saved to a memory location referred to by the latched row
// and column values. If WE is not low, read mode is entered, and the value
// in that memory location is put onto the Q pin. (Setting the WE pin low
// after CAS goes low sets read-modify-write mode; the read that CAS
// initiated is still valid.)
//
// When CAS goes high, the Q pin is disconnected and the latched column and
// data (if there is one) values are cleared.
if high!(pin) {
float!(self.pins[Q]);
self.col = None;
self.data = None;
} else {
self.col = Some(pins_to_value(&self.addr_pins) as u8);
if high!(self.pins[WE]) {
self.read();
} else {
self.data = Some(if high!(self.pins[D]) { 1 } else { 0 });
self.write();
}
}
}
LevelChange(pin) if number!(pin) == WE => {
// Invoked when the WE pin changes level.
//
// When WE is high, read mode is enabled (though the actual read will not be
// available until both RAS and CAS are set low, indicating that the address
// of the read is valid). The internal latched input data value is cleared.
//
// When WE goes low, the write mode that is enabled depends on whether CAS
// is already low. If it is, the chip must have been in read mode and now
// moves into read-modify-write mode. The data value on the Q pin remains
// valid, and the value on the D pin is latched and stored at the
// appropriate memory location.
//
// If CAS is still high when WE goes low, the Q pin is disconnected. Nothing
// further happens until CAS goes low; at that point, the chip goes into
// write mode (data is written to memory but nothing is available to be
// read).
if high!(pin) {
self.data = None;
} else if high!(self.pins[CAS]) {
float!(self.pins[Q]);
} else {
self.data = Some(if high!(self.pins[D]) { 1 } else { 0 });
self.write();
}
}
_ => {}
}
}
fn debug_fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}, {:?}, {:?}", self.row, self.col, self.data)
}
}
#[cfg(test)]
mod test {
use crate::{
components::trace::{Trace, TraceRef},
test_utils::{make_traces, value_to_traces},
};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>, RefVec<Trace>) {
let device = Ic4164::new();
let tr = make_traces(&device);
set!(tr[WE]);
set!(tr[RAS]);
set!(tr[CAS]);
let addr_tr = RefVec::with_vec(
IntoIterator::into_iter(PA_ADDRESS)
.map(|p| clone_ref!(tr[p]))
.collect::<Vec<TraceRef>>(),
);
(device, tr, addr_tr)
}
#[test]
fn read_mode_enable_q() {
let (_, tr, _) = before_each();
clear!(tr[RAS]);
clear!(tr[CAS]);
// data at 0x0000, which will be 0 initially
assert!(low!(tr[Q]), "Q should have data during read");
set!(tr[CAS]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after read");
}
#[test]
fn write_mode_disable_q() {
let (_, tr, _) = before_each();
clear!(tr[RAS]);
clear!(tr[WE]);
clear!(tr[CAS]);
assert!(floating!(tr[Q]), "Q should be disabled during write");
set!(tr[CAS]);
set!(tr[WE]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after write");
}
#[test]
fn rmw_mode_enable_q() {
let (_, tr, _) = before_each();
clear!(tr[D]);
clear!(tr[RAS]);
clear!(tr[CAS]);
clear!(tr[WE]);
assert!(low!(tr[Q]), "Q should be enabled during RMW");
set!(tr[WE]);
set!(tr[CAS]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after RMW");
}
#[test]
fn | () {
let (_, tr, _) = before_each();
// Write is happening at 0x0000, so we don't need to set addresses at all
set!(tr[D]);
clear!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
// 1 is written to address 0x0000 at this point
set!(tr[CAS]);
set!(tr[RAS]);
set!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
let value = high!(tr[Q]);
set!(tr[CAS]);
set!(tr[RAS]);
assert!(value, "Value 1 not written to address 0x0000");
}
#[test]
fn rmw_one_bit() {
let (_, tr, _) = before_each();
// Write is happening at 0x0000, so we don't need to set addresses at all
set!(tr[D]);
clear!(tr[RAS]);
clear!(tr[CAS]);
// in read mode, Q should be 0 because no data has been written to 0x0000 yet
assert!(
low!(tr[Q]),
"Value 0 not read from address 0x0000 in RMW mode"
);
// Lower WE to go into RMW
clear!(tr[WE]);
// 1 is written to address 0x0000 at this point
set!(tr[CAS]);
set!(tr[RAS]);
set!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
let value = high!(tr[Q]);
set!(tr[CAS]);
set!(tr[RAS]);
| read_write_one_bit | identifier_name |
ic4164.rs | the column address is put onto the address pins and the active-low
/// column address strobe pin CAS is set low.
///
/// The chip has three basic modes of operation, controlled by the active-low write-enable
/// (WE) pin with some help from CAS. If WE is high, then the chip is in read mode after the
/// address is set. If WE is low, the mode depends on whether WE went low before the address
/// was set by putting CAS low; if CAS went low first (meaning the chip was initially in
/// read mode), setting WE low will start read-modify-write mode, where the value at that
/// address is still available on the data-out pin (Q) even as the new value is set from the
/// data-in pin (D). If WE goes low before CAS, then read mode is never entered and write
/// mode is enabled instead. The value of D is still written to memory, but Q is
/// disconnected and no data is available there.
///
/// The Commodore 64 does not use read-modify-write mode. The WE pin is always set to its
/// proper level before the CAS pin goes low.
///
/// While WE and CAS control what is read from and/or written to the chip's memory, RAS is
/// not needed for anything other than setting the row address. Hence RAS can remain low
/// through multiple memory accesses, as long as the latched row address is valid for all of them,
/// allowing reads and writes to happen within a single 256-address page of memory without
/// incurring the cost of resetting the row address. This doesn't happen in the C64; the
/// 6567 VIC cycles the RAS line once every clock cycle.
///
/// Unlike most other non-logic chips in the system, there is no dedicated chip-select pin.
/// The combination of RAS and CAS can be regarded as such a pin, and it is used that way in
/// the Commodore 64.
///
/// The chip comes in a 16-pin dual in-line package with the following pin assignments.
/// ```text
/// +---+--+---+
/// NC |1 +--+ 16| Vss
/// D |2 15| CAS
/// WE |3 14| Q
/// RAS |4 13| A6
/// A0 |5 4164 12| A3
/// A2 |6 11| A4
/// A1 |7 10| A5
/// Vcc |8 9| A7
/// +----------+
/// ```
/// These pin assignments are explained below.
///
/// | Pin | Name | Description |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 1 | NC | No connection. Not emulated. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 2 | D | Data input. This pin's value is written to memory when write mode is |
/// | | | entered. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 3 | WE | Active-low write enable. If this is low, memory is being written to. |
/// | | | If it is high, memory is being read. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 4 | RAS | Active-low row address strobe. When this goes low, the value of the |
/// | | | address pins is stored as the row address for the internal 256x256 |
/// | | | memory array. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 5 | A0 | Address pins. These 8 pins in conjunction with RAS and CAS allow the |
/// | 6 | A2 | addressing of 65,536 memory locations. |
/// | 7 | A1 | |
/// | 9 | A7 | |
/// | 10 | A5 | |
/// | 11 | A4 | |
/// | 12 | A3 | |
/// | 13 | A6 | |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 8 | Vcc | +5V power supply. Not emulated. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 14 | Q | Data output. The value of the memory at the latched location appears |
/// | | | on this pin when the CAS pin goes low in read mode. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 15 | CAS | Active-low column address strobe. When this goes low, the value of the |
/// | | | address pins is stored as the column address for the internal 256x256 |
/// | | | memory array, and the location is either read from or written to, |
/// | | | depending on the value of WE. |
/// | --- | ----- | ---------------------------------------------------------------------- |
/// | 16 | Vss | 0V power supply (ground). Not emulated. |
///
/// In the Commodore 64, U9, U10, U11, U12, U21, U22, U23, and U24 are 4164s, one for each
/// of the 8 bits on the data bus.
pub struct Ic4164 {
/// The pins of the 4164, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
/// Separate references to the A0-A7 pins in the `pins` vector.
addr_pins: RefVec<Pin>,
/// The place where the data is actually stored. The 4164 is 1-bit memory that is stored
/// in a 256x256 matrix internally, but we don't have either u1 or u256 types (bools
/// don't count; they actually take up much more than 1 bit of memory space). Instead we
/// pack the bits into an array of 2048 u32s, which we then address through a function
/// that resolves the row and column into an array index and an index to the bit inside
/// the u32 value at that array index.
memory: [u32; 2048],
/// The latched row value taken from the pins when RAS transitions low. If no row has
/// been latched (RAS hasn't yet gone low), this will be `None`.
row: Option<u8>,
/// The latched column value taken from the pins when CAS transitions low. If no column
/// has been latched (CAS hasn't yet gone low), this will be `None`.
col: Option<u8>,
/// The latched data bit taken from the D pin. This is latched just before a write takes
/// place and is done so that its value can replace the Q pin's value in RMW mode
/// easily. If no data has been latched (either WE or CAS is not low), this will be
/// `None`.
data: Option<u8>,
}
impl Ic4164 {
/// Creates a new 4164 64k x 1 dynamic RAM emulation and returns a shared, internally
/// mutable reference to it.
pub fn new() -> DeviceRef {
// Address pins 0-7.
let a0 = pin!(A0, "A0", Input);
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
let a7 = pin!(A7, "A7", Input);
// The data input pin. When the chip is in write or read-modify-write mode, the
// value of this pin will be written to the appropriate bit in the memory array.
let d = pin!(D, "D", Input);
// The data output pin. This is active in read and read-modify-write mode, set to
// the value of the bit at the address latched by RAS and CAS. In write mode, it is
// hi-Z.
let q = pin!(Q, "Q", Output);
// The row address strobe. Setting this low latches the values of A0-A7, saving them
// to be part of the address used to access the memory array.
let ras = pin!(RAS, "RAS", Input);
// The column address strobe. Setting this low latches A0-A7 into the second part of
// the memory address. It also initiates read or write mode, depending on the value
// of WE.
let cas = pin!(CAS, "CAS", Input);
// The write-enable pin. If this is high, the chip is in read mode; if it and CAS
// are low, the chip is in either write or read-modify-write mode, depending on
// which pin went low first.
let we = pin!(WE, "WE", Input);
// Power supply and no-contact pins. These are not emulated.
let nc = pin!(NC, "NC", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let vss = pin!(VSS, "VSS", Unconnected);
let pins = pins![a0, a1, a2, a3, a4, a5, a6, a7, d, q, ras, cas, we, nc, vcc, vss];
let addr_pins = RefVec::with_vec(
IntoIterator::into_iter(PA_ADDRESS)
.map(|pa| clone_ref!(pins[pa]))
.collect::<Vec<PinRef>>(),
);
let device: DeviceRef = new_ref!(Ic4164 {
pins,
addr_pins,
memory: [0; 2048],
row: None,
col: None,
data: None,
});
float!(q);
attach_to!(device, ras, cas, we);
device
}
/// Reads the row and col and calculates the specific bit in the memory array to which
/// this row/col combination refers. The first element of the return value is the index
/// of the 32-bit number in the memory array where that bit resides; the second element
/// is the index of the bit within that 32-bit number.
fn resolve(&self) -> (usize, usize) {
// Unless there's a bug in this program, this method should never be called while
// either `self.row` or `self.col` are `None`. So we actually *want* it to panic if
// `unwrap()` fails.
let row = self.row.unwrap() as usize;
let col = self.col.unwrap() as usize;
let row_index = row << 3;
let col_index = (col & 0b1110_0000) >> 5;
let bit_index = col & 0b0001_1111;
(row_index | col_index, bit_index)
}
/// Retrieves a single bit from the memory array and sets the level of the Q pin to the
/// value of that bit.
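/// For example, if `self.memory[index]` is 0b0010_0000 and `bit` is 5, the
/// mask `1 << 5` isolates that bit and the right shift by 5 yields 1, which
/// is then driven onto Q.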
fn read(&self) {
let (index, bit) = self.resolve();
let value = (self.memory[index] & (1 << bit)) >> bit;
set_level!(self.pins[Q], Some(value as f64))
}
/// Writes the value of the D pin to a single bit in the memory array. If the Q pin is
/// also connected, the value is also sent to it; this happens only in RMW mode and
/// keeps the input and output data pins synched. (This guaranteed sync means that the
/// C64 can connect these two pins with a PC board trace, but the C64 doesn't use RMW
/// mode.)
fn write(&mut self) {
let (index, bit) = self.resolve();
if self.data.unwrap() == 1 {
self.memory[index] |= 1 << bit;
} else {
self.memory[index] &= !(1 << bit);
}
if !floating!(self.pins[Q]) {
set_level!(self.pins[Q], Some(self.data.unwrap() as f64));
}
}
}
impl Device for Ic4164 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
vec![]
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if number!(pin) == RAS => {
// Invoked when the RAS pin changes level. When it goes low, the current
// states of the A0-A7 pins are latched. The address is released when the
// RAS pin goes high.
//
// Since this is the only thing that RAS is used for, it can be left low for
// multiple memory accesses as long as the row bits of the address remain the same for
// those accesses. This can speed up reads and writes within the same page
// by reducing the amount of setup needed for those reads and writes. (This
// does not happen in the C64.)
if high!(pin) {
self.row = None;
} else {
self.row = Some(pins_to_value(&self.addr_pins) as u8);
}
}
LevelChange(pin) if number!(pin) == CAS => {
// Invoked when the CAS pin changes level.
//
// When CAS goes low, the current states of the A0-A7 pins are latched in a
// similar way to when RAS goes low. What else happens depends on whether
// the WE pin is low. If it is, the chip goes into write mode and the value
// on the D pin is saved to a memory location referred to by the latched row
// and column values. If WE is not low, read mode is entered, and the value
// in that memory location is put onto the Q pin. (Setting the WE pin low
// after CAS goes low sets read-modify-write mode; the read that CAS
// initiated is still valid.)
//
// When CAS goes high, the Q pin is disconnected and the latched column and
// data (if there is one) values are cleared.
if high!(pin) {
float!(self.pins[Q]);
self.col = None;
self.data = None;
} else {
self.col = Some(pins_to_value(&self.addr_pins) as u8);
if high!(self.pins[WE]) {
self.read();
} else {
self.data = Some(if high!(self.pins[D]) { 1 } else { 0 });
self.write();
}
}
}
LevelChange(pin) if number!(pin) == WE => {
// Invoked when the WE pin changes level.
//
// When WE is high, read mode is enabled (though the actual read will not be
// available until both RAS and CAS are set low, indicating that the address
// of the read is valid). The internal latched input data value is cleared.
//
// When WE goes low, the write mode that is enabled depends on whether CAS
// is already low. If it is, the chip must have been in read mode and now
// moves into read-modify-write mode. The data value on the Q pin remains
// valid, and the value on the D pin is latched and stored at the
// appropriate memory location.
//
// If CAS is still high when WE goes low, the Q pin is disconnected. Nothing
// further happens until CAS goes low; at that point, the chip goes into
// write mode (data is written to memory but nothing is available to be
// read).
if high!(pin) {
self.data = None;
} else if high!(self.pins[CAS]) {
float!(self.pins[Q]);
} else {
self.data = Some(if high!(self.pins[D]) { 1 } else { 0 });
self.write();
}
}
_ => {}
}
}
fn debug_fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}, {:?}, {:?}", self.row, self.col, self.data)
}
}
#[cfg(test)]
mod test {
use crate::{
components::trace::{Trace, TraceRef},
test_utils::{make_traces, value_to_traces},
};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>, RefVec<Trace>) {
let device = Ic4164::new();
let tr = make_traces(&device);
set!(tr[WE]);
set!(tr[RAS]);
set!(tr[CAS]);
let addr_tr = RefVec::with_vec(
IntoIterator::into_iter(PA_ADDRESS)
.map(|p| clone_ref!(tr[p]))
.collect::<Vec<TraceRef>>(),
);
(device, tr, addr_tr)
}
#[test]
fn read_mode_enable_q() {
let (_, tr, _) = before_each();
clear!(tr[RAS]);
clear!(tr[CAS]);
// data at 0x0000, which will be 0 initially
assert!(low!(tr[Q]), "Q should have data during read");
set!(tr[CAS]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after read");
}
#[test]
fn write_mode_disable_q() {
let (_, tr, _) = before_each();
clear!(tr[RAS]);
clear!(tr[WE]);
clear!(tr[CAS]);
assert!(floating!(tr[Q]), "Q should be disabled during write");
set!(tr[CAS]);
set!(tr[WE]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after write");
}
#[test]
fn rmw_mode_enable_q() {
let (_, tr, _) = before_each();
clear!(tr[D]);
clear!(tr[RAS]);
clear!(tr[CAS]);
clear!(tr[WE]);
assert!(low!(tr[Q]), "Q should be enabled during RMW");
set!(tr[WE]);
set!(tr[CAS]);
set!(tr[RAS]);
assert!(floating!(tr[Q]), "Q should be disabled after RMW");
}
#[test]
fn read_write_one_bit() {
let (_, tr, _) = before_each();
// Write is happening at 0x0000, so we don't need to set addresses at all
set!(tr[D]);
clear!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
// 1 is written to address 0x0000 at this point
set!(tr[CAS]);
set!(tr[RAS]);
set!(tr[WE]);
clear!(tr[RAS]);
clear!(tr[CAS]);
let value = high!(tr[Q]);
set!(tr[CAS]);
set!(tr[RAS]);
assert!(value, "Value 1 not written to address 0x0000");
}
#[test]
fn rmw_one_bit() | clear!(tr[CAS]);
let value = high!(tr[Q]);
set!(tr[CAS]);
set!(tr[RAS]);
| {
let (_, tr, _) = before_each();
// Write is happening at 0x0000, so we don't need to set addresses at all
set!(tr[D]);
clear!(tr[RAS]);
clear!(tr[CAS]);
// in read mode, Q should be 0 because no data has been written to 0x0000 yet
assert!(
low!(tr[Q]),
"Value 0 not read from address 0x0000 in RMW mode"
);
// Lower WE to go into RMW
clear!(tr[WE]);
// 1 is written to address 0x0000 at this point
set!(tr[CAS]);
set!(tr[RAS]);
set!(tr[WE]);
clear!(tr[RAS]); | identifier_body |
corebuilder.rs | use crate::{
error::CoreError,
logger::Logger,
warrior::{Instruction, Warrior},
};
use rand::Rng;
use super::{Core, CoreInstruction};
use std::collections::VecDeque;
#[derive(Debug)]
pub struct CoreBuilder {
pub(super) core_size: usize,
pub(super) cycles_before_tie: usize,
pub(super) initial_instruction: InitialInstruction,
pub(super) instruction_limit: usize,
pub(super) maximum_number_of_tasks: usize,
pub(super) minimum_separation: usize,
pub(super) read_distance: usize,
pub(super) write_distance: usize,
pub(super) separation: Separation,
pub(super) warriors: Vec<Warrior>,
pub(super) logger: Option<Box<dyn Logger>>,
}
impl Default for CoreBuilder {
fn default() -> Self {
Self {
core_size: 8000,
cycles_before_tie: 80_000,
initial_instruction: InitialInstruction::Fixed(Instruction::default()),
instruction_limit: 100,
maximum_number_of_tasks: 8000,
minimum_separation: 100,
read_distance: 8000,
write_distance: 8000,
separation: Separation::Random(100),
warriors: Vec::new(),
logger: None,
}
}
}
impl CoreBuilder {
/// Creates a new instance of CoreBuilder with default parameters and no warriors.
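/// A minimal usage sketch (the chained values below are illustrative
/// defaults, not recommendations):
/// ```ignore
/// let mut builder = CoreBuilder::new();
/// builder.core_size(8000).cycles_before_tie(80_000);
/// ```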
pub fn new() -> Self {
CoreBuilder::default()
}
/// Sets the core's size. Core size is the number of instructions which make up the core
/// during the battle.
pub fn core_size(&mut self, core_size: usize) -> &mut Self {
self.core_size = core_size;
self
}
/// Sets the number of cycles that the match can last for before it is declared a tie.
pub fn cycles_before_tie(&mut self, cycles_before_tie: usize) -> &mut Self {
self.cycles_before_tie = cycles_before_tie;
self
}
/// Sets the core's initial instruction. The initial instruction is that instruction which is preloaded
/// into core prior to loading warriors. In addition to loading
/// an instruction such as "DAT #0, #0" into all of core, the
/// initial instruction could be set to `Random`, meaning core
/// instructions are filled with randomly generated instructions.
pub fn initial_instruction(&mut self, initial_instruction: InitialInstruction) -> &mut Self {
self.initial_instruction = initial_instruction;
self
}
/// The maximum number of instructions allowed per warrior.
pub fn instruction_limit(&mut self, instruction_limit: usize) -> &mut Self {
self.instruction_limit = instruction_limit;
self
}
/// Each warrior can spawn multiple additional tasks. This variable sets the maximum
/// number of tasks allowed per warrior. In other words, this is the size of each warrior's task queue.
pub fn maximum_number_of_tasks(&mut self, maximum_number_of_tasks: usize) -> &mut Self {
self.maximum_number_of_tasks = maximum_number_of_tasks;
self
}
/// The minimum number of instructions from the first instruction
/// of one warrior to the first instruction of the next warrior.
pub fn minimum_separation(&mut self, minimum_separation: usize) -> &mut Self {
self.minimum_separation = minimum_separation;
// Need to put some limit on this related to number of warriors.
self
}
/// This is the range available for warriors to read information
/// from core. Attempts to read outside the limits of this range
/// result in reading within the local readable range. The range
/// is centered on the current instruction. Thus, a range of
/// 500 limits reading to offsets of (-249 -> +250) from the
/// currently executing instruction. The read limit can therefore
/// be considered a mini-core within core. An attempt to read
/// location PC+251 reads location PC-249 instead. An attempt to
/// read location PC+500 reads location PC instead.
///
/// Read distance must be a factor of core size, otherwise the
/// above defined behaviour is not guaranteed.
pub fn read_distance(&mut self, read_distance: usize) -> &mut Self {
self.read_distance = read_distance;
self
}
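// Annotation (illustrative, not part of the original source): the
// "mini-core" wrapping described above is assumed to be what `Core::fold`
// (used in `build` below) implements. A plausible sketch, with `limit`
// being the read or write distance:
//
//     fn fold(ptr: usize, limit: usize, core_size: usize) -> usize {
//         // reduce into the limited window first, then into the core
//         (ptr % limit) % core_size
//     }
//
// With a 500-instruction window, an offset of +251 is congruent to -249
// modulo 500, matching the PC+251 -> PC-249 example above.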
/// The number of instructions from the first instruction of one | self.separation = separation;
self
}
/// This is the range available for warriors to write information
/// to core. Attempts to write outside the limits of this range
/// result in writing within the local writable range. The range
/// is centered on the current instruction. Thus, a range of 500
/// limits writing to offsets of (-249 -> +250) from the
/// currently executing instruction. The write limit can
/// therefore be considered a mini-core within core. An attempt
/// to write location PC+251 writes to location PC-249 instead.
/// An attempt to write to location PC+500 writes to location PC
/// instead.
///
/// Write distance must be a factor of core size, otherwise the
/// above defined behaviour is not guaranteed.
pub fn write_distance(&mut self, write_distance: usize) -> &mut Self {
self.write_distance = write_distance;
self
}
pub fn load_warriors(&mut self, warriors: &[Warrior]) -> Result<&mut Self, CoreError> {
for warrior in warriors {
if warrior.len() > self.instruction_limit {
return Err(CoreError::WarriorTooLong(
warrior.len(),
self.instruction_limit,
warrior.metadata.name().unwrap_or("Unnamed").to_owned(),
));
}
if warrior.is_empty() {
return Err(CoreError::EmptyWarrior(
warrior.metadata.name().unwrap_or("Unnamed").to_owned(),
));
};
}
self.warriors = warriors.to_vec();
Ok(self)
}
/// Use a `Logger` to log the battle's output.
pub fn log_with(&mut self, logger: Box<dyn Logger>) -> &mut Self {
self.logger = Some(logger);
self
}
/// Build the core, consuming the `CoreBuilder` and returning a [`Core`](../struct.Core.html) struct.
pub fn build(&self) -> Result<Core, CoreError> {
let CoreBuilder {
initial_instruction,
separation,
warriors,
maximum_number_of_tasks,
core_size,
instruction_limit,
..
} = self;
let mut core_instructions = vec![
CoreInstruction::from_instruction(
initial_instruction.clone().extract(),
*core_size
);
*core_size
];
let separation = separation.clone();
let mut warrior_offsets: Vec<usize> = warriors.iter().map(|w| w.starts_at_line).collect();
match separation {
Separation::Random(min_separation) => {
let offsets =
random_offsets(&warriors, min_separation, *instruction_limit, *core_size);
for (i, (offset, warrior)) in offsets.iter().enumerate() {
let mut ptr = *offset;
warrior_offsets[i] =
Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size);
for instruction in &warrior.instructions {
core_instructions[ptr] =
CoreInstruction::from_instruction(instruction.clone(), *core_size);
ptr = Core::fold(ptr + 1, *core_size, *core_size);
}
}
}
Separation::Fixed(separation) => {
let mut ptr = 0_usize;
for (i, warrior) in warriors.iter().enumerate() {
warrior_offsets[i] =
Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size);
for instruction in &warrior.instructions {
core_instructions[ptr] =
CoreInstruction::from_instruction(instruction.clone(), *core_size);
ptr = Core::fold(ptr + 1, *core_size, *core_size);
}
ptr = Core::fold(ptr + separation, *core_size, *core_size);
}
}
};
let task_queues = warrior_offsets
.iter()
.zip(warriors)
.map(|(&offset, warrior)| {
let mut v = VecDeque::with_capacity(*maximum_number_of_tasks);
let offset = Core::fold(offset, *core_size, *core_size);
v.push_back(offset);
(warrior, v)
})
.collect();
Ok(Core {
core: self,
instructions: core_instructions,
task_queues,
current_queue: 0,
cycle_count: 0,
})
}
}
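// Annotation (illustrative usage, not part of the original source); the
// `Warrior::parse` call mirrors the tests at the bottom of this file:
//
//     let imp = Warrior::parse(include_str!("../../warriors/imp.red"), 0).unwrap();
//     let mut builder = CoreBuilder::new();
//     builder.core_size(8000).separation(Separation::Fixed(1000));
//     builder.load_warriors(&[imp])?;
//     let core = builder.build()?;
//
// `build` borrows the builder, so the builder must outlive the `Core` it
// returns.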
/// The separation between warriors at the start of a match.
///
/// The number of instructions from the first instruction of one warrior to the first instruction of the next warrior.
/// If a core's separation is `Random`, separations will be chosen randomly from the set of numbers larger than the core's minimum separation.
#[derive(Debug, Clone)]
pub enum Separation {
Random(usize),
Fixed(usize),
}
/// The value to which the core's memory addresses are initialised
/// at the beginning of the match.
///
/// The initial instruction is that instruction which is preloaded
/// into core prior to loading warriors. If set to `Random`, core
/// instructions are filled with randomly generated instructions.
#[derive(Debug, Clone)]
pub enum InitialInstruction {
Random,
Fixed(Instruction),
}
impl InitialInstruction {
/// Extract the initial instruction if it's `Fixed`, or get a random `Instruction` if it's `Random`.
pub fn extract(self) -> Instruction {
match self {
Self::Random => todo!(),
Self::Fixed(instr) => instr,
}
}
}
fn random_offsets(
warriors: &[Warrior],
minimum_separation: usize,
instruction_limit: usize,
core_size: usize,
) -> Vec<(usize, &Warrior)> {
let mut offsets: Vec<(usize, &Warrior)> = Vec::new();
for warrior in warriors {
let offset_addresses: Vec<usize> = offsets.iter().map(|x| x.0).collect();
let offset = get_valid_address(
&offset_addresses,
minimum_separation,
instruction_limit,
core_size,
);
offsets.push((offset, warrior));
}
offsets
}
fn get_valid_address(
offsets: &[usize],
minimum_separation: usize,
instruction_limit: usize,
core_size: usize,
) -> usize {
let diff = |x, y| {
if x > y {
x - y
} else {
((core_size - 1) + y) - x
}
};
let ptr: usize;
let mut rng = rand::thread_rng();
// This will run forever if we can't fit a warrior...
'outer: loop {
let address: usize = rng.gen_range(0, core_size);
for offset in offsets {
let lb = diff(address + instruction_limit, *offset);
let ub = diff(offset + instruction_limit, address);
if (lb <= minimum_separation) || (ub <= minimum_separation) {
continue 'outer;
}
}
ptr = address;
break;
}
ptr
}
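// Annotation (illustrative, not part of the original source): `diff` above
// is an asymmetric ring distance; the `(core_size - 1) + y` branch keeps
// candidate addresses that straddle the wrap-around point looking far
// apart. For example, with core_size = 8000: diff(7900, 100) == 7800, while
// diff(100, 7900) takes the wrap branch and yields (7999 + 7900) - 100 ==
// 15799.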
#[cfg(test)]
mod test {
use super::*;
use std::convert::TryFrom;
#[test]
fn random_addresses() {
let imp = Warrior::parse(include_str!("../../warriors/imp.red"), 0).unwrap();
let stone = Warrior::parse(include_str!("../../warriors/stone.red"), 0).unwrap();
let imp2 = imp.clone();
let stone2 = stone.clone();
let imp3 = imp.clone();
let stone3 = stone.clone();
let warriors = vec![imp, stone, imp2, stone2, imp3, stone3];
for _ in 0..5000 {
let offsets = random_offsets(&warriors, 100, 100, 8000);
assert_eq!(offsets.len(), 6);
for offset in &offsets {
let mut ok = true;
for other in &offsets {
if offset.1 != other.1 {
let o1 = i64::try_from(offset.0).unwrap();
let o2 = i64::try_from(other.0).unwrap();
if i64::abs(o1 - o2) < 100 {
ok = false;
break;
}
}
}
assert!(ok);
}
}
}
} | /// warrior to the first instruction of the next warrior.
/// Separation can be set to `Random`, meaning separations will be
/// chosen randomly from those larger than the minimum separation.
pub fn separation(&mut self, separation: Separation) -> &mut Self { | random_line_split |
corebuilder.rs | use crate::{
error::CoreError,
logger::Logger,
warrior::{Instruction, Warrior},
};
use rand::Rng;
use super::{Core, CoreInstruction};
use std::collections::VecDeque;
#[derive(Debug)]
pub struct CoreBuilder {
pub(super) core_size: usize,
pub(super) cycles_before_tie: usize,
pub(super) initial_instruction: InitialInstruction,
pub(super) instruction_limit: usize,
pub(super) maximum_number_of_tasks: usize,
pub(super) minimum_separation: usize,
pub(super) read_distance: usize,
pub(super) write_distance: usize,
pub(super) separation: Separation,
pub(super) warriors: Vec<Warrior>,
pub(super) logger: Option<Box<dyn Logger>>,
}
impl Default for CoreBuilder {
fn default() -> Self {
Self {
core_size: 8000,
cycles_before_tie: 80_000,
initial_instruction: InitialInstruction::Fixed(Instruction::default()),
instruction_limit: 100,
maximum_number_of_tasks: 8000,
minimum_separation: 100,
read_distance: 8000,
write_distance: 8000,
separation: Separation::Random(100),
warriors: Vec::new(),
logger: None,
}
}
}
impl CoreBuilder {
/// Creates a new instance of CoreBuilder with default parameters and no warriors.
pub fn new() -> Self {
CoreBuilder::default()
}
/// Sets the core's size. Core size is the number of instructions which make up the core
/// during the battle.
pub fn core_size(&mut self, core_size: usize) -> &mut Self {
self.core_size = core_size;
self
}
/// Sets the number of cycles that the match can last for before it is declared a tie.
pub fn cycles_before_tie(&mut self, cycles_before_tie: usize) -> &mut Self {
self.cycles_before_tie = cycles_before_tie;
self
}
/// Sets the core's initial instruction. The initial instruction is that instruction which is preloaded
/// into core prior to loading warriors. In addition to loading
/// an instruction such as "DAT #0, #0" into all of core, the
/// initial instruction could be set to `Random`, meaning core
/// instructions are filled with randomly generated instructions.
pub fn initial_instruction(&mut self, initial_instruction: InitialInstruction) -> &mut Self {
self.initial_instruction = initial_instruction;
self
}
/// The maximum number of instructions allowed per warrior.
pub fn instruction_limit(&mut self, instruction_limit: usize) -> &mut Self {
self.instruction_limit = instruction_limit;
self
}
/// Each warrior can spawn multiple additional tasks. This variable sets the maximum
/// number of tasks allowed per warrior. In other words, this is the size of each warrior's task queue.
pub fn maximum_number_of_tasks(&mut self, maximum_number_of_tasks: usize) -> &mut Self {
self.maximum_number_of_tasks = maximum_number_of_tasks;
self
}
/// The minimum number of instructions from the first instruction
/// of one warrior to the first instruction of the next warrior.
pub fn minimum_separation(&mut self, minimum_separation: usize) -> &mut Self {
self.minimum_separation = minimum_separation;
// Need to put some limit on this related to number of warriors.
self
}
/// This is the range available for warriors to read information
/// from core. Attempts to read outside the limits of this range
/// result in reading within the local readable range. The range
/// is centered on the current instruction. Thus, a range of
/// 500 limits reading to offsets of (-249 -> +250) from the
/// currently executing instruction. The read limit can therefore
/// be considered a mini-core within core. An attempt to read
/// location PC+251 reads location PC-249 instead. An attempt to
/// read location PC+500 reads location PC instead.
///
/// Read distance must be a factor of core size, otherwise the
/// above defined behaviour is not guaranteed.
pub fn read_distance(&mut self, read_distance: usize) -> &mut Self {
self.read_distance = read_distance;
self
}
/// The number of instructions from the first instruction of one
/// warrior to the first instruction of the next warrior.
/// Separation can be set to `Random`, meaning separations will be
/// chosen randomly from those larger than the minimum separation.
pub fn separation(&mut self, separation: Separation) -> &mut Self {
self.separation = separation;
self
}
/// This is the range available for warriors to write information
/// to core. Attempts to write outside the limits of this range
/// result in writing within the local writable range. The range
/// is centered on the current instruction. Thus, a range of 500
/// limits writing to offsets of (-249 -> +250) from the
/// currently executing instruction. The write limit can
/// therefore be considered a mini-core within core. An attempt
/// to write location PC+251 writes to location PC-249 instead.
/// An attempt to write to location PC+500 writes to location PC
/// instead.
///
/// Write distance must be a factor of core size, otherwise the
/// above defined behaviour is not guaranteed.
pub fn write_distance(&mut self, write_distance: usize) -> &mut Self {
self.write_distance = write_distance;
self
}
pub fn | (&mut self, warriors: &[Warrior]) -> Result<&mut Self, CoreError> {
for warrior in warriors {
if warrior.len() > self.instruction_limit {
return Err(CoreError::WarriorTooLong(
warrior.len(),
self.instruction_limit,
warrior.metadata.name().unwrap_or("Unnamed").to_owned(),
));
}
if warrior.is_empty() {
return Err(CoreError::EmptyWarrior(
warrior.metadata.name().unwrap_or("Unnamed").to_owned(),
));
};
}
self.warriors = warriors.to_vec();
Ok(self)
}
/// Use a `Logger` to log the battle's output.
pub fn log_with(&mut self, logger: Box<dyn Logger>) -> &mut Self {
self.logger = Some(logger);
self
}
/// Build the core, consuming the `CoreBuilder` and returning a [`Core`](../struct.Core.html) struct.
pub fn build(&self) -> Result<Core, CoreError> {
let CoreBuilder {
initial_instruction,
separation,
warriors,
maximum_number_of_tasks,
core_size,
instruction_limit,
..
} = self;
let mut core_instructions = vec![
CoreInstruction::from_instruction(
initial_instruction.clone().extract(),
*core_size
);
*core_size
];
let separation = separation.clone();
let mut warrior_offsets: Vec<usize> = warriors.iter().map(|w| w.starts_at_line).collect();
match separation {
Separation::Random(min_separation) => {
let offsets =
random_offsets(&warriors, min_separation, *instruction_limit, *core_size);
for (i, (offset, warrior)) in offsets.iter().enumerate() {
let mut ptr = *offset;
warrior_offsets[i] =
Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size);
for instruction in &warrior.instructions {
core_instructions[ptr] =
CoreInstruction::from_instruction(instruction.clone(), *core_size);
ptr = Core::fold(ptr + 1, *core_size, *core_size);
}
}
}
Separation::Fixed(separation) => {
let mut ptr = 0_usize;
for (i, warrior) in warriors.iter().enumerate() {
warrior_offsets[i] =
Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size);
for instruction in &warrior.instructions {
core_instructions[ptr] =
CoreInstruction::from_instruction(instruction.clone(), *core_size);
ptr = Core::fold(ptr + 1, *core_size, *core_size);
}
ptr = Core::fold(ptr + separation, *core_size, *core_size);
}
}
};
let task_queues = warrior_offsets
.iter()
.zip(warriors)
.map(|(&offset, warrior)| {
let mut v = VecDeque::with_capacity(*maximum_number_of_tasks);
let offset = Core::fold(offset, *core_size, *core_size);
v.push_back(offset);
(warrior, v)
})
.collect();
Ok(Core {
core: self,
instructions: core_instructions,
task_queues,
current_queue: 0,
cycle_count: 0,
})
}
}
/// The separation between warriors at the start of a match.
///
/// The number of instructions from the first instruction of one warrior to the first instruction of the next warrior.
/// If a core's separation is `Random`, separations will be chosen randomly from the set of numbers larger than the core's minimum separation.
#[derive(Debug, Clone)]
pub enum Separation {
Random(usize),
Fixed(usize),
}
/// The value to which the core's memory addresses are initialised
/// at the beginning of the match.
///
/// The initial instruction is that instruction which is preloaded
/// into core prior to loading warriors. If set to `Random`, core
/// instructions are filled with randomly generated instructions.
#[derive(Debug, Clone)]
pub enum InitialInstruction {
Random,
Fixed(Instruction),
}
impl InitialInstruction {
/// Extract the initial instruction if it's `Fixed`, or get a random `Instruction` if it's `Random`.
pub fn extract(self) -> Instruction {
match self {
Self::Random => todo!(),
Self::Fixed(instr) => instr,
}
}
}
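// Annotation (illustrative, not part of the original source): `extract`
// consumes the enum, and the `Random` arm is still `todo!()` above, so only
// `Fixed` is usable until random instruction generation is implemented:
//
//     let instr = InitialInstruction::Fixed(Instruction::default()).extract();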
fn random_offsets(
warriors: &[Warrior],
minimum_separation: usize,
instruction_limit: usize,
core_size: usize,
) -> Vec<(usize, &Warrior)> {
let mut offsets: Vec<(usize, &Warrior)> = Vec::new();
for warrior in warriors {
let offset_addresses: Vec<usize> = offsets.iter().map(|x| x.0).collect();
let offset = get_valid_address(
&offset_addresses,
minimum_separation,
instruction_limit,
core_size,
);
offsets.push((offset, warrior));
}
offsets
}
fn get_valid_address(
offsets: &[usize],
minimum_separation: usize,
instruction_limit: usize,
core_size: usize,
) -> usize {
let diff = |x, y| {
if x > y {
x - y
} else {
((core_size - 1) + y) - x
}
};
let ptr: usize;
let mut rng = rand::thread_rng();
// This will run forever if we can't fit a warrior...
'outer: loop {
let address: usize = rng.gen_range(0, core_size);
for offset in offsets {
let lb = diff(address + instruction_limit, *offset);
let ub = diff(offset + instruction_limit, address);
if (lb <= minimum_separation) || (ub <= minimum_separation) {
continue 'outer;
}
}
ptr = address;
break;
}
ptr
}
#[cfg(test)]
mod test {
use super::*;
use std::convert::TryFrom;
#[test]
fn random_addresses() {
let imp = Warrior::parse(include_str!("../../warriors/imp.red"), 0).unwrap();
let stone = Warrior::parse(include_str!("../../warriors/stone.red"), 0).unwrap();
let imp2 = imp.clone();
let stone2 = stone.clone();
let imp3 = imp.clone();
let stone3 = stone.clone();
let warriors = vec![imp, stone, imp2, stone2, imp3, stone3];
for _ in 0..5000 {
let offsets = random_offsets(&warriors, 100, 100, 8000);
assert_eq!(offsets.len(), 6);
for offset in &offsets {
let mut ok = true;
for other in &offsets {
if offset.1 != other.1 {
let o1 = i64::try_from(offset.0).unwrap();
let o2 = i64::try_from(other.0).unwrap();
if i64::abs(o1 - o2) < 100 {
ok = false;
break;
}
}
}
assert!(ok);
}
}
}
}
| load_warriors | identifier_name |
corebuilder.rs | use crate::{
error::CoreError,
logger::Logger,
warrior::{Instruction, Warrior},
};
use rand::Rng;
use super::{Core, CoreInstruction};
use std::collections::VecDeque;
#[derive(Debug)]
pub struct CoreBuilder {
pub(super) core_size: usize,
pub(super) cycles_before_tie: usize,
pub(super) initial_instruction: InitialInstruction,
pub(super) instruction_limit: usize,
pub(super) maximum_number_of_tasks: usize,
pub(super) minimum_separation: usize,
pub(super) read_distance: usize,
pub(super) write_distance: usize,
pub(super) separation: Separation,
pub(super) warriors: Vec<Warrior>,
pub(super) logger: Option<Box<dyn Logger>>,
}
impl Default for CoreBuilder {
fn default() -> Self {
Self {
core_size: 8000,
cycles_before_tie: 80_000,
initial_instruction: InitialInstruction::Fixed(Instruction::default()),
instruction_limit: 100,
maximum_number_of_tasks: 8000,
minimum_separation: 100,
read_distance: 8000,
write_distance: 8000,
separation: Separation::Random(100),
warriors: Vec::new(),
logger: None,
}
}
}
impl CoreBuilder {
/// Creates a new instance of CoreBuilder with default parameters and no warriors.
pub fn new() -> Self {
CoreBuilder::default()
}
/// Sets the core's size. Core size is the number of instructions which make up the core
/// during the battle.
pub fn core_size(&mut self, core_size: usize) -> &mut Self {
self.core_size = core_size;
self
}
/// Sets the number of cycles that the match can last for before it is declared a tie.
pub fn cycles_before_tie(&mut self, cycles_before_tie: usize) -> &mut Self {
self.cycles_before_tie = cycles_before_tie;
self
}
/// Sets the core's initial instruction. The initial instruction is that instruction which is preloaded
/// into core prior to loading warriors. In addition to loading
/// an instruction such as "DAT #0, #0" into all of core, the
/// initial instruction could be set to `Random`, meaning core
/// instructions are filled with randomly generated instructions.
pub fn initial_instruction(&mut self, initial_instruction: InitialInstruction) -> &mut Self {
self.initial_instruction = initial_instruction;
self
}
/// The maximum number of instructions allowed per warrior.
pub fn instruction_limit(&mut self, instruction_limit: usize) -> &mut Self |
/// Each warrior can spawn multiple additional tasks. This variable sets the maximum
/// number of tasks allowed per warrior. In other words, this is the size of each warrior's task queue.
pub fn maximum_number_of_tasks(&mut self, maximum_number_of_tasks: usize) -> &mut Self {
self.maximum_number_of_tasks = maximum_number_of_tasks;
self
}
/// The minimum number of instructions from the first instruction
/// of one warrior to the first instruction of the next warrior.
pub fn minimum_separation(&mut self, minimum_separation: usize) -> &mut Self {
self.minimum_separation = minimum_separation;
// Need to put some limit on this related to number of warriors.
self
}
/// This is the range available for warriors to read information
/// from core. Attempts to read outside the limits of this range
/// result in reading within the local readable range. The range
/// is centered on the current instruction. Thus, a range of
/// 500 limits reading to offsets of (-249 -> +250) from the
/// currently executing instruction. The read limit can therefore
/// be considered a mini-core within core. An attempt to read
/// location PC+251 reads location PC-249 instead. An attempt to
/// read location PC+500 reads location PC instead.
///
/// Read distance must be a factor of core size, otherwise the
/// above defined behaviour is not guaranteed.
pub fn read_distance(&mut self, read_distance: usize) -> &mut Self {
self.read_distance = read_distance;
self
}
/// The number of instructions from the first instruction of one
/// warrior to the first instruction of the next warrior.
/// Separation can be set to `Random`, meaning separations will be
/// chosen randomly from those larger than the minimum separation.
pub fn separation(&mut self, separation: Separation) -> &mut Self {
self.separation = separation;
self
}
/// This is the range available for warriors to write information
/// to core. Attempts to write outside the limits of this range
/// result in writing within the local writable range. The range
/// is centered on the current instruction. Thus, a range of 500
/// limits writing to offsets of (-249 -> +250) from the
/// currently executing instruction. The write limit can
/// therefore be considered a mini-core within core. An attempt
/// to write location PC+251 writes to location PC-249 instead.
/// An attempt to write to location PC+500 writes to location PC
/// instead.
///
/// Write distance must be a factor of core size, otherwise the
/// above defined behaviour is not guaranteed.
pub fn write_distance(&mut self, write_distance: usize) -> &mut Self {
self.write_distance = write_distance;
self
}
pub fn load_warriors(&mut self, warriors: &[Warrior]) -> Result<&mut Self, CoreError> {
for warrior in warriors {
if warrior.len() > self.instruction_limit {
return Err(CoreError::WarriorTooLong(
warrior.len(),
self.instruction_limit,
warrior.metadata.name().unwrap_or("Unnamed").to_owned(),
));
}
if warrior.is_empty() {
return Err(CoreError::EmptyWarrior(
warrior.metadata.name().unwrap_or("Unnamed").to_owned(),
));
};
}
self.warriors = warriors.to_vec();
Ok(self)
}
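// Annotation (illustrative, not part of the original source): validation
// happens before the warriors are stored, so a caller can match on the
// specific error variants:
//
//     match builder.load_warriors(&warriors) {
//         Ok(builder) => { /* keep configuring */ }
//         Err(CoreError::WarriorTooLong(len, limit, name)) => { /* ... */ }
//         Err(CoreError::EmptyWarrior(name)) => { /* ... */ }
//         Err(_) => { /* any other core error */ }
//     }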
/// Use a `Logger` to log the battle's output.
pub fn log_with(&mut self, logger: Box<dyn Logger>) -> &mut Self {
self.logger = Some(logger);
self
}
/// Build the core, consuming the `CoreBuilder` and returning a [`Core`](../struct.Core.html) struct.
pub fn build(&self) -> Result<Core, CoreError> {
let CoreBuilder {
initial_instruction,
separation,
warriors,
maximum_number_of_tasks,
core_size,
instruction_limit,
..
} = self;
let mut core_instructions = vec![
CoreInstruction::from_instruction(
initial_instruction.clone().extract(),
*core_size
);
*core_size
];
let separation = separation.clone();
let mut warrior_offsets: Vec<usize> = warriors.iter().map(|w| w.starts_at_line).collect();
match separation {
Separation::Random(min_separation) => {
let offsets =
random_offsets(&warriors, min_separation, *instruction_limit, *core_size);
for (i, (offset, warrior)) in offsets.iter().enumerate() {
let mut ptr = *offset;
warrior_offsets[i] =
Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size);
for instruction in &warrior.instructions {
core_instructions[ptr] =
CoreInstruction::from_instruction(instruction.clone(), *core_size);
ptr = Core::fold(ptr + 1, *core_size, *core_size);
}
}
}
Separation::Fixed(separation) => {
let mut ptr = 0_usize;
for (i, warrior) in warriors.iter().enumerate() {
warrior_offsets[i] =
Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size);
for instruction in &warrior.instructions {
core_instructions[ptr] =
CoreInstruction::from_instruction(instruction.clone(), *core_size);
ptr = Core::fold(ptr + 1, *core_size, *core_size);
}
ptr = Core::fold(ptr + separation, *core_size, *core_size);
}
}
};
let task_queues = warrior_offsets
.iter()
.zip(warriors)
.map(|(&offset, warrior)| {
let mut v = VecDeque::with_capacity(*maximum_number_of_tasks);
let offset = Core::fold(offset, *core_size, *core_size);
v.push_back(offset);
(warrior, v)
})
.collect();
Ok(Core {
core: self,
instructions: core_instructions,
task_queues,
current_queue: 0,
cycle_count: 0,
})
}
}
/// The separation between warriors at the start of a match.
///
/// The number of instructions from the first instruction of one warrior to the first instruction of the next warrior.
/// If a core's separation is `Random`, separations will be chosen randomly from the set of numbers larger than the core's minimum separation.
#[derive(Debug, Clone)]
pub enum Separation {
Random(usize),
Fixed(usize),
}
/// The value to which the core's memory addresses are initialised
/// at the beginning of the match.
///
/// The initial instruction is that instruction which is preloaded
/// into core prior to loading warriors. If set to `Random`, core
/// instructions are filled with randomly generated instructions.
#[derive(Debug, Clone)]
pub enum InitialInstruction {
Random,
Fixed(Instruction),
}
impl InitialInstruction {
/// Extract the initial instruction if it's `Fixed`, or get a random `Instruction` if it's `Random`.
pub fn extract(self) -> Instruction {
match self {
Self::Random => todo!(),
Self::Fixed(instr) => instr,
}
}
}
fn random_offsets(
warriors: &[Warrior],
minimum_separation: usize,
instruction_limit: usize,
core_size: usize,
) -> Vec<(usize, &Warrior)> {
let mut offsets: Vec<(usize, &Warrior)> = Vec::new();
for warrior in warriors {
let offset_addresses: Vec<usize> = offsets.iter().map(|x| x.0).collect();
let offset = get_valid_address(
&offset_addresses,
minimum_separation,
instruction_limit,
core_size,
);
offsets.push((offset, warrior));
}
offsets
}
fn get_valid_address(
offsets: &[usize],
minimum_separation: usize,
instruction_limit: usize,
core_size: usize,
) -> usize {
let diff = |x, y| {
if x > y {
x - y
} else {
((core_size - 1) + y) - x
}
};
let ptr: usize;
let mut rng = rand::thread_rng();
// This will run forever if we can't fit a warrior...
'outer: loop {
let address: usize = rng.gen_range(0, core_size);
for offset in offsets {
let lb = diff(address + instruction_limit, *offset);
let ub = diff(offset + instruction_limit, address);
if (lb <= minimum_separation) || (ub <= minimum_separation) {
continue 'outer;
}
}
ptr = address;
break;
}
ptr
}
#[cfg(test)]
mod test {
use super::*;
use std::convert::TryFrom;
#[test]
fn random_addresses() {
let imp = Warrior::parse(include_str!("../../warriors/imp.red"), 0).unwrap();
let stone = Warrior::parse(include_str!("../../warriors/stone.red"), 0).unwrap();
let imp2 = imp.clone();
let stone2 = stone.clone();
let imp3 = imp.clone();
let stone3 = stone.clone();
let warriors = vec![imp, stone, imp2, stone2, imp3, stone3];
for _ in 0..5000 {
let offsets = random_offsets(&warriors, 100, 100, 8000);
assert_eq!(offsets.len(), 6);
for offset in &offsets {
let mut ok = true;
for other in &offsets {
if offset.1 != other.1 {
let o1 = i64::try_from(offset.0).unwrap();
let o2 = i64::try_from(other.0).unwrap();
if i64::abs(o1 - o2) < 100 {
ok = false;
break;
}
}
}
assert!(ok);
}
}
}
}
| {
self.instruction_limit = instruction_limit;
self
} | identifier_body |
corebuilder.rs | use crate::{
error::CoreError,
logger::Logger,
warrior::{Instruction, Warrior},
};
use rand::Rng;
use super::{Core, CoreInstruction};
use std::collections::VecDeque;
#[derive(Debug)]
pub struct CoreBuilder {
pub(super) core_size: usize,
pub(super) cycles_before_tie: usize,
pub(super) initial_instruction: InitialInstruction,
pub(super) instruction_limit: usize,
pub(super) maximum_number_of_tasks: usize,
pub(super) minimum_separation: usize,
pub(super) read_distance: usize,
pub(super) write_distance: usize,
pub(super) separation: Separation,
pub(super) warriors: Vec<Warrior>,
pub(super) logger: Option<Box<dyn Logger>>,
}
impl Default for CoreBuilder {
fn default() -> Self {
Self {
core_size: 8000,
cycles_before_tie: 80_000,
initial_instruction: InitialInstruction::Fixed(Instruction::default()),
instruction_limit: 100,
maximum_number_of_tasks: 8000,
minimum_separation: 100,
read_distance: 8000,
write_distance: 8000,
separation: Separation::Random(100),
warriors: Vec::new(),
logger: None,
}
}
}
impl CoreBuilder {
/// Creates a new instance of CoreBuilder with default parameters and no warriors.
pub fn new() -> Self {
CoreBuilder::default()
}
/// Sets the core's size. Core size is the number of instructions which make up the core
/// during the battle.
pub fn core_size(&mut self, core_size: usize) -> &mut Self {
self.core_size = core_size;
self
}
/// Sets the number of cycles that the match can last for before it is declared a tie.
pub fn cycles_before_tie(&mut self, cycles_before_tie: usize) -> &mut Self {
self.cycles_before_tie = cycles_before_tie;
self
}
/// Sets the core's initial instruction. The initial instruction is that instruction which is preloaded
/// into core prior to loading warriors. In addition to loading
/// an instruction such as "DAT #0, #0" into all of core, the
/// initial instruction could be set to `Random`, meaning core
/// instructions are filled with randomly generated instructions.
pub fn initial_instruction(&mut self, initial_instruction: InitialInstruction) -> &mut Self {
self.initial_instruction = initial_instruction;
self
}
/// The maximum number of instructions allowed per warrior.
pub fn instruction_limit(&mut self, instruction_limit: usize) -> &mut Self {
self.instruction_limit = instruction_limit;
self
}
/// Each warrior can spawn multiple additional tasks. This variable sets the maximum
/// number of tasks allowed per warrior. In other words, this is the size of each warrior's task queue.
pub fn maximum_number_of_tasks(&mut self, maximum_number_of_tasks: usize) -> &mut Self {
self.maximum_number_of_tasks = maximum_number_of_tasks;
self
}
/// The minimum number of instructions from the first instruction
/// of one warrior to the first instruction of the next warrior.
pub fn minimum_separation(&mut self, minimum_separation: usize) -> &mut Self {
self.minimum_separation = minimum_separation;
// Need to put some limit on this related to number of warriors.
self
}
/// This is the range available for warriors to read information
/// from core. Attempts to read outside the limits of this range
/// result in reading within the local readable range. The range
/// is centered on the current instruction. Thus, a range of
/// 500 limits reading to offsets of (-249 -> +250) from the
/// currently executing instruction. The read limit can therefore
/// be considered a mini-core within core. An attempt to read
/// location PC+251 reads location PC-249 instead. An attempt to
/// read location PC+500 reads location PC instead.
///
/// Read distance must be a factor of core size, otherwise the
/// above defined behaviour is not guaranteed.
pub fn read_distance(&mut self, read_distance: usize) -> &mut Self {
self.read_distance = read_distance;
self
}
/// The number of instructions from the first instruction of one
/// warrior to the first instruction of the next warrior.
/// Separation can be set to `Random`, meaning separations will be
/// chosen randomly from those larger than the minimum separation.
pub fn separation(&mut self, separation: Separation) -> &mut Self {
self.separation = separation;
self
}
/// This is the range available for warriors to write information
/// to core. Attempts to write outside the limits of this range
/// result in writing within the local writable range. The range
/// is centered on the current instruction. Thus, a range of 500
/// limits writing to offsets of (-249 -> +250) from the
/// currently executing instruction. The write limit can
/// therefore be considered a mini-core within core. An attempt
/// to write location PC+251 writes to location PC-249 instead.
/// An attempt to write to location PC+500 writes to location PC
/// instead.
///
/// Write distance must be a factor of core size, otherwise the
/// above defined behaviour is not guaranteed.
pub fn write_distance(&mut self, write_distance: usize) -> &mut Self {
self.write_distance = write_distance;
self
}
pub fn load_warriors(&mut self, warriors: &[Warrior]) -> Result<&mut Self, CoreError> {
for warrior in warriors {
if warrior.len() > self.instruction_limit |
if warrior.is_empty() {
return Err(CoreError::EmptyWarrior(
warrior.metadata.name().unwrap_or("Unnamed").to_owned(),
));
};
}
self.warriors = warriors.to_vec();
Ok(self)
}
/// Use a `Logger` to log the battle's output.
pub fn log_with(&mut self, logger: Box<dyn Logger>) -> &mut Self {
self.logger = Some(logger);
self
}
/// Build the core, consuming the `CoreBuilder` and returning a [`Core`](../struct.Core.html) struct.
pub fn build(&self) -> Result<Core, CoreError> {
let CoreBuilder {
initial_instruction,
separation,
warriors,
maximum_number_of_tasks,
core_size,
instruction_limit,
..
} = self;
let mut core_instructions = vec![
CoreInstruction::from_instruction(
initial_instruction.clone().extract(),
*core_size
);
*core_size
];
let separation = separation.clone();
let mut warrior_offsets: Vec<usize> = warriors.iter().map(|w| w.starts_at_line).collect();
match separation {
Separation::Random(min_separation) => {
let offsets =
random_offsets(&warriors, min_separation, *instruction_limit, *core_size);
for (i, (offset, warrior)) in offsets.iter().enumerate() {
let mut ptr = *offset;
warrior_offsets[i] =
Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size);
for instruction in &warrior.instructions {
core_instructions[ptr] =
CoreInstruction::from_instruction(instruction.clone(), *core_size);
ptr = Core::fold(ptr + 1, *core_size, *core_size);
}
}
}
Separation::Fixed(separation) => {
let mut ptr = 0_usize;
for (i, warrior) in warriors.iter().enumerate() {
warrior_offsets[i] =
Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size);
for instruction in &warrior.instructions {
core_instructions[ptr] =
CoreInstruction::from_instruction(instruction.clone(), *core_size);
ptr = Core::fold(ptr + 1, *core_size, *core_size);
}
ptr = Core::fold(ptr + separation, *core_size, *core_size);
}
}
};
let task_queues = warrior_offsets
.iter()
.zip(warriors)
.map(|(&offset, warrior)| {
let mut v = VecDeque::with_capacity(*maximum_number_of_tasks);
let offset = Core::fold(offset, *core_size, *core_size);
v.push_back(offset);
(warrior, v)
})
.collect();
Ok(Core {
core: self,
instructions: core_instructions,
task_queues,
current_queue: 0,
cycle_count: 0,
})
}
}
/// The separation between warriors at the start of a match.
///
/// The number of instructions from the first instruction of one warrior to the first instruction of the next warrior.
/// If a core's separation is `Random`, separations will be chosen randomly from the set of numbers larger than the core's minimum separation.
#[derive(Debug, Clone)]
pub enum Separation {
Random(usize),
Fixed(usize),
}
/// The value to which the core's memory addresses are initialised
/// at the beginning of the match.
///
/// The initial instruction is that instruction which is preloaded
/// into core prior to loading warriors. If set to `Random`, core
/// instructions are filled with randomly generated instructions.
#[derive(Debug, Clone)]
pub enum InitialInstruction {
Random,
Fixed(Instruction),
}
impl InitialInstruction {
/// Extract the initial instruction if it's `Fixed`, or get a random `Instruction` if it's `Random`.
pub fn extract(self) -> Instruction {
match self {
Self::Random => todo!(),
Self::Fixed(instr) => instr,
}
}
}
fn random_offsets(
warriors: &[Warrior],
minimum_separation: usize,
instruction_limit: usize,
core_size: usize,
) -> Vec<(usize, &Warrior)> {
let mut offsets: Vec<(usize, &Warrior)> = Vec::new();
for warrior in warriors {
let offset_addresses: Vec<usize> = offsets.iter().map(|x| x.0).collect();
let offset = get_valid_address(
&offset_addresses,
minimum_separation,
instruction_limit,
core_size,
);
offsets.push((offset, warrior));
}
offsets
}
fn get_valid_address(
offsets: &[usize],
minimum_separation: usize,
instruction_limit: usize,
core_size: usize,
) -> usize {
let diff = |x, y| {
if x > y {
x - y
} else {
((core_size - 1) + y) - x
}
};
let ptr: usize;
let mut rng = rand::thread_rng();
// This will run forever if we can't fit a warrior...
'outer: loop {
let address: usize = rng.gen_range(0, core_size);
for offset in offsets {
let lb = diff(address + instruction_limit, *offset);
let ub = diff(offset + instruction_limit, address);
if (lb <= minimum_separation) || (ub <= minimum_separation) {
continue 'outer;
}
}
ptr = address;
break;
}
ptr
}
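// Annotation (illustrative, not part of the original source): as the
// comment inside the loop notes, placement can spin forever on a crowded
// core; a bounded variant would give up and let the caller report an error:
//
//     for _attempt in 0..10_000 {
//         let address = rng.gen_range(0, core_size);
//         if far_enough(address) { return Some(address); } // hypothetical helper
//     }
//     None // surfaced to the caller as a placement error
//
// `far_enough` stands in for the separation check above and is not defined
// in this file.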
#[cfg(test)]
mod test {
use super::*;
use std::convert::TryFrom;
#[test]
fn random_addresses() {
let imp = Warrior::parse(include_str!("../../warriors/imp.red"), 0).unwrap();
let stone = Warrior::parse(include_str!("../../warriors/stone.red"), 0).unwrap();
let imp2 = imp.clone();
let stone2 = stone.clone();
let imp3 = imp.clone();
let stone3 = stone.clone();
let warriors = vec![imp, stone, imp2, stone2, imp3, stone3];
for _ in 0..5000 {
let offsets = random_offsets(&warriors, 100, 100, 8000);
assert_eq!(offsets.len(), 6);
for offset in &offsets {
let mut ok = true;
for other in &offsets {
if offset.1 != other.1 {
let o1 = i64::try_from(offset.0).unwrap();
let o2 = i64::try_from(other.0).unwrap();
if i64::abs(o1 - o2) < 100 {
ok = false;
break;
}
}
}
assert!(ok);
}
}
}
}
| {
return Err(CoreError::WarriorTooLong(
warrior.len(),
self.instruction_limit,
warrior.metadata.name().unwrap_or("Unnamed").to_owned(),
));
} | conditional_block |
nbd.rs | //! Utility functions for working with nbd devices
use rpc::mayastor::*;
use crate::{
csi::{NodeStageVolumeRequest, NodeStageVolumeResponse},
device,
format::probed_format,
mount::{match_mount, mount_fs, Fs},
};
use enclose::enclose;
use futures::{
future::{err, ok, Either},
Future,
};
use glob::glob;
use jsonrpc;
use rpc::jsonrpc as jsondata;
use std::fmt;
use sysfs;
use tower_grpc::{Code, Response, Status};
use std::{path::PathBuf, sync::Mutex};
lazy_static! {
static ref ARRAY: Mutex<Vec<u32>> =
Mutex::new(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15]);
}
#[derive(Clone, Copy)]
pub struct NbdDevInfo {
instance: u32,
major: u64,
minor: u64,
}
impl fmt::Display for NbdDevInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "/dev/nbd{}", self.instance)
}
}
impl fmt::Debug for NbdDevInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "nbd{} ({}:{})", self.instance, self.major, self.minor)
}
}
pub fn nbd_stage_volume(
socket: String,
msg: &NodeStageVolumeRequest,
filesystem: Fs,
mnt_opts: Vec<String>,
) -> Box<
dyn Future<Item = Response<NodeStageVolumeResponse>, Error = Status> + Send,
> {
//let msg = request.into_inner();
let uuid = msg.volume_id.clone();
let target_path = msg.staging_target_path.to_string();
let mount_fail = msg.publish_context.contains_key("mount");
let f = get_nbd_instance(&socket.clone(), &uuid)
.and_then(move |nbd_disk| {
if nbd_disk.is_none() {
// if we don't have an nbd device with a corresponding bdev,
// it's an error, as one should exist
error!("No device instance found for {}, likely a bug", &uuid);
return err(Status::new(
Code::Internal,
"no such bdev exists".to_string(),
));
}
let nbd_disk = nbd_disk.unwrap();
if let Some(mount) = match_mount(
Some(&nbd_disk.nbd_device),
Some(&target_path),
false,
) {
if mount.source == nbd_disk.nbd_device
&& mount.dest == target_path
{
// the device is already mounted, we should return OK
return ok((true, nbd_disk, target_path, uuid));
} else {
// a different device is already on that path, return an error
return err(Status::new(
Code::AlreadyExists,
"Some different BDEV on that path already".to_string(),
));
}
}
ok((false, nbd_disk, target_path, uuid))
})
.and_then(move |mounted| {
if !mounted.0 {
Either::A(
probed_format(&mounted.1.nbd_device, &filesystem.name)
.then(move |format_result| {
let mnt_result =
if mount_fail || format_result.is_err() {
if !mount_fail {
Err(format_result.unwrap_err())
} else {
debug!("Simulating mount failure");
Err("simulated".to_owned())
}
} else {
mount_fs(
&mounted.1.nbd_device,
&mounted.2,
false,
&filesystem.name,
&mnt_opts,
)
};
if let Err(reason) = mnt_result {
Box::new(err(Status::new(
Code::Internal,
reason,
)))
} else {
info!(
"staged {} on {}",
&mounted.3, &mounted.2
);
Box::new(ok(Response::new(
NodeStageVolumeResponse {},
)))
}
}),
)
} else {
Either::B(Box::new(ok(Response::new(
NodeStageVolumeResponse {},
))))
}
});
Box::new(f)
}
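// Annotation (not part of the original source): the staging chain above is
// lookup (get_nbd_instance) -> idempotency check against existing mounts ->
// probed_format -> mount_fs. The "mount" key in publish_context is only a
// test hook used to simulate a mount failure.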
pub fn create_blkdev(
socket: String,
msg: &CreateBlkdevRequest,
) -> Box<dyn Future<Item = Response<CreateBlkdevReply>, Error = Status> + Send>
{
trace!("{:?}", msg);
debug!("Creating NBD device for {}...", msg.uuid);
let nbd_dev_info = NbdDevInfo::new();
let uuid = msg.uuid.clone();
// whichever instance we were assigned has now been claimed and removed
// from the free device list
if nbd_dev_info.is_none() {
return Box::new(err(Status::new(
Code::Internal,
String::from("EAGAIN"),
)));
}
let nbd_dev_info = nbd_dev_info.unwrap();
let f = get_nbd_instance(&socket, &uuid)
// TODO: Avoid this step in future chain by returning eexist from
// start-nbd-disk json-rpc method.
.and_then(enclose! { (uuid) move |bdev| {
if let Some(bdev) = bdev {
return err(Status::new(
Code::AlreadyExists,
format!(
"Bbdev {} already published at {}",
uuid,
bdev.nbd_device
),
));
}
ok(())
}})
.map_err(|e| jsonrpc::error::Error::GenericError(e.to_string()))
.and_then(enclose! { (uuid) move |_| {
jsonrpc::call::<jsondata::StartNbdDiskArgs, String>(
&socket,
"start_nbd_disk",
Some(jsondata::StartNbdDiskArgs {
bdev_name: uuid,
nbd_device: format!("{}", nbd_dev_info),
}),
)
}})
.and_then(move |nbd_device| {
trace!("NBD device {} created", &nbd_device);
device::await_size(&nbd_device).map_err(jsonrpc::error::Error::from)
})
.and_then(move |size| {
info!("Device {} reported size: {}", nbd_dev_info, size);
let reply = CreateBlkdevReply {
blk_dev: format!("{}", nbd_dev_info),
};
ok(Response::new(reply))
})
.map_err(move |err| {
error!(
"Putting back nbd device {} due to error: {}",
nbd_dev_info,
err.to_string()
);
nbd_dev_info.put_back();
err.into_status()
});
Box::new(f)
}
pub fn destroy_blkdev(
socket: String,
msg: &DestroyBlkdevRequest,
) -> Box<dyn Future<Item = Response<Null>, Error = Status> + Send> {
trace!("{:?}", msg);
let uuid = msg.uuid.clone();
debug!("Deleting NBD device for {}...", uuid);
let f = get_nbd_instance(&socket, &uuid)
// TODO: Avoid this step by returning enoent from stop-nbd-disk
// json-rpc method.
.and_then(move |nbd_disk| {
if nbd_disk.is_none() {
trace!("bdev {} not found", uuid);
return err(Status::new(
Code::Internal,
format!("no such bdev {}", uuid),
));
}
let nbd_disk = nbd_disk.unwrap();
ok(nbd_disk)
})
.and_then(move |nbd_disk| {
trace!("Stopping NBD device {}", nbd_disk.nbd_device);
jsonrpc::call::<jsondata::StopNbdDiskArgs, bool>(
&socket,
"stop_nbd_disk",
Some(jsondata::StopNbdDiskArgs {
nbd_device: nbd_disk.nbd_device.clone(),
}),
)
.map_err(|err| err.into_status())
.and_then(|done| {
if done {
info!(
"Stopped NBD device {} with bdev {}",
nbd_disk.nbd_device, nbd_disk.bdev_name
);
NbdDevInfo::from(nbd_disk.nbd_device).put_back();
Box::new(ok(Response::new(Null {})))
} else {
let msg = format!(
"Failed to stop nbd device {} for {}",
nbd_disk.nbd_device, nbd_disk.bdev_name
);
error!("{}", msg);
Box::new(err(Status::new(Code::Internal, msg)))
}
})
});
Box::new(f)
}
pub fn get_nbd_instance(
sock: &str,
bdev_name: &str,
) -> Box<dyn Future<Item = Option<jsondata::NbdDisk>, Error = Status> + Send> {
let bdev_name = bdev_name.to_string();
let socket = sock.to_string();
let f = jsonrpc::call::<jsondata::GetBdevsArgs, Vec<jsondata::Bdev>>(
&socket,
"get_bdevs",
Some(jsondata::GetBdevsArgs {
name: bdev_name.clone(),
}),
)
.map_err(|e| {
Status::new(Code::NotFound, format!("Failed to list bdevs: {}", e))
})
.and_then(move |bdev| {
jsonrpc::call::<(), Vec<jsondata::NbdDisk>>(
&socket,
"get_nbd_disks",
None,
)
.map(move |nbd_disks| {
nbd_disks
.into_iter()
.find(|ent| ent.bdev_name == bdev[0].name)
})
.map_err(|err| {
Status::new(
Code::NotFound,
format!("Failed to find nbd disk: {}", err),
)
})
});
Box::new(f)
}
impl NbdDevInfo {
/// This will return the next available nbd device
pub fn new() -> Option<Self> {
let instance = ARRAY.lock().unwrap().pop()?;
trace!("Will use nbd slot {}", instance);
NbdDevInfo::create(instance)
}
fn create(instance: u32) -> Option<Self> {
let mut path =
PathBuf::from(&format!("/sys/class/block/nbd{}", instance));
path.push("pid");
if path.exists() {
trace!(
"Dropping nbd instance: {} as it appears to be in use",
instance
);
return None;
}
path.pop();
let e = path
.strip_prefix("/sys/class/block")
.unwrap()
.to_str()
.unwrap()
.split_at(3);
let instance = e.1.parse().unwrap();
let dev_t: String = sysfs::parse_value(&path, "dev").unwrap();
let nums: Vec<u64> =
dev_t.split(':').map(|x| x.parse().unwrap()).collect();
// Documentation/admin-guide/devices.txt
if nums[0] != 43 {
warn!("Invalid major number of nbd dev {}", path.display());
}
let nbd = NbdDevInfo {
instance,
major: nums[0],
minor: nums[1],
};
assert_eq!(nbd.instance, instance);
Some(nbd)
}
pub fn put_back(&self) {
ARRAY.lock().unwrap().push(self.instance);
trace!("instance {} added back to the free list", self.instance);
}
pub fn | () -> usize {
glob("/sys/class/block/nbd*").unwrap().count()
}
}
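// Annotation (illustrative, not part of the original source): ARRAY acts as
// a free list of nbd slots, making allocation and release symmetric:
//
//     let dev = NbdDevInfo::new().expect("all nbd slots in use"); // pops a slot
//     // ... start_nbd_disk / stop_nbd_disk over json-rpc ...
//     dev.put_back(); // pushes the slot back for reuse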
impl From<String> for NbdDevInfo {
fn from(e: String) -> Self {
let instance: u32 = e.replace("/dev/nbd", "").parse().unwrap();
NbdDevInfo::create(instance).unwrap()
}
}
| num_devices | identifier_name |
nbd.rs | //! Utility functions for working with nbd devices
use rpc::mayastor::*;
use crate::{
csi::{NodeStageVolumeRequest, NodeStageVolumeResponse},
device,
format::probed_format,
mount::{match_mount, mount_fs, Fs},
};
use enclose::enclose;
use futures::{
future::{err, ok, Either},
Future,
};
use glob::glob;
use jsonrpc;
use rpc::jsonrpc as jsondata;
use std::fmt;
use sysfs;
use tower_grpc::{Code, Response, Status};
use std::{path::PathBuf, sync::Mutex};
lazy_static! {
static ref ARRAY: Mutex<Vec<u32>> =
Mutex::new(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15]);
}
#[derive(Clone, Copy)]
pub struct NbdDevInfo {
instance: u32,
major: u64,
minor: u64,
}
impl fmt::Display for NbdDevInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "/dev/nbd{}", self.instance)
}
}
impl fmt::Debug for NbdDevInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "nbd{} ({}:{})", self.instance, self.major, self.minor)
}
}
pub fn nbd_stage_volume(
socket: String,
msg: &NodeStageVolumeRequest,
filesystem: Fs,
mnt_opts: Vec<String>,
) -> Box<
dyn Future<Item = Response<NodeStageVolumeResponse>, Error = Status> + Send,
> {
//let msg = request.into_inner();
let uuid = msg.volume_id.clone();
let target_path = msg.staging_target_path.to_string();
let mount_fail = msg.publish_context.contains_key("mount");
let f = get_nbd_instance(&socket.clone(), &uuid)
.and_then(move |nbd_disk| {
if nbd_disk.is_none() {
// if we don't have an nbd device with a corresponding bdev,
// it's an error, as one should exist
error!("No device instance found for {}, likely a bug", &uuid);
return err(Status::new(
Code::Internal,
"no such bdev exists".to_string(),
));
}
let nbd_disk = nbd_disk.unwrap();
if let Some(mount) = match_mount(
Some(&nbd_disk.nbd_device),
Some(&target_path),
false,
) {
if mount.source == nbd_disk.nbd_device
&& mount.dest == target_path
{
// the device is already mounted, we should return OK
return ok((true, nbd_disk, target_path, uuid));
} else {
// a different device is already on that path, return an error
return err(Status::new(
Code::AlreadyExists,
"Some different BDEV on that path already".to_string(),
));
}
}
ok((false, nbd_disk, target_path, uuid))
})
.and_then(move |mounted| {
if !mounted.0 {
Either::A(
probed_format(&mounted.1.nbd_device, &filesystem.name)
.then(move |format_result| {
let mnt_result =
if mount_fail || format_result.is_err() {
if !mount_fail {
Err(format_result.unwrap_err())
} else {
debug!("Simulating mount failure");
Err("simulated".to_owned())
}
} else {
mount_fs(
&mounted.1.nbd_device,
&mounted.2,
false,
&filesystem.name,
&mnt_opts,
)
};
if let Err(reason) = mnt_result {
Box::new(err(Status::new(
Code::Internal,
reason,
)))
} else {
info!(
"staged {} on {}",
&mounted.3, &mounted.2
);
Box::new(ok(Response::new(
NodeStageVolumeResponse {},
)))
}
}),
)
} else {
Either::B(Box::new(ok(Response::new(
NodeStageVolumeResponse {},
))))
}
});
Box::new(f)
}
pub fn create_blkdev(
socket: String,
msg: &CreateBlkdevRequest,
) -> Box<dyn Future<Item = Response<CreateBlkdevReply>, Error = Status> + Send>
{
trace!("{:?}", msg);
debug!("Creating NBD device for {}...", msg.uuid);
let nbd_dev_info = NbdDevInfo::new();
let uuid = msg.uuid.clone();
// whichever instance we were assigned has now been claimed and removed
// from the free device list
if nbd_dev_info.is_none() {
return Box::new(err(Status::new(
Code::Internal,
String::from("EAGAIN"),
)));
}
let nbd_dev_info = nbd_dev_info.unwrap();
let f = get_nbd_instance(&socket, &uuid)
// TODO: Avoid this step in future chain by returning eexist from
// start-nbd-disk json-rpc method.
.and_then(enclose! { (uuid) move |bdev| {
if let Some(bdev) = bdev {
return err(Status::new(
Code::AlreadyExists,
format!(
"Bbdev {} already published at {}",
uuid,
bdev.nbd_device
),
));
}
ok(())
}})
.map_err(|e| jsonrpc::error::Error::GenericError(e.to_string()))
.and_then(enclose! { (uuid) move |_| {
jsonrpc::call::<jsondata::StartNbdDiskArgs, String>(
&socket,
"start_nbd_disk",
Some(jsondata::StartNbdDiskArgs {
bdev_name: uuid,
nbd_device: format!("{}", nbd_dev_info),
}),
)
}})
.and_then(move |nbd_device| {
trace!("NBD device {} created", &nbd_device);
device::await_size(&nbd_device).map_err(jsonrpc::error::Error::from)
})
.and_then(move |size| {
info!("Device {} reported size: {}", nbd_dev_info, size);
let reply = CreateBlkdevReply {
blk_dev: format!("{}", nbd_dev_info),
};
ok(Response::new(reply))
})
.map_err(move |err| {
error!(
"Putting back nbd device {} due to error: {}",
nbd_dev_info,
err.to_string()
);
nbd_dev_info.put_back();
err.into_status()
});
Box::new(f)
}
pub fn destroy_blkdev(
socket: String,
msg: &DestroyBlkdevRequest,
) -> Box<dyn Future<Item = Response<Null>, Error = Status> + Send> {
trace!("{:?}", msg);
let uuid = msg.uuid.clone();
debug!("Deleting NBD device for {}...", uuid);
let f = get_nbd_instance(&socket, &uuid)
// TODO: Avoid this step by returning enoent from stop-nbd-disk
// json-rpc method.
.and_then(move |nbd_disk| {
if nbd_disk.is_none() {
trace!("bdev {} not found", uuid);
return err(Status::new(
Code::Internal,
format!("no such bdev {}", uuid),
));
}
let nbd_disk = nbd_disk.unwrap();
ok(nbd_disk)
})
.and_then(move |nbd_disk| {
trace!("Stopping NBD device {}", nbd_disk.nbd_device);
jsonrpc::call::<jsondata::StopNbdDiskArgs, bool>(
&socket,
"stop_nbd_disk",
Some(jsondata::StopNbdDiskArgs {
nbd_device: nbd_disk.nbd_device.clone(),
}),
)
.map_err(|err| err.into_status())
.and_then(|done| {
if done | else {
let msg = format!(
"Failed to stop nbd device {} for {}",
nbd_disk.nbd_device, nbd_disk.bdev_name
);
error!("{}", msg);
Box::new(err(Status::new(Code::Internal, msg)))
}
})
});
Box::new(f)
}
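// Annotation (illustrative, not part of the original source; the request
// structs are assumed to have only the `uuid` field used above): the two
// handlers are symmetric, so a caller owning both sides would roughly do:
//
//     let created = create_blkdev(sock.clone(), &CreateBlkdevRequest { uuid: uuid.clone() });
//     // ... hand /dev/nbdN to the consumer ...
//     let destroyed = destroy_blkdev(sock, &DestroyBlkdevRequest { uuid });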
pub fn get_nbd_instance(
sock: &str,
bdev_name: &str,
) -> Box<dyn Future<Item = Option<jsondata::NbdDisk>, Error = Status> + Send> {
let bdev_name = bdev_name.to_string();
let socket = sock.to_string();
let f = jsonrpc::call::<jsondata::GetBdevsArgs, Vec<jsondata::Bdev>>(
&socket,
"get_bdevs",
Some(jsondata::GetBdevsArgs {
name: bdev_name.clone(),
}),
)
.map_err(|e| {
Status::new(Code::NotFound, format!("Failed to list bdevs: {}", e))
})
.and_then(move |bdev| {
jsonrpc::call::<(), Vec<jsondata::NbdDisk>>(
&socket,
"get_nbd_disks",
None,
)
.map(move |nbd_disks| {
nbd_disks
.into_iter()
.find(|ent| ent.bdev_name == bdev[0].name)
})
.map_err(|err| {
Status::new(
Code::NotFound,
format!("Failed to find nbd disk: {}", err),
)
})
});
Box::new(f)
}
impl NbdDevInfo {
/// This will return the next available nbd device
pub fn new() -> Option<Self> {
let instance = ARRAY.lock().unwrap().pop()?;
trace!("Will use nbd slot {}", instance);
NbdDevInfo::create(instance)
}
fn create(instance: u32) -> Option<Self> {
let mut path =
PathBuf::from(&format!("/sys/class/block/nbd{}", instance));
path.push("pid");
if path.exists() {
trace!(
"Dropping nbd instance: {} as it appears to be in use",
instance
);
return None;
}
path.pop();
let e = path
.strip_prefix("/sys/class/block")
.unwrap()
.to_str()
.unwrap()
.split_at(3);
let instance = e.1.parse().unwrap();
let dev_t: String = sysfs::parse_value(&path, "dev").unwrap();
let nums: Vec<u64> =
dev_t.split(':').map(|x| x.parse().unwrap()).collect();
// Documentation/admin-guide/devices.txt
if nums[0] != 43 {
warn!("Invalid major number of nbd dev {}", path.display());
}
let nbd = NbdDevInfo {
instance,
major: nums[0],
minor: nums[1],
};
assert_eq!(nbd.instance, instance);
Some(nbd)
}
pub fn put_back(&self) {
ARRAY.lock().unwrap().push(self.instance);
trace!("instance {} added back to the free list", self.instance);
}
pub fn num_devices() -> usize {
glob("/sys/class/block/nbd*").unwrap().count()
}
}
impl From<String> for NbdDevInfo {
fn from(e: String) -> Self {
let instance: u32 = e.replace("/dev/nbd", "").parse().unwrap();
NbdDevInfo::create(instance).unwrap()
}
}
| {
info!(
"Stopped NBD device {} with bdev {}",
nbd_disk.nbd_device, nbd_disk.bdev_name
);
NbdDevInfo::from(nbd_disk.nbd_device).put_back();
Box::new(ok(Response::new(Null {})))
} | conditional_block |
nbd.rs | //! Utility functions for working with nbd devices
use rpc::mayastor::*;
use crate::{
csi::{NodeStageVolumeRequest, NodeStageVolumeResponse},
device,
format::probed_format,
mount::{match_mount, mount_fs, Fs},
};
use enclose::enclose;
use futures::{
future::{err, ok, Either},
Future,
};
use glob::glob;
use jsonrpc;
use rpc::jsonrpc as jsondata;
use std::fmt;
use sysfs;
use tower_grpc::{Code, Response, Status};
use std::{path::PathBuf, sync::Mutex};
lazy_static! {
static ref ARRAY: Mutex<Vec<u32>> =
Mutex::new(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15]);
}
#[derive(Clone, Copy)]
pub struct NbdDevInfo {
instance: u32,
major: u64,
minor: u64,
}
impl fmt::Display for NbdDevInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl fmt::Debug for NbdDevInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "nbd{} ({}:{})", self.instance, self.major, self.minor)
}
}
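// Annotation (illustrative, not part of the original source; the field
// values are made up): the Display and Debug impls above give the same
// device two renderings:
//
//     let dev = NbdDevInfo { instance: 3, major: 43, minor: 3 };
//     assert_eq!(format!("{}", dev), "/dev/nbd3");
//     assert_eq!(format!("{:?}", dev), "nbd3 (43:3)");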
pub fn nbd_stage_volume(
socket: String,
msg: &NodeStageVolumeRequest,
filesystem: Fs,
mnt_opts: Vec<String>,
) -> Box<
dyn Future<Item = Response<NodeStageVolumeResponse>, Error = Status> + Send,
> {
//let msg = request.into_inner();
let uuid = msg.volume_id.clone();
let target_path = msg.staging_target_path.to_string();
let mount_fail = msg.publish_context.contains_key("mount");
let f = get_nbd_instance(&socket.clone(), &uuid)
.and_then(move |nbd_disk| {
if nbd_disk.is_none() {
                // if we don't have an nbd device with a corresponding bdev,
                // that is an error: one should exist
error!("No device instance found for {}, likely a bug", &uuid);
return err(Status::new(
Code::Internal,
"no such bdev exists".to_string(),
));
}
let nbd_disk = nbd_disk.unwrap();
if let Some(mount) = match_mount(
Some(&nbd_disk.nbd_device),
Some(&target_path),
false,
) {
if mount.source == nbd_disk.nbd_device
&& mount.dest == target_path
{
                    // the device is already mounted; return OK
return ok((true, nbd_disk, target_path, uuid));
} else {
                    // something else is already mounted there; return an error
return err(Status::new(
Code::AlreadyExists,
"Some different BDEV on that path already".to_string(),
));
}
}
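            // tuple handed to the next combinator:
            // (already_mounted, nbd_disk, target_path, uuid)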
ok((false, nbd_disk, target_path, uuid))
})
.and_then(move |mounted| {
            if !mounted.0 {
Either::A(
probed_format(&mounted.1.nbd_device, &filesystem.name)
.then(move |format_result| {
let mnt_result =
if mount_fail || format_result.is_err() {
                            if !mount_fail {
Err(format_result.unwrap_err())
} else {
debug!("Simulating mount failure");
Err("simulated".to_owned())
}
} else {
mount_fs(
&mounted.1.nbd_device,
&mounted.2,
false,
&filesystem.name,
&mnt_opts,
)
};
if let Err(reason) = mnt_result {
Box::new(err(Status::new(
Code::Internal,
reason,
)))
} else {
info!(
"staged {} on {}",
&mounted.3, &mounted.2
);
Box::new(ok(Response::new(
NodeStageVolumeResponse {},
)))
}
}),
)
} else {
Either::B(Box::new(ok(Response::new(
NodeStageVolumeResponse {},
))))
}
});
Box::new(f)
}
pub fn create_blkdev(
socket: String,
msg: &CreateBlkdevRequest,
) -> Box<dyn Future<Item = Response<CreateBlkdevReply>, Error = Status> + Send>
{
trace!("{:?}", msg);
debug!("Creating NBD device for {}...", msg.uuid);
let nbd_dev_info = NbdDevInfo::new();
let uuid = msg.uuid.clone();
    // whichever instance we were assigned has already been removed from the
    // free list; if none was available, fail with EAGAIN
if nbd_dev_info.is_none() {
return Box::new(err(Status::new(
Code::Internal,
String::from("EAGAIN"),
)));
}
let nbd_dev_info = nbd_dev_info.unwrap();
let f = get_nbd_instance(&socket, &uuid)
// TODO: Avoid this step in future chain by returning eexist from
// start-nbd-disk json-rpc method.
.and_then(enclose! { (uuid) move |bdev| {
if let Some(bdev) = bdev {
return err(Status::new(
Code::AlreadyExists,
format!(
"Bbdev {} already published at {}",
uuid,
bdev.nbd_device
),
));
}
ok(())
}})
.map_err(|e| jsonrpc::error::Error::GenericError(e.to_string()))
.and_then(enclose! { (uuid) move |_| {
jsonrpc::call::<jsondata::StartNbdDiskArgs, String>(
&socket,
"start_nbd_disk",
Some(jsondata::StartNbdDiskArgs {
bdev_name: uuid,
nbd_device: format!("{}", nbd_dev_info),
}),
)
}})
.and_then(move |nbd_device| {
trace!("NBD device {} created", &nbd_device);
device::await_size(&nbd_device).map_err(jsonrpc::error::Error::from)
})
.and_then(move |size| {
info!("Device {} reported size: {}", nbd_dev_info, size);
let reply = CreateBlkdevReply {
blk_dev: format!("{}", nbd_dev_info),
};
ok(Response::new(reply))
})
.map_err(move |err| {
error!(
"Putting back nbd device {} due to error: {}",
nbd_dev_info,
err.to_string()
);
nbd_dev_info.put_back();
err.into_status()
});
Box::new(f)
}
pub fn destroy_blkdev(
socket: String,
msg: &DestroyBlkdevRequest,
) -> Box<dyn Future<Item = Response<Null>, Error = Status> + Send> {
trace!("{:?}", msg);
let uuid = msg.uuid.clone();
debug!("Deleting NBD device for {}...", uuid);
let f = get_nbd_instance(&socket, &uuid)
// TODO: Avoid this step by returning enoent from stop-nbd-disk
// json-rpc method.
.and_then(move |nbd_disk| {
if nbd_disk.is_none() {
trace!("bdev {} not found", uuid);
return err(Status::new(
Code::Internal,
format!("no such bdev {}", uuid),
));
}
let nbd_disk = nbd_disk.unwrap();
ok(nbd_disk)
})
.and_then(move |nbd_disk| {
trace!("Stopping NBD device {}", nbd_disk.nbd_device);
jsonrpc::call::<jsondata::StopNbdDiskArgs, bool>(
&socket,
"stop_nbd_disk",
Some(jsondata::StopNbdDiskArgs {
nbd_device: nbd_disk.nbd_device.clone(),
}),
)
.map_err(|err| err.into_status())
.and_then(|done| {
if done {
info!(
"Stopped NBD device {} with bdev {}",
nbd_disk.nbd_device, nbd_disk.bdev_name
);
NbdDevInfo::from(nbd_disk.nbd_device).put_back();
Box::new(ok(Response::new(Null {})))
} else {
let msg = format!(
"Failed to stop nbd device {} for {}",
nbd_disk.nbd_device, nbd_disk.bdev_name
);
error!("{}", msg);
Box::new(err(Status::new(Code::Internal, msg)))
}
})
});
Box::new(f)
}
pub fn get_nbd_instance(
sock: &str,
bdev_name: &str,
) -> Box<dyn Future<Item = Option<jsondata::NbdDisk>, Error = Status> + Send> {
let bdev_name = bdev_name.to_string();
let socket = sock.to_string();
let f = jsonrpc::call::<jsondata::GetBdevsArgs, Vec<jsondata::Bdev>>(
&socket,
"get_bdevs",
Some(jsondata::GetBdevsArgs {
name: bdev_name.clone(),
}),
)
.map_err(|e| {
Status::new(Code::NotFound, format!("Failed to list bdevs: {}", e))
})
.and_then(move |bdev| {
jsonrpc::call::<(), Vec<jsondata::NbdDisk>>(
&socket,
"get_nbd_disks",
None,
)
.map(move |nbd_disks| {
nbd_disks
.into_iter()
.find(|ent| ent.bdev_name == bdev[0].name)
})
.map_err(|err| {
Status::new(
Code::NotFound,
format!("Failed to find nbd disk: {}", err),
)
})
});
Box::new(f)
}
impl NbdDevInfo {
/// This will return the next available nbd device
pub fn new() -> Option<Self> {
let instance = ARRAY.lock().unwrap().pop()?;
trace!("Will use nbd slot {}", instance);
NbdDevInfo::create(instance)
}
fn create(instance: u32) -> Option<Self> {
let mut path =
PathBuf::from(&format!("/sys/class/block/nbd{}", instance));
path.push("pid");
if path.exists() {
trace!(
"Dropping nbd instance: {} as it appears to be in use",
instance
);
return None;
}
path.pop();
let e = path
.strip_prefix("/sys/class/block")
.unwrap()
.to_str()
.unwrap()
.split_at(3);
let instance = e.1.parse().unwrap();
let dev_t: String = sysfs::parse_value(&path, "dev").unwrap();
let nums: Vec<u64> =
dev_t.split(':').map(|x| x.parse().unwrap()).collect();
// Documentation/admin-guide/devices.txt
        if nums[0] != 43 {
warn!("Invalid major number of nbd dev {}", path.display());
}
let nbd = NbdDevInfo {
instance,
major: nums[0],
minor: nums[1],
};
assert_eq!(nbd.instance, instance);
Some(nbd)
}
pub fn put_back(&self) {
ARRAY.lock().unwrap().push(self.instance);
trace!("instance {} added back to the free list", self.instance);
}
pub fn num_devices() -> usize {
glob("/sys/class/block/nbd*").unwrap().count()
}
}
impl From<String> for NbdDevInfo {
fn from(e: String) -> Self {
let instance: u32 = e.replace("/dev/nbd", "").parse().unwrap();
NbdDevInfo::create(instance).unwrap()
}
}
| {
write!(f, "/dev/nbd{}", self.instance)
} | identifier_body |
nbd.rs | //! Utility functions for working with nbd devices
use rpc::mayastor::*;
use crate::{
csi::{NodeStageVolumeRequest, NodeStageVolumeResponse},
device,
format::probed_format,
mount::{match_mount, mount_fs, Fs},
};
use enclose::enclose;
use futures::{
future::{err, ok, Either},
Future,
};
use glob::glob;
use jsonrpc;
use rpc::jsonrpc as jsondata;
use std::fmt;
use sysfs;
use tower_grpc::{Code, Response, Status};
use std::{path::PathBuf, sync::Mutex};
lazy_static! {
static ref ARRAY: Mutex<Vec<u32>> =
Mutex::new(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15]);
}
#[derive(Clone, Copy)]
pub struct NbdDevInfo {
instance: u32,
major: u64,
minor: u64,
}
impl fmt::Display for NbdDevInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "/dev/nbd{}", self.instance)
}
}
impl fmt::Debug for NbdDevInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "nbd{} ({}:{})", self.instance, self.major, self.minor)
}
}
pub fn nbd_stage_volume(
socket: String,
msg: &NodeStageVolumeRequest,
filesystem: Fs,
mnt_opts: Vec<String>,
) -> Box<
dyn Future<Item = Response<NodeStageVolumeResponse>, Error = Status> + Send,
> {
//let msg = request.into_inner();
let uuid = msg.volume_id.clone();
let target_path = msg.staging_target_path.to_string();
let mount_fail = msg.publish_context.contains_key("mount");
let f = get_nbd_instance(&socket.clone(), &uuid)
.and_then(move |nbd_disk| {
if nbd_disk.is_none() {
                // if we don't have an nbd device with a corresponding bdev,
                // that is an error: one should exist
error!("No device instance found for {}, likely a bug", &uuid);
return err(Status::new(
Code::Internal,
"no such bdev exists".to_string(),
));
}
let nbd_disk = nbd_disk.unwrap();
if let Some(mount) = match_mount(
Some(&nbd_disk.nbd_device),
Some(&target_path),
false,
) {
if mount.source == nbd_disk.nbd_device
&& mount.dest == target_path
{
                    // the device is already mounted; return OK
return ok((true, nbd_disk, target_path, uuid));
} else {
                    // something else is already mounted there; return an error
return err(Status::new(
Code::AlreadyExists,
"Some different BDEV on that path already".to_string(),
));
}
}
ok((false, nbd_disk, target_path, uuid))
})
.and_then(move |mounted| {
            if !mounted.0 {
Either::A(
probed_format(&mounted.1.nbd_device, &filesystem.name)
.then(move |format_result| {
let mnt_result =
if mount_fail || format_result.is_err() {
                            if !mount_fail {
Err(format_result.unwrap_err())
} else {
debug!("Simulating mount failure");
Err("simulated".to_owned())
}
} else {
mount_fs(
&mounted.1.nbd_device,
&mounted.2,
false,
&filesystem.name,
&mnt_opts,
)
};
if let Err(reason) = mnt_result {
Box::new(err(Status::new(
Code::Internal,
reason,
)))
} else {
info!(
"staged {} on {}",
&mounted.3, &mounted.2
);
Box::new(ok(Response::new(
NodeStageVolumeResponse {},
)))
}
}),
)
} else {
Either::B(Box::new(ok(Response::new(
NodeStageVolumeResponse {},
))))
}
});
Box::new(f)
}
pub fn create_blkdev(
socket: String,
msg: &CreateBlkdevRequest,
) -> Box<dyn Future<Item = Response<CreateBlkdevReply>, Error = Status> + Send>
{
trace!("{:?}", msg);
debug!("Creating NBD device for {}...", msg.uuid);
let nbd_dev_info = NbdDevInfo::new();
let uuid = msg.uuid.clone();
    // whichever instance we were assigned has already been removed from the
    // free list; if none was available, fail with EAGAIN
if nbd_dev_info.is_none() {
return Box::new(err(Status::new(
Code::Internal,
String::from("EAGAIN"),
)));
}
let nbd_dev_info = nbd_dev_info.unwrap();
let f = get_nbd_instance(&socket, &uuid)
// TODO: Avoid this step in future chain by returning eexist from
// start-nbd-disk json-rpc method.
.and_then(enclose! { (uuid) move |bdev| {
if let Some(bdev) = bdev {
return err(Status::new(
Code::AlreadyExists,
format!(
"Bbdev {} already published at {}",
uuid,
bdev.nbd_device
),
));
}
ok(())
}})
.map_err(|e| jsonrpc::error::Error::GenericError(e.to_string()))
.and_then(enclose! { (uuid) move |_| {
jsonrpc::call::<jsondata::StartNbdDiskArgs, String>(
&socket,
"start_nbd_disk",
Some(jsondata::StartNbdDiskArgs {
bdev_name: uuid,
nbd_device: format!("{}", nbd_dev_info),
}),
)
}})
.and_then(move |nbd_device| {
trace!("NBD device {} created", &nbd_device);
device::await_size(&nbd_device).map_err(jsonrpc::error::Error::from)
})
.and_then(move |size| {
info!("Device {} reported size: {}", nbd_dev_info, size);
let reply = CreateBlkdevReply {
blk_dev: format!("{}", nbd_dev_info),
};
ok(Response::new(reply))
})
.map_err(move |err| {
error!(
"Putting back nbd device {} due to error: {}",
nbd_dev_info,
err.to_string()
);
nbd_dev_info.put_back();
err.into_status()
});
Box::new(f)
}
pub fn destroy_blkdev(
socket: String,
msg: &DestroyBlkdevRequest,
) -> Box<dyn Future<Item = Response<Null>, Error = Status> + Send> {
trace!("{:?}", msg);
let uuid = msg.uuid.clone();
debug!("Deleting NBD device for {}...", uuid);
let f = get_nbd_instance(&socket, &uuid)
// TODO: Avoid this step by returning enoent from stop-nbd-disk
// json-rpc method.
.and_then(move |nbd_disk| {
if nbd_disk.is_none() {
trace!("bdev {} not found", uuid);
return err(Status::new(
Code::Internal,
format!("no such bdev {}", uuid),
));
}
let nbd_disk = nbd_disk.unwrap();
ok(nbd_disk)
})
.and_then(move |nbd_disk| {
trace!("Stopping NBD device {}", nbd_disk.nbd_device);
jsonrpc::call::<jsondata::StopNbdDiskArgs, bool>(
&socket,
"stop_nbd_disk",
Some(jsondata::StopNbdDiskArgs {
nbd_device: nbd_disk.nbd_device.clone(),
}),
)
.map_err(|err| err.into_status())
.and_then(|done| {
if done {
info!(
"Stopped NBD device {} with bdev {}",
nbd_disk.nbd_device, nbd_disk.bdev_name
);
NbdDevInfo::from(nbd_disk.nbd_device).put_back();
Box::new(ok(Response::new(Null {})))
} else {
let msg = format!(
"Failed to stop nbd device {} for {}",
nbd_disk.nbd_device, nbd_disk.bdev_name
);
error!("{}", msg);
Box::new(err(Status::new(Code::Internal, msg)))
}
})
});
Box::new(f)
}
pub fn get_nbd_instance(
sock: &str,
bdev_name: &str,
) -> Box<dyn Future<Item = Option<jsondata::NbdDisk>, Error = Status> + Send> {
let bdev_name = bdev_name.to_string();
let socket = sock.to_string();
let f = jsonrpc::call::<jsondata::GetBdevsArgs, Vec<jsondata::Bdev>>(
&socket,
"get_bdevs",
Some(jsondata::GetBdevsArgs {
name: bdev_name.clone(),
}),
)
.map_err(|e| {
Status::new(Code::NotFound, format!("Failed to list bdevs: {}", e))
})
.and_then(move |bdev| {
jsonrpc::call::<(), Vec<jsondata::NbdDisk>>(
&socket,
"get_nbd_disks",
None,
)
.map(move |nbd_disks| {
nbd_disks
.into_iter()
.find(|ent| ent.bdev_name == bdev[0].name)
})
.map_err(|err| {
Status::new(
Code::NotFound,
format!("Failed to find nbd disk: {}", err),
)
})
});
Box::new(f)
}
impl NbdDevInfo {
/// This will return the next available nbd device
pub fn new() -> Option<Self> {
let instance = ARRAY.lock().unwrap().pop()?;
trace!("Will use nbd slot {}", instance);
NbdDevInfo::create(instance)
}
fn create(instance: u32) -> Option<Self> {
let mut path =
PathBuf::from(&format!("/sys/class/block/nbd{}", instance));
path.push("pid");
if path.exists() {
trace!(
"Dropping nbd instance: {} as it appears to be in use",
instance
);
return None;
}
path.pop();
let e = path
.strip_prefix("/sys/class/block")
.unwrap()
.to_str()
.unwrap()
.split_at(3);
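        // e.g. "nbd5".split_at(3) == ("nbd", "5"), so e.1 parses to the
        // instance number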
let instance = e.1.parse().unwrap();
let dev_t: String = sysfs::parse_value(&path, "dev").unwrap();
let nums: Vec<u64> =
dev_t.split(':').map(|x| x.parse().unwrap()).collect();
// Documentation/admin-guide/devices.txt
        if nums[0] != 43 {
warn!("Invalid major number of nbd dev {}", path.display());
}
let nbd = NbdDevInfo {
instance,
major: nums[0],
minor: nums[1],
};
assert_eq!(nbd.instance, instance);
Some(nbd)
}
pub fn put_back(&self) {
ARRAY.lock().unwrap().push(self.instance);
trace!("instance {} added back to the free list", self.instance);
}
pub fn num_devices() -> usize {
glob("/sys/class/block/nbd*").unwrap().count()
} | }
impl From<String> for NbdDevInfo {
fn from(e: String) -> Self {
let instance: u32 = e.replace("/dev/nbd", "").parse().unwrap();
NbdDevInfo::create(instance).unwrap()
}
} | random_line_split |
|
main.rs | use crate::argon2id13::Salt;
use actix_web::{get, post, web, HttpRequest, HttpResponse};
use aes_gcm::aead::{Aead, NewAead};
use aes_gcm::{Aes256Gcm, Key, Nonce};
use futures::StreamExt;
use google_authenticator::{ErrorCorrectionLevel, GoogleAuthenticator};
use hmac::{Hmac, Mac, NewMac};
use lazy_static::lazy_static;
use rand_core::{OsRng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json;
use sha2::Sha256;
use sodiumoxide::crypto::pwhash::argon2id13;
use std::collections::HashMap;
use std::convert::TryInto;
use std::env;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::str;
use uuid::Uuid;
static mut USER_TOKEN: Vec<(String, String)> = Vec::new();
static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new();
#[derive(Debug)]
struct User {
username: String,
salt: Salt,
password_kdf: [u8; 32],
secret: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct UserChallenge {
username: String,
challenge: u64,
salt: Salt,
}
#[derive(Serialize, Deserialize, Debug)]
struct Metadata {
file_name: String,
username: Vec<String>,
nonce: [u8; 12],
key: Vec<u8>,
}
#[derive(Deserialize, Debug)]
struct ComputedChallenge {
challenge: [u8; 32],
}
lazy_static! {
static ref USER_DB: HashMap<&'static str, User> = {
let mut map = HashMap::new();
        // Google Authenticator setup
        let auth = GoogleAuthenticator::new();
        // This part is normally done on the client, but it is deliberately
        // kept on the server to simplify the architecture
let salt = argon2id13::gen_salt();
let mut key = [0u8; 32];
argon2id13::derive_key(
&mut key,
"P@ssw0rd".as_bytes(),
&salt,
argon2id13::OPSLIMIT_SENSITIVE,
argon2id13::MEMLIMIT_SENSITIVE,
)
.unwrap();
map.insert(
"jerome",
User {
username: "jerome".to_string(),
salt: salt,
password_kdf: key,
secret: auth.create_secret(32),
},
);
map
};
}
#[get("/server/{user_id}")]
async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse {
    // look the user up in the DB; if present, send back a challenge to solve
match USER_DB.get::<str>(&user_id.to_string()) {
Some(username) => {
let user_challenge = UserChallenge {
username: user_id.to_string(),
salt: username.salt,
challenge: OsRng.next_u64(),
};
unsafe {
USER_CHALLENGE.push((user_id, user_challenge.challenge));
}
HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap())
}
None => HttpResponse::NotFound().finish(),
}
}
#[post("/server/{user_id}")] // <- define path parameters
async fn username_post(
web::Path(user_id): web::Path<String>,
mut body: web::Payload,
) -> HttpResponse {
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // read the body to get the submitted challenge
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
    // deserialize the submitted challenge
let computed_challenge: ComputedChallenge =
serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap();
    // fetch the challenge that was sent to this client
let challenge_to_compute: u64;
unsafe {
let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap();
challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1;
USER_CHALLENGE.remove(index);
}
    // compute the MAC from the KDF output stored in the DB
type HmacSha256 = Hmac<Sha256>;
let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error");
mac.update(&challenge_to_compute.to_be_bytes());
let challenge: [u8; 32] = mac
.finalize()
.into_bytes()
.as_slice()
.try_into()
.expect("Wrong length");
    // check that both values match
if challenge == computed_challenge.challenge {
return HttpResponse::Ok().finish();
}
HttpResponse::NonAuthoritativeInformation().finish()
}
#[get("/2fa/{user_id}")]
async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse {
    // Google Authenticator setup
    let auth = GoogleAuthenticator::new();
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // build the QR code
let url = auth.qr_code_url(
&user.secret,
"qr_code",
"name",
200,
200,
ErrorCorrectionLevel::High,
);
HttpResponse::Ok().body(url)
}
#[post("/2fa/{user_id}")]
async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse {
    // Google Authenticator setup
    let auth = GoogleAuthenticator::new();
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish(); |
    // fetch the code from the request header
    let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap();
    if !auth.verify_code(&user.secret, &input_code, 0, 0) {
        println!("Wrong code.");
return HttpResponse::Unauthorized().finish();
}
    // if OK, send the user a token for subsequent exchanges
let user_token: String = Uuid::new_v4().hyphenated().to_string();
unsafe {
USER_TOKEN.push((user_id, user_token.clone()));
}
HttpResponse::Ok().header("Token", user_token).finish()
}
#[post("/upload")]
async fn upload(mut body: web::Payload, req: HttpRequest) -> HttpResponse {
    // read and verify the token
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
    // read the body
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
let res: Vec<u8> = bytes.to_vec();
    // write the data to a file
let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap();
file.write_all(&res).unwrap();
HttpResponse::Ok().finish()
}
#[get("/download")]
async fn download(req: HttpRequest) -> HttpResponse {
    // read and verify the token
    let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap();
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let work_file = env::current_dir().unwrap().join(&filename);
    // open and read the file
let mut file = match File::open(work_file) {
Ok(result) => result,
Err(_) => {
return HttpResponse::NoContent().finish();
}
};
let mut ciphertext: Vec<u8> = Vec::new();
file.read_to_end(&mut ciphertext).unwrap();
HttpResponse::Ok().body(ciphertext)
}
#[get("/list")]
async fn get_list(req: HttpRequest) -> HttpResponse {
    // read and verify the token
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap();
    // prepare the AES-GCM key and the nonce
let key_aes = Key::from_slice(b"an example very very secret key.");
let aead = Aes256Gcm::new(key_aes);
let nonce = Nonce::from_slice(b"unique nonce");
let mut file_list = String::new();
    // read the directory contents
let paths = fs::read_dir("./").unwrap();
for path in paths {
let file = path.unwrap().path().into_os_string().into_string().unwrap();
        // for every metadata file
if file.contains(".metadata") {
let mut current_file = File::open(&file).expect("Unable to open the file");
let mut contents = String::new();
current_file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let meta: Metadata = serde_json::from_str(&contents).unwrap();
if meta.username.contains(&user_name.to_string()) {
file_list.push_str(&file.split(".metadata").collect::<String>());
file_list.push('\n');
}
}
}
let ciphertext = aead
.encrypt(nonce, file_list.as_bytes())
.expect("encryption failure!");
HttpResponse::Ok().body(ciphertext)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
println!("Le serveur est prêt à recevoir des requêtes");
use actix_web::{App, HttpServer};
HttpServer::new(|| {
App::new()
.service(username)
.service(username_post)
.service(get_code)
.service(validate_code)
.service(upload)
.service(download)
.service(get_list)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
// two-factor verification
pub fn verify_2fa(user_secret: &str, token: String) -> bool {
let auth = GoogleAuthenticator::new();
if!auth.verify_code(user_secret, &token, 0, 0) {
println!("Mauvais code.");
return false;
}
true
}
// check that the token exists and belongs to the right user
fn check_token(req: &HttpRequest) -> bool {
let token: &str = req.headers().get("Token").unwrap().to_str().unwrap();
let user: &str = req.headers().get("Username").unwrap().to_str().unwrap();
unsafe {
for pair in USER_TOKEN.iter() {
if pair.0 == user && pair.1 == token {
return true;
}
}
}
return false;
} | }
}; | random_line_split |
main.rs | use crate::argon2id13::Salt;
use actix_web::{get, post, web, HttpRequest, HttpResponse};
use aes_gcm::aead::{Aead, NewAead};
use aes_gcm::{Aes256Gcm, Key, Nonce};
use futures::StreamExt;
use google_authenticator::{ErrorCorrectionLevel, GoogleAuthenticator};
use hmac::{Hmac, Mac, NewMac};
use lazy_static::lazy_static;
use rand_core::{OsRng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json;
use sha2::Sha256;
use sodiumoxide::crypto::pwhash::argon2id13;
use std::collections::HashMap;
use std::convert::TryInto;
use std::env;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::str;
use uuid::Uuid;
static mut USER_TOKEN: Vec<(String, String)> = Vec::new();
static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new();
#[derive(Debug)]
struct User {
username: String,
salt: Salt,
password_kdf: [u8; 32],
secret: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct UserChallenge {
username: String,
challenge: u64,
salt: Salt,
}
#[derive(Serialize, Deserialize, Debug)]
struct Metadata {
file_name: String,
username: Vec<String>,
nonce: [u8; 12],
key: Vec<u8>,
}
#[derive(Deserialize, Debug)]
struct ComputedChallenge {
challenge: [u8; 32],
}
lazy_static! {
static ref USER_DB: HashMap<&'static str, User> = {
let mut map = HashMap::new();
        // Google Authenticator setup
        let auth = GoogleAuthenticator::new();
        // This part is normally done on the client, but it is deliberately
        // kept on the server to simplify the architecture
let salt = argon2id13::gen_salt();
let mut key = [0u8; 32];
argon2id13::derive_key(
&mut key,
"P@ssw0rd".as_bytes(),
&salt,
argon2id13::OPSLIMIT_SENSITIVE,
argon2id13::MEMLIMIT_SENSITIVE,
)
.unwrap();
map.insert(
"jerome",
User {
username: "jerome".to_string(),
salt: salt,
password_kdf: key,
secret: auth.create_secret(32),
},
);
map
};
}
#[get("/server/{user_id}")]
async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse {
    // look the user up in the DB; if present, send back a challenge to solve
match USER_DB.get::<str>(&user_id.to_string()) {
Some(username) => {
let user_challenge = UserChallenge {
username: user_id.to_string(),
salt: username.salt,
challenge: OsRng.next_u64(),
};
unsafe {
USER_CHALLENGE.push((user_id, user_challenge.challenge));
}
HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap())
}
None => HttpResponse::NotFound().finish(),
}
}
#[post("/server/{user_id}")] // <- define path parameters
async fn username_post(
web::Path(user_id): web::Path<String>,
mut body: web::Payload,
) -> HttpResponse {
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // read the body to get the submitted challenge
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
    // deserialize the submitted challenge
let computed_challenge: ComputedChallenge =
serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap();
    // fetch the challenge that was sent to this client
let challenge_to_compute: u64;
unsafe {
let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap();
challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1;
USER_CHALLENGE.remove(index);
}
    // compute the MAC from the KDF output stored in the DB
type HmacSha256 = Hmac<Sha256>;
let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error");
mac.update(&challenge_to_compute.to_be_bytes());
let challenge: [u8; 32] = mac
.finalize()
.into_bytes()
.as_slice()
.try_into()
.expect("Wrong length");
    // check that both values match
if challenge == computed_challenge.challenge {
return HttpResponse::Ok().finish();
}
HttpResponse::NonAuthoritativeInformation().finish()
}
#[get("/2fa/{user_id}")]
async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse {
    // Google Authenticator setup
    let auth = GoogleAuthenticator::new();
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // build the QR code
let url = auth.qr_code_url(
&user.secret,
"qr_code",
"name",
200,
200,
ErrorCorrectionLevel::High,
);
HttpResponse::Ok().body(url)
}
#[post("/2fa/{user_id}")]
async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse {
    // Google Authenticator setup
    let auth = GoogleAuthenticator::new();
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
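    // Assumption worth stating: in the google_authenticator crate the last
    // two verify_code arguments appear to be (discrepancy, time_slice); a
    // discrepancy of 0 tolerates no clock drift and a time_slice of 0 means
    // "use the current time".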
    // fetch the code from the request header
    let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap();
    if !auth.verify_code(&user.secret, &input_code, 0, 0) {
        println!("Wrong code.");
return HttpResponse::Unauthorized().finish();
}
    // if OK, send the user a token for subsequent exchanges
let user_token: String = Uuid::new_v4().hyphenated().to_string();
unsafe {
USER_TOKEN.push((user_id, user_token.clone()));
}
HttpResponse::Ok().header("Token", user_token).finish()
}
#[post("/upload")]
async fn upload(mut body: web | oad, req: HttpRequest) -> HttpResponse {
    // read and verify the token
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
    // read the body
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
let res: Vec<u8> = bytes.to_vec();
    // write the data to a file
let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap();
file.write_all(&res).unwrap();
HttpResponse::Ok().finish()
}
#[get("/download")]
async fn download(req: HttpRequest) -> HttpResponse {
    // read and verify the token
    let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap();
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let work_file = env::current_dir().unwrap().join(&filename);
    // open and read the file
let mut file = match File::open(work_file) {
Ok(result) => result,
Err(_) => {
return HttpResponse::NoContent().finish();
}
};
let mut ciphertext: Vec<u8> = Vec::new();
file.read_to_end(&mut ciphertext).unwrap();
HttpResponse::Ok().body(ciphertext)
}
#[get("/list")]
async fn get_list(req: HttpRequest) -> HttpResponse {
    // read and verify the token
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap();
    // prepare the AES-GCM key and the nonce
let key_aes = Key::from_slice(b"an example very very secret key.");
let aead = Aes256Gcm::new(key_aes);
let nonce = Nonce::from_slice(b"unique nonce");
let mut file_list = String::new();
    // read the directory contents
let paths = fs::read_dir("./").unwrap();
for path in paths {
let file = path.unwrap().path().into_os_string().into_string().unwrap();
        // for every metadata file
if file.contains(".metadata") {
let mut current_file = File::open(&file).expect("Unable to open the file");
let mut contents = String::new();
current_file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let meta: Metadata = serde_json::from_str(&contents).unwrap();
if meta.username.contains(&user_name.to_string()) {
file_list.push_str(&file.split(".metadata").collect::<String>());
file_list.push('\n');
}
}
}
let ciphertext = aead
.encrypt(nonce, file_list.as_bytes())
.expect("encryption failure!");
HttpResponse::Ok().body(ciphertext)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
println!("Le serveur est prêt à recevoir des requêtes");
use actix_web::{App, HttpServer};
HttpServer::new(|| {
App::new()
.service(username)
.service(username_post)
.service(get_code)
.service(validate_code)
.service(upload)
.service(download)
.service(get_list)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
// two-factor verification
pub fn verify_2fa(user_secret: &str, token: String) -> bool {
let auth = GoogleAuthenticator::new();
if!auth.verify_code(user_secret, &token, 0, 0) {
println!("Mauvais code.");
return false;
}
true
}
// check that the token exists and belongs to the right user
fn check_token(req: &HttpRequest) -> bool {
let token: &str = req.headers().get("Token").unwrap().to_str().unwrap();
let user: &str = req.headers().get("Username").unwrap().to_str().unwrap();
unsafe {
for pair in USER_TOKEN.iter() {
if pair.0 == user && pair.1 == token {
return true;
}
}
}
return false;
}
| ::Payl | identifier_name |
main.rs | use crate::argon2id13::Salt;
use actix_web::{get, post, web, HttpRequest, HttpResponse};
use aes_gcm::aead::{Aead, NewAead};
use aes_gcm::{Aes256Gcm, Key, Nonce};
use futures::StreamExt;
use google_authenticator::{ErrorCorrectionLevel, GoogleAuthenticator};
use hmac::{Hmac, Mac, NewMac};
use lazy_static::lazy_static;
use rand_core::{OsRng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json;
use sha2::Sha256;
use sodiumoxide::crypto::pwhash::argon2id13;
use std::collections::HashMap;
use std::convert::TryInto;
use std::env;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::str;
use uuid::Uuid;
static mut USER_TOKEN: Vec<(String, String)> = Vec::new();
static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new();
#[derive(Debug)]
struct User {
username: String,
salt: Salt,
password_kdf: [u8; 32],
secret: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct UserChallenge {
username: String,
challenge: u64,
salt: Salt,
}
#[derive(Serialize, Deserialize, Debug)]
struct Metadata {
file_name: String,
username: Vec<String>,
nonce: [u8; 12],
key: Vec<u8>,
}
#[derive(Deserialize, Debug)]
struct ComputedChallenge {
challenge: [u8; 32],
}
lazy_static! {
static ref USER_DB: HashMap<&'static str, User> = {
let mut map = HashMap::new();
        // Google Authenticator setup
        let auth = GoogleAuthenticator::new();
        // This part is normally done on the client, but it is deliberately
        // kept on the server to simplify the architecture
let salt = argon2id13::gen_salt();
let mut key = [0u8; 32];
argon2id13::derive_key(
&mut key,
"P@ssw0rd".as_bytes(),
&salt,
argon2id13::OPSLIMIT_SENSITIVE,
argon2id13::MEMLIMIT_SENSITIVE,
)
.unwrap();
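        // OPSLIMIT_SENSITIVE/MEMLIMIT_SENSITIVE select libsodium's most
        // expensive Argon2id preset, so this derivation intentionally costs
        // seconds of CPU time and a large amount of memory.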
map.insert(
"jerome",
User {
username: "jerome".to_string(),
salt: salt,
password_kdf: key,
secret: auth.create_secret(32),
},
);
map
};
}
#[get("/server/{user_id}")]
async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse {
    // look the user up in the DB; if present, send back a challenge to solve
match USER_DB.get::<str>(&user_id.to_string()) {
Some(username) => {
let user_challenge = UserChallenge {
username: user_id.to_string(),
salt: username.salt,
challenge: OsRng.next_u64(),
};
unsafe {
USER_CHALLENGE.push((user_id, user_challenge.challenge));
}
HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap())
}
None => HttpResponse::NotFound().finish(),
}
}
#[post("/server/{user_id}")] // <- define path parameters
async fn username_post(
web::Path(user_id): web::Path<String>,
mut body: web::Payload,
) -> HttpResponse {
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // read the body to get the submitted challenge
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
    // deserialize the submitted challenge
let computed_challenge: ComputedChallenge =
serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap();
    // fetch the challenge that was sent to this client
let challenge_to_compute: u64;
unsafe {
let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap();
challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1;
USER_CHALLENGE.remove(index);
}
    // compute the MAC from the KDF output stored in the DB
type HmacSha256 = Hmac<Sha256>;
let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error");
mac.update(&challenge_to_compute.to_be_bytes());
let challenge: [u8; 32] = mac
.finalize()
.into_bytes()
.as_slice()
.try_into()
.expect("Wrong length");
    // check that both values match
if challenge == computed_challenge.challenge {
return HttpResponse::Ok().finish();
}
HttpResponse::NonAuthoritativeInformation().finish()
}
#[get("/2fa/{user_id}")]
async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse {
    // Google Authenticator setup
    let auth = GoogleAuthenticator::new();
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // build the QR code
let url = auth.qr_code_url(
&user.secret,
"qr_code",
"name",
200,
200,
ErrorCorrectionLevel::High,
);
HttpResponse::Ok().body(url)
}
#[post("/2fa/{user_id}")]
async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse {
    // Google Authenticator setup
    let auth = GoogleAuthenticator::new();
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // fetch the code from the request header
    let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap();
    if !auth.verify_code(&user.secret, &input_code, 0, 0) {
        println!("Wrong code.");
return HttpResponse::Unauthorized().finish();
}
    // if OK, send the user a token for subsequent exchanges
let user_token: String = Uuid::new_v4().hyphenated().to_string();
unsafe {
USER_TOKEN.push((user_id, user_token.clone()));
}
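    // note: nothing ever removes entries from USER_TOKEN, so an issued token
    // remains valid for the lifetime of the process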
HttpResponse::Ok().header("Token", user_token).finish()
}
#[post("/upload")]
async fn upload(mut body: web::Payload, req: HttpRequest) -> HttpResponse {
    // read and verify the token
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
    // read the body
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
let res: Vec<u8> = bytes.to_vec();
    // write the data to a file
let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap();
file.write_all(&res).unwrap();
HttpResponse::Ok().finish()
}
#[get("/download")]
async fn download(req: HttpRequest) -> HttpResponse {
    // read and verify the token
    let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap();
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let work_file = env::current_dir().unwrap().join(&filename);
    // open and read the file
let mut file = match File::open(work_file) {
Ok(result) => result,
Err(_) => {
return HttpResponse::NoContent().finish();
}
};
let mut ciphertext: Vec<u8> = Vec::new();
file.read_to_end(&mut ciphertext).unwrap();
HttpResponse::Ok().body(ciphertext)
}
#[get("/list")]
async fn get_list(req: HttpRequest) -> HttpResponse {
    // read and verify the token
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap();
    // prepare the AES-GCM key and the nonce
let key_aes = Key::from_slice(b"an example very very secret key.");
let aead = Aes256Gcm::new(key_aes);
let nonce = Nonce::from_slice(b"unique nonce");
let mut file_list = String::new();
    // read the directory contents
let paths = fs::read_dir("./").unwrap();
for path in paths {
let file = path.unwrap().path().into_os_string().into_string().unwrap();
// pour tous les fichiers est de type metadonnée
if file.contains(".metadata") {
let mut current_file = File::open(&file).expect("Unable to open the file");
let mut contents = String::new();
current_file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let meta: Metadata = serde_json::from_str(&contents).unwrap();
if meta.username.contains(&user_name.to_string()) {
file_list.push_str(&file.split(".metadata").collect::<String>());
file_list.push('\n');
}
}
}
let ciphertext = aead
.encrypt(nonce, file_list.as_bytes())
.expect("encryption failure!");
HttpResponse::Ok().body(ciphertext)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
println!("Le serveur est prêt à recevoir des requêtes");
use actix_web::{App, HttpServer};
HttpServer::new(|| {
App::new()
.service(username)
.service(username_post)
.service(get_code)
.service(validate_code)
.service(upload)
.service(download)
.service(get_list)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
// two-factor verification
pub fn verify_2fa(user_secret: &str, token: String) -> bool {
let auth = GoogleAuthenticator::new();
if!auth.verify_code(user_secret, &token, 0, 0) {
println!("Mauvais code.");
return false;
}
true
}
// check that the token exists and belongs to the right user
fn check_token(req: &HttpRequest) -> bool {
let token: &str = req.headers().get("Token").unwrap().to_str().unwrap();
let user: &str = req.headers().get("Username").unwrap().to_str().unwrap();
unsafe {
for pair in USER_TOKEN.iter() {
if pair.0 == user && pair.1 == token {
return true;
|
}
| }
}
}
return false; | conditional_block |
main.rs | use crate::argon2id13::Salt;
use actix_web::{get, post, web, HttpRequest, HttpResponse};
use aes_gcm::aead::{Aead, NewAead};
use aes_gcm::{Aes256Gcm, Key, Nonce};
use futures::StreamExt;
use google_authenticator::{ErrorCorrectionLevel, GoogleAuthenticator};
use hmac::{Hmac, Mac, NewMac};
use lazy_static::lazy_static;
use rand_core::{OsRng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json;
use sha2::Sha256;
use sodiumoxide::crypto::pwhash::argon2id13;
use std::collections::HashMap;
use std::convert::TryInto;
use std::env;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::str;
use uuid::Uuid;
static mut USER_TOKEN: Vec<(String, String)> = Vec::new();
static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new();
#[derive(Debug)]
struct User {
username: String,
salt: Salt,
password_kdf: [u8; 32],
secret: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct UserChallenge {
username: String,
challenge: u64,
salt: Salt,
}
#[derive(Serialize, Deserialize, Debug)]
struct Metadata {
file_name: String,
username: Vec<String>,
nonce: [u8; 12],
key: Vec<u8>,
}
#[derive(Deserialize, Debug)]
struct ComputedChallenge {
challenge: [u8; 32],
}
lazy_static! {
static ref USER_DB: HashMap<&'static str, User> = {
let mut map = HashMap::new();
        // Google Authenticator setup
        let auth = GoogleAuthenticator::new();
        // This part is normally done on the client, but it is deliberately
        // kept on the server to simplify the architecture
let salt = argon2id13::gen_salt();
let mut key = [0u8; 32];
argon2id13::derive_key(
&mut key,
"P@ssw0rd".as_bytes(),
&salt,
argon2id13::OPSLIMIT_SENSITIVE,
argon2id13::MEMLIMIT_SENSITIVE,
)
.unwrap();
map.insert(
"jerome",
User {
username: "jerome".to_string(),
salt: salt,
password_kdf: key,
secret: auth.create_secret(32),
},
);
map
};
}
#[get("/server/{user_id}")]
async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse {
    // look the user up in the DB; if present, send back a challenge to solve
match USER_DB.get::<str>(&user_id.to_string()) {
Some(username) => {
let user_challenge = UserChallenge {
username: user_id.to_string(),
salt: username.salt,
challenge: OsRng.next_u64(),
};
unsafe {
USER_CHALLENGE.push((user_id, user_challenge.challenge));
}
HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap())
}
None => HttpResponse::NotFound().finish(),
}
}
#[post("/server/{user_id}")] // <- define path parameters
async fn username_post(
web::Path(user_id): web::Path<String>,
mut body: web::Payload,
) -> HttpResponse {
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // read the body to get the submitted challenge
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
    // deserialize the submitted challenge
let computed_challenge: ComputedChallenge =
serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap();
    // fetch the challenge that was sent to this client
let challenge_to_compute: u64;
unsafe {
let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap();
challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1;
USER_CHALLENGE.remove(index);
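        // removing the entry makes each challenge single-use, so a captured
        // response cannot be replayed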
}
    // compute the MAC from the KDF output stored in the DB
type HmacSha256 = Hmac<Sha256>;
let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error");
mac.update(&challenge_to_compute.to_be_bytes());
let challenge: [u8; 32] = mac
.finalize()
.into_bytes()
.as_slice()
.try_into()
.expect("Wrong length");
    // check that both values match
if challenge == computed_challenge.challenge {
return HttpResponse::Ok().finish();
}
HttpResponse::NonAuthoritativeInformation().finish()
}
#[get("/2fa/{user_id}")]
async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse {
    // Google Authenticator setup
    let auth = GoogleAuthenticator::new();
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // build the QR code
let url = auth.qr_code_url(
&user.secret,
"qr_code",
"name",
200,
200,
ErrorCorrectionLevel::High,
);
HttpResponse::Ok().body(url)
}
#[post("/2fa/{user_id}")]
async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse {
    // Google Authenticator setup
    let auth = GoogleAuthenticator::new();
    // check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
    // fetch the code from the request header
    let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap();
    if !auth.verify_code(&user.secret, &input_code, 0, 0) {
        println!("Wrong code.");
return HttpResponse::Unauthorized().finish();
}
    // if OK, send the user a token for subsequent exchanges
let user_token: String = Uuid::new_v4().hyphenated().to_string();
unsafe {
USER_TOKEN.push((user_id, user_token.clone()));
}
HttpResponse::Ok().header("Token", user_token).finish()
}
#[post("/upload")]
async fn upload(mut body: web::Payload, req: HttpRequest) -> HttpResponse {
    // read and verify the token
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
    // read the body
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
let res: Vec<u8> = bytes.to_vec();
    // write the data to a file
let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap();
file.write_all(&res).unwrap();
HttpResponse::Ok().finish()
}
#[get("/download")]
async fn download(req: HttpRequest) -> HttpResponse {
    // read and verify the token
    let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap();
    if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let work_file = env::current_dir().unwrap().join(&filename);
    // open and read the file
let mut file = match File::open(work_file) {
Ok(result) => result,
Err(_) => {
return HttpResponse::NoContent().finish();
}
};
let mut ciphertext: Vec<u8> = Vec::new();
file.read_to_end(&mut ciphertext).unwrap();
HttpResponse::Ok().body(ciphertext)
}
#[get("/list")]
async fn get_list(req: HttpRequest) -> HttpResponse {
// lire et vérifie | let mut contents = String::new();
current_file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let meta: Metadata = serde_json::from_str(&contents).unwrap();
if meta.username.contains(&user_name.to_string()) {
file_list.push_str(&file.split(".metadata").collect::<String>());
file_list.push('\n');
}
}
}
let ciphertext = aead
.encrypt(nonce, file_list.as_bytes())
.expect("encryption failure!");
HttpResponse::Ok().body(ciphertext)
}
#[actix_web::main]
async fn
main() -> std::io::Result<()> {
println!("Le serveur est prêt à recevoir des requêtes");
use actix_web::{App, HttpServer};
HttpServer::new(|| {
App::new()
.service(username)
.service(username_post)
.service(get_code)
.service(validate_code)
.service(upload)
.service(download)
.service(get_list)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
// two-factor verification
pub fn verify_2fa(user_secret: &str, token: String) -> bool {
let auth = GoogleAuthenticator::new();
if!auth.verify_code(user_secret, &token, 0, 0) {
println!("Mauvais code.");
return false;
}
true
}
// check that the token exists and belongs to the right user
fn check_token(req: &HttpRequest) -> bool {
let token: &str = req.headers().get("Token").unwrap().to_str().unwrap();
let user: &str = req.headers().get("Username").unwrap().to_str().unwrap();
unsafe {
for pair in USER_TOKEN.iter() {
if pair.0 == user && pair.1 == token {
return true;
}
}
}
return false;
}
| r le Token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap();
    // prepare the AES-GCM key and the nonce
let key_aes = Key::from_slice(b"an example very very secret key.");
let aead = Aes256Gcm::new(key_aes);
let nonce = Nonce::from_slice(b"unique nonce");
let mut file_list = String::new();
    // read the directory contents
let paths = fs::read_dir("./").unwrap();
for path in paths {
let file = path.unwrap().path().into_os_string().into_string().unwrap();
        // for every metadata file
if file.contains(".metadata") {
let mut current_file = File::open(&file).expect("Unable to open the file"); | identifier_body |
lib.register_lints.rs | will be overwritten.
store.register_lints(&[
#[cfg(feature = "internal")]
utils::internal_lints::CLIPPY_LINTS_INTERNAL,
#[cfg(feature = "internal")]
utils::internal_lints::COLLAPSIBLE_SPAN_LINT_CALLS,
#[cfg(feature = "internal")]
utils::internal_lints::COMPILER_LINT_FUNCTIONS,
#[cfg(feature = "internal")]
utils::internal_lints::DEFAULT_LINT,
#[cfg(feature = "internal")]
utils::internal_lints::IF_CHAIN_STYLE,
#[cfg(feature = "internal")]
utils::internal_lints::INTERNING_DEFINED_SYMBOL,
#[cfg(feature = "internal")]
utils::internal_lints::INVALID_CLIPPY_VERSION_ATTRIBUTE,
#[cfg(feature = "internal")]
utils::internal_lints::INVALID_PATHS,
#[cfg(feature = "internal")]
utils::internal_lints::LINT_WITHOUT_LINT_PASS,
#[cfg(feature = "internal")]
utils::internal_lints::MATCH_TYPE_ON_DIAGNOSTIC_ITEM,
#[cfg(feature = "internal")]
utils::internal_lints::MISSING_CLIPPY_VERSION_ATTRIBUTE,
#[cfg(feature = "internal")]
utils::internal_lints::MISSING_MSRV_ATTR_IMPL,
#[cfg(feature = "internal")]
utils::internal_lints::OUTER_EXPN_EXPN_DATA,
#[cfg(feature = "internal")]
utils::internal_lints::PRODUCE_ICE,
#[cfg(feature = "internal")]
utils::internal_lints::UNNECESSARY_SYMBOL_STR,
absurd_extreme_comparisons::ABSURD_EXTREME_COMPARISONS,
approx_const::APPROX_CONSTANT,
arithmetic::FLOAT_ARITHMETIC,
arithmetic::INTEGER_ARITHMETIC,
as_conversions::AS_CONVERSIONS,
asm_syntax::INLINE_ASM_X86_ATT_SYNTAX,
asm_syntax::INLINE_ASM_X86_INTEL_SYNTAX,
assertions_on_constants::ASSERTIONS_ON_CONSTANTS,
assign_ops::ASSIGN_OP_PATTERN,
assign_ops::MISREFACTORED_ASSIGN_OP,
async_yields_async::ASYNC_YIELDS_ASYNC,
attrs::ALLOW_ATTRIBUTES_WITHOUT_REASON,
attrs::BLANKET_CLIPPY_RESTRICTION_LINTS,
attrs::DEPRECATED_CFG_ATTR,
attrs::DEPRECATED_SEMVER,
attrs::EMPTY_LINE_AFTER_OUTER_ATTR,
attrs::INLINE_ALWAYS,
attrs::MISMATCHED_TARGET_OS,
attrs::USELESS_ATTRIBUTE,
await_holding_invalid::AWAIT_HOLDING_LOCK,
await_holding_invalid::AWAIT_HOLDING_REFCELL_REF,
bit_mask::BAD_BIT_MASK,
bit_mask::INEFFECTIVE_BIT_MASK,
bit_mask::VERBOSE_BIT_MASK,
blacklisted_name::BLACKLISTED_NAME,
blocks_in_if_conditions::BLOCKS_IN_IF_CONDITIONS,
bool_assert_comparison::BOOL_ASSERT_COMPARISON,
booleans::LOGIC_BUG,
booleans::NONMINIMAL_BOOL,
borrow_as_ptr::BORROW_AS_PTR,
bytecount::NAIVE_BYTECOUNT,
cargo::CARGO_COMMON_METADATA,
cargo::MULTIPLE_CRATE_VERSIONS,
cargo::NEGATIVE_FEATURE_NAMES,
cargo::REDUNDANT_FEATURE_NAMES,
cargo::WILDCARD_DEPENDENCIES,
case_sensitive_file_extension_comparisons::CASE_SENSITIVE_FILE_EXTENSION_COMPARISONS,
casts::CAST_ENUM_CONSTRUCTOR,
casts::CAST_ENUM_TRUNCATION,
casts::CAST_LOSSLESS,
casts::CAST_POSSIBLE_TRUNCATION,
casts::CAST_POSSIBLE_WRAP,
casts::CAST_PRECISION_LOSS,
casts::CAST_PTR_ALIGNMENT,
casts::CAST_REF_TO_MUT,
casts::CAST_SIGN_LOSS,
casts::CAST_SLICE_DIFFERENT_SIZES,
casts::CHAR_LIT_AS_U8,
casts::FN_TO_NUMERIC_CAST,
casts::FN_TO_NUMERIC_CAST_ANY,
casts::FN_TO_NUMERIC_CAST_WITH_TRUNCATION,
casts::PTR_AS_PTR,
casts::UNNECESSARY_CAST,
checked_conversions::CHECKED_CONVERSIONS,
cognitive_complexity::COGNITIVE_COMPLEXITY, | comparison_chain::COMPARISON_CHAIN,
copies::BRANCHES_SHARING_CODE,
copies::IFS_SAME_COND,
copies::IF_SAME_THEN_ELSE,
copies::SAME_FUNCTIONS_IN_IF_CONDITION,
copy_iterator::COPY_ITERATOR,
create_dir::CREATE_DIR,
dbg_macro::DBG_MACRO,
default::DEFAULT_TRAIT_ACCESS,
default::FIELD_REASSIGN_WITH_DEFAULT,
default_numeric_fallback::DEFAULT_NUMERIC_FALLBACK,
default_union_representation::DEFAULT_UNION_REPRESENTATION,
dereference::EXPLICIT_DEREF_METHODS,
dereference::NEEDLESS_BORROW,
dereference::REF_BINDING_TO_REFERENCE,
derivable_impls::DERIVABLE_IMPLS,
derive::DERIVE_HASH_XOR_EQ,
derive::DERIVE_ORD_XOR_PARTIAL_ORD,
derive::EXPL_IMPL_CLONE_ON_COPY,
derive::UNSAFE_DERIVE_DESERIALIZE,
disallowed_methods::DISALLOWED_METHODS,
disallowed_script_idents::DISALLOWED_SCRIPT_IDENTS,
disallowed_types::DISALLOWED_TYPES,
doc::DOC_MARKDOWN,
doc::MISSING_ERRORS_DOC,
doc::MISSING_PANICS_DOC,
doc::MISSING_SAFETY_DOC,
doc::NEEDLESS_DOCTEST_MAIN,
double_comparison::DOUBLE_COMPARISONS,
double_parens::DOUBLE_PARENS,
drop_forget_ref::DROP_COPY,
drop_forget_ref::DROP_REF,
drop_forget_ref::FORGET_COPY,
drop_forget_ref::FORGET_REF,
duration_subsec::DURATION_SUBSEC,
else_if_without_else::ELSE_IF_WITHOUT_ELSE,
empty_enum::EMPTY_ENUM,
entry::MAP_ENTRY,
enum_clike::ENUM_CLIKE_UNPORTABLE_VARIANT,
enum_variants::ENUM_VARIANT_NAMES,
enum_variants::MODULE_INCEPTION,
enum_variants::MODULE_NAME_REPETITIONS,
eq_op::EQ_OP,
eq_op::OP_REF,
equatable_if_let::EQUATABLE_IF_LET,
erasing_op::ERASING_OP,
escape::BOXED_LOCAL,
eta_reduction::REDUNDANT_CLOSURE,
eta_reduction::REDUNDANT_CLOSURE_FOR_METHOD_CALLS,
eval_order_dependence::DIVERGING_SUB_EXPRESSION,
eval_order_dependence::EVAL_ORDER_DEPENDENCE,
excessive_bools::FN_PARAMS_EXCESSIVE_BOOLS,
excessive_bools::STRUCT_EXCESSIVE_BOOLS,
exhaustive_items::EXHAUSTIVE_ENUMS,
exhaustive_items::EXHAUSTIVE_STRUCTS,
exit::EXIT,
explicit_write::EXPLICIT_WRITE,
fallible_impl_from::FALLIBLE_IMPL_FROM,
float_equality_without_abs::FLOAT_EQUALITY_WITHOUT_ABS,
float_literal::EXCESSIVE_PRECISION,
float_literal::LOSSY_FLOAT_LITERAL,
floating_point_arithmetic::IMPRECISE_FLOPS,
floating_point_arithmetic::SUBOPTIMAL_FLOPS,
format::USELESS_FORMAT,
format_args::FORMAT_IN_FORMAT_ARGS,
format_args::TO_STRING_IN_FORMAT_ARGS,
format_impl::PRINT_IN_FORMAT_IMPL,
format_impl::RECURSIVE_FORMAT_IMPL,
formatting::POSSIBLE_MISSING_COMMA,
formatting::SUSPICIOUS_ASSIGNMENT_FORMATTING,
formatting::SUSPICIOUS_ELSE_FORMATTING,
formatting::SUSPICIOUS_UNARY_OP_FORMATTING,
from_over_into::FROM_OVER_INTO,
from_str_radix_10::FROM_STR_RADIX_10,
functions::DOUBLE_MUST_USE,
functions::MUST_USE_CANDIDATE,
functions::MUST_USE_UNIT,
functions::NOT_UNSAFE_PTR_ARG_DEREF,
functions::RESULT_UNIT_ERR,
functions::TOO_MANY_ARGUMENTS,
functions::TOO_MANY_LINES,
future_not_send::FUTURE_NOT_SEND,
get_last_with_len::GET_LAST_WITH_LEN,
identity_op::IDENTITY_OP,
if_let_mutex::IF_LET_MUTEX,
if_not_else::IF_NOT_ELSE,
if_then_some_else_none::IF_THEN_SOME_ELSE_NONE,
implicit_hasher::IMPLICIT_HASHER,
implicit_return::IMPLICIT_RETURN,
implicit_saturating_sub::IMPLICIT_SATURATING_SUB,
inconsistent_struct_constructor::INCONSISTENT_STRUCT_CONSTRUCTOR,
index_refutable_slice::INDEX_REFUTABLE_SLICE,
indexing_slicing::INDEXING_SLICING,
indexing_slicing::OUT_OF_BOUNDS_INDEXING,
infinite_iter::INFINITE_ITER,
infinite_iter::MAYBE_INFINITE_ITER,
inherent_impl::MULTIPLE_INHERENT_IMPL,
inherent_to_string::INHERENT_TO_STRING,
inherent_to_string::INHERENT_TO_STRING_SHADOW_DISPLAY,
init_numbered_fields::INIT_NUMBERED_FIELDS,
inline_fn_without_body::INLINE_FN_WITHOUT_BODY,
int_plus_one::INT_PLUS_ONE,
integer_division::INTEGER_DIVISION,
invalid_upcast_comparisons::INVALID_UPCAST_COMPARISONS,
items_after_statements::ITEMS_AFTER_STATEMENTS,
iter_not_returning_iterator::ITER_NOT_RETURNING_ITERATOR,
large_const_arrays::LARGE_CONST_ARRAYS,
large_enum_variant::LARGE_ENUM_VARIANT,
large_stack_arrays::LARGE_STACK_ARRAYS,
len_zero::COMPARISON_TO_EMPTY,
len_zero::LEN_WITHOUT_IS_EMPTY,
len_zero::LEN_ZERO,
let_if_seq::USELESS_LET_IF_SEQ,
let_underscore::LET_UNDERSCORE_DROP,
let_underscore::LET_UNDERSCORE_LOCK,
let_underscore::LET_UNDERSCORE_MUST_USE,
lifetimes::EXTRA_UNUSED_LIFETIMES,
lifetimes::NEEDLESS_LIFETIMES,
literal_representation::DECIMAL_LITERAL_REPRESENTATION,
literal_representation::INCONSISTENT_DIGIT_GROUPING,
literal_representation::LARGE_DIGIT_GROUPS,
literal_representation::MISTYPED_LITERAL_SUFFIXES,
literal_representation::UNREADABLE_LITERAL,
literal_representation::UNUSUAL_BYTE_GROUPINGS,
loops::EMPTY_LOOP,
loops::EXPLICIT_COUNTER_LOOP,
loops::EXPLICIT_INTO_ITER_LOOP,
loops::EXPLICIT_ITER_LOOP,
loops::FOR_KV_MAP,
loops::FOR_LOOPS_OVER_FALLIBLES,
loops::ITER_NEXT_LOOP,
loops::MANUAL_FLATTEN,
loops::MANUAL_MEMCPY,
loops::MISSING_SPIN_LOOP,
loops::MUT_RANGE_BOUND,
loops::NEEDLESS_COLLECT,
loops::NEEDLESS_RANGE_LOOP,
loops::NEVER_LOOP,
loops::SAME_ITEM_PUSH,
loops::SINGLE_ELEMENT_LOOP,
loops::WHILE_IMMUTABLE_CONDITION,
loops::WHILE_LET_LOOP,
loops::WHILE_LET_ON_ITERATOR,
macro_use::MACRO_USE_IMPORTS,
main_recursion::MAIN_RECURSION,
manual_assert::MANUAL_ASSERT,
manual_async_fn::MANUAL_ASYNC_FN,
manual_bits::MANUAL_BITS,
manual_map::MANUAL_MAP,
manual_non_exhaustive::MANUAL_NON_EXHAUSTIVE,
manual_ok_or::MANUAL_OK_OR,
manual_strip::MANUAL_STRIP,
manual_unwrap_or::MANUAL_UNWRAP_OR,
map_clone::MAP_CLONE,
map_err_ignore::MAP_ERR_IGNORE,
map_unit_fn::OPTION_MAP_UNIT_FN,
map_unit_fn::RESULT_MAP_UNIT_FN,
match_on_vec_items::MATCH_ON_VEC_ITEMS,
match_result_ok::MATCH_RESULT_OK,
match_str_case_mismatch::MATCH_STR_CASE_MISMATCH,
matches::INFALLIBLE_DESTRUCTURING_MATCH,
matches::MATCH_AS_REF,
matches::MATCH_BOOL,
matches::MATCH_LIKE_MATCHES_MACRO,
matches::MATCH_OVERLAPPING_ARM,
matches::MATCH_REF_PATS,
matches::MATCH_SAME_ARMS,
matches::MATCH_SINGLE_BINDING,
matches::MATCH_WILDCARD_FOR_SINGLE_VARIANTS,
matches::MATCH_WILD_ERR_ARM,
matches::NEEDLESS_MATCH,
matches::REDUNDANT_PATTERN_MATCHING,
matches::REST_PAT_IN_FULLY_BOUND_STRUCTS,
matches::SINGLE_MATCH,
matches::SINGLE_MATCH_ELSE,
matches::WILDCARD_ENUM_MATCH_ARM,
matches::WILDCARD_IN_OR_PATTERNS,
mem_forget::MEM_FORGET,
mem_replace::MEM_REPLACE_OPTION_WITH_NONE,
mem_replace::MEM_REPLACE_WITH_DEFAULT,
mem_replace::MEM_REPLACE_WITH_UNINIT,
methods::BIND_INSTEAD_OF_MAP,
methods::BYTES_NTH,
methods::CHARS_LAST_CMP,
methods::CHARS_NEXT_CMP,
methods::CLONED_INSTEAD_OF_COPIED,
methods::CLONE_DOUBLE_REF,
methods::CLONE_ON_COPY,
methods::CLONE_ON_REF_PTR,
methods::EXPECT_FUN_CALL,
methods::EXPECT_USED,
methods::EXTEND_WITH_DRAIN,
methods::FILETYPE_IS_FILE,
methods::FILTER_MAP_IDENTITY,
methods::FILTER_MAP_NEXT,
methods::FILTER_NEXT,
methods::FLAT_MAP_IDENTITY,
methods::FLAT_MAP_OPTION,
methods::FROM_ITER_INSTEAD_OF_COLLECT,
methods::GET_UNWRAP,
methods::IMPLICIT_CLONE,
methods::INEFFICIENT_TO_STRING,
methods::INSPECT_FOR_EACH,
methods::INTO_ITER_ON_REF,
methods::ITERATOR_STEP_BY_ZERO,
methods::ITER_CLONED_COLLECT,
methods::ITER_COUNT,
methods::ITER_NEXT_SLICE,
methods::ITER_NTH,
methods::ITER_NTH_ZERO,
methods::ITER_OVEREAGER_CLONED,
methods::ITER_SKIP_NEXT,
methods::ITER_WITH_DRAIN,
methods::MANUAL_FILTER_MAP,
methods::MANUAL_FIND_MAP,
methods::MANUAL_SATURATING_ARITHMETIC,
methods::MANUAL_SPLIT_ONCE,
methods::MANUAL_STR_REPEAT,
methods::MAP_COLLECT_RESULT_UNIT,
methods::MAP_FLATTEN,
methods::MAP_IDENTITY,
methods::MAP_UNWRAP_OR,
methods::NEEDLESS_SPLITN,
methods::NEW_RET_NO_SELF,
methods::OK_EXPECT,
methods::OPTION_AS_REF_DEREF,
methods::OPTION_FILTER_MAP,
methods::OPTION_MAP_OR_NONE,
methods::OR_FUN_CALL,
methods::OR_THEN_UNWRAP,
methods::RESULT_MAP_OR_INTO_OPTION,
methods::SEARCH_IS_SOME,
methods::SHOULD_IMPLEMENT_TRAIT,
methods::SINGLE_CHAR_ADD_STR,
methods::SINGLE_CHAR_PATTERN,
methods::SKIP_WHILE_NEXT,
methods::STRING_EXTEND_CHARS,
methods::SUSPICIOUS_MAP,
methods::SUSPICIOUS_SPLITN,
methods::UNINIT_ASSUMED_INIT,
methods::UNNECESSARY_FILTER_MAP,
methods::UNNECESSARY_FIND_MAP,
methods::UNNECESSARY_FOLD,
methods::UNNECESSARY_LAZY_EVALUATIONS,
methods::UNNECESSARY_TO_OWNED,
methods::UNWRAP_OR_ELSE_DEFAULT,
methods::UNWRAP_USED,
methods::USELESS_ASREF,
methods::WRONG_SELF_CONVENTION,
methods::ZST_OFFSET,
minmax::MIN_MAX,
misc::CMP_NAN,
misc::CMP_OWNED,
misc::FLOAT_CMP,
misc::FLOAT_CMP_CONST,
misc::MODULO_ONE,
misc::SHORT_CIRCUIT_STATEMENT,
misc::TOPLEVEL_REF_ARG,
misc::USED_UNDERSCORE_BINDING,
misc::ZERO_PTR,
misc_early::BUILTIN_TYPE_SHADOW,
misc_early::DOUBLE_NEG,
misc_early::DUPLICATE_UNDERSCORE_ARGUMENT,
misc_early::MIXED_CASE_HEX_LITERALS,
misc_early::REDUNDANT_PATTERN,
misc_early::SEPARATED_LITERAL_SUFFIX,
misc_early::UNNEEDED_FIELD_PATTERN,
misc_early::UNNEEDED_WILDCARD_PATTERN,
misc_early::UNSEPARATED_LITERAL_SUFFIX,
misc_early::ZERO_PREFIXED_LITERAL,
missing_const_for_fn::MISSING_CONST_FOR_FN,
missing_doc::MISSING_DOCS_IN_PRIVATE_ITEMS,
missing_enforced_import_rename::MISSING_ENFORCED_IMPORT_RENAMES,
missing_inline::MISSING_INLINE_IN_PUBLIC_ITEMS,
module_style::MOD_MODULE_FILES,
module_style::SELF_NAMED_MODULE_FILES,
modulo_arithmetic::MODULO_ARITHMETIC,
mut_key::MUTABLE_KEY_TYPE,
mut_mut::MUT_MUT,
mut_mutex_lock::MUT_MUTEX_LOCK,
mut_reference::UNNECESSARY_MUT_PASSED,
mutable_debug_assertion::DEBUG_ASSERT_WITH_MUT_CALL,
mutex_atomic::MUTEX_ATOMIC,
mutex_atomic::MUTEX_INTEGER,
needless_arbitrary_self_type::NEEDLESS_ARBITRARY_SELF_TYPE,
needless_bitwise_bool::NEEDLESS_BITWISE_BOOL,
needless_bool::BOOL_COMPARISON,
needless_bool::NEEDLESS_BOOL,
needless_borrowed_ref::NEEDLESS_BORROWED_REFERENCE,
needless_continue::NEEDLESS_CONTINUE,
needless_for_each::NEEDLESS_FOR_EACH,
needless_late_init::NEEDLESS_LATE_INIT,
needless_option_as_deref::NEEDLESS_OPTION_AS_DEREF,
needless_pass_by_value::NEEDLESS_PASS_BY_VALUE,
needless_question_mark::NEEDLESS_QUESTION_MARK,
needless_update::NEEDLESS_UPDATE,
neg_cmp_op_on_partial_ord::NEG_CMP_OP_ON_PARTIAL_ORD,
neg_multiply::NEG_MULTIPLY,
new_without_default::NEW_WITHOUT_DEFAULT,
no_effect::NO_EFFECT,
no_effect::NO_EFFECT_UNDERSCORE_BINDING,
no_effect::UNNECESSARY_OPERATION,
non_copy_const::BORROW_INTERIOR_MUTABLE_CONST,
non_copy_const::DECLARE_INTERIOR_MUTABLE_CONST,
non_expressive_names::JUST_UNDERSCORES_AND_DIGITS,
non_expressive_names::MANY_SINGLE_CHAR_NAMES,
non_expressive_names::SIMILAR_NAMES,
non_octal_unix_permissions::NON_OCTAL_UNIX_PERMISSIONS,
non_send_fields_in_send_ty::NON_SEND_FIELDS_IN_SEND_TY,
nonstandard_macro_braces::NONSTANDARD_MACRO_BRACES,
octal_escapes::OCTAL_ESCAPES,
only_used_in_recursion::ONLY_USED_IN_RECURSION,
open_options::NONSENSICAL_OPEN_OPTIONS,
option_env_unwrap::OPTION_ENV_UNWRAP,
option_if_let_else::OPTION_IF_LET_ELSE,
overflow_check_conditional::OVERFLOW_CHECK_CONDITIONAL,
panic_in_result_fn::PANIC_IN_RESULT_FN,
panic_unimplemented::PANIC,
panic_unimplemented::TODO,
panic_unimplemented::UNIMPLEMENTED,
panic_unimplemented::UNREACHABLE,
partialeq_ne_impl::PARTIALEQ_NE_IMPL,
pass_by_ref_or_value::LARGE_TYPES_PASSED_BY_VALUE,
pass_by_ref_or_value::TRIVIALLY_COPY_PASS_BY_REF,
path_buf_push_overwrite::PATH_BUF_PUSH_OVERWRITE,
pattern_type_mismatch::PATTERN_TYPE_MISMATCH,
precedence::PRECEDENCE,
ptr::CMP_NULL,
ptr::INVALID_NULL_PTR_USAGE,
ptr::MUT_FROM_REF,
ptr::PTR_ARG,
ptr_eq::PTR_EQ,
ptr_offset_with_cast::PTR_OFFSET_WITH_CAST,
question_mark::QUESTION_MARK,
ranges::MANUAL_RANGE_CONTAINS,
ranges::RANGE_MINUS_ONE,
ranges::RANGE_PLUS_ONE,
ranges::RANGE_ZIP_WITH_LEN,
ranges::REVERSED_EMPTY_RANGES,
redundant_clone::REDUNDANT_CLONE,
redundant_closure_call::REDUNDANT_CLOSURE_CALL,
redundant_else::REDUNDANT_ELSE,
redundant_field_names::REDUNDANT_FIELD_NAMES,
redundant_pub_crate::REDUNDANT_PUB_CRATE,
redundant_slicing::DEREF_BY_SLICING,
redundant_slicing::REDUNDANT_SLICING,
redundant_static_lifetimes::REDUNDANT_STATIC_LIFETIMES,
ref_option_ref::REF_OPTION_REF,
reference::DEREF_ADDROF,
regex::INVALID_REGEX,
regex::TRIVIAL_REGEX,
repeat_once::REPEAT_ONCE,
return_self_not_must_use::RETURN_SELF_NOT_MUST_USE,
returns::LET_AND_RETURN,
returns::NEEDLESS_RETURN,
same_name_method::SAME_NAME_METHOD,
self_assignment::SELF_ASSIGNMENT,
self_named_constructors::SELF_NAMED_CONSTRUCTORS,
semicolon_if_nothing_returned::SEMICOLON_IF_NOTHING_RETURNED,
serde_api::SERDE_API_MISUSE,
shadow::SHADOW_REUSE,
shadow::SHADOW_SAME,
shadow::SHADOW_UNRELATED,
single_char_lifetime_names::SINGLE_CHAR_LIFETIME_NAMES,
single_component_path_imports::SINGLE_COMPONENT_PATH_IMPORTS,
size_of_in_element_count::SIZE_OF_IN_ELEMENT_COUNT,
slow_vector_initialization::SLOW_VECTOR_INITIALIZATION,
stable_sort_primitive::STABLE_SORT_PRIMITIVE,
strings::STRING_ADD,
strings::STRING_ADD_ASSIGN,
strings::STRING_FROM_UTF8_AS_BYTES,
strings::STRING_LIT_AS_BYTES,
strings::STRING_SLICE,
strings::STRING_TO_STRING,
strings::STR_TO_STRING,
strlen_on_c_strings::STRLEN_ON_C_STRINGS,
suspicious_operation_groupings::SUSPICIOUS_OPERATION_GROUPINGS,
suspicious_trait_impl::SUSPICIOUS_ARITHMETIC_IMPL,
suspicious_trait_impl::SUSPICIOUS_OP_ASSIGN_IMPL,
swap::ALMOST_SWAPPED,
swap::MANUAL_SWAP,
tabs_in_doc_comments::TABS_IN_DOC_COMMENTS,
temporary_assignment::TEMPORARY_ASSIGNMENT,
to_digit_is_some::TO_DIGIT_IS_SOME,
trailing_empty_array::TRAILING_EMPTY_ARRAY,
trait_bounds::TRAIT_DUPLICATION_IN_BOUNDS,
trait_bounds::TYPE_REPETITION_IN_BOUNDS,
transmute::CROSSPOINTER_TRANSMUTE,
transmute::TRANSMUTES_EXPRESSIBLE_AS_PTR_CASTS,
transmute::TRANSMUTE_BYTES_TO_STR,
transmute::TRANSMUTE_FLOAT_TO_INT,
transmute::TRANSMUTE_INT_TO_BOOL,
transmute::TRANSMUTE_INT_TO_CHAR,
transmute::TRANSMUTE_INT_TO_FLOAT,
transmute::TRANSMUTE_NUM_TO_BYTES,
transmute::TRANSMUTE_PTR_TO_PTR,
transmute::TRANSMUTE_PTR_TO_REF,
transmute::TRANSMUTE_UNDEFINED_REPR,
transmute::UNSOUND_COLLECTION_TRANSMUTE,
transmute::USELESS_TRANSMUTE,
transmute::WRONG_TRANSMUTE,
transmuting_null::TRANSMUTING_NULL,
try_err::TRY_ERR,
types::BORROWED_BOX,
types::BOX_COLLECTION,
types::LINKEDLIST,
types::OPTION_OPTION,
types::RC_BUFFER,
types::RC_MUTEX,
types::REDUNDANT_ALLOCATION,
types::TYPE_COMPLEXITY,
types::VEC_BOX,
undocumented_unsafe_blocks::UNDOCUMENTED_UNSAFE_BLOCKS,
undropped_manually_drops::UNDROPPED_MANUALLY_DROPS,
unicode::INVISIBLE_CHARACTERS,
unicode::NON_ASCII_LITERAL,
unicode::UNICODE_NOT_NFC,
uninit_vec::UNINIT_VEC,
unit_hash::UNIT_HASH,
unit_return_expecting_ord::UNIT_RETURN_EXPECTING_ORD,
unit_types::LET_UNIT_VALUE,
unit_types::UNIT_ARG,
unit_types::UNIT_CMP,
unnamed_address::FN_ADDRESS_COMPARISONS,
unnamed_address::VTABLE_ADDRESS_COMPARISONS,
unnecessary_self_imports::UNNECESSARY_SELF_IMPORTS,
unnecessary_sort_by::UNNECESSARY_SORT_BY,
unnecessary_wraps::UNNECESSARY_WRAPS,
unnested_or_patterns::UNNESTED_OR_PATTERNS,
unsafe_removed_from_name::UNSAFE_REMOVED_FROM_NAME,
unused_async::UNUSED_ASYNC,
unused_io_amount::UNUSED_IO_AMOUNT,
unused_self::UNUSED_SELF,
unused_unit::UNUSED_UNIT,
unwrap::PANICKING_UNWRAP,
unwrap::UNNECESSARY_UNWRAP,
unwrap_in_result::UNWRAP_IN_RESULT,
upper_case_acronyms::UPPER_CASE_ACRONYMS,
use_self::USE_SELF,
useless_conversion::USELESS_CONVERSION,
vec::USELESS_VEC,
vec_init_then_push::VEC_INIT_THEN_PUSH,
vec_resize_to_zero::VEC_RESIZE_TO_ZERO,
verbose_file_reads::VERBOSE_FILE_READS,
wildcard_imports::ENUM_GLOB_USE,
wildcard_imports::WILDCARD_IMPORTS,
write::PRINTLN_EMPTY_STRING,
write::PRINT_LITERAL,
write::PRINT_STDERR,
write::PRINT_STDOUT,
write::PRINT_WITH_NEWLINE,
write::USE | collapsible_if::COLLAPSIBLE_ELSE_IF,
collapsible_if::COLLAPSIBLE_IF,
collapsible_match::COLLAPSIBLE_MATCH, | random_line_split |
main.rs | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use std::rc::Rc;
use std::cell::Cell;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::u32;
extern crate chariot_drs as lib;
use lib::DrsFile as Archive;
extern crate number_prefix;
use number_prefix::{binary_prefix, Prefixed, Standalone};
extern crate gdk;
extern crate gtk;
use gtk::prelude::Inhibit;
use gtk::{Builder, Button, Entry as EntryBox, FileChooserDialog, ListStore, TreeView,
TreeViewColumn, Type, Window, WindowType};
use gtk::{BuilderExt, ButtonExt, CellLayoutExt, DialogExt, EntryExt, FileChooserExt, GtkWindowExt,
ListStoreExt, ListStoreExtManual, TreeModelExt, TreeSelectionExt, TreeSortableExtManual,
TreeViewColumnExt, TreeViewExt, WidgetExt};
#[derive(Debug, PartialEq, Eq)]
enum Column {
ID,
Type,
Size,
Offset,
}
impl Into<u32> for Column {
fn into(self) -> u32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
impl Into<i32> for Column {
fn into(self) -> i32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
macro_rules! add_column {
($tree:ident, $title:expr, $id:expr) => {{
let column = TreeViewColumn::new();
let renderer = gtk::CellRendererText::new();
column.set_title($title);
column.set_resizable(true);
column.pack_start(&renderer, true);
column.add_attribute(&renderer, "text", $id);
$tree.append_column(&column);
}}
}
macro_rules! add_sort_func {
($tree:ident, $store:ident, $convert:ident, $col:expr) => {{
let store_clone = $store.clone();
$store.set_sort_func(gtk::SortColumn::Index($col.into()), move |_this, a, b| {
let string_at_iter = |iter| store_clone.get_value(iter, $col.into())
.get::<String>()
.unwrap();
let a = $convert(string_at_iter(a));
let b = $convert(string_at_iter(b));
a.cmp(&b)
});
$tree.get_column($col.into()).unwrap().set_sort_column_id($col.into());
}}
}
fn setup_tree(tree: TreeView, extract_button: Button) {
let sel = tree.get_selection();
let model = match tree.get_model() {
Some(m) => m,
_ => return,
};
sel.connect_changed(move |this| {
// TODO: Do all of this when an archive is opened, too.
let selected_count = this.count_selected_rows();
let store_len = model.iter_n_children(None);
let count_str = if selected_count == 0 || selected_count == store_len {
"all".into()
} else {
format!("({})", selected_count)
};
extract_button.set_label(&format!("Extract {}", count_str))
});
}
fn select_dir_dialog(
title: &str,
window_type: gtk::WindowType,
action: gtk::FileChooserAction,
) -> Option<PathBuf> {
let dialog = FileChooserDialog::new(Some(title), Some(&Window::new(window_type)), action);
dialog.add_button("_Cancel", gtk::ResponseType::Cancel.into());
match action {
gtk::FileChooserAction::Open => {
dialog.add_button("_Open", gtk::ResponseType::Ok.into());
}
gtk::FileChooserAction::SelectFolder => {
dialog.add_button("_Select", gtk::ResponseType::Ok.into());
}
_ => (),
};
let path = if dialog.run() == gtk::ResponseType::Ok.into() {
dialog.get_filename()
} else {
None
};
dialog.destroy();
path
}
fn enable_archive_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
archive_button: Button,
archive_entrybox: EntryBox,
ei_store: ListStore,
) {
archive_button.connect_clicked(move |_this| {
let archive_path = match select_dir_dialog(
"Select a DRS archive",
WindowType::Popup,
gtk::FileChooserAction::Open,
) {
Some(p) => p,
_ => return,
};
let archive_path = match archive_path.to_str() {
Some(p) => p,
_ => return,
};
let arch = match Archive::read_from_file(archive_path) {
Ok(a) => a,
_ => return,
};
ei_store.clear();
extract_button.set_sensitive(true);
archive_entrybox.set_text(archive_path);
for table in arch.tables.iter() {
for entry in table.entries.iter() {
let float_len = entry.file_size as f32;
let formatted_size = match binary_prefix(float_len) {
Standalone(bytes) => format!("{} B", bytes),
Prefixed(prefix, n) => format!("{:.2} {}B", n, prefix),
};
ei_store.insert_with_values(
None,
&[
Column::ID.into(),
Column::Type.into(),
Column::Size.into(),
Column::Offset.into(),
],
&[
&entry.file_id.to_string(),
&table.header.file_extension(),
&formatted_size,
&format!("{:#X}", entry.file_offset),
],
);
}
}
archive.replace(Some(arch));
});
}
fn enable_extract_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
entryinfo_tree: TreeView,
) {
extract_button.connect_clicked(move |_this| {
if let Some(dest_dir_path) = select_dir_dialog(
"Select a directory to extract to",
WindowType::Toplevel,
gtk::FileChooserAction::SelectFolder,
) {
let arch = match archive.take() {
Some(a) => a,
_ => return,
};
let sel = entryinfo_tree.get_selection();
let (mut sel_paths, model) = sel.get_selected_rows();
if sel_paths.len() == 0 {
sel.select_all();
let (s, _) = sel.get_selected_rows();
sel_paths = s;
sel.unselect_all();
}
for sel_path in sel_paths {
let iter = match model.get_iter(&sel_path) {
Some(i) => i,
_ => continue,
};
let val = model.get_value(&iter, 0);
let name = val.get::<String>().expect(&format!(
"Unable to convert gtk::Type::String {:?} to a Rust String",
val
));
for table in arch.tables.iter() {
let data = match table.find_file_contents(name.parse::<u32>().unwrap()) {
Some(d) => d,
_ => continue,
};
let mut output_filepath = dest_dir_path.clone();
output_filepath.push(name.replace("\\", "/"));
output_filepath.set_extension(table.header.file_extension());
let parent = output_filepath.parent().expect(&format!(
"Unable to determine parent path of {:?}",
&output_filepath
));
fs::create_dir_all(&parent)
.expect("Failed to create necessary parent directories");
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(&output_filepath)
.expect(&format!(
"Failed to open file {:?} for writing",
output_filepath
));
f.write(data).expect("Failed to write data");
}
}
archive.replace(Some(arch));
}
});
}
fn enable_sortable_cols(ei_store: &ListStore, entryinfo_tree: &TreeView) {
// Values in the table are strings. They should be converted back
// to their original type to make the sort function work properly
fn convert_name(s: String) -> u32 {
s.parse::<u32>().unwrap()
}
fn convert_type(s: String) -> String {
s
}
fn convert_size(s: String) -> u32 {
let v = s.split(' ').collect::<Vec<&str>>();
let exp = match v.get(1) {
Some(&"B") => 0,
Some(&"KiB") => 1,
Some(&"MiB") => 2,
Some(&"GiB") => 3,
_ => panic!("Unable to convert size: `{}`", s),
};
(1024u32.pow(exp) as f32 * v[0].parse::<f32>().unwrap()) as u32
}
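// Example: "1.50 KiB" splits into ["1.50", "KiB"], so exp = 1 and the result is
// (1024^1 as f32 * 1.50) as u32 = 1536 bytes.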
fn convert_offset(s: String) -> u32 {
u32::from_str_radix(&s[2..], 16).unwrap()
}
add_sort_func!(entryinfo_tree, ei_store, convert_name, Column::ID);
add_sort_func!(entryinfo_tree, ei_store, convert_type, Column::Type);
add_sort_func!(entryinfo_tree, ei_store, convert_size, Column::Size);
add_sort_func!(entryinfo_tree, ei_store, convert_offset, Column::Offset);
}
fn main() {
gtk::init().unwrap();
let builder = Builder::new();
builder
.add_from_string(include_str!("../ui.glade"))
.unwrap();
let window: Window = builder.get_object("main_window").unwrap();
let archive_entrybox: EntryBox = builder.get_object("archive_file_entry").unwrap();
let archive_button: Button = builder.get_object("archive_file_button").unwrap();
let extract_button: Button = builder.get_object("extract_button").unwrap();
extract_button.set_sensitive(false);
let entryinfo_tree = {
let t: TreeView = builder.get_object("entryinfo_tree").unwrap();
let sel = t.get_selection();
sel.set_mode(gtk::SelectionMode::Multiple);
t
};
window.set_title("DRS Studio");
window.set_position(gtk::WindowPosition::Center);
window.get_preferred_width();
window.set_default_size(1440, 900);
let ei_store = ListStore::new(&[Type::String, Type::String, Type::String, Type::String]);
entryinfo_tree.set_model(Some(&ei_store));
entryinfo_tree.set_headers_visible(true);
add_column!(entryinfo_tree, "ID", Column::ID.into());
add_column!(entryinfo_tree, "Type", Column::Type.into());
add_column!(entryinfo_tree, "Size", Column::Size.into());
add_column!(entryinfo_tree, "Offset", Column::Offset.into());
setup_tree(entryinfo_tree.clone(), extract_button.clone());
let archive: Rc<Cell<Option<Archive>>> = Rc::new(Cell::new(None));
enable_sortable_cols(&ei_store, &entryinfo_tree);
enable_archive_button(
archive.clone(),
extract_button.clone(),
archive_button.clone(),
archive_entrybox.clone(),
ei_store,
);
enable_extract_button(archive.clone(), extract_button.clone(), entryinfo_tree);
window.connect_delete_event(|_, _| {
gtk::main_quit();
Inhibit(false)
});
window.show_all();
gtk::main();
}
| select_dir_dialog | identifier_name |
main.rs | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use std::rc::Rc;
use std::cell::Cell;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::u32;
extern crate chariot_drs as lib;
use lib::DrsFile as Archive;
extern crate number_prefix;
use number_prefix::{binary_prefix, Prefixed, Standalone};
extern crate gdk;
extern crate gtk;
use gtk::prelude::Inhibit;
use gtk::{Builder, Button, Entry as EntryBox, FileChooserDialog, ListStore, TreeView,
TreeViewColumn, Type, Window, WindowType};
use gtk::{BuilderExt, ButtonExt, CellLayoutExt, DialogExt, EntryExt, FileChooserExt, GtkWindowExt,
ListStoreExt, ListStoreExtManual, TreeModelExt, TreeSelectionExt, TreeSortableExtManual,
TreeViewColumnExt, TreeViewExt, WidgetExt};
#[derive(Debug, PartialEq, Eq)]
enum Column {
ID,
Type,
Size,
Offset,
}
impl Into<u32> for Column {
fn into(self) -> u32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
impl Into<i32> for Column {
fn into(self) -> i32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
macro_rules! add_column {
($tree:ident, $title:expr, $id:expr) => {{
let column = TreeViewColumn::new();
let renderer = gtk::CellRendererText::new();
column.set_title($title);
column.set_resizable(true);
column.pack_start(&renderer, true);
column.add_attribute(&renderer, "text", $id);
$tree.append_column(&column);
}}
}
macro_rules! add_sort_func {
($tree:ident, $store:ident, $convert:ident, $col:expr) => {{
let store_clone = $store.clone();
$store.set_sort_func(gtk::SortColumn::Index($col.into()), move |_this, a, b| {
let string_at_iter = |iter| store_clone.get_value(iter, $col.into())
.get::<String>()
.unwrap();
let a = $convert(string_at_iter(a));
let b = $convert(string_at_iter(b));
a.cmp(&b)
});
$tree.get_column($col.into()).unwrap().set_sort_column_id($col.into());
}}
}
fn setup_tree(tree: TreeView, extract_button: Button) {
let sel = tree.get_selection();
let model = match tree.get_model() {
Some(m) => m,
_ => return,
};
sel.connect_changed(move |this| {
// TODO: Do all of this when an archive is opened, too.
let selected_count = this.count_selected_rows();
let store_len = model.iter_n_children(None);
let count_str = if selected_count == 0 || selected_count == store_len {
"all".into()
} else {
format!("({})", selected_count)
};
extract_button.set_label(&format!("Extract {}", count_str))
});
}
fn select_dir_dialog(
title: &str,
window_type: gtk::WindowType,
action: gtk::FileChooserAction,
) -> Option<PathBuf> {
let dialog = FileChooserDialog::new(Some(title), Some(&Window::new(window_type)), action);
dialog.add_button("_Cancel", gtk::ResponseType::Cancel.into());
match action {
gtk::FileChooserAction::Open => {
dialog.add_button("_Open", gtk::ResponseType::Ok.into());
}
gtk::FileChooserAction::SelectFolder => {
dialog.add_button("_Select", gtk::ResponseType::Ok.into());
}
_ => (),
};
let path = if dialog.run() == gtk::ResponseType::Ok.into() {
dialog.get_filename()
} else {
None
};
dialog.destroy();
path
}
fn enable_archive_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
archive_button: Button,
archive_entrybox: EntryBox,
ei_store: ListStore,
) {
archive_button.connect_clicked(move |_this| {
let archive_path = match select_dir_dialog(
"Select a DRS archive",
WindowType::Popup,
gtk::FileChooserAction::Open,
) {
Some(p) => p,
_ => return,
};
let archive_path = match archive_path.to_str() {
Some(p) => p,
_ => return,
};
let arch = match Archive::read_from_file(archive_path) {
Ok(a) => a,
_ => return,
};
ei_store.clear();
extract_button.set_sensitive(true);
archive_entrybox.set_text(archive_path);
for table in arch.tables.iter() {
for entry in table.entries.iter() {
let float_len = entry.file_size as f32;
let formatted_size = match binary_prefix(float_len) {
Standalone(bytes) => format!("{} B", bytes),
Prefixed(prefix, n) => format!("{:.2} {}B", n, prefix),
};
ei_store.insert_with_values(
None,
&[
Column::ID.into(),
Column::Type.into(),
Column::Size.into(),
Column::Offset.into(),
],
&[
&entry.file_id.to_string(),
&table.header.file_extension(),
&formatted_size,
&format!("{:#X}", entry.file_offset),
],
);
}
}
archive.replace(Some(arch));
});
}
fn enable_extract_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
entryinfo_tree: TreeView,
) {
extract_button.connect_clicked(move |_this| {
if let Some(dest_dir_path) = select_dir_dialog(
"Select a directory to extract to",
WindowType::Toplevel,
gtk::FileChooserAction::SelectFolder,
) {
let arch = match archive.take() {
Some(a) => a,
_ => return,
};
let sel = entryinfo_tree.get_selection();
let (mut sel_paths, model) = sel.get_selected_rows();
if sel_paths.len() == 0 {
sel.select_all();
let (s, _) = sel.get_selected_rows();
sel_paths = s;
sel.unselect_all();
}
for sel_path in sel_paths {
let iter = match model.get_iter(&sel_path) {
Some(i) => i,
_ => continue,
};
let val = model.get_value(&iter, 0);
let name = val.get::<String>().expect(&format!(
"Unable to convert gtk::Type::String {:?} to a Rust String",
val
));
for table in arch.tables.iter() {
let data = match table.find_file_contents(name.parse::<u32>().unwrap()) {
Some(d) => d,
_ => continue,
};
let mut output_filepath = dest_dir_path.clone();
output_filepath.push(name.replace("\\", "/"));
output_filepath.set_extension(table.header.file_extension());
let parent = output_filepath.parent().expect(&format!(
"Unable to determine parent path of {:?}",
&output_filepath
));
fs::create_dir_all(&parent)
.expect("Failed to create necessary parent directories");
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(&output_filepath)
.expect(&format!(
"Failed to open file {:?} for writing",
output_filepath
));
f.write(data).expect("Failed to write data");
}
}
archive.replace(Some(arch));
}
});
}
fn enable_sortable_cols(ei_store: &ListStore, entryinfo_tree: &TreeView) {
// Values in the table are strings. They should be converted back
// to their original type to make the sort function work properly
fn convert_name(s: String) -> u32 {
s.parse::<u32>().unwrap()
}
fn convert_type(s: String) -> String {
s
}
fn convert_size(s: String) -> u32 {
let v = s.split(' ').collect::<Vec<&str>>();
let exp = match v.get(1) {
Some(&"B") => 0,
Some(&"KiB") => 1,
Some(&"MiB") => 2,
Some(&"GiB") => 3,
_ => panic!("Unable to convert size: `{}`", s),
};
(1024u32.pow(exp) as f32 * v[0].parse::<f32>().unwrap()) as u32
}
fn convert_offset(s: String) -> u32 {
u32::from_str_radix(&s[2..], 16).unwrap()
}
add_sort_func!(entryinfo_tree, ei_store, convert_name, Column::ID);
add_sort_func!(entryinfo_tree, ei_store, convert_type, Column::Type);
add_sort_func!(entryinfo_tree, ei_store, convert_size, Column::Size);
add_sort_func!(entryinfo_tree, ei_store, convert_offset, Column::Offset);
}
fn main() {
gtk::init().unwrap();
let builder = Builder::new();
builder
.add_from_string(include_str!("../ui.glade"))
.unwrap();
let window: Window = builder.get_object("main_window").unwrap();
let archive_entrybox: EntryBox = builder.get_object("archive_file_entry").unwrap();
let archive_button: Button = builder.get_object("archive_file_button").unwrap();
let extract_button: Button = builder.get_object("extract_button").unwrap();
extract_button.set_sensitive(false);
let entryinfo_tree = {
let t: TreeView = builder.get_object("entryinfo_tree").unwrap();
let sel = t.get_selection();
sel.set_mode(gtk::SelectionMode::Multiple);
t
};
window.set_title("DRS Studio");
window.set_position(gtk::WindowPosition::Center);
window.get_preferred_width();
window.set_default_size(1440, 900);
let ei_store = ListStore::new(&[Type::String, Type::String, Type::String, Type::String]);
entryinfo_tree.set_model(Some(&ei_store));
entryinfo_tree.set_headers_visible(true);
add_column!(entryinfo_tree, "ID", Column::ID.into());
add_column!(entryinfo_tree, "Type", Column::Type.into());
add_column!(entryinfo_tree, "Size", Column::Size.into());
add_column!(entryinfo_tree, "Offset", Column::Offset.into());
setup_tree(entryinfo_tree.clone(), extract_button.clone());
let archive: Rc<Cell<Option<Archive>>> = Rc::new(Cell::new(None));
enable_sortable_cols(&ei_store, &entryinfo_tree);
enable_archive_button(
archive.clone(),
extract_button.clone(),
archive_button.clone(),
archive_entrybox.clone(),
ei_store,
);
enable_extract_button(archive.clone(), extract_button.clone(), entryinfo_tree);
window.connect_delete_event(|_, _| {
gtk::main_quit();
Inhibit(false)
});
window.show_all();
gtk::main();
} | random_line_split |
|
main.rs | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use std::rc::Rc;
use std::cell::Cell;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::u32;
extern crate chariot_drs as lib;
use lib::DrsFile as Archive;
extern crate number_prefix;
use number_prefix::{binary_prefix, Prefixed, Standalone};
extern crate gdk;
extern crate gtk;
use gtk::prelude::Inhibit;
use gtk::{Builder, Button, Entry as EntryBox, FileChooserDialog, ListStore, TreeView,
TreeViewColumn, Type, Window, WindowType};
use gtk::{BuilderExt, ButtonExt, CellLayoutExt, DialogExt, EntryExt, FileChooserExt, GtkWindowExt,
ListStoreExt, ListStoreExtManual, TreeModelExt, TreeSelectionExt, TreeSortableExtManual,
TreeViewColumnExt, TreeViewExt, WidgetExt};
#[derive(Debug, PartialEq, Eq)]
enum Column {
ID,
Type,
Size,
Offset,
}
impl Into<u32> for Column {
fn into(self) -> u32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
impl Into<i32> for Column {
fn into(self) -> i32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
macro_rules! add_column {
($tree:ident, $title:expr, $id:expr) => {{
let column = TreeViewColumn::new();
let renderer = gtk::CellRendererText::new();
column.set_title($title);
column.set_resizable(true);
column.pack_start(&renderer, true);
column.add_attribute(&renderer, "text", $id);
$tree.append_column(&column);
}}
}
macro_rules! add_sort_func {
($tree:ident, $store:ident, $convert:ident, $col:expr) => {{
let store_clone = $store.clone();
$store.set_sort_func(gtk::SortColumn::Index($col.into()), move |_this, a, b| {
let string_at_iter = |iter| store_clone.get_value(iter, $col.into())
.get::<String>()
.unwrap();
let a = $convert(string_at_iter(a));
let b = $convert(string_at_iter(b));
a.cmp(&b)
});
$tree.get_column($col.into()).unwrap().set_sort_column_id($col.into());
}}
}
fn setup_tree(tree: TreeView, extract_button: Button) {
let sel = tree.get_selection();
let model = match tree.get_model() {
Some(m) => m,
_ => return,
};
sel.connect_changed(move |this| {
// TODO: Do all of this when an archive is opened, too.
let selected_count = this.count_selected_rows();
let store_len = model.iter_n_children(None);
let count_str = if selected_count == 0 || selected_count == store_len {
"all".into()
} else {
format!("({})", selected_count)
};
extract_button.set_label(&format!("Extract {}", count_str))
});
}
fn select_dir_dialog(
title: &str,
window_type: gtk::WindowType,
action: gtk::FileChooserAction,
) -> Option<PathBuf> {
let dialog = FileChooserDialog::new(Some(title), Some(&Window::new(window_type)), action);
dialog.add_button("_Cancel", gtk::ResponseType::Cancel.into());
match action {
gtk::FileChooserAction::Open => {
dialog.add_button("_Open", gtk::ResponseType::Ok.into());
}
gtk::FileChooserAction::SelectFolder => {
dialog.add_button("_Select", gtk::ResponseType::Ok.into());
}
_ => (),
};
let path = if dialog.run() == gtk::ResponseType::Ok.into() {
dialog.get_filename()
} else {
None
};
dialog.destroy();
path
}
fn enable_archive_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
archive_button: Button,
archive_entrybox: EntryBox,
ei_store: ListStore,
) {
archive_button.connect_clicked(move |_this| {
let archive_path = match select_dir_dialog(
"Select a DRS archive",
WindowType::Popup,
gtk::FileChooserAction::Open,
) {
Some(p) => p,
_ => return,
};
let archive_path = match archive_path.to_str() {
Some(p) => p,
_ => return,
};
let arch = match Archive::read_from_file(archive_path) {
Ok(a) => a,
_ => return,
};
ei_store.clear();
extract_button.set_sensitive(true);
archive_entrybox.set_text(archive_path);
for table in arch.tables.iter() {
for entry in table.entries.iter() {
let float_len = entry.file_size as f32;
let formatted_size = match binary_prefix(float_len) {
Standalone(bytes) => format!("{} B", bytes),
Prefixed(prefix, n) => format!("{:.2} {}B", n, prefix),
};
ei_store.insert_with_values(
None,
&[
Column::ID.into(),
Column::Type.into(),
Column::Size.into(),
Column::Offset.into(),
],
&[
&entry.file_id.to_string(),
&table.header.file_extension(),
&formatted_size,
&format!("{:#X}", entry.file_offset),
],
);
}
}
archive.replace(Some(arch));
});
}
fn enable_extract_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
entryinfo_tree: TreeView,
) {
extract_button.connect_clicked(move |_this| {
if let Some(dest_dir_path) = select_dir_dialog(
"Select a directory to extract to",
WindowType::Toplevel,
gtk::FileChooserAction::SelectFolder,
) {
let arch = match archive.take() {
Some(a) => a,
_ => return,
};
let sel = entryinfo_tree.get_selection();
let (mut sel_paths, model) = sel.get_selected_rows();
if sel_paths.len() == 0 {
sel.select_all();
let (s, _) = sel.get_selected_rows();
sel_paths = s;
sel.unselect_all();
}
for sel_path in sel_paths {
let iter = match model.get_iter(&sel_path) {
Some(i) => i,
_ => continue,
};
let val = model.get_value(&iter, 0);
let name = val.get::<String>().expect(&format!(
"Unable to convert gtk::Type::String {:?} to a Rust String",
val
));
for table in arch.tables.iter() {
let data = match table.find_file_contents(name.parse::<u32>().unwrap()) {
Some(d) => d,
_ => continue,
};
let mut output_filepath = dest_dir_path.clone();
output_filepath.push(name.replace("\\", "/"));
output_filepath.set_extension(table.header.file_extension());
let parent = output_filepath.parent().expect(&format!(
"Unable to determine parent path of {:?}",
&output_filepath
));
fs::create_dir_all(&parent)
.expect("Failed to create necessary parent directories");
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(&output_filepath)
.expect(&format!(
"Failed to open file {:?} for writing",
output_filepath
));
f.write(data).expect("Failed to write data");
}
}
archive.replace(Some(arch));
}
});
}
fn enable_sortable_cols(ei_store: &ListStore, entryinfo_tree: &TreeView) {
// Values in the table are strings. They should be converted back
// to their original type to make the sort function work properly
fn convert_name(s: String) -> u32 {
s.parse::<u32>().unwrap()
}
fn convert_type(s: String) -> String {
    s
}
fn convert_size(s: String) -> u32 {
let v = s.split(' ').collect::<Vec<&str>>();
let exp = match v.get(1) {
Some(&"B") => 0,
Some(&"KiB") => 1,
Some(&"MiB") => 2,
Some(&"GiB") => 3,
_ => panic!("Unable to convert size: `{}`", s),
};
(1024u32.pow(exp) as f32 * v[0].parse::<f32>().unwrap()) as u32
}
fn convert_offset(s: String) -> u32 {
u32::from_str_radix(&s[2..], 16).unwrap()
}
add_sort_func!(entryinfo_tree, ei_store, convert_name, Column::ID);
add_sort_func!(entryinfo_tree, ei_store, convert_type, Column::Type);
add_sort_func!(entryinfo_tree, ei_store, convert_size, Column::Size);
add_sort_func!(entryinfo_tree, ei_store, convert_offset, Column::Offset);
}
fn main() {
gtk::init().unwrap();
let builder = Builder::new();
builder
.add_from_string(include_str!("../ui.glade"))
.unwrap();
let window: Window = builder.get_object("main_window").unwrap();
let archive_entrybox: EntryBox = builder.get_object("archive_file_entry").unwrap();
let archive_button: Button = builder.get_object("archive_file_button").unwrap();
let extract_button: Button = builder.get_object("extract_button").unwrap();
extract_button.set_sensitive(false);
let entryinfo_tree = {
let t: TreeView = builder.get_object("entryinfo_tree").unwrap();
let sel = t.get_selection();
sel.set_mode(gtk::SelectionMode::Multiple);
t
};
window.set_title("DRS Studio");
window.set_position(gtk::WindowPosition::Center);
window.get_preferred_width();
window.set_default_size(1440, 900);
let ei_store = ListStore::new(&[Type::String, Type::String, Type::String, Type::String]);
entryinfo_tree.set_model(Some(&ei_store));
entryinfo_tree.set_headers_visible(true);
add_column!(entryinfo_tree, "ID", Column::ID.into());
add_column!(entryinfo_tree, "Type", Column::Type.into());
add_column!(entryinfo_tree, "Size", Column::Size.into());
add_column!(entryinfo_tree, "Offset", Column::Offset.into());
setup_tree(entryinfo_tree.clone(), extract_button.clone());
let archive: Rc<Cell<Option<Archive>>> = Rc::new(Cell::new(None));
enable_sortable_cols(&ei_store, &entryinfo_tree);
enable_archive_button(
archive.clone(),
extract_button.clone(),
archive_button.clone(),
archive_entrybox.clone(),
ei_store,
);
enable_extract_button(archive.clone(), extract_button.clone(), entryinfo_tree);
window.connect_delete_event(|_, _| {
gtk::main_quit();
Inhibit(false)
});
window.show_all();
gtk::main();
} | { s } | identifier_body
block.rs | //! Implementations of cryptographic attacks against block ciphers.
use utils::data::Data;
use utils::metrics;
use victims::block::{EcbOrCbc, EcbWithSuffix, EcbWithAffixes, EcbUserProfile, CbcCookie};
/// Determine whether a block cipher is using ECB or CBC mode.
///
/// Given a black box which encrypts (padded) user data under ECB mode or CBC mode at random,
/// detect which mode it is using.
pub fn is_ecb_mode(ecb_cbc_box: &mut EcbOrCbc) -> bool {
// Find an upper bound on the block size of the cipher by encrypting some empty data.
let block_size = ecb_cbc_box.encrypt(&Data::new()).len();
// Provide some input data which will definitely result in repeated blocks under ECB mode.
let input = Data::from_bytes(vec![0; 10 * block_size]);
let encrypted = ecb_cbc_box.encrypt(&input);
metrics::has_repeated_blocks(&encrypted, block_size)
}
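// Standalone sketch of the detection signal used above. The crate's real check lives in
// `utils::metrics::has_repeated_blocks`; the helper name and body here are an illustrative
// re-derivation, not the original implementation: under ECB, equal plaintext blocks always
// encrypt to equal ciphertext blocks, so any duplicated ciphertext block betrays ECB.
#[allow(dead_code)]
fn has_repeated_blocks_sketch(bytes: &[u8], block_size: usize) -> bool {
    let mut seen = std::collections::HashSet::new();
    // `insert` returns `false` when the chunk was already present, i.e. on a repeat.
    bytes
        .chunks(block_size)
        .filter(|chunk| chunk.len() == block_size)
        .any(|chunk| !seen.insert(chunk.to_vec()))
}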
/// Decrypt an unknown suffix encrypted under ECB mode.
///
/// Given a black box which adds an unknown suffix to input data before encrypting under ECB mode
/// with the given block size, determine the suffix.
pub fn find_ecb_suffix(ecb_suffix_box: &EcbWithSuffix) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_suffix_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_suffix_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = block_size - 1 - (suffix.len() % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = padding.clone();
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = padding.len() + suffix.len() + 1 - block_size;
let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
for byte in 0..256 {
let mut test_block = partial_block.to_vec();
test_block.push(byte as u8);
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[..block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Find the length of an unknown prefix which is appended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
// Find the block in which the prefix ends, by finding the first block which is different upon
// inserting a null byte.
let empty = ecb_affixes_box.encrypt(&Data::new());
let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]));
let mut prefix_block = 0;
for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
if byte1 != byte2 {
prefix_block = ix / block_size;
break;
}
}
// Now find the length of the prefix modulo the block size, by finding the smallest number of
// null bytes we need to provide as input in order to produce repeated blocks.
let mut prefix_len = block_size * prefix_block;
for ix in 0..block_size {
let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
let output = ecb_affixes_box.encrypt(&repeats);
if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
prefix_len += block_size - ix;
break;
}
}
prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_affixes_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// First, find the length of the prefix, which is currently unknown.
let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = vec![0; prefix_len];
padded_known.extend_from_slice(&padding);
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
let extra_padding = block_size - (prefix_len % block_size);
let output_start = prefix_len + extra_padding;
for byte in 0..256 {
let mut test_block = vec![0; block_size - (prefix_len % block_size)];
test_block.extend_from_slice(partial_block);
test_block.push(byte as u8);
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[output_start..output_start + block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
// Paste together non-admin tokens in order to create an admin token. This works by first
// asking for the following three tokens:
//
// 0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
// [email protected] --> email=email@foo. com&uid=10&role= user
// noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
// [email protected] --> email=useless@ma deup.com&uid=10& role=user
//
// If we then take the first two blocks of the first token, the second block of the second
// token and the final block of the third token, and paste them together, we will end up with
// the following token:
//
// [email protected]&uid=10&role=admin&uid=10&rolrole=user
let token1 = ecb_profile_box.make_token("[email protected]");
let token2 = ecb_profile_box.make_token("noone@fakeadmin");
let token3 = ecb_profile_box.make_token("[email protected]");
let mut new_token_bytes = Vec::with_capacity(4 * 16);
new_token_bytes.extend_from_slice(&token1.bytes()[..32]);
new_token_bytes.extend_from_slice(&token2.bytes()[16..32]);
new_token_bytes.extend_from_slice(&token3.bytes()[32..]);
Data::from_bytes(new_token_bytes)
}
/// Create a token which the `CbcCookie` decodes into a cookie with admin privileges.
///
/// Given - a black box which, given an arbitrary string, escapes the metacharacters ';' and '='
/// from the input, then produces a cookie in the form
/// `comment1=cooking%20MCs;userdata=<user-data>;comment2=%20like%20a%20pound%20of%20bacon` and
/// encrypts the result under CBC mode.
///
/// This utilises a CBC bitflipping attack to create an admin token.
pub fn craft_cbc_admin_token(cbc_cookie_box: &CbcCookie) -> Data {
// First, provide the user data "aaaaaaaaaaaaaaaa:admin<true:aa<a" and get the
// resulting token as raw bytes.
let token = cbc_cookie_box.make_token("aaaaaaaaaaaaaaaa:admin<true:aa<a");
let mut bytes = token.bytes().to_vec();
// Now, by flipping some of the bits in this token, we can obtain an admin token. Specifically,
// in CBC mode, flipping a bit in one ciphertext block scrambles the block it occurs in, and
// reproduces the exact same edit in the following block after decryption. This means that by
// choosing the bits we flip to occur in the block immediately before the one containing
// ':admin<true:' we can edit ':' into ';' and '<' into '='. This requires flipping the final
// bit of each of bytes 32, 38, 43 and 46.
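// Concretely: b':' is 0x3A and b';' is 0x3B; b'<' is 0x3C and b'=' is 0x3D, so each edit
// is a single low-bit flip. The prefix "comment1=cooking%20MCs;userdata=" is exactly 32
// bytes, so the sixteen 'a' filler bytes own ciphertext block 2 (bytes 32..48): flipping
// bits there sacrifices that block to scrambling and surfaces ";admin=true;aa=a" in block 3.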
for position in &[32, 38, 43, 46] {
bytes[*position] ^= 1;
}
Data::from_bytes(bytes)
} | craft_cbc_admin_token | identifier_name |